import argparse
import time
import sys
import subprocess
from datetime import datetime
from .result_analyzer import analyze
from typing import List
from ..utils import dump_output, get_output_dir, get_output_json, add_path, REPO_PATH
with add_path(REPO_PATH):
from utils.cuda_utils import DEFAULT_CUDA_VERSION, CUDA_VERSION_MAP
BM_NAME = "cuda-compare"
def install_nightlies(dryrun):
default_cuda_version = CUDA_VERSION_MAP[DEFAULT_CUDA_VERSION]["pytorch_url"]
install_cmd = ["pip", "install", "--pre", "torch", "torchvision", "torchaudio",
"-f", f"https://download.pytorch.org/whl/nightly/{default_cuda_version}/torch_nightly.html"]
print(f"Installing pytorch packages: {install_cmd}")
if not dryrun:
subprocess.check_call(install_cmd, cwd=REPO_PATH)
def install_torchbench(dryrun):
install_cmd = [sys.executable, "install.py"]
print(f"Installing torchbench: {install_cmd}")
if not dryrun:
subprocess.check_call(install_cmd, cwd=REPO_PATH)
def run_benchmark(output_path, config, dryrun=False):
benchmark_script = REPO_PATH.joinpath(".github", "scripts", "run-config.py")
benchmark_cmd = [sys.executable, str(benchmark_script), "-c", config, "-b", str(REPO_PATH), "-o", str(output_path)]
print(f"Running benchmark: {benchmark_cmd}")
if not dryrun:
subprocess.check_call(benchmark_cmd, cwd=REPO_PATH)
def dump_result_to_json(metrics):
result = get_output_json(BM_NAME, metrics)
dump_output(BM_NAME, result)
def get_timestamp():
return datetime.fromtimestamp(time.time()).strftime("%Y%m%d%H%M%S")
def get_work_dir(output_dir):
work_dir = output_dir.joinpath(f"run-{get_timestamp()}")
work_dir.mkdir(exist_ok=True, parents=True)
return work_dir
def parse_args(args):
parser = argparse.ArgumentParser()
parser.add_argument("--dryrun", action='store_true', help="Only generate the test scripts. Do not run the benchmark.")
parser.add_argument("--config", "-c", type=str, default="devinfra/cuda-113-116-compare", help="Specify the config file")
parser.add_argument("--analyze", type=str, help="Only analyze the result of the specified work directory.")
args = parser.parse_args(args)
return args
def run(args: List[str]):
args = parse_args(args)
if args.analyze:
metrics = analyze(args.analyze)
dump_result_to_json(metrics)
return
work_dir = get_work_dir(get_output_dir(BM_NAME))
install_nightlies(args.dryrun)
install_torchbench(args.dryrun)
run_benchmark(work_dir, args.config, dryrun=args.dryrun)
if not args.dryrun:
metrics = analyze(work_dir)
dump_result_to_json(metrics)
|
from pathlib import Path
import json
import re
def get_run(test_dir):
run = {}
testdir_name = test_dir.name
regex = "cuda-(.*)-(.*)"
g = re.match(regex, testdir_name).groups()
run["test"] = g[0]
run["cuda_version"] = g[1]
eager_json = test_dir.joinpath("json", "eager.json")
assert eager_json.exists(), f"Expected JSON file {str(eager_json)} to exist, but it does not."
with open(eager_json, "r") as ej:
run["result"] = json.load(ej)
return run
def get_runs(work_dir: Path):
runs = []
for subdir in filter(lambda x: x.is_dir(), work_dir.iterdir()):
run = get_run(subdir)
runs.append(run)
return runs
def add_test_results(runs, result_metrics, base_cuda_version):
assert len(runs) >= 2, f"Expected at least 2 runs per group, got {len(runs)}."
base_run = list(filter(lambda x: x['cuda_version'] == base_cuda_version, runs))[0]
for run in runs:
if run["cuda_version"] == base_cuda_version:
continue
for test in run["result"]:
test_name = f"{test['name']}-{test['test']}-{run['cuda_version']}-speedup"
if test['status'] == 'OK':
base_test = list(filter(lambda x: x['name'] == test['name'] and x['test'] == test['test'], base_run['result']))[0]
result_metrics[test_name] = base_test['results']['latency_ms'] / test['results']['latency_ms']
else:
# status has error
result_metrics[test_name] = "-1.0"
return result_metrics
def analyze(result_dir):
result_dir = Path(result_dir)
assert result_dir.is_dir(), f"Expected result directory {str(result_dir)} to exist, but it does not."
result_metrics = { }
runs = get_runs(result_dir)
cuda_versions = sorted(map(lambda x: x["cuda_version"], runs))
base_cuda_version = cuda_versions[0]
cuda_train = list(filter(lambda x: x["test"] == "train", runs))
add_test_results(cuda_train, result_metrics, base_cuda_version=base_cuda_version)
cuda_eval = list(filter(lambda x: x["test"] == "eval", runs))
add_test_results(cuda_eval, result_metrics, base_cuda_version=base_cuda_version)
return result_metrics
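# Hedged example (not part of the original module): a minimal, self-contained sketch of how
# add_test_results() turns latencies into "<name>-<test>-<cuda_version>-speedup" metrics.
# The runs below are invented (hypothetical resnet50 latencies), purely to illustrate the
# base_latency / test_latency ratio; real inputs come from get_runs().
def _example_speedup_metrics():
    fake_runs = [
        {"test": "train", "cuda_version": "11.3",
         "result": [{"name": "resnet50", "test": "train", "status": "OK",
                     "results": {"latency_ms": 120.0}}]},
        {"test": "train", "cuda_version": "11.6",
         "result": [{"name": "resnet50", "test": "train", "status": "OK",
                     "results": {"latency_ms": 100.0}}]},
    ]
    # Expected: {"resnet50-train-11.6-speedup": 1.2}, i.e. the 11.6 run is 1.2x faster.
    return add_test_results(fake_runs, {}, base_cuda_version="11.3")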
|
import argparse
import os
import yaml
import time
import shutil
import itertools
import subprocess
from datetime import datetime
from git import Repo
from pathlib import Path
from typing import List
from ..utils import dump_output, get_output_dir, get_output_json
from .result_analyzer import analyze
# Expected WORK_DIR structure
# WORK_DIR/
# |---examples/
# |---pytorch-<ver1>-cuda<ver1>/
# |---run.sh
# |---mnist/
# |---mnist-hogwild/
# |---<other-benchmarks>
# |---pytorch-<ver2>-cuda<ver2>/
# |---summary.csv
BM_NAME = "release-test"
EXAMPLE_URL = "https://github.com/pytorch/examples.git"
CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
DEFAULT_CONFIG_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), "configs")
RUN_TEMPLATE = """
# GENERATED BY userbenchmark/release-test/__init__.py. DO NOT EDIT!
bash {RELEASE_TEST_ROOT}/setup_env.sh '{CUDA_VERSION}' '{MAGMA_VERSION}' '{PYTORCH_VERSION}' '{PYTORCH_CHANNEL}' '{WORK_DIR}'
bash {RELEASE_TEST_ROOT}/run_release_test.sh '{CUDA_VERSION}' '{RESULT_DIR}'
"""
def get_timestamp():
return datetime.fromtimestamp(time.time()).strftime("%Y%m%d%H%M%S")
def get_work_dir(output_dir):
work_dir = output_dir.joinpath(f"run-{get_timestamp()}")
work_dir.mkdir(exist_ok=True, parents=True)
return work_dir
def generate_test_scripts(config, work_dir):
assert "cuda" in config and isinstance(config["cuda"], list), f"Expected CUDA config list, but not found."
assert "pytorch" in config and isinstance(config["pytorch"], list), f"Exptected pytorch version list, but not found."
bm_matrix = [config["cuda"], config["pytorch"]]
run_scripts = {}
for cuda, pytorch in itertools.product(*bm_matrix):
run_key = f"pytorch-{pytorch['version']}-cuda-{cuda['version']}"
run_script = RUN_TEMPLATE.format(RELEASE_TEST_ROOT=CURRENT_DIR,
CUDA_VERSION=cuda["version"],
MAGMA_VERSION=cuda["magma_version"],
PYTORCH_VERSION=pytorch["version"],
PYTORCH_CHANNEL=pytorch["conda_channel"],
WORK_DIR=work_dir,
RESULT_DIR=work_dir.joinpath(run_key))
run_scripts[run_key] = run_script
return run_scripts
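# Hedged example (not part of the original module): a sketch of the run matrix that
# generate_test_scripts() expands. The CUDA/PyTorch entries and the /tmp path are placeholders,
# not values taken from the real configs under DEFAULT_CONFIG_PATH.
def _example_run_matrix():
    fake_config = {
        "cuda": [{"version": "11.6", "magma_version": "magma-cuda116"}],
        "pytorch": [{"version": "1.12.1", "conda_channel": "pytorch"}],
    }
    scripts = generate_test_scripts(fake_config, Path("/tmp/release-test-example"))
    # One generated script per (cuda, pytorch) pair, keyed like "pytorch-1.12.1-cuda-11.6".
    return list(scripts.keys())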
def dump_test_scripts(run_scripts, work_dir):
for run_key, run_script in run_scripts.items():
run_script_loc = work_dir.joinpath(run_key)
run_script_loc.mkdir(exist_ok=True)
with open(run_script_loc.joinpath("run.sh"), "w") as rs:
rs.write(run_script)
def dump_result_to_json(metrics):
result = get_output_json(BM_NAME, metrics)
dump_output(BM_NAME, result)
def run_benchmark(run_scripts, work_dir):
for run_key, _rscript in run_scripts.items():
run_script_path = work_dir.joinpath(run_key, "run.sh")
# run the benchmark
print(f"Running benchmark {run_key} ...")
subprocess.check_call(["bash", str(run_script_path)])
def get_config(config_name: str):
if os.path.exists(os.path.join(DEFAULT_CONFIG_PATH, config_name)):
config_name = os.path.join(DEFAULT_CONFIG_PATH, config_name)
elif os.path.exists(os.path.join(DEFAULT_CONFIG_PATH, f"{config_name}.yaml")):
config_name = os.path.join(DEFAULT_CONFIG_PATH, f"{config_name}.yaml")
else:
raise ValueError(f"Can't find config name {config_name} in config path {DEFAULT_CONFIG_PATH}.")
with open(config_name, "r") as yfile:
config = yaml.safe_load(yfile)
return config
def parse_args(args):
parser = argparse.ArgumentParser()
parser.add_argument("--config", "-c", default="1.12.1", type=str, help="Config for release testing")
parser.add_argument("--dry-run", action='store_true', help="Only generate the test scripts. Do not run the benchmark.")
parser.add_argument("--analyze", type=str, help="Only analyze the result of the specified work directory.")
args = parser.parse_args(args)
return args
def prepare_release_tests(args: argparse.Namespace, work_dir: Path):
config = get_config(args.config)
run_scripts = generate_test_scripts(config, work_dir)
dump_test_scripts(run_scripts, work_dir)
# clone the examples repo
Repo.clone_from(EXAMPLE_URL, work_dir.joinpath("examples"))
return run_scripts
def cleanup_release_tests(work_dir):
examples_path = work_dir.joinpath("examples")
if examples_path.exists():
shutil.rmtree(examples_path)
def run(args: List[str]):
args = parse_args(args)
if args.analyze:
analyze(args.analyze)
return
work_dir = get_work_dir(get_output_dir(BM_NAME))
run_scripts = prepare_release_tests(args=args, work_dir=work_dir)
if not args.dry_run:
run_benchmark(run_scripts, work_dir)
metrics = analyze(work_dir)
dump_result_to_json(metrics)
cleanup_release_tests(work_dir)
|
from pathlib import Path
import re
import functools
def is_userbenchmark_runscript(run_script_file):
MAGIC_LINE = "# GENERATED BY userbenchmark/release-test/__init__.py. DO NOT EDIT!"
with open(run_script_file, "r") as rsf:
script = rsf.read()
if MAGIC_LINE in script:
return True
return False
def get_run_keys(work_dir: Path):
run_keys = []
for subdir in filter(lambda x: x.is_dir(), work_dir.iterdir()):
run_script_file = subdir.joinpath("run.sh")
if run_script_file.is_file() and is_userbenchmark_runscript(run_script_file):
run_keys.append(subdir.name)
return run_keys
def get_workloads(run_dir: Path):
return list(map(lambda x: x.name, filter(lambda x: x.is_dir(), run_dir.iterdir())))
def dump_result_csv(work_dir, result):
csv_object = [["Benchmark"]]
DELIMITER = ";"
# generate header
run_keys = sorted(result.keys())
workloads = sorted(result[run_keys[0]])
metrics = sorted(result[run_keys[0]][workloads[0]])
for run_key in run_keys:
csv_object[0].append(f"{run_key}")
# generate data
for run_key in run_keys:
for wl_id, workload in enumerate(workloads):
for mid, metric in enumerate(metrics):
if len(csv_object) <= len(workloads) * len(metrics):
csv_object.append([f"{workload}-{metric}"])
csv_object[wl_id*len(metrics)+mid+1].append(str(result[run_key][workload][metric]))
csv_text = []
for csv_line in csv_object:
csv_text.append(DELIMITER.join(csv_line))
csv_text = "\n".join(csv_text) + "\n"
print(csv_text)
summary_file = work_dir.joinpath("summary.csv")
# write result file to summary
with open(summary_file, "w") as sf:
sf.write(csv_text)
def get_peak_mem(mem_log):
# example log:
# Max GPU Mem. Max RSS Mem. Max PSS Mem.
# 697 1971.07 1438.21
max_gpu_mem = 0.0
max_cpu_mem = 0.0
for line in mem_log:
numbers = re.split(r'\s+', line.strip())
if len(numbers) == 3:
gpu_mem = float(numbers[0])
cpu_mem = float(numbers[1])
max_gpu_mem = gpu_mem if gpu_mem > max_gpu_mem else max_gpu_mem
max_cpu_mem = cpu_mem if cpu_mem > max_cpu_mem else max_cpu_mem
return max_gpu_mem, max_cpu_mem
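# Hedged example (not part of the original module): feeding get_peak_mem() the sample lines
# from the comment above. The header row has more than three tokens and is skipped; only the
# numeric row contributes to the maxima.
def _example_get_peak_mem():
    sample_log = [
        "Max GPU Mem.  Max RSS Mem.  Max PSS Mem.",
        "697           1971.07       1438.21",
    ]
    # Expected: (697.0, 1971.07)
    return get_peak_mem(sample_log)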
def analyze_workload(run_dir: Path, workload_name: str, res):
workload_dir = run_dir.joinpath(workload_name)
assert workload_dir.joinpath("result.log").exists() and workload_dir.joinpath("result_mem.log").exists(), \
f"Error: missing benchmark result file result.log or result_mem.log in {workload_dir}."
LATENCY_REGEX = "Total time elapsed: (.*) seconds."
with open(workload_dir.joinpath("result.log"), "r") as lf:
latency_log = lf.readlines()[-1].strip()
with open(workload_dir.joinpath("result_mem.log"), "r") as mf:
mem_log = mf.readlines()
latency = re.search(LATENCY_REGEX, latency_log).groups()[0]
res[workload_name] = {}
res[workload_name]["latency"] = latency
res[workload_name]["gpu_memory"], res[workload_name]["cpu_memory"] = get_peak_mem(mem_log)
return res
def dump_userbenchmark_result(results):
metrics = {}
for run_key in results:
for workload in results[run_key]:
for metric in results[run_key][workload]:
metric_name = f"{run_key}-{workload}-{metric}"
metrics[metric_name] = results[run_key][workload][metric]
return metrics
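# Hedged example (not part of the original module): how dump_userbenchmark_result() flattens
# the nested {run_key: {workload: {metric: value}}} structure into dash-joined metric names.
# The run key and value below are invented.
def _example_flatten_metrics():
    fake_results = {"pytorch-1.12.1-cuda-11.6": {"mnist": {"latency": "42.0"}}}
    # Expected: {"pytorch-1.12.1-cuda-11.6-mnist-latency": "42.0"}
    return dump_userbenchmark_result(fake_results)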
def analyze_run_key(work_dir, run_key, r):
run_dir = work_dir.joinpath(run_key)
workloads = get_workloads(run_dir)
workload_results = functools.reduce(lambda r, w: analyze_workload(run_dir, w, r), workloads, {})
r[run_key] = workload_results
return r
def analyze(work_dir: Path):
# get base_args (directory starting with "pytorch-")
work_dir = Path(work_dir)
run_keys = get_run_keys(work_dir)
assert run_keys, f"Expected non-empty run keys, got {run_keys}."
results = functools.reduce(lambda r, k: analyze_run_key(work_dir, k, r), run_keys, {})
# dump result to csv file
dump_result_csv(work_dir, results)
# dump results to userbenchmark object
results = dump_userbenchmark_result(results)
return results
|
"""
Run PyTorch cpu benchmarking.
"""
import argparse
import itertools
import os
import subprocess
import sys
import time
import yaml
from datetime import datetime
from pathlib import Path
from typing import List
from .cpu_utils import REPO_PATH, parse_str_to_list, validate, get_output_dir, get_output_json, dump_output, analyze
from ..utils import add_path
with add_path(REPO_PATH):
from torchbenchmark.util.experiment.instantiator import (list_models, TorchBenchModelConfig,
list_devices, list_tests)
BM_NAME = "cpu"
CURRENT_DIR = os.path.dirname(os.path.realpath(__file__))
def generate_model_configs(devices: List[str], tests: List[str], model_names: List[str], batch_size: int, extra_args: List[str]) -> List[TorchBenchModelConfig]:
"""Use the default batch size and default mode."""
if not model_names:
model_names = list_models()
cfgs = itertools.product(*[devices, tests, model_names])
result = [TorchBenchModelConfig(
name=model_name,
device=device,
test=test,
batch_size=batch_size,
extra_args=extra_args,
extra_env=None,
) for device, test, model_name in cfgs]
return result
def dump_result_to_json(metrics, output_dir, fname):
result = get_output_json(BM_NAME, metrics)
dump_output(BM_NAME, result, output_dir, fname)
def generate_model_configs_from_yaml(yaml_file: str) -> List[TorchBenchModelConfig]:
yaml_file_path = os.path.join(CURRENT_DIR, yaml_file)
with open(yaml_file_path, "r") as yf:
config_obj = yaml.safe_load(yf)
models = config_obj["model"] if "model" in config_obj else None
models = validate(parse_str_to_list(models), list_models()) if models else list_models()
extra_args = config_obj["extra_args"].split(' ') if config_obj["extra_args"] else []
configs = []
for model in models:
config = TorchBenchModelConfig(
name=model,
device="cpu",
test=config_obj["test"],
batch_size=config_obj["batch_size"] if "batch_size" in config_obj else None,
extra_args=extra_args,
extra_env=None,
)
configs.append(config)
return configs
def parse_args(args):
parser = argparse.ArgumentParser()
parser.add_argument("--device", "-d", default="cpu", help="Devices to run, splited by comma.")
parser.add_argument("--test", "-t", default="eval", help="Tests to run, splited by comma.")
parser.add_argument("--model", "-m", default=None, help="Only run the specifice models, splited by comma.")
parser.add_argument("--batch-size", "-b", default=None, help="Run the specifice batch size.")
parser.add_argument("--config", "-c", default=None, help="YAML config to specify tests to run.")
parser.add_argument("--metrics", default="latencies", help="Benchmark metrics, split by comma.")
parser.add_argument("--output", "-o", default=None, help="Output dir.")
parser.add_argument("--timeout", default=None, help="Limit single model test run time. Default None, means no limitation.")
parser.add_argument("--launcher", action="store_true", help="Use torch.backends.xeon.run_cpu to get the peak performance on Intel(R) Xeon(R) Scalable Processors.")
parser.add_argument("--launcher-args", default="--throughput-mode", help="Provide the args of torch.backends.xeon.run_cpu. See `python -m torch.backends.xeon.run_cpu --help`")
parser.add_argument("--dryrun", action="store_true", help="Dryrun the command.")
return parser.parse_known_args(args)
def run(args: List[str]):
args, extra_args = parse_args(args)
test_date = datetime.fromtimestamp(time.time()).strftime("%Y%m%d%H%M%S")
if args.config:
configs = generate_model_configs_from_yaml(args.config)
else:
# If not specified, use the entire model set
if not args.model:
args.model = list_models()
devices = validate(parse_str_to_list(args.device), list_devices())
tests = validate(parse_str_to_list(args.test), list_tests())
models = validate(parse_str_to_list(args.model), list_models())
configs = generate_model_configs(devices, tests, model_names=models, batch_size=args.batch_size, extra_args=extra_args)
args.output = args.output if args.output else get_output_dir(BM_NAME, test_date)
try:
for config in configs:
run_benchmark(config, args)
except KeyboardInterrupt:
print("User keyboard interrupted!")
result_metrics = analyze(args.output)
dump_result_to_json(result_metrics, Path(args.output).parent, f"metrics-{test_date}.json")
def run_benchmark(config, args):
benchmark_script = REPO_PATH.joinpath("userbenchmark", "cpu", "run_config.py")
cmd = [sys.executable]
if args.launcher:
cmd.extend(["-m", "torch.backends.xeon.run_cpu"])
if args.launcher_args:
import shlex
cmd.extend(shlex.split(args.launcher_args))
cmd.append(str(benchmark_script))
if config.name:
cmd.append("-m")
cmd.append(config.name)
if config.device:
cmd.append("-d")
cmd.append(config.device)
if config.batch_size:
cmd.append("-b")
cmd.append(str(config.batch_size))
if config.test:
cmd.append("-t")
cmd.append(config.test)
cmd.extend(config.extra_args)
cmd.append("--metrics")
cmd.append(args.metrics)
cmd.append("-o")
cmd.append(str(args.output))
print(f"\nRunning benchmark: {' '.join(map(str, cmd))}")
if not args.dryrun:
timeout = int(args.timeout) if args.timeout else None
try:
subprocess.run(cmd, cwd=REPO_PATH, check=False, timeout=timeout)
except Exception as e:
print(e)
|
"""
Run PyTorch cpu benchmarking.
"""
import json
import os
import re
import sys
import time
from datetime import datetime
from pathlib import Path
from typing import List, Dict, Optional
REPO_PATH = Path(__file__).absolute().parent.parent.parent
USERBENCHMARK_OUTPUT_PREFIX = ".userbenchmark"
class add_path():
def __init__(self, path):
self.path = path
def __enter__(self):
sys.path.insert(0, self.path)
def __exit__(self, exc_type, exc_value, traceback):
try:
sys.path.remove(self.path)
except ValueError:
pass
def list_metrics() -> List[str]:
return ["latencies", "throughputs", "cpu_peak_mem"]
def parse_str_to_list(candidates):
if isinstance(candidates, list):
return candidates
candidates = list(map(lambda x: x.strip(), candidates.split(",")))
return candidates
def validate(candidates, choices: List[str]):
"""Validate the candidates provided by the user is valid"""
if isinstance(candidates, List):
for candidate in candidates:
assert candidate in choices, f"Specified {candidate}, but not in available list: {choices}."
else:
assert candidates in choices, f"Specified {candidates}, but not in available list: {choices}."
return candidates
def get_output_dir(bm_name, test_date=None):
current_dir = Path(__file__).parent.absolute()
bm_out_dir = current_dir.parent.parent.joinpath(USERBENCHMARK_OUTPUT_PREFIX, bm_name)
test_date = test_date if test_date else datetime.fromtimestamp(time.time()).strftime("%Y%m%d%H%M%S")
output_dir = bm_out_dir.joinpath("cpu-" + test_date)
output_dir.mkdir(exist_ok=True, parents=True)
return output_dir
def get_output_json(bm_name, metrics):
import torch
return {
"name": bm_name,
"environ": {"pytorch_git_version": torch.version.git_version},
"metrics": metrics,
}
def dump_output(bm_name, output, output_dir=None, fname=None):
output_dir = output_dir if output_dir else get_output_dir(bm_name)
fname = fname if fname else "metrics-{}.json".format(os.getpid())
full_fname = os.path.join(output_dir, fname)
with open(full_fname, "w") as f:
json.dump(output, f, indent=4)
def get_run(test_dir: Path):
run = {}
testdir_name = test_dir.name
regex = "(.*)-(.*)"
g = re.match(regex, testdir_name).groups()
run["model"] = g[0]
run["test"] = g[1]
run["results"] = []
ins_jsons = filter(lambda x: x.is_file(), test_dir.iterdir())
for ins_json in ins_jsons:
with open(ins_json, "r") as ij:
run["results"].append(json.load(ij))
return run
def get_runs(work_dir: Path):
runs = []
for subdir in filter(lambda x: x.is_dir(), work_dir.iterdir()):
run = get_run(subdir)
runs.append(run)
return runs
def add_test_results(runs, result_metrics):
# metrics name examples:
# timm_regnet-eval_latency
# timm_regnet-eval_cmem
for run in runs:
run_base_name = f"{run['model']}-{run['test']}"
ins_number = len(run["results"])
assert ins_number, f"Expected at least one result instance for {run_base_name}."
latency_metric = "latency" in run["results"][0]["metrics"]
throughput_metric = "throughput" in run["results"][0]["metrics"]
cmem_metric = "cpu_peak_mem" in run["results"][0]["metrics"]
latency_sum = 0
throughput_sum = 0
cmem_sum = 0
for ins_res in run["results"]:
if latency_metric:
latency_sum += ins_res["metrics"]["latency"]
if throughput_metric:
throughput_sum += ins_res["metrics"]["throughput"]
if cmem_metric:
cmem_sum += ins_res["metrics"]["cpu_peak_mem"]
if latency_metric:
result_metrics[f"{run_base_name}_latency"] = latency_sum / ins_number
if throughput_metric:
result_metrics[f"{run_base_name}_throughput"] = throughput_sum
if cmem_metric:
result_metrics[f"{run_base_name}_cmem"] = cmem_sum / ins_number
return result_metrics
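# Hedged example (not part of the original module): how add_test_results() aggregates
# per-instance results. With two hypothetical instances of the same model/test, latency and
# cpu_peak_mem are averaged while throughput is summed across instances.
def _example_aggregate_instances():
    fake_runs = [{
        "model": "timm_regnet",
        "test": "eval",
        "results": [
            {"metrics": {"latency": 10.0, "throughput": 100.0, "cpu_peak_mem": 2.0}},
            {"metrics": {"latency": 12.0, "throughput": 110.0, "cpu_peak_mem": 4.0}},
        ],
    }]
    # Expected: {"timm_regnet-eval_latency": 11.0,
    #            "timm_regnet-eval_throughput": 210.0,
    #            "timm_regnet-eval_cmem": 3.0}
    return add_test_results(fake_runs, {})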
def analyze(result_dir):
result_dir = Path(result_dir)
assert result_dir.is_dir(), f"Expected result directory {str(result_dir)} to exist, but it does not."
result_metrics = {}
runs = get_runs(result_dir)
cpu_train = list(filter(lambda x: x["test"] == "train", runs))
if len(cpu_train):
add_test_results(cpu_train, result_metrics)
cpu_eval = list(filter(lambda x: x["test"] == "eval", runs))
if len(cpu_eval):
add_test_results(cpu_eval, result_metrics)
return result_metrics
|
"""
Run PyTorch cpu benchmarking.
"""
import argparse
import os
import numpy
from typing import List, Dict, Optional
from pathlib import Path
from cpu_utils import add_path, REPO_PATH, validate, parse_str_to_list, list_metrics, get_output_dir, get_output_json, dump_output
with add_path(str(REPO_PATH)):
from torchbenchmark.util.experiment.instantiator import (list_models, load_model, load_model_isolated, TorchBenchModelConfig,
list_devices, list_tests)
from torchbenchmark.util.experiment.metrics import TorchBenchModelMetrics, get_model_test_metrics
BM_NAME = 'cpu'
CURRENT_DIR = os.path.dirname(os.path.realpath(__file__))
def result_to_output_metrics(metrics: List[str], metrics_res: TorchBenchModelMetrics) -> Dict[str, float]:
result_metrics = {}
if metrics_res:
if "latencies" in metrics and metrics_res.latencies:
latency_metric = "latency"
median_latency = numpy.median(metrics_res.latencies)
assert median_latency, f"Run failed for metric {latency_metric}"
result_metrics[latency_metric] = median_latency
if "throughputs" in metrics and metrics_res.throughputs:
throughput_metric = "throughput"
median_throughput = numpy.median(metrics_res.throughputs)
assert median_throughput, f"Run failed for metric {throughput_metric}"
result_metrics[throughput_metric] = median_throughput
if "cpu_peak_mem" in metrics and metrics_res.cpu_peak_mem:
cpu_peak_mem = "cpu_peak_mem"
result_metrics[cpu_peak_mem] = metrics_res.cpu_peak_mem
return result_metrics
def dump_result_to_json(metrics, output_dir):
result = get_output_json(BM_NAME, metrics)
dump_output(BM_NAME, result, output_dir)
def run_config(config: TorchBenchModelConfig, metrics: List[str], dryrun: bool=False) -> Optional[TorchBenchModelMetrics]:
"""This function only handles NotImplementedError, all other errors will fail."""
print(f"Running {config} ...", end='')
if dryrun:
return None
# We do not allow RuntimeError in this test
try:
if "cpu_peak_mem" in metrics:
# load the model instance within separate subprocess
model = load_model_isolated(config)
else:
# load the model instance within current process
model = load_model(config)
# get the model test metrics
result: TorchBenchModelMetrics = get_model_test_metrics(model, metrics=metrics)
except NotImplementedError as e:
print(" [NotImplemented]")
return None
print(" [Done]")
return result
def run(args: List[str], extra_args: List[str]):
device = validate(args.device, list_devices())
test = validate(args.test, list_tests())
model = validate(args.model, list_models())
metrics = validate(parse_str_to_list(args.metrics), list_metrics())
config = TorchBenchModelConfig(
name=model,
device=device,
test=test,
batch_size=args.batch_size,
extra_args=extra_args,
extra_env=None)
# Initialize so a KeyboardInterrupt before completion doesn't leave metrics_res undefined
metrics_res = None
try:
metrics_res = run_config(config, metrics, dryrun=args.dryrun)
except KeyboardInterrupt:
print("User keyboard interrupted!")
if not args.dryrun:
args.output = args.output if args.output else get_output_dir(BM_NAME)
target_dir = Path(args.output).joinpath(f"{config.name}-{config.test}")
target_dir.mkdir(exist_ok=True, parents=True)
metrics_dict = result_to_output_metrics(metrics, metrics_res)
dump_result_to_json(metrics_dict, target_dir)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--device", "-d", default="cpu", help="Devices to run.")
parser.add_argument("--test", "-t", default="eval", help="Tests to run.")
parser.add_argument("--model", "-m", default=None, type=str, help="Only run the specifice model.")
parser.add_argument("--batch-size", "-b", default=None, type=int, help="Run the specifice batch size.")
parser.add_argument("--output", "-o", default=None, help="Output dir.")
parser.add_argument("--metrics", default="latencies", help="Benchmark metrics, split by comma.")
parser.add_argument("--dryrun", action="store_true", help="Dryrun the command.")
args, extra_args = parser.parse_known_args()
run(args, extra_args)
|
from typing import List
import torch
from torchbenchmark.util.distributed.submit import parse_args, get_init_file, TrainerWrapper
from ..utils import dump_output
BM_NAME = "distributed"
def gen_metrics_from_result(result):
assert isinstance(result, List), "The result should be a list."
metrics = {}
for result_id, r in enumerate(result):
for metric_name in r:
metrics[f"{result_id}-{metric_name}"] = r[metric_name]
return metrics
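# Hedged example (not part of the original module): gen_metrics_from_result() prefixes each
# metric with the index of the result it came from, so per-experiment metrics stay distinct.
# The latency values below are invented.
def _example_gen_metrics():
    fake_result = [{"latency_median": 1.0}, {"latency_median": 2.0}]
    # Expected: {"0-latency_median": 1.0, "1-latency_median": 2.0}
    return gen_metrics_from_result(fake_result)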
def run(args: List[str]):
args, model_args = parse_args(args)
if args.scheduler == "slurm":
result = slurm_run(args, model_args)
elif args.scheduler == "local":
result = local_run(args, model_args)
else:
raise ValueError(f"Unsupported scheduler: {args.scheduler}")
version = torch.version.git_version if hasattr(torch.version, "git_version") else "Internal"
# dump the output file
output = {
"name": BM_NAME,
"environ": {"pytorch_git_version": version},
"args": vars(args),
"metrics": gen_metrics_from_result(result),
}
dump_output(BM_NAME, output)
def local_run(args, model_args):
# TODO: Currently this does nothing but to support the path for "--scheduler local"
print("Current local run is not implemented, use '--scheduler slurm'. Skipping local run.")
return []
def slurm_run(args, model_args):
import submitit
# Note that the folder will depend on the job_id, to easily track experiments
executor = submitit.AutoExecutor(folder=args.job_dir, cluster=args.cluster, slurm_max_num_timeout=3000)
executor.update_parameters(
gpus_per_node=args.ngpus,
# one task per GPU
tasks_per_node=args.ngpus,
cpus_per_task=10,
nodes=args.nodes,
timeout_min=args.timeout,
# Below are cluster dependent parameters
slurm_partition=args.partition,
slurm_signal_delay_s=120,
slurm_exclude=args.exclude,
)
executor.update_parameters(name="distbench", slurm_array_parallelism=1, timeout_min=1000)
args.dist_url = get_init_file(args).as_uri()
args.output_dir = args.job_dir
args.extra_args = []
if model_args:
args.extra_args = model_args
job = executor.submit(TrainerWrapper(args, model_args))
# waits for completion and returns output
result = job.results()
return result
|
import sys
import subprocess
def pip_install_requirements():
subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-q', '-r', 'requirements.txt'])
if __name__ == '__main__':
pip_install_requirements()
|
import argparse
import importlib
import os
import copy
import csv
import dataclasses
import functools
import io
import json
import multiprocessing
import queue
import submitit
import time
from datetime import datetime, timedelta
import sys
import torch
import uuid
import warnings
from pathlib import Path
from torch.distributed.fsdp.wrap import transformer_auto_wrap_policy
from typing import Any, Dict, List, Optional, Tuple
MODEL_PATH_TEMPLATE = "torchbenchmark.models.{}.Model"
def output_csv(filename, headers, row):
assert filename
existed = os.path.exists(filename)
output = csv.writer(
io.TextIOWrapper(
open(filename, "ab", buffering=0),
"utf-8",
write_through=True,
),
lineterminator="\n",
)
if not existed:
output.writerow(headers)
output.writerow([(f"{x:.4f}" if isinstance(x, float) else x) for x in row])
def parse_args(args: List[str]=None):
parser = argparse.ArgumentParser(description='Submitit for PyTorch Distributed Benchmark', add_help=False)
parser.add_argument(
"--ngpus",
default=8,
type=int,
help="Number of gpus to request on each node"
)
parser.add_argument(
"--nodes",
default=None,
type=int,
action="extend",
nargs="+",
help="Number of nodes to request. Provide a list of nodes to test, e.g. `--nodes 8 4 2 1 --next_arg..."
)
parser.add_argument(
"--filter_models",
default=None,
type=str,
action="extend",
nargs="+",
help="List of models to test, e.g. --filter hf_T5 hf_T5_large resnet50"
)
parser.add_argument(
"--timeout",
default=120,
type=int,
help="Duration of the job"
)
parser.add_argument(
"--profiler",
default=False,
type=bool,
help="Measure with PyTorch Profiler. Disabled by default, as it crashes on AWS"
)
parser.add_argument(
"--partition",
default="train",
type=str,
help="The Slurm partition to submit to"
)
parser.add_argument(
"--cluster",
default=None,
type=str,
help="Which slurm cluster to target. Use 'local' to run jobs locally, 'debug' to run jobs in process",
)
parser.add_argument(
"--distributed",
default="ddp_no_static_graph",
type=str,
help="the distributed runner to use"
)
parser.add_argument(
"--job_dir",
default=os.getcwd(),
type=str,
help="A shared folder across all worker processes"
)
parser.add_argument(
"--trainer",
type=str,
default="torchbenchmark.util.distributed.core_model.trainer.Trainer",
help="training paradigm, by default using DDP"
)
parser.add_argument(
"--index_file",
type=str,
default=f"ddp_experiments_{datetime.now().strftime('%Y%m%d-%H%M%S')}.csv",
help="training paradigm, by default using DDP"
)
parser.add_argument(
"--exclude",
type=str,
default="",
help="comma-separated list of nodes to exclude from the slurm allocation",
)
parser.add_argument(
"--repeat",
type=int,
default=1,
help="number of times to repeat the experiments",
)
parser.add_argument(
"--check_correctness_distributed",
action='store_true',
help="Do distributed correctness checks. Don't expect to use the same results for performance tests."
)
parser.add_argument(
"--precision",
type=str,
default=None,
help="Precision (e.g. amp, fp32, fp16)",
)
parser.add_argument(
"--nccl-socket-ifname",
type=str,
default="ens",
help="Value to use for NCCL_SOCKET_IFNAME environment variable",
)
try:
if args:
return parser.parse_args(args)
else:
return parser.parse_args()
except:
parser.print_help()
sys.exit(0)
def get_init_file(args):
# Init file must not exist, but its parent dir must exist.
os.makedirs(args.job_dir, exist_ok=True)
init_file = Path(args.job_dir) / f"{uuid.uuid4().hex}_init"
print(init_file)
if init_file.exists():
os.remove(str(init_file))
return init_file
# This implements a barrier function, where all processes wait until they all
# reach the barrier() call.
# rank: the rank of the calling process (one barrier participant per rank).
class FileBarrier:
def __init__(self, rank, world_size, sync_file, timeout: Optional[timedelta] = None):
self.rank = rank
self.world_size = world_size
self.sync_file = sync_file
self.store = torch.distributed.FileStore(sync_file, world_size)
if timeout is None:
timeout = timedelta(minutes=30)
self.store.set_timeout(timeout)
self.call_idx = 0
self.barrier()
def barrier(self):
self.call_idx += 1
my_key = f"barrier{self.call_idx}.{self.rank}"
self.store.add(my_key, 1)
wait_for = []
for i in range(self.world_size):
key = f"barrier{self.call_idx}.{i}"
wait_for.append(key)
self.store.wait(wait_for)
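# Hedged example (not part of the original script): a degenerate single-process FileBarrier,
# only to show the call pattern. In real jobs one barrier is created per Slurm task and every
# rank must reach barrier() before any of them proceeds past it. The temp path is a placeholder.
def _example_file_barrier():
    import tempfile
    sync_file = os.path.join(tempfile.mkdtemp(), "sync")
    barrier = FileBarrier(rank=0, world_size=1, sync_file=sync_file)
    barrier.barrier()  # returns immediately: this rank is the entire world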
@dataclasses.dataclass
class ExperimentParams:
config: Dict
args: Any # arguments to the distributed trainer
model_args: Any # arguments to the model
is_reference: bool # should this experiment be treated as a reference for correctness?
# used for labeling filenames for correctness checks
def serialize_config(config: Dict):
keys = ["nodes", "model_name", "backend", "has_breaks"]
return "-".join([f"{k}_{config[k]}" for k in keys if k in config])
@dataclasses.dataclass
class JobConfig:
outer_sync_path: str
class TrainerWrapper(object):
# per_experiment_args is a list of experiments.
# Each experiment should be a tuple of (config dict, args, model_args).
# config: configuration data to attach to the result dict.
# args & model_args: arguments for core_model.Trainer.
def __init__(self, job_config: JobConfig, per_experiment_args: List[ExperimentParams]):
self.job_config = job_config
self.per_experiment_args = per_experiment_args
self.timeout = timedelta(45)
# this is called within a multiprocessing.Process.
def run_once(self, args, model_args, q):
print("run_once")
self._setup_gpu_args(args)
pos = args.model.rfind(".")
module = importlib.import_module(args.model[:pos])
model_class = getattr(module, args.model[(pos+1):])
pos = args.trainer.rfind(".")
module = importlib.import_module(args.trainer[:pos])
trainer_class = getattr(module, args.trainer[(pos+1):])
trainer = trainer_class(args, model_class, model_args=model_args)
result = trainer.measure()
print(f"result {result}")
q.put(result)
trainer.teardown()
def __call__(self):
results = []
job_env = submitit.JobEnvironment()
barrier = self._get_barrier()
print(f"This is node {job_env.node}")
# maps all configs that are expected to have the same output/gradients to the same value.
# i.e. we should expect that for a given model_name & number of nodes, we should get the same
# outputs and gradients, regardless of the backend/has_breaks/etc.
def reference_key(config):
return f"{config['model_name']}-{config['nodes']}"
latest_reference_file = {}
output_dir = self.per_experiment_args[0].args.output_dir
base_ref_name = Path(output_dir) / uuid.uuid4().hex
for experiment_args in self.per_experiment_args:
config = experiment_args.config
args = experiment_args.args
model_args = experiment_args.model_args
is_reference = experiment_args.is_reference
try:
key = reference_key(config)
if args.check_correctness_distributed:
# if this is a reference, dump the gradients into a file for later use.
# if this is not a reference, read the dumped gradients and compare.
if is_reference:
args.check_correctness_distributed = "reference"
args.reference_data_path = f"{base_ref_name}-{serialize_config(config)}"
latest_reference_file[key] = args.reference_data_path
else:
args.check_correctness_distributed = "test"
args.reference_data_path = latest_reference_file[key] if key in latest_reference_file else None
else:
args.check_correctness_distributed = None
if job_env.node >= args.nodes:
continue
result_dict = {**config}
q = multiprocessing.Queue()
proc = multiprocessing.Process(target=self.run_once, args=(args, model_args, q))
proc.start()
# wait for 3 minutes less than timeout, to give some buffer time so that
# the barrier doesn't time out.
# 3 minutes chosen based on 3x the 60s timeout for killing & joining jobs
# that are timing out.
timeout_seconds = (self.timeout - timedelta(minutes=3)).total_seconds()
# Wait in a loop because:
# - the queue has a limited buffer size, so we need to call q.get() before proc.join()
# in case the queue blocks when the worker process tries to put into the queue
# - if the worker process errors out, nothing will get put into the queue when it
# exits early and then we end up waiting until the timeout finishes
# So we wait in a loop and wait until either finishes
got_result = False
got_exit = False
exit_code = None
result = None
start_time = time.time()
while time.time() < start_time + timeout_seconds and not got_exit:
proc.join(timeout=1)
if proc.exitcode is not None:
got_exit = True
exit_code = proc.exitcode
if not got_result:
try:
result = q.get(timeout=1)
got_result = True
except queue.Empty:
pass
if not got_exit:
proc.kill()
proc.join(timeout=60)
proc.close()
if isinstance(result, dict) and 'latency_median' in result:
result_dict['result'] = result
else:
result_dict['result'] = None
print(f"exit code: {exit_code} and result: {result_dict}")
assert 'result' in result_dict
# wrap in <RESULT></RESULT> so we can parse partial results in the stdout logs
print(f"<RESULT>{json.dumps(result_dict)}</RESULT>")
results.append(result_dict)
finally:
barrier.barrier()
return results
def checkpoint(self):
self.args.dist_url = get_init_file(self.args).as_uri()
checkpoint_file = os.path.join(self.args.output_dir, "checkpoint.pth")
if os.path.exists(checkpoint_file):
self.args.resume = checkpoint_file
print("Requeuing ", self.args, self.model_args)
empty_trainer = type(self)(self.args, self.model_args)
return submitit.helpers.DelayedSubmission(empty_trainer)
def _get_barrier(self):
job_env = submitit.JobEnvironment()
rank = job_env.global_rank
world_size = job_env.num_tasks
return FileBarrier(
rank=rank,
world_size=world_size,
sync_file=self.job_config.outer_sync_path,
timeout=self.timeout
)
def _global_rank(self):
job_env = submitit.JobEnvironment()
return job_env.global_rank
def _setup_gpu_args(self, args):
job_env = submitit.JobEnvironment()
args.output_dir = Path(str(args.output_dir).replace("%j", str(job_env.job_id)))
args.gpu = job_env.local_rank
args.rank = job_env.global_rank
args.world_size = args.ngpus * args.nodes
print(f"Process group: {args.world_size} tasks, rank: {args.rank}")
os.environ["LOCAL_RANK"] = str(job_env.local_rank)
os.environ["RANK"] = str(job_env.global_rank)
os.environ["WORLD_SIZE"] = str(args.world_size)
os.environ["GPUS_PER_NODE"] = str(job_env.num_tasks//job_env.num_nodes)
# os.environ["NCCL_IB_DISABLE"] = str(1)
os.environ["NCCL_DEBUG"] = 'INFO'
os.environ["NCCL_DEBUG_SUBSYS"] = 'INIT,ENV,NET'
os.environ['NCCL_SOCKET_IFNAME'] = args.nccl_socket_ifname
# os.environ["NCCL_ALGO"] = 'ring'
os.environ["FI_PROVIDER"] = 'efa'
os.environ["FI_EFA_USE_DEVICE_RDMA"]= str(1)
os.environ["NET_TYPE"] = 'efa'
os.environ["ADAM_CAPTURABLE"] = str(1)
def parse_precision(args, copied_model_args):
if args.precision is not None:
copied_model_args.extend(["--precision", args.precision])
def get_node_list(args):
node_list = args.nodes
if node_list is None:
# run the 8-node version first so that all the caches get warmed up at the same time.
node_list = [8, 4, 2, 1]
return node_list
# takes `models` as a list of models in shortened form (i.e. not containing MODEL_PATH_TEMPLATE).
def filter_models(args, models: List[str]):
if args.filter_models is None:
return models
final_models = []
for m in args.filter_models:
if m in models:
final_models.append(m)
else:
warnings.warn(f"Model {m} was specified but is unsupported.")
return final_models
def benchmark_ddp(args, executor):
available_models = [
'hf_Bert',
'hf_GPT2_large',
'hf_T5_large',
'timm_vision_transformer_large',
'hf_T5',
'resnet50',
]
models = [MODEL_PATH_TEMPLATE.format(m) for m in filter_models(args, available_models)]
model_batch_size = {
'hf_Bert': 32,
'hf_GPT2_large': 4,
'hf_T5_large': 4,
'timm_vision_transformer_large': 16,
'hf_T5': 12,
'resnet50': 128,
}
model_batch_size = {MODEL_PATH_TEMPLATE.format(k): v for k, v in model_batch_size.items()}
# put eager first to ensure it can be used for reference values.
# try --torchdynamo eager or --torchdynamo aot_eager for debugging
model_args_configs = [
[], # no args = pure eager baseline
["--torchdynamo", "inductor"],
]
node_list = get_node_list(args)
def get_backend_name(model_args):
if "--torchdynamo" in model_args:
return "torchdynamo_" + model_args[model_args.index("--torchdynamo") + 1]
return "eager"
experiments = []
for i in range(args.repeat):
for nodes in node_list:
for model_name in models:
for model_args in model_args_configs:
for has_breaks in [True, False]:
backend_name = get_backend_name(model_args)
if backend_name == "eager" and has_breaks:
continue
is_reference = (backend_name == "eager")
# copy the model args so we can add more arguments without modifying
# the original model_args list.
copied_model_args = copy.copy(model_args)
breakname = "withbreaks" if has_breaks else "nobreaks"
if has_breaks:
copied_model_args.append("--optimize_dynamo_ddp")
if "inductor" in backend_name:
copied_model_args.extend(["--torchinductor_cudagraph", "False"])
if backend_name != "eager":
copied_model_args.extend(["--dynamo_disable_optimizer_step", "True"])
parse_precision(args, copied_model_args)
# skip non-distributed correctness checks to avoid extra iterations which can
# interfere with distributed correctness checks.
copied_model_args.append("--skip_correctness")
if args.check_correctness_distributed and "inductor" in backend_name:
copied_model_args.extend(["--torchinductor_fallback_random", "True"])
batch_size = model_batch_size[model_name]
args_copy = copy.deepcopy(args)
args_copy.model = model_name
args_copy.batch_size = batch_size
args_copy.nodes = nodes
args_copy.dist_url = get_init_file(args).as_uri()
args_copy.output_dir = args.job_dir
config = {
"nodes": nodes,
"model_name": model_name,
"backend": backend_name,
"has_breaks": has_breaks,
}
experiments.append(ExperimentParams(config, args_copy, copied_model_args, is_reference))
allocation_nodes = max(node_list)
executor.update_parameters(
nodes=allocation_nodes,
)
job_config = JobConfig(
outer_sync_path=str(get_init_file(args))
)
job = executor.submit(TrainerWrapper(job_config, experiments))
# print ID of the Slurm job
print(f"{allocation_nodes} nodes: {job.job_id}")
output_csv(
args.index_file,
("job_id",),
(job.job_id,),
)
# waits for completion and returns output
print(job.results())
def apply_fsdp(model, trainer, auto_wrap_policy):
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
assert trainer == "fsdp"
fsdp_model = FSDP(
model,
auto_wrap_policy=auto_wrap_policy,
device_id=torch.cuda.current_device(),
use_orig_params=True,
)
return fsdp_model
def apply_fsdp_hf_T5_large(model, trainer):
from transformers.models.t5.modeling_t5 import T5Block
return apply_fsdp(
model,
trainer,
functools.partial(transformer_auto_wrap_policy, transformer_layer_cls=(T5Block,)),
)
def apply_fsdp_hf_GPT2_large(model, trainer):
from transformers.models.gpt2.modeling_gpt2 import GPT2Block
return apply_fsdp(
model,
trainer,
functools.partial(transformer_auto_wrap_policy, transformer_layer_cls=(GPT2Block,)),
)
def apply_fsdp_hf_Bert_large(model, trainer):
from transformers.models.bert.modeling_bert import BertLayer
return apply_fsdp(
model,
trainer,
functools.partial(transformer_auto_wrap_policy, transformer_layer_cls=(BertLayer,)),
)
def apply_fsdp_timm_VIT_large(model, trainer):
from timm.models.vision_transformer import Block
return apply_fsdp(
model,
trainer,
functools.partial(transformer_auto_wrap_policy, transformer_layer_cls=(Block,)),
)
def benchmark_fsdp(args, executor):
def get_backend_name(model_args):
if "--torchdynamo" in model_args:
return "torchdynamo_" + model_args[model_args.index("--torchdynamo") + 1]
return "eager"
def generic_setup(nodes, model_args):
backend_name = get_backend_name(model_args)
copied_model_args = copy.copy(model_args)
if "inductor" in backend_name:
copied_model_args.extend(["--torchinductor_cudagraph", "False"])
if backend_name != "eager":
copied_model_args.extend(["--dynamo_disable_optimizer_step", "True"])
copied_model_args.append("--skip_correctness")
if args.check_correctness_distributed and "inductor" in backend_name:
copied_model_args.extend(["--torchinductor_fallback_random", "True"])
args_copy = copy.deepcopy(args)
args_copy.nodes = nodes
args_copy.dist_url = get_init_file(args).as_uri()
args_copy.output_dir = args.job_dir
return args_copy, copied_model_args
def fsdp_is_reference(backend_name):
return backend_name == "eager"
def get_model_config(
nodes,
model_args,
model_name,
wrap_fn,
batch_size_per_nodes,
):
model_path = MODEL_PATH_TEMPLATE.format(model_name)
args_copy, copied_model_args = generic_setup(nodes, model_args)
copied_model_args.extend(["--distributed_wrap_fn", wrap_fn])
parse_precision(args, copied_model_args)
assert nodes in batch_size_per_nodes
args_copy.batch_size = batch_size_per_nodes[nodes]
args_copy.model = model_path
backend_name = get_backend_name(model_args)
config = {
"nodes": nodes,
"model_name": model_name,
"backend": backend_name,
}
return ExperimentParams(config, args_copy, copied_model_args, is_reference=fsdp_is_reference(backend_name))
is_amp = args.precision == "amp"
model_configs = {
"timm_vision_transformer_large": functools.partial(
get_model_config,
model_name="timm_vision_transformer_large",
wrap_fn="userbenchmark.ddp_experiments.apply_fsdp_timm_VIT_large",
batch_size_per_nodes={1: 16, 2: 16, 4: 16, 8: 16} if is_amp else {1: 6, 2: 6, 4: 6, 8: 6},
),
"hf_GPT2_large": functools.partial(
get_model_config,
model_name="hf_GPT2_large",
wrap_fn="userbenchmark.ddp_experiments.apply_fsdp_hf_GPT2_large",
batch_size_per_nodes={1: 8, 2: 8, 4: 8, 8: 8} if is_amp else {1: 6, 2: 6, 4: 6, 8: 6},
),
"hf_Bert_large": functools.partial(
get_model_config,
model_name="hf_Bert_large",
wrap_fn="userbenchmark.ddp_experiments.apply_fsdp_hf_Bert_large",
batch_size_per_nodes={1: 20, 2: 20, 4: 20, 8: 20} if is_amp else {1: 16, 2: 16, 4: 16, 8: 16},
),
"hf_T5_large": functools.partial(
get_model_config,
model_name="hf_T5_large",
wrap_fn="userbenchmark.ddp_experiments.apply_fsdp_hf_T5_large",
batch_size_per_nodes={1: 6, 2: 6, 4: 6, 8: 6},
),
}
selected_models = filter_models(args, [k for k, _ in model_configs.items()])
model_configs = {k: v for k, v in model_configs.items() if k in selected_models}
model_args_configs = [
[], # no args = pure eager baseline
["--torchdynamo", "inductor"],
]
node_list = get_node_list(args)
experiments = []
for i in range(args.repeat):
for nodes in node_list:
for model_name, config_generator in model_configs.items():
for model_args in model_args_configs:
experiments.append(config_generator(nodes, model_args))
allocation_nodes = max(node_list)
executor.update_parameters(
nodes=allocation_nodes,
)
job_config = JobConfig(
outer_sync_path=str(get_init_file(args))
)
job = executor.submit(TrainerWrapper(job_config, experiments))
# print ID of the Slurm job
print(f"{allocation_nodes} nodes: {job.job_id}")
output_csv(
args.index_file,
("job_id",),
(job.job_id,),
)
# waits for completion and returns output
print(job.results())
def main():
args = parse_args()
# Note that the folder will depend on the job_id, to easily track experiments
executor = submitit.AutoExecutor(folder=args.job_dir, cluster=args.cluster, slurm_max_num_timeout=3000)
executor.update_parameters(
gpus_per_node=args.ngpus,
# one task per GPU
tasks_per_node=args.ngpus,
cpus_per_task=12,
timeout_min=args.timeout,
# Below are cluster dependent parameters
slurm_partition=args.partition,
slurm_signal_delay_s=120,
slurm_exclude=args.exclude,
)
executor.update_parameters(name="distbench", slurm_array_parallelism=1, timeout_min=args.timeout)
if "ddp" in args.distributed:
benchmark_ddp(args, executor)
elif "fsdp" in args.distributed:
benchmark_fsdp(args, executor)
if __name__=="__main__":
import torch
if torch.version.debug:
raise RuntimeError("torch.version.debug == True, which is disallowed because " \
"NCCL performance is drastically worse when debug is on. Build with " \
"DEBUG=0 python setup.py [develop|install|bdist_wheel] instead."
)
main()
|
import csv
import json
import copy
import argparse
from typing import OrderedDict
from dataclasses import dataclass
import os
import pickle
from collections import defaultdict
import tabulate
import sys
def parse_partial(args):
"""
Schema:
model_data["model"]["backend"][#nodes] = result
where "result" can be a list of results, or "error"
"""
model_data = defaultdict(lambda: defaultdict(lambda: defaultdict(lambda: defaultdict(list))))
rank_id = 0
log_path = os.path.join(args.results_dir, f"{args.job_id}_{rank_id}_log.out")
with open(log_path, "r") as f:
content = f.read()
pieces = content.split("<RESULT>")
pieces = [x.split("</RESULT>") for x in pieces]
pieces = [x[0] for x in pieces if len(x) == 2]
pieces = [json.loads(x) for x in pieces]
for row in pieces:
model = row["model_name"]
backend = row["backend"]
nodes = row["nodes"]
has_breaks = str(row["has_breaks"] if "has_breaks" in row else "False")
if isinstance(row["result"], dict):
latency = float(row["result"]["latency_median"])
if isinstance(model_data[model][backend][nodes][has_breaks], list):
model_data[model][backend][nodes][has_breaks].append(latency)
else:
model_data[model][backend][nodes][has_breaks] = "error"
return model_data
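# Hedged example (not part of the original script): the <RESULT>...</RESULT> extraction that
# parse_partial() performs, applied to an in-memory sample instead of a Slurm log file. The
# sample row is invented; it only mirrors the dict printed by the TrainerWrapper.
def _example_result_extraction():
    sample_log = (
        'stdout noise <RESULT>{"model_name": "torchbenchmark.models.resnet50.Model", '
        '"backend": "eager", "nodes": 2, "has_breaks": false, '
        '"result": {"latency_median": 123.4}}</RESULT> more noise'
    )
    pieces = sample_log.split("<RESULT>")
    pieces = [x.split("</RESULT>") for x in pieces]
    pieces = [x[0] for x in pieces if len(x) == 2]
    # Expected: one dict with model_name/backend/nodes/has_breaks/result keys.
    return [json.loads(x) for x in pieces]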
def model_name(model):
if "torchbenchmark.models." in model:
model = model[len("torchbenchmark.models."):]
if ".Model" in model:
model = model[:model.find(".Model")]
return model
def median(x):
if len(x) == 0:
return 0
x = copy.copy(x)
x = sorted(x)
idx = int(len(x)/2)
if len(x) % 2 == 0:
return (x[idx - 1] + x[idx]) / 2
else:
return x[idx]
def print_model_table(args, model, model_data):
node_counts = OrderedDict()
for backend in model_data:
for node in model_data[backend]:
node_counts[node] = node # hack orderedset
node_counts = list(node_counts)
node_counts = sorted(node_counts)
rows = []
for has_breaks in [False, True]:
for backend in model_data:
row = [f"{backend} {'w/' if has_breaks else 'wo/'}breaks", ]
for node in node_counts:
if node in model_data[backend]:
res = model_data[backend][node][str(has_breaks)]
if isinstance(res, list):
if len(res) > 0:
res = f"{median(res):.3f}"
else:
res = 0.0
row.append(res)
else:
row.append("-")
rows.append(row)
hdr = ("backend", ) + tuple(f"{node}_latency" for node in node_counts)
print(f"{model_name(model)}:")
print(tabulate.tabulate(rows, headers=hdr))
print()
def print_csv(args, data):
csv_data = []
node_counts = OrderedDict()
for model in data:
for backend in data[model]:
for node in data[model][backend]:
node_counts[node] = node # hack orderedset
node_counts = list(node_counts)
node_counts = sorted(node_counts)
labels = ["model", "has_ddp_breaks", "backend"]
for node in node_counts:
labels.append(f"{node}-node median")
# labels.append(f"{node}-node min")
# labels.append(f"{node}-node max")
for has_breaks in [False, True]:
for model in data:
for backend in data[model]:
row = {
"model": model,
"has_ddp_breaks": str(has_breaks),
"backend": backend,
}
for node in node_counts:
if node in data[model][backend]:
latency = data[model][backend][node][str(has_breaks)]
else:
latency = 0.
if isinstance(latency, list) and len(latency) == 0:
latency = 0.
node_label_median = f"{node}-node median"
node_label_min = f"{node}-node min"
node_label_max = f"{node}-node max"
latency_list = latency if isinstance(latency, list) else [latency]
row[node_label_median] = median(latency_list)
# row[node_label_min] = min(latency_list)
# row[node_label_max] = max(latency_list)
csv_data.append(row)
csv_writer = csv.DictWriter(sys.stdout, fieldnames=labels)
csv_writer.writeheader()
for row in csv_data:
csv_writer.writerow(row)
def print_results(args, data):
for model in data:
print_model_table(args, model, data[model])
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--job_id", required=True)
parser.add_argument("--results_dir", required=True)
parser.add_argument("--csv_out", action="store_true")
args = parser.parse_args()
data = parse_partial(args)
if args.csv_out:
print_csv(args, data)
else:
print_results(args, data)
if __name__ == "__main__":
main()
|
"""
Run PyTorch nightly benchmarking.
"""
import re
import argparse
import itertools
import json
import math
import os
import yaml
import numpy
from typing import List, Tuple, Dict, Optional, Any
from ..utils import REPO_PATH, add_path, get_output_json, get_default_output_json_path
from . import BM_NAME
with add_path(REPO_PATH):
from torchbenchmark.util.experiment.instantiator import list_models, load_model_isolated, TorchBenchModelConfig, \
list_devices, list_tests
from torchbenchmark.util.experiment.metrics import TorchBenchModelMetrics, get_model_test_metrics
CURRENT_DIR = os.path.dirname(os.path.realpath(__file__))
DEFAULT_DELTA_THRESHOLD = 0.07
DEFAULT_TARGET_SCORE = 1000.0
def generate_model_configs(devices: List[str], tests: List[str], model_names: List[str]) -> List[TorchBenchModelConfig]:
"""Use the default batch size and default mode."""
if not model_names:
model_names = list_models()
cfgs = itertools.product(*[devices, tests, model_names])
result = [TorchBenchModelConfig(
name=model_name,
device=device,
test=test,
batch_size=None,
extra_args=[],
extra_env=None,
) for device, test, model_name in cfgs]
return result
def get_metrics(_config: TorchBenchModelConfig) -> List[str]:
return ["latencies", "cpu_peak_mem", "gpu_peak_mem"]
def compute_score(results, reference_latencies: Dict[str, float]) -> float:
# sanity checks
latency_results = {k: v for k, v in results.items() if k.endswith("_latency")}
test_set = set(latency_results.keys())
reference_set = set(reference_latencies.keys())
test_only_set = test_set.difference(reference_set)
assert not test_only_set, f"Tests {test_only_set} only appears in result json, not in reference yaml."
reference_only_set = reference_set.difference(test_set)
assert not reference_only_set, f"Tests {reference_only_set} only appears in reference yaml, not in result json."
# check that for every test in reference_latencies, we can find the corresponding tests in latency_results
total_score = 0.0
weight = 1.0 / len(reference_latencies)
for key, ref_latency in reference_latencies.items():
test_latency = latency_results[key]
ref_latency = float(ref_latency)
delta = (test_latency - ref_latency) / test_latency
# If less than threshold, treat it as noise
if abs(delta) <= DEFAULT_DELTA_THRESHOLD:
test_latency = ref_latency
total_score += weight * math.log(ref_latency / test_latency)
score = math.exp(total_score) * DEFAULT_TARGET_SCORE
return score
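# Hedged example (not part of the original module): a minimal sanity check of the scoring
# formula above, using invented latencies. One test matches its reference, one is within the
# 7% noise threshold (treated as the reference), and one is 25% slower, which pulls the
# weighted geometric mean below the 1000-point target.
def _example_compute_score():
    reference = {
        "test_eval[a-cuda-eager]_latency": 10.0,
        "test_eval[b-cuda-eager]_latency": 20.0,
        "test_eval[c-cuda-eager]_latency": 40.0,
    }
    measured = {
        "test_eval[a-cuda-eager]_latency": 10.0,  # exact match
        "test_eval[b-cuda-eager]_latency": 21.0,  # +5%, inside DEFAULT_DELTA_THRESHOLD
        "test_eval[c-cuda-eager]_latency": 50.0,  # 25% slower, counted against the score
    }
    # exp((log(1) + log(1) + log(40/50)) / 3) * 1000 ≈ 928.3
    return compute_score(measured, reference)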
def result_to_output_metrics(results: List[Tuple[TorchBenchModelConfig, TorchBenchModelMetrics]]) -> Dict[str, float]:
# metrics name examples:
# test_eval[timm_regnet-cuda-eager]_latency
# test_eval[timm_regnet-cuda-eager]_cmem
# test_eval[timm_regnet-cuda-eager]_gmem
result_metrics = {}
for _config_id, (config, metrics) in enumerate(results):
metrics_base = f"test_{config.test}[{config.name}-{config.device}-eager]"
latency_metric = f"{metrics_base}_latency"
median_latency = numpy.median(metrics.latencies)
assert median_latency, f"Run failed for metric {latency_metric}"
result_metrics[latency_metric] = median_latency
if metrics.cpu_peak_mem:
cpu_peak_mem = f"{metrics_base}_cmem"
result_metrics[cpu_peak_mem] = metrics.cpu_peak_mem
if metrics.gpu_peak_mem:
gpu_peak_mem = f"{metrics_base}_gmem"
result_metrics[gpu_peak_mem] = metrics.gpu_peak_mem
return result_metrics
def validate(candidates: List[str], choices: List[str]) -> List[str]:
"""Validate the candidates provided by the user is valid"""
for candidate in candidates:
assert candidate in choices, f"Specified {candidate}, but not in available list: {choices}."
return candidates
def generate_model_configs_from_yaml(yaml_file: str) -> Tuple[List[TorchBenchModelConfig], Dict[str, float], Any]:
yaml_file_path = os.path.join(CURRENT_DIR, yaml_file)
with open(yaml_file_path, "r") as yf:
config_obj = yaml.safe_load(yf)
devices = config_obj["metadata"]["devices"]
configs = []
reference_latencies = {}
for device in devices:
for c in config_obj[device]:
if not c["stable"]:
continue
config = TorchBenchModelConfig(
name=c["model"],
device=device,
test=c["test"],
batch_size=c["batch_size"] if "batch_size" in c else None,
extra_args=[],
extra_env=None,
)
configs.append(config)
metrics_base = f"test_{config.test}[{config.name}-{config.device}-eager]"
latency_metric_key = f"{metrics_base}_latency"
reference_latencies[latency_metric_key] = c["median_latency"]
return configs, reference_latencies, config_obj
def parse_test_name(test_name: str) -> TorchBenchModelConfig:
regex = "test_(.*)\[(.*)-(.*)-eager\]"
test, model, device = re.match(regex, test_name).groups()
return TorchBenchModelConfig(
name=model,
device=device,
test=test,
batch_size=None,
extra_args=[],
extra_env=None,
)
def generate_model_configs_from_bisect_yaml(bisect_yaml_file: str) -> List[TorchBenchModelConfig]:
def _remove_suffix(test_name: str):
index_last_underscore = test_name.rfind("_")
return test_name[:index_last_underscore]
with open(bisect_yaml_file, "r") as yf:
bisect_obj = yaml.safe_load(yf)
# remove the suffix
bisect_tests = [ _remove_suffix(test_name) for test_name in bisect_obj["details"] ]
bisect_tests = set(bisect_tests)
configs = [ parse_test_name(test_name_str) for test_name_str in sorted(bisect_tests) ]
return configs
def parse_str_to_list(candidates):
if isinstance(candidates, list):
return candidates
candidates = list(map(lambda x: x.strip(), candidates.split(",")))
return candidates
def run_config(config: TorchBenchModelConfig, dryrun: bool=False) -> Optional[TorchBenchModelMetrics]:
"""This function only handles NotImplementedError, all other errors will fail."""
metrics = get_metrics(config)
print(f"Running {config} ...", end='', flush=True)
if dryrun:
print(" [Skip: Dryrun]", flush=True)
return None
# We do not allow RuntimeError in this test
try:
# load the model instance in subprocess
model = load_model_isolated(config)
# get the model test metrics
result: TorchBenchModelMetrics = get_model_test_metrics(model, metrics=metrics)
except NotImplementedError as e:
print(" [NotImplemented]", flush=True)
return None
print(" [Done]", flush=True)
return result
def parse_args(args):
parser = argparse.ArgumentParser()
parser.add_argument("--device", "-d", default="cuda", help="Devices to run, splited by comma.")
parser.add_argument("--test", "-t", default="eval", help="Tests to run, splited by comma.")
parser.add_argument("--model", "-m", default=None, type=str, help="Only run the specifice models, splited by comma.")
parser.add_argument("--config", "-c", default=None, help="YAML config to specify tests to run.")
parser.add_argument("--run-bisect", help="Run with the output of regression detector.")
parser.add_argument("--dryrun", action="store_true", help="Dryrun the command.")
parser.add_argument("--score", default=None, help="Generate score from the past run json only.")
parser.add_argument("--output", default=get_default_output_json_path(BM_NAME), help="Specify the path of the output file")
return parser.parse_args(args)
def run(args: List[str]):
args = parse_args(args)
if args.score:
        assert args.config, "To compute score, you must specify the config YAML using --config."
configs, reference_latencies, config_obj = generate_model_configs_from_yaml(args.config)
with open(args.score, "r") as sp:
run_result = json.load(sp)
input_metrics = run_result["metrics"]
score = compute_score(input_metrics, reference_latencies)
score_version = config_obj["metadata"]["score_version"]
score_name = f"{score_version}_score"
print(f"TorchBench {score_name}: {score}.")
exit(0)
elif args.config:
configs, reference_latencies, config_obj = generate_model_configs_from_yaml(args.config)
elif args.run_bisect:
configs = generate_model_configs_from_bisect_yaml(args.run_bisect)
reference_latencies = None
else:
# If not specified, use the entire model set
if not args.model:
args.model = list_models()
devices = validate(parse_str_to_list(args.device), list_devices())
tests = validate(parse_str_to_list(args.test), list_tests())
models = validate(parse_str_to_list(args.model), list_models())
configs = generate_model_configs(devices, tests, model_names=models)
reference_latencies = None
results = []
try:
for config in configs:
metrics = run_config(config, dryrun=args.dryrun)
if metrics:
results.append([config, metrics])
except KeyboardInterrupt:
print("User keyboard interrupted!")
if not args.dryrun:
metrics = result_to_output_metrics(results)
if reference_latencies:
score = compute_score(metrics, reference_latencies)
score_version = config_obj["metadata"]["score_version"]
score_name = f"{score_version}_score"
metrics[score_name] = score
result = get_output_json(BM_NAME, metrics)
if args.device == 'cuda':
import torch
result["environ"]["device"] = torch.cuda.get_device_name()
with open(args.output, 'w') as f:
json.dump(result, f, indent=4)
|
BM_NAME = "torch-nightly" |
from ..utils import TorchBenchABTestResult, TorchBenchABTestMetric
from . import BM_NAME
DEFAULT_REGRESSION_DELTA_THRESHOLD = 0.07
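# i.e. a metric that moves by more than 7% in either direction relative to control is
# flagged as a regression candidate (torchrec_dlrm metrics are excluded in the loop below).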
def run(control, treatment) -> TorchBenchABTestResult:
control_env = control["environ"]
control_env["git_commit_hash"] = control["environ"]["pytorch_git_version"]
control_metrics = control["metrics"]
treatment_env = treatment["environ"]
treatment_env["git_commit_hash"] = treatment["environ"]["pytorch_git_version"]
treatment_metrics = treatment["metrics"]
details = {}
    for metric_name in control_metrics:
        # Guard against metrics that are missing from the treatment run.
        if metric_name not in treatment_metrics:
            continue
        control_metric = control_metrics[metric_name]
        treatment_metric = treatment_metrics[metric_name]
        delta = (treatment_metric - control_metric) / control_metric
        # Disable torchrec_dlrm for now because bisecting it will require recompiling fbgemm_gpu
        if abs(delta) > DEFAULT_REGRESSION_DELTA_THRESHOLD and "torchrec_dlrm" not in metric_name:
            details[metric_name] = TorchBenchABTestMetric(control=control_metric, treatment=treatment_metric, delta=delta)
    return TorchBenchABTestResult(
        name=BM_NAME,
        control_env=control_env,
        treatment_env=treatment_env,
        details=details,
        control_only_metrics={},
        treatment_only_metrics={},
        bisection="pytorch",
    )
|
from pathlib import Path
from typing import Any, Dict, List, Set, Tuple
from torchbenchmark import load_model_by_name
import torch
from torch import _dynamo as torchdynamo
from torch.optim import Adadelta, Adagrad, Adam, AdamW, Adamax, ASGD, SGD, RAdam, Rprop, RMSprop, NAdam, SparseAdam, LBFGS
import torch.utils.benchmark as benchmark
from userbenchmark.utils import REPO_PATH, add_path, dump_output, get_output_json
import argparse
import gc
import sys
import itertools
import datetime
import time
import yaml
with add_path(REPO_PATH):
from torchbenchmark.util.experiment.instantiator import list_models
BM_NAME: str = 'optim'
continue_on_error: bool = False
run_on_subset: bool = False
run_basic_configs: bool = False
ignore_skips: bool = False
# Models that are unstable in torch-nightly should not run in the optim world either
def get_unstable_models() -> Set[str]:
unstable_models: Set[str]= set()
yaml_file_path = REPO_PATH.joinpath('userbenchmark/torch-nightly/v3-cuda-tests.yaml')
with open(yaml_file_path, "r") as yf:
config_obj = yaml.safe_load(yf)
for d in config_obj['cuda']:
if not d['stable']:
unstable_models.add(d['model'])
return unstable_models
unstable_models: Set[str] = get_unstable_models()
MODEL_NAMES: List[str] = list_models()
SUBSET_OF_MODEL_NAMES: List[str] = [
'BERT_pytorch', 'DALLE2_pytorch', 'hf_GPT2_large', 'hf_T5_large', 'resnet50', 'timm_vision_transformer', 'yolov3'
]
# PT2 compilation can take up to 4 minutes for 1000 parameters even for foreach implementations,
# and erroring has not been very diverse across models, so we pick one small model and one
# larger model to ascertain PT2 performance over time. We skip everything else.
# We _are_ working on minimizing compilation for foreach implementations.
#
# PT2 dynamo tracing for the for-loop implementation takes over 30s.
# This is known + NOT going to be improved anytime soon, see
# https://github.com/pytorch/torchdynamo/issues/1803#issuecomment-1336688894
MODELS_TO_RUN_ON_PT2: List[str] = ['resnet18', 'timm_vision_transformer_large']
# NOTE: While it is possible to run these benchmarks on CPU, we skip running on CPU in CI because CPU stats can be
# unstable and we had stopped reporting them. You'll still be able to use this script to run CPU though, as it may
# be useful as a more local comparison point for implementations like forloop.
DEVICES: List[str] = ['cuda', 'cpu']
OPTIM_NAMES = [o.__name__ for o in [Adadelta, Adagrad, Adam, AdamW, Adamax, ASGD, SGD, RAdam, Rprop, RMSprop, NAdam, SparseAdam]]
FUNC_STRS = ['pt2_' , '']
OPTIMIZERS = [
(Adadelta, {}),
(Adadelta, {'maximize': True}),
(Adadelta, {'foreach': False}),
(Adadelta, {'differentiable': True}),
(Adadelta, {'foreach': True}),
(Adagrad, {}),
(Adagrad, {'maximize': True}),
(Adagrad, {'foreach': False}),
(Adagrad, {'differentiable': True}),
(Adagrad, {'foreach': True,}),
(Adam, {}),
(Adam, {'amsgrad': True, 'maximize': True}),
(Adam, {'foreach': False}),
(Adam, {'differentiable': True}),
(Adam, {'foreach': True}),
(Adam, {'foreach': True, 'maximize': True, 'capturable': True}),
(Adam, {'foreach': True, 'maximize': True, 'capturable': True, 'amsgrad': True}),
(Adam, {'fused': True}),
(Adam, {'fused': True, 'amsgrad': True, 'maximize': True}),
(Adam, {'fused': True, 'capturable': True}),
(Adam, {'fused': True, 'capturable': True, 'amsgrad': True}),
(AdamW, {}),
(AdamW, {'amsgrad': True, 'maximize': True}),
(AdamW, {'foreach': False}),
(AdamW, {'differentiable': True}),
(AdamW, {'foreach': True}),
(AdamW, {'foreach': True, 'maximize': True, 'capturable': True}),
(AdamW, {'foreach': True, 'maximize': True, 'capturable': True, 'amsgrad': True}),
(AdamW, {'fused': True}),
(AdamW, {'fused': True, 'amsgrad': True, 'maximize': True}),
(AdamW, {'fused': True, 'capturable': True}),
(AdamW, {'fused': True, 'capturable': True, 'amsgrad': True}),
(Adamax, {}),
(Adamax, {'maximize': True}),
(Adamax, {'foreach': False}),
(Adamax, {'differentiable': True}),
(Adamax, {'foreach': True,}),
(ASGD, {}),
(ASGD, {'maximize': True}),
(ASGD, {'foreach': False}),
(ASGD, {'differentiable': True}),
(ASGD, {'foreach': True}),
(SGD, {}),
(SGD, {'maximize': True}),
(SGD, {'foreach': False}),
(SGD, {'differentiable': True}),
(SGD, {'foreach': True,}),
(SGD, {'foreach': True, 'momentum': 0.9, 'nesterov': True}),
(SGD, {'foreach': True, 'momentum': 0.9, }),
(RAdam, {}),
(RAdam, {'foreach': False}),
(RAdam, {'differentiable': True}),
(RAdam, {'foreach': True,}),
(Rprop, {}),
(Rprop, {'maximize': True}),
(Rprop, {'foreach': False}),
(Rprop, {'differentiable': True}),
(Rprop, {'foreach': True}),
(RMSprop, {}),
(RMSprop, {'maximize': True}),
(RMSprop, {'foreach': False}),
(RMSprop, {'differentiable': True}),
(RMSprop, {'foreach': True}),
(NAdam, {}),
(NAdam, {'foreach': False}),
(NAdam, {'differentiable': True}),
(NAdam, {'foreach': True}),
(SparseAdam, {}),
# LBFGS requires a closure
# (LBFGS, {}),
]
DENSE_MODELS = [
'BERT_pytorch',
'Background_Matting',
'DALLE2_pytorch',
'LearningToPaint',
'Super_SloMo',
'alexnet',
'basic_gnn_edgecnn',
'basic_gnn_gcn',
'basic_gnn_gin',
'basic_gnn_sage',
'cm3leon_generate',
'dcgan',
'demucs',
'densenet121',
'detectron2_fasterrcnn_r_101_c4',
'detectron2_fasterrcnn_r_101_dc5',
'detectron2_fasterrcnn_r_101_fpn',
'detectron2_fasterrcnn_r_50_c4',
'detectron2_fasterrcnn_r_50_dc5',
'detectron2_fasterrcnn_r_50_fpn',
'detectron2_maskrcnn',
'detectron2_maskrcnn_r_101_c4',
'detectron2_maskrcnn_r_101_fpn',
'detectron2_maskrcnn_r_50_c4',
'detectron2_maskrcnn_r_50_fpn',
'dlrm',
'doctr_det_predictor',
'doctr_reco_predictor',
'drq',
'fambench_xlmr',
'fastNLP_Bert',
'functorch_dp_cifar10',
'functorch_maml_omniglot',
'gat',
'gcn',
'hf_Albert',
'hf_Bart',
'hf_Bert',
'hf_Bert_large',
'hf_BigBird',
'hf_DistilBert',
'hf_GPT2',
'hf_GPT2_large',
'hf_Longformer',
'hf_Reformer',
'hf_T5',
'hf_T5_base',
'hf_T5_generate',
'hf_T5_large',
'hf_Whisper',
'lennard_jones',
'llama',
'llama_v2_7b_16h',
'maml',
'maml_omniglot',
'mnasnet1_0',
'mobilenet_v2',
'mobilenet_v2_quantized_qat',
'mobilenet_v3_large',
'moco',
'nanogpt_generate',
'nvidia_deeprecommender',
'opacus_cifar10',
'phlippe_densenet',
'phlippe_resnet',
'pytorch_CycleGAN_and_pix2pix',
'pytorch_stargan',
'pytorch_unet',
'resnet152',
'resnet18',
'resnet50',
'resnet50_quantized_qat',
'resnext50_32x4d',
'sage',
'sam',
'shufflenet_v2_x1_0',
'simple_gpt',
'soft_actor_critic',
'speech_transformer',
'squeezenet1_1',
'stable_diffusion',
'tacotron2',
'timm_efficientdet',
'timm_efficientnet',
'timm_nfnet',
'timm_regnet',
'timm_resnest',
'timm_vision_transformer',
'timm_vision_transformer_large',
'timm_vovnet',
'torchrec_dlrm',
'tts_angular',
'vgg16',
'vision_maskrcnn',
'yolov3'
]
# Skips! Exclusions are represented by a dictionary of incompatible configs, where
# optim => optimizer name
# model => model name
# func_str => func string (e.g., pt2_)
# device => device name
# defaults => list of flag descriptions (strings) to exclude, e.g. no_foreach
# if empty list, will exclude all configurations
# Exclusions are general and will try to match on everything. For an exclusion
# {'optim': 'SparseAdam', 'model': 'BERT_pytorch'}, any configuration with
# SparseAdam on BERT_pytorch will be skipped.
EXCLUSIONS: List[Dict[str, Any]] = [
# Skip models deemed unstable by torch-nightly
{'model': m} for m in unstable_models
] + [
# 16h currently OOMs, but once it supports train, we should remove this line
# See tracker https://github.com/pytorch/benchmark/issues/1793
{'model': 'llama_v2_7b_16h'}
] + [
# Model needs to be run via dynamo torchbench and be provided distributed parameters
{'model': 'simple_gpt'}
] + [
# SparseAdam does not support dense gradients
{'optim': 'SparseAdam', 'model': m} for m in DENSE_MODELS
] + [
    # DALL-E 2, timm_efficientdet, and tacotron2 are not supported on CPU
{'model': 'DALLE2_pytorch', 'device': 'cpu'},
{'model': 'tacotron2', 'device': 'cpu'},
{'model': 'timm_efficientdet', 'device': 'cpu'},
# FCOS train is not supported by upstream detectron2.
# See GH issue: https://github.com/facebookresearch/detectron2/issues/4369.
{'model': 'detectron2_fcos_r_50_fpn'},
# moco uses DDP and DistributedDataParallel/allgather requires cuda
{'model': 'moco', 'device': 'cpu'},
# pyhpc_equation_of_state and pyhpc_isoneutral_mixing have no parameters
{'model': 'pyhpc_equation_of_state'},
{'model': 'pyhpc_isoneutral_mixing'},
{'model': 'pyhpc_turbulent_kinetic_energy'},
# fused/capturable requires params to be floats on CUDA
{'defaults': ['fused'], 'device': 'cpu'},
{'defaults': ['capturable'], 'device': 'cpu'},
] + [
# PT2 compilation takes too long, so we only enable PT2 on a tiny subset of models.
# See note above on MODELS_TO_RUN_ON_PT2.
{'model': m, 'device': d, 'func_str': 'pt2_', 'defaults': []}
for d in DEVICES
for m in set(MODEL_NAMES) - set(MODELS_TO_RUN_ON_PT2)
] + [
{'func_str': 'pt2_', 'defaults': [df]}
for df in ['maximize', 'differentiable', 'capturable', 'amsgrad']
] + [
# torch.compile()'d optimizer.step() has too many arguments in C++
# See GH issue: https://github.com/pytorch/pytorch/issues/97361
{'model': m, 'device': 'cpu', 'func_str': 'pt2_', 'defaults': []} for m in [
'BERT_pytorch', 'Background_Matting', 'Super_SloMo',
'densenet121', 'detectron2_fasterrcnn_r_101_c4', 'detectron2_fasterrcnn_r_101_dc5',
'detectron2_fasterrcnn_r_101_fpn', 'detectron2_fasterrcnn_r_50_fpn', 'detectron2_maskrcnn',
'detectron2_maskrcnn_r_101_c4', 'detectron2_maskrcnn_r_101_fpn',
'detectron2_maskrcnn_r_50_fpn', 'doctr_det_predictor', 'doctr_reco_predictor', 'fambench_xlmr',
'fastNLP_Bert', 'hf_Bart', 'hf_Bert', 'hf_Bert_large', 'hf_BigBird', 'hf_DistilBert', 'hf_GPT2',
'hf_GPT2_large', 'hf_Longformer', 'hf_Reformer', 'hf_T5', 'hf_T5_base', 'hf_T5_large', 'llama',
'mnasnet1_0', 'mobilenet_v2', 'mobilenet_v2_quantized_qat', 'mobilenet_v3_large',
'phlippe_densenet', 'pytorch_unet', 'resnet152', 'resnet50', 'resnet50_quantized_qat', 'resnext50_32x4d',
'shufflenet_v2_x1_0', 'timm_efficientnet', 'timm_nfnet', 'timm_regnet',
'timm_vision_transformer', 'yolov3']
] + [
# torch.compile()'d optimizer.step() has too many arguments in the generated
# C++ kernel for both CUDA and CPU for single tensor implementations.
# See GH issue: https://github.com/pytorch/pytorch/issues/97361
{'model': m, 'func_str': 'pt2_', 'defaults': [df]} for m in [
'DALLE2_pytorch', 'fambench_xlmr'] for df in ['no_foreach', 'differentiable']
] + [
# torch.compile()'d optimizer.step() has too many arguments in the generated
# C++ kernel even when params are on CUDA for single tensor implementations on NAdam.
# See GH issue: https://github.com/pytorch/pytorch/issues/97361
{'model': m, 'device': 'cuda', 'func_str': 'pt2_', 'defaults': [df], 'optim': 'NAdam'} for m in [
'densenet121', 'doctr_reco_predictor', 'fambench_xlmr', 'hf_Bart', 'hf_Bert_large', 'hf_GPT2_large','hf_Longformer',
'hf_T5_base', 'hf_T5_large', 'moco', 'resnet152', 'timm_vision_transformer', 'timm_vision_transformer_large', 'yolov3'
] for df in ['no_foreach', 'differentiable']
] + [
# torch.compile()'d optimizer.step() has too many arguments in the generated
# C++ kernel even when params are on CUDA for single tensor implementations on ASGD.
# See GH issue: https://github.com/pytorch/pytorch/issues/97361
{'model': m, 'device': 'cuda', 'func_str': 'pt2_', 'defaults': [df], 'optim': 'ASGD'} for m in [
'densenet121', 'fambench_xlmr', 'hf_Bart', 'hf_Bert_large', 'hf_GPT2_large', 'hf_Longformer',
'hf_T5_base', 'hf_T5_large', 'moco'
] for df in ['no_foreach', 'differentiable']
]
# Returns clones of params and not a generator.
def _get_model_params(m) -> List[torch.nn.Parameter]:
model, _ = m.get_module()
params_clone = []
for p in model.parameters():
params_clone.append(p.clone().detach())
return params_clone
lil_cache: Tuple[str, str, List[torch.nn.Parameter]] = ('', '', [])
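# (model_name, device, params) -- a single-entry cache so that repeated configs for the same
# model/device pair reuse the cloned params instead of re-instantiating the model.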
# Returns clones of params given a model name
def get_model_params(modelName: str, device: str) -> List[torch.nn.Parameter]:
global lil_cache
cached_mn, cached_d, cached_params = lil_cache
if modelName == cached_mn and device == cached_d:
return cached_params
# free the old params before initializing a model to conserve memory
lil_cache = ('', '', [])
torch.cuda.empty_cache()
Model = load_model_by_name(modelName)
    # Some (usually quantized) models do not support eval on CPU, but since we only care
    # about the params and the randomly generated grads, eval vs train doesn't matter.
    try:
        params = _get_model_params(Model(device=device, test='train', batch_size=1))
    except Exception:
        try:
            params = _get_model_params(Model(device=device, test='eval', batch_size=1))
        except Exception:
            try:
                params = _get_model_params(Model(device=device, test='train'))
            except Exception:
                params = _get_model_params(Model(device=device, test='eval'))
    finally:
        del Model
lil_cache = (modelName, device, params)
return params
# This fakes a model forward & backward--we are not concerned about
# accuracy here, but about the perf of optim on particular shapes and
# dtypes of commonly used models!
def generate_random_gradients(parameters):
for p in parameters:
p.grad = torch.rand_like(p)
def optimizer_step(optimizer):
optimizer.step()
def pt2_optimizer_step(optimizer):
@torchdynamo.optimize('inductor')
def f():
optimizer.step()
f()
def defaults_to_str(defaults: Dict[str, Any]) -> str:
# We define lr for SGD, but we don't currently vary lr so it is effectively the default.
defaults.pop('lr', None)
if len(defaults) == 0:
return 'default'
def entry_to_str(k, v) -> str:
if isinstance(v, bool):
return 'no_' + k if not v else k
return f'{k}={v}'
return ', '.join([entry_to_str(k, v) for k, v in defaults.items()])
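# Illustrative examples of the serialization above (values chosen arbitrarily):
#   defaults_to_str({}) == 'default'
#   defaults_to_str({'foreach': True, 'maximize': False}) == 'foreach, no_maximize'
#   defaults_to_str({'momentum': 0.9}) == 'momentum=0.9'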
def is_excluded(mn: str, d: str, on: str, func_str: str, defaults: Dict[str, Any]) -> bool:
return any([('model' not in e or e['model'] == mn) and
('device' not in e or e['device'] == d) and
('optim' not in e or e['optim'] == on) and
('func_str' not in e or e['func_str'] == func_str) and
('defaults' not in e or all(f in defaults_to_str(defaults) for f in e['defaults'])) for e in EXCLUSIONS])
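# A small illustrative check of the exclusion matching above (not called by the benchmark):
# an exclusion entry matches when every field it specifies agrees with the candidate
# configuration, so SparseAdam on a dense model such as resnet50 is skipped regardless of
# device or func_str.
def _example_is_excluded_sketch() -> bool:
    # Expected to be True via the {'optim': 'SparseAdam', 'model': ...} entries built
    # from DENSE_MODELS above.
    return is_excluded('resnet50', 'cuda', 'SparseAdam', '', {})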
def run_model(modelName, device, Optim, defaults, maybe_pt2_):
try:
params = get_model_params(modelName, device)
print(datetime.datetime.now(), 'getting params: ', params[0].size(), params[0].dtype, len(params), params[0].device)
if Optim.__name__ == 'SGD':
defaults['lr'] = 1e-2
optim = Optim(params, **defaults)
generate_random_gradients(params)
pt2_description = '' if maybe_pt2_ == '' else '(pt2) '
print(f'{datetime.datetime.now()} python -m userbenchmark.optim.run -m {modelName} -d {device}' +
f' -o {Optim.__name__} --df "{defaults_to_str(defaults)}" -f {maybe_pt2_}')
compile_r = None
sub_label = f'{modelName}, {optim.__class__.__name__}, {device}'
description = pt2_description + defaults_to_str(defaults)
        # Estimate compile time by running 5 times and computing
        #   first entry - avg(entries 3 through 5),
        # skipping the second entry because the first cache hit has high variance.
if maybe_pt2_ != '':
times = []
if device == "cuda":
torch.cuda.reset_peak_memory_stats()
torch.cuda.empty_cache()
for _ in range(5):
t0 = time.perf_counter()
pt2_optimizer_step(optim)
t1 = time.perf_counter()
times.append(t1 - t0)
compile_r = (f'compile_time, {sub_label}, {description}', times[0] - sum(times[2:5]) / 3)
r = benchmark.Timer(
stmt=f'{maybe_pt2_}optimizer_step(optim)',
globals={'optim': optim, 'optimizer_step': optimizer_step, 'pt2_optimizer_step': pt2_optimizer_step},
sub_label=sub_label,
description=description,
).blocked_autorange()
return r, compile_r
except Exception as e:
if not continue_on_error:
raise e
print(e)
with open('errors.txt', 'a') as f:
            f.write(f'{datetime.datetime.now()} python -m userbenchmark.optim.run -m {modelName} -d {device}' +
                    f' -o {Optim.__name__} --df "{defaults_to_str(defaults)}" -f {maybe_pt2_}: {str(e)}\n')
return None, None
def run_benchmarks(optims: List[str], func_strs: List[str], models: List[str], devices: List[str],
flags: List[str]) -> Tuple[List[torch.utils.benchmark.utils.common.Measurement], Dict[str, float]]:
results = []
compile_metrics = {}
optim_cfgs = [(O, defaults) for (O, defaults) in OPTIMIZERS if O.__name__ in optims and all(f in defaults_to_str(defaults) for f in flags)]
if run_on_subset:
models = [m for m in SUBSET_OF_MODEL_NAMES if m in models]
if run_on_subset or run_basic_configs:
optim_cfgs = [(O, defaults) for (O, defaults) in optim_cfgs if (all([x in ['foreach', 'fused', 'lr'] for x in defaults]))]
for mn, d, (O, defaults), func_str in itertools.product(models, devices, optim_cfgs, func_strs):
if (not ignore_skips and is_excluded(mn, d, O.__name__, func_str, defaults)):
continue
r, compile_r = run_model(mn, d, O, defaults, func_str)
if r is not None:
results.append(r)
if compile_r is not None:
metric_name, compile_time = compile_r
compile_metrics[metric_name] = compile_time
return results, compile_metrics
def parse_args(args: List[str]):
parser = argparse.ArgumentParser()
parser.add_argument(
'--optims', '-o',
nargs='*',
default=OPTIM_NAMES,
choices=OPTIM_NAMES,
help='List of optimizers to run tests on')
parser.add_argument(
'--funcs', '-f',
nargs='*',
default=FUNC_STRS,
choices=FUNC_STRS,
help='What optimizer.step() function variations to benchmark. NOTE: there is an underscore ' +
'for "pt2_"!'
)
parser.add_argument(
'--models', '-m',
nargs='*',
default=MODEL_NAMES,
choices=MODEL_NAMES,
help='List of models to run tests on')
parser.add_argument(
'--subset', '-s',
action='store_true',
help='Run benchmarks on a standard subset of models. If the --models (-m) is set, we will ' +
'take the intersection of the requested models and the defined subset. For example, ' +
'`...-s -m llama yolov3` will ONLY run yolov3.'
)
parser.add_argument(
'--basic', '-b',
action='store_true',
        help='Run benchmarks on a standard subset of optimizer configs. If --default-flags (--df) ' +
             'is set, we will take the intersection of the requested configs and the defined subset. ' +
             'For example, `...-b --df maximize fused` will ONLY run fused.'
)
parser.add_argument(
'--devices', '-d',
nargs='*',
default=DEVICES,
choices=DEVICES,
help='List of devices to run tests on')
parser.add_argument(
'--default-flags', '--df',
nargs='*',
default=[],
help='List of flag descriptions to run tests on. We serialize the configs to a string (see ' +
'defaults_to_str()) and test for inclusion of the flag description in the string. ' +
'For example, "foreach" will enable all default configs with "foreach", including ' +
             'those with other flags and also "no_foreach". Effectively, passing in more flags ' +
'will further limit the default configs run.\nValid flags include: foreach, no_foreach, ' +
'fused, maximize, capturable, differentiable, default, amsgrad, momentum, nesterov' +
' and more!\n'
)
parser.add_argument(
'--continue-on-error', '-c',
action='store_true',
help='Continue running benchmarks on failure, errors will be written to errors.txt'
)
parser.add_argument(
'--output-dir', '--od', default=None, type=str,
        help='Name of the directory in which to dump the metrics json, e.g., "./.userbenchmark/optim/tmp". ' +
             'If None, we will dump the metrics json to "REPO_ROOT/.userbenchmark/optim".'
)
parser.add_argument(
'--ignore-skips', '-i', action='store_true',
        help='Runs ALL benchmarks, ignoring any skips. This allows for easy testing of currently skipped ' +
'benchmarks once one believes they should be fixed. Beware though! You may run into errors ' +
'that were previously hidden by the exclusions.'
)
args = parser.parse_args(args)
return args
# convert results into a JSON of description to mean time in seconds
def get_metrics(results: List[torch.utils.benchmark.utils.common.Measurement]) -> Dict[str, float]:
metrics = {}
for r in results:
ts: torch.utils.benchmark.utils.common.TaskSpec = r.task_spec
metrics[f'{ts.sub_label}, {ts.description}'] = r.mean
return metrics
def run(args: List[str]):
args = parse_args(args)
global continue_on_error, run_on_subset, run_basic_configs, ignore_skips
continue_on_error = args.continue_on_error
run_on_subset = args.subset
run_basic_configs = args.basic
ignore_skips = args.ignore_skips
target_dir = Path(args.output_dir) if args.output_dir is not None else None
if target_dir is not None:
target_dir.mkdir(exist_ok=True, parents=True)
results, compile_metrics = run_benchmarks(args.optims, args.funcs, args.models, args.devices, args.default_flags)
metrics: Dict[str, float] = get_metrics(results)
dump_output(BM_NAME, get_output_json(BM_NAME, {**metrics, **compile_metrics}), target_dir=target_dir)
print("----------------- RUNTIME RESULTS -----------------")
compare = benchmark.Compare(results)
compare.trim_significant_figures()
compare.colorize(rowwise=True)
compare.print()
print("----------------- COMPILE TIME RESULTS -----------------")
print(compile_metrics)
if __name__ == '__main__':
run(sys.argv[1:])
|
#!/usr/bin/env python3
'''
This script is intended for the CI context only! The whole purpose behind this script is to enable
process/context/memory isolation across different models and devices. The OG script (which this
script calls) is the userbenchmark/optim/run.py script, which is better documented and what is
intended to be used locally. The current script is simply a wrapper that dispatches serial
subprocesses to run the OG script and handles the metrics.json merging afterwards.
WARNING! Running this script will wipe clean the OUTPUT_DIR, .userbenchmark/optim/tmp!
'''
from pathlib import Path
import shutil
import subprocess
from typing import Any, List, Dict, Tuple
import argparse
import sys
import itertools
import json
from userbenchmark.utils import REPO_PATH, add_path, dump_output, get_output_json
with add_path(REPO_PATH):
from torchbenchmark.util.experiment.instantiator import list_models
BM_NAME: str = 'optim'
MODEL_NAMES: List[str] = list_models()
# NOTE: While it is possible to run these benchmarks on CPU, we skip running on CPU in CI because CPU stats can be
# unstable and we had stopped reporting them. You'll still be able to use the run.py script to run CPU though, as
# it may be useful as a more local comparison point for implementations like forloop.
DEVICES: List[str] = ['cuda']
OUTPUT_DIR: Path = REPO_PATH.joinpath('.userbenchmark/optim/tmp')
# Capture the specified models and devices we want to run to avoid redundant work,
# but send the rest of the user arguments to the underlying optim benchmark runner.
def parse_args() -> Tuple[Dict[Any, Any], Dict[Any, Any]]:
parser = argparse.ArgumentParser(description='Run optim benchmarks per model and device')
parser.add_argument(
'--models', '-m',
nargs='*',
default=MODEL_NAMES,
choices=MODEL_NAMES,
help='List of models to run tests on')
parser.add_argument(
'--devices', '-d',
nargs='*',
default=DEVICES,
choices=DEVICES,
help='List of devices to run tests on')
return parser.parse_known_args()
def main() -> None:
args, optim_bm_args = parse_args()
assert not OUTPUT_DIR.exists() or not any(OUTPUT_DIR.glob("*")), \
f'{OUTPUT_DIR} must be empty or nonexistent. Its contents will be wiped by this script.'
    # Run benchmarks in subprocesses to isolate contexts and memory
for m, d in itertools.product(args.models, args.devices):
command = [sys.executable, '-m', 'userbenchmark.optim.run', '--continue-on-error',
'--output-dir', OUTPUT_DIR, '--models', m, '--devices', d] + optim_bm_args
        # subprocess.run blocks until the subprocess finishes, so models run serially;
        # our capacity only safely allows 1 model at a time. We deliberately do not pass
        # check=True so that one failing model does not abort the remaining benchmarks.
        completed_process = subprocess.run(command)
# While it is certainly unexpected for a subprocess to fail, we don't want to halt entirely
# as there can be valuable benchmarks to gather from the other subprocesses.
if completed_process.returncode != 0:
print(f'OH NO, the subprocess for model {m} and device {d} exited with {completed_process.returncode}!')
# Nightly CI expects ONE metrics json in .userbenchmark/optim, but we may have multiple, so
# consolidate them into one file.
aggregated_metrics = {}
for file_path in Path(OUTPUT_DIR).glob("metrics*.json"):
with open(file_path, 'r') as f:
json_data = json.load(f)
aggregated_metrics.update(json_data['metrics'])
dump_output(BM_NAME, get_output_json(BM_NAME, aggregated_metrics))
# Gotta delete the tmp folder--otherwise the nightly CI will think there are multiple metrics jsons!
shutil.rmtree(OUTPUT_DIR)
if __name__ == '__main__':
main()
|
from typing import Optional
from ..utils import TorchBenchABTestResult, TorchBenchABTestMetric
DEFAULT_REGRESSION_DELTA_THRESHOLD = 0.3
COMPILE_TIME_REGRESSION_DELTA_THRESHOLD = 2.0
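# In other words: a runtime metric has to change by more than 30% of its control value,
# and a compile-time metric by more than 200%, before it is reported below (both slowdowns
# and speedups can trigger).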
def run(control, treatment) -> Optional[TorchBenchABTestResult]:
control_env = control["environ"]
treatment_env = treatment["environ"]
control_metrics = control["metrics"]
treatment_metrics = treatment["metrics"]
details = {}
for control_metric_name, control_metric in control_metrics.items():
if control_metric_name in treatment_metrics:
regression_threshold = COMPILE_TIME_REGRESSION_DELTA_THRESHOLD if "compile_time" in control_metric_name else DEFAULT_REGRESSION_DELTA_THRESHOLD
treatment_metric = treatment_metrics[control_metric_name]
delta = (treatment_metric - control_metric) / control_metric
# Trigger on BOTH slowdowns and speedups
if abs(delta) > regression_threshold:
details[control_metric_name] = TorchBenchABTestMetric(control=control_metric, treatment=treatment_metric, delta=delta)
# control_only_metrics/treatment_only_metrics will be filled in later by the main regression detector
return TorchBenchABTestResult(name=control["name"],
control_env=control_env,
treatment_env=treatment_env,
details=details)
|
import argparse
from datetime import datetime
import git
import numpy as np
import os
import json
import subprocess
import sys
import time
import shutil
from pathlib import Path
from ..utils import dump_output, get_output_dir, get_output_json, REPO_PATH
from typing import List
BM_NAME = "instruction-count"
RESULT_JSON = "ubenchmark_results.json"
PYTORCH_SRC_URL = "https://github.com/pytorch/pytorch.git"
def translate_result_metrics(json_path: Path):
metrics = {}
with open(json_path, "r") as j:
raw_result = json.load(j)
raw_values = raw_result["values"]
for key in raw_values:
times = raw_values[key]["times"]
counts = raw_values[key]["counts"]
metrics[f"{key}_count_min"] = min(counts)
metrics[f"{key}_count_max"] = max(counts)
metrics[f"{key}_count_p25"] = int(np.percentile(counts, 25))
metrics[f"{key}_count_median"] = int(np.median(counts))
metrics[f"{key}_count_p75"] = int(np.percentile(counts, 75))
metrics[f"{key}_t_min"] = min(times)
metrics[f"{key}_t_max"] = max(times)
metrics[f"{key}_t_mean"] = float(np.mean(times))
metrics[f"{key}_t_p01"] = float(np.percentile(times, 1))
metrics[f"{key}_t_p25"] = float(np.percentile(times, 25))
metrics[f"{key}_t_median"] = float(np.median(times))
metrics[f"{key}_t_75"] = float(np.percentile(times, 75))
metrics[f"{key}_t_99"] = float(np.percentile(times, 99))
metrics[f"{key}_t_stddev"] = float(np.std(times))
return metrics
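# Hedged sketch of the raw result layout the translation above expects (key names are
# illustrative; only the "values" -> {"times": [...], "counts": [...]} nesting is relied upon):
#
#   {
#     "values": {
#       "pytorch: add (Python)": {"times": [1.2e-06, ...], "counts": [104511, ...]},
#       ...
#     }
#   }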
def get_timestamp():
return datetime.fromtimestamp(time.time()).strftime("%Y%m%d%H%M%S")
def get_work_dir(output_dir):
work_dir = output_dir.joinpath(f"run-{get_timestamp()}")
work_dir.mkdir(exist_ok=True, parents=True)
return work_dir
def get_run_env(env):
env["BENCHMARK_USE_DEV_SHM"] = "1"
return env
def checkout_pytorch_repo(pytorch_repo: str, pytorch_branch: str):
git.Repo.clone_from(PYTORCH_SRC_URL, pytorch_repo, depth=1, branch=pytorch_branch)
def cleanup_pytorch_repo(pytorch_repo: str):
pytorch_repo_path = Path(pytorch_repo)
if pytorch_repo_path.exists():
shutil.rmtree(pytorch_repo_path)
def run_benchmark(pytorch_src_path: Path, output_json_path: Path):
benchmark_path = pytorch_src_path.joinpath("benchmarks", "instruction_counts")
runtime_env = get_run_env(os.environ.copy())
command = [sys.executable, "main.py", "--mode", "ci", "--destination", str(output_json_path.resolve())]
subprocess.check_call(command, cwd=benchmark_path, env=runtime_env)
def parse_args(args: List[str], work_dir: Path):
parser = argparse.ArgumentParser()
parser.add_argument("--pytorch-src", default=str(work_dir.resolve()),
help="Location of PyTorch source repo")
parser.add_argument("--pytorch-branch", default="main",
help="The branch of pytorch to check out")
parser.add_argument("--analyze-json", type=str, default=None, help="Only analyze an existing result")
args = parser.parse_args(args)
return args
def run(args: List[str]):
output_dir = get_output_dir(BM_NAME)
work_dir = get_work_dir(output_dir)
args = parse_args(args, work_dir)
if args.analyze_json:
json_path = Path(args.analyze_json)
metrics = translate_result_metrics(json_path)
result = get_output_json(BM_NAME, metrics)
dump_output(BM_NAME, result)
return
cleanup_pytorch_repo(args.pytorch_src)
checkout_pytorch_repo(args.pytorch_src, args.pytorch_branch)
pytorch_src_path = Path(args.pytorch_src)
output_json_path = work_dir.joinpath(RESULT_JSON)
run_benchmark(pytorch_src_path, output_json_path)
metrics = translate_result_metrics(output_json_path)
result = get_output_json(BM_NAME, metrics)
dump_output(BM_NAME, result)
cleanup_pytorch_repo(args.pytorch_src)
|
import logging
import warnings
from .torchbench import setup_torchbench_cwd, TorchBenchmarkRunner
try:
from .common import main
except ImportError:
from common import main
from typing import List
def run(args: List[str]):
original_dir = setup_torchbench_cwd()
logging.basicConfig(level=logging.WARNING)
warnings.filterwarnings("ignore")
main(TorchBenchmarkRunner(), original_dir, args=args)
|
#!/usr/bin/env python3
import gc
import importlib
import logging
import os
import re
import sys
import warnings
from os.path import abspath, exists
import torch
from .common import BenchmarkRunner, main
from torch._dynamo.testing import collect_results, reduce_to_scalar_loss
from torch._dynamo.utils import clone_inputs
# We are primarily interested in tf32 datatype
torch.backends.cuda.matmul.allow_tf32 = True
def setup_torchbench_cwd():
original_dir = abspath(os.getcwd())
os.environ["KALDI_ROOT"] = "/tmp" # avoids some spam
for torchbench_dir in (
"./torchbenchmark",
"../torchbenchmark",
"../torchbench",
"../benchmark",
"../../torchbenchmark",
"../../torchbench",
"../../benchmark",
"../../../torchbench",
"../../../benchmark",
):
if exists(torchbench_dir):
break
if exists(torchbench_dir):
torchbench_dir = abspath(torchbench_dir)
os.chdir(torchbench_dir)
sys.path.append(torchbench_dir)
return original_dir
# Some models have large datasets that don't fit in memory. Lower the batch
# size to test accuracy.
USE_SMALL_BATCH_SIZE = {
"demucs": 4,
"dlrm": 1024,
"densenet121": 4,
"hf_Reformer": 4,
"hf_T5_base": 4,
"timm_efficientdet": 1,
"llama_v2_7b_16h": 1,
}
DETECTRON2_MODELS = {
"detectron2_fasterrcnn_r_101_c4",
"detectron2_fasterrcnn_r_101_dc5",
"detectron2_fasterrcnn_r_101_fpn",
"detectron2_fasterrcnn_r_50_c4",
"detectron2_fasterrcnn_r_50_dc5",
"detectron2_fasterrcnn_r_50_fpn",
"detectron2_maskrcnn_r_101_c4",
"detectron2_maskrcnn_r_101_fpn",
"detectron2_maskrcnn_r_50_fpn",
}
SKIP = {
# https://github.com/pytorch/torchdynamo/issues/101
"detectron2_maskrcnn",
# https://github.com/pytorch/torchdynamo/issues/145
"fambench_xlmr",
# TIMEOUT, https://github.com/pytorch/pytorch/issues/98467
"tacotron2",
"hf_Bert", # Error: RelaxedUnspecConstraint(L['input_ids'].size()[0]) - inferred constant (4)
"hf_Bert_large", # Error: RelaxedUnspecConstraint(L['input_ids'].size()[0]) - inferred constant (4)
# takes too long, extreme slowdown (< .001)
"maml",
}
SKIP_FOR_CPU = {
"hf_T5_generate", # OOMs
"cm3leon_generate", # model is CUDA only
"nanogpt_generate", # timeout
"sam", # timeout
"llama_v2_7b_16h", # model is CUDA only
"stable_diffusion", # flaky
"torchrec_dlrm", # requires FBGEMM, CUDA only
}
SKIP_FOR_CUDA = {
"gat", # only works on CPU
"gcn", # only works on CPU
"sage", # only works on CPU
}
# Additional models that are skipped in training
SKIP_TRAIN = {
# not designed for training
"pyhpc_equation_of_state",
"pyhpc_isoneutral_mixing",
"pyhpc_turbulent_kinetic_energy",
"maml",
"llama",
"llama_v2_7b_16h",
}
SKIP_TRAIN.update(DETECTRON2_MODELS)
# These models support only train mode. So accuracy checking can't be done in
# eval mode.
ONLY_TRAINING_MODE = {
"tts_angular",
"tacotron2",
"demucs",
"hf_Reformer",
"pytorch_struct",
"yolov3",
}
ONLY_TRAINING_MODE.update(DETECTRON2_MODELS)
# These models need a higher (looser) tolerance on GPU because their GPU kernels are non-deterministic.
REQUIRE_HIGHER_TOLERANCE = {
"alexnet",
"attention_is_all_you_need_pytorch",
"densenet121",
"hf_Albert",
"vgg16",
"mobilenet_v3_large",
"nvidia_deeprecommender",
"timm_efficientdet",
}
# These models need >1e-3 tolerance
REQUIRE_EVEN_HIGHER_TOLERANCE = {
"soft_actor_critic",
"tacotron2",
}
REQUIRE_HIGHER_FP16_TOLERANCE = {
"drq",
}
REQUIRE_COSINE_TOLERANCE = {
    # Just keeping it here even though it's empty, in case we need this in the future.
}
# Non-deterministic output / can't check correctness
NONDETERMINISTIC = {
# https://github.com/pytorch/pytorch/issues/98355
"mobilenet_v3_large",
}
# These benchmarks took >600s on an i9-11900K CPU
VERY_SLOW_BENCHMARKS = {
"hf_BigBird", # 3339s
"hf_Longformer", # 3062s
"hf_T5", # 930s
}
# These benchmarks took >60s on an i9-11900K CPU
SLOW_BENCHMARKS = {
*VERY_SLOW_BENCHMARKS,
"BERT_pytorch", # 137s
"demucs", # 116s
"fastNLP_Bert", # 242s
"hf_Albert", # 221s
"hf_Bart", # 400s
"hf_Bert", # 334s
"hf_DistilBert", # 187s
"hf_GPT2", # 470s
"hf_Reformer", # 141s
"speech_transformer", # 317s
"vision_maskrcnn", # 99s
}
TRT_NOT_YET_WORKING = {
"alexnet",
"resnet18",
"resnet50",
"mobilenet_v2",
"mnasnet1_0",
"squeezenet1_1",
"shufflenetv2_x1_0",
"vgg16",
"resnext50_32x4d",
}
DONT_CHANGE_BATCH_SIZE = {
"demucs",
"pytorch_struct",
"pyhpc_turbulent_kinetic_energy",
"vision_maskrcnn", # https://github.com/pytorch/benchmark/pull/1656
}
SKIP_ACCURACY_CHECK_MODELS = {
    # Models too large to have eager, dynamo and fp64_numbers simultaneously,
    # even on a 40 GB machine. We have tested accuracy on smaller versions of
    # these models.
"hf_GPT2_large",
"hf_T5_large",
"timm_vision_transformer_large",
"maml", # accuracy https://github.com/pytorch/pytorch/issues/93847
"llama_v2_7b_16h",
"Background_Matting",
}
SKIP_ACCURACY_CHECK_AS_EAGER_NON_DETERMINISTIC_MODELS = {
    # Models for which deterministic algorithms cannot be turned on in eager mode.
"Background_Matting",
}
MAX_BATCH_SIZE_FOR_ACCURACY_CHECK = {
"hf_GPT2": 2,
"pytorch_unet": 2,
}
FORCE_AMP_FOR_FP16_BF16_MODELS = {
"DALLE2_pytorch",
"doctr_det_predictor",
"doctr_reco_predictor",
"Super_SloMo",
"tts_angular",
}
# models in canary_models that we should run anyway
CANARY_MODELS = {
"torchrec_dlrm",
}
class TorchBenchmarkRunner(BenchmarkRunner):
def __init__(self):
super().__init__()
self.suite_name = "torchbench"
self.optimizer = None
@property
def skip_models(self):
return SKIP
@property
def skip_models_for_cpu(self):
return SKIP_FOR_CPU
@property
def skip_models_for_cuda(self):
return SKIP_FOR_CUDA
@property
def slow_models(self):
return SLOW_BENCHMARKS
@property
def very_slow_models(self):
return VERY_SLOW_BENCHMARKS
@property
def non_deterministic_models(self):
return NONDETERMINISTIC
@property
def skip_not_suitable_for_training_models(self):
return SKIP_TRAIN
@property
def failing_fx2trt_models(self):
return TRT_NOT_YET_WORKING
@property
def force_amp_for_fp16_bf16_models(self):
return FORCE_AMP_FOR_FP16_BF16_MODELS
@property
def skip_accuracy_checks_large_models_dashboard(self):
if self.args.dashboard or self.args.accuracy:
return SKIP_ACCURACY_CHECK_MODELS
return set()
@property
def skip_accuracy_check_as_eager_non_deterministic(self):
if self.args.accuracy and self.args.training:
return SKIP_ACCURACY_CHECK_AS_EAGER_NON_DETERMINISTIC_MODELS
return set()
def load_model(
self,
device,
model_name,
batch_size=None,
part=None,
):
if self.args.enable_activation_checkpointing:
raise NotImplementedError(
"Activation checkpointing not implemented for Torchbench models"
)
is_training = self.args.training
use_eval_mode = self.args.use_eval_mode
dynamic_shapes = self.args.dynamic_shapes
candidates = [
f"torchbenchmark.models.{model_name}",
f"torchbenchmark.canary_models.{model_name}",
f"torchbenchmark.models.fb.{model_name}",
]
for c in candidates:
try:
module = importlib.import_module(c)
break
except ModuleNotFoundError as e:
if e.name != c:
raise
else:
raise ImportError(f"could not import any of {candidates}")
benchmark_cls = getattr(module, "Model", None)
if not hasattr(benchmark_cls, "name"):
benchmark_cls.name = model_name
cant_change_batch_size = (
not getattr(benchmark_cls, "ALLOW_CUSTOMIZE_BSIZE", True)
or model_name in DONT_CHANGE_BATCH_SIZE
)
if cant_change_batch_size:
batch_size = None
if batch_size is None and is_training and model_name in USE_SMALL_BATCH_SIZE:
batch_size = USE_SMALL_BATCH_SIZE[model_name]
        # Control the memory footprint for a few models
if self.args.accuracy and model_name in MAX_BATCH_SIZE_FOR_ACCURACY_CHECK:
batch_size = min(batch_size, MAX_BATCH_SIZE_FOR_ACCURACY_CHECK[model_name])
# workaround "RuntimeError: not allowed to set torch.backends.cudnn flags"
torch.backends.__allow_nonbracketed_mutation_flag = True
extra_args = []
if part:
extra_args = ["--part", part]
if model_name == "vision_maskrcnn" and is_training:
# Output of vision_maskrcnn model is a list of bounding boxes,
# sorted on the basis of their scores. This makes accuracy
# comparison hard with torch.compile. torch.compile can cause minor
# divergences in the output because of how fusion works for amp in
# TorchInductor compared to eager. Therefore, instead of looking at
# all the bounding boxes, we compare only top 5.
model_kwargs = {"box_detections_per_img": 5}
benchmark = benchmark_cls(
test="train",
device=device,
batch_size=batch_size,
extra_args=extra_args,
model_kwargs=model_kwargs,
)
elif is_training:
benchmark = benchmark_cls(
test="train",
device=device,
batch_size=batch_size,
extra_args=extra_args,
)
else:
benchmark = benchmark_cls(
test="eval",
device=device,
batch_size=batch_size,
extra_args=extra_args,
)
model, example_inputs = benchmark.get_module()
# Models that must be in train mode while training
if is_training and (not use_eval_mode or model_name in ONLY_TRAINING_MODE):
model.train()
else:
model.eval()
gc.collect()
batch_size = benchmark.batch_size
        # Torchbench has a quite different setup for yolov3, so we directly pass
        # the right example_inputs
if model_name == "yolov3":
example_inputs = (torch.rand(batch_size, 3, 384, 512).to(device),)
# See https://github.com/pytorch/benchmark/issues/1561
if model_name == "maml_omniglot":
batch_size = 5
assert example_inputs[0].shape[0] == batch_size
if model_name == "vision_maskrcnn":
batch_size = 1
# global current_name, current_device
# current_device = device
# current_name = benchmark.name
if self.args.trace_on_xla:
# work around for: https://github.com/pytorch/xla/issues/4174
import torch_xla # noqa: F401
self.validate_model(model, example_inputs)
return device, benchmark.name, model, example_inputs, batch_size
def iter_model_names(self, args):
from torchbenchmark import _list_canary_model_paths, _list_model_paths
models = _list_model_paths()
models += [
f
for f in _list_canary_model_paths()
if os.path.basename(f) in CANARY_MODELS
]
models.sort()
start, end = self.get_benchmark_indices(len(models))
for index, model_path in enumerate(models):
if index < start or index >= end:
continue
model_name = os.path.basename(model_path)
if (
not re.search("|".join(args.filter), model_name, re.I)
or re.search("|".join(args.exclude), model_name, re.I)
or model_name in args.exclude_exact
or model_name in self.skip_models
):
continue
yield model_name
def pick_grad(self, name, is_training):
if is_training or name in ("maml",):
return torch.enable_grad()
else:
return torch.no_grad()
def get_tolerance_and_cosine_flag(self, is_training, current_device, name):
tolerance = 1e-4
cosine = self.args.cosine
# Increase the tolerance for torch allclose
if self.args.float16 or self.args.amp:
if name in REQUIRE_HIGHER_FP16_TOLERANCE:
return 1e-2, cosine
return 1e-3, cosine
if is_training and current_device == "cuda":
tolerance = 1e-3
            if name in REQUIRE_COSINE_TOLERANCE:
cosine = True
elif name in REQUIRE_HIGHER_TOLERANCE:
tolerance = 1e-3
elif name in REQUIRE_EVEN_HIGHER_TOLERANCE:
tolerance = 8 * 1e-2
return tolerance, cosine
def compute_loss(self, pred):
return reduce_to_scalar_loss(pred)
def forward_pass(self, mod, inputs, collect_outputs=True):
with self.autocast():
return mod(*inputs)
def forward_and_backward_pass(self, mod, inputs, collect_outputs=True):
cloned_inputs = clone_inputs(inputs)
self.optimizer_zero_grad(mod)
with self.autocast():
pred = mod(*cloned_inputs)
loss = self.compute_loss(pred)
self.grad_scaler.scale(loss).backward()
self.optimizer_step()
if collect_outputs:
return collect_results(mod, pred, loss, cloned_inputs)
return None
def torchbench_main():
original_dir = setup_torchbench_cwd()
logging.basicConfig(level=logging.WARNING)
warnings.filterwarnings("ignore")
main(TorchBenchmarkRunner(), original_dir)
if __name__ == "__main__":
torchbench_main()
|
#!/usr/bin/env python3
from __future__ import annotations
import argparse
import collections
import contextlib
import copy
import csv
import dataclasses
import functools
import importlib
import itertools
import logging
import os
import pathlib
import random
import shutil
import signal
import subprocess
import sys
import time
from contextlib import contextmanager
from typing import Any, Callable, Mapping, NamedTuple, Optional, Tuple, Type
from unittest.mock import MagicMock
import numpy as np
import pandas as pd
import psutil
import torch
import torch._dynamo
import torch._dynamo.utils
import torch._export
import torch.distributed
import torch.fx._pytree as fx_pytree
import torch.multiprocessing as mp
from scipy.stats import gmean, ttest_ind
from torch._dynamo.profiler import fx_insert_profiling, Profiler
from torch._dynamo.testing import dummy_fx_compile, format_speedup, same
from torch._dynamo.utils import clone_inputs
from torch._functorch.aot_autograd import set_model_name
from torch._inductor import config as inductor_config
from torch._inductor.utils import fresh_inductor_cache
from torch._subclasses.fake_tensor import FakeTensorMode
from torch.utils import _pytree as pytree
from torch.utils._pytree import tree_map, tree_map_only
from tqdm.auto import tqdm, trange
log = logging.getLogger(__name__)
# We are primarily interested in TF32
torch.backends.cuda.matmul.allow_tf32 = True
# Suppress torch.profiler spam
os.environ["KINETO_LOG_LEVEL"] = "5"
current_name = ""
current_device = ""
current_onnx_compiler = ""
current_batch_size = None
output_filename = None
MAX_DOWNLOAD_ATTEMPTS = 5
class CI(NamedTuple):
backend: str # aot_eager or inductor
training: bool
dynamic: bool = False
device: str = "cuda"
CI_SKIP = collections.defaultdict(list)
# Skips for dynamic=False
# Here eager really means dynamo+eager
CI_SKIP[CI("eager", training=False)] = [
# TorchBench
"DALLE2_pytorch", # AttributeError: text_encodings
"hf_BigBird", # fail_accuracy
# TypeError: pad_center() takes 1 positional argument but 2 were given
"tacotron2",
# Huggingface
"DebertaV2ForQuestionAnswering", # OOM
]
CI_SKIP[CI("eager", training=True)] = [
*CI_SKIP[CI("eager", training=False)],
# TorchBench
"BERT_pytorch", # accuracy
"Background_Matting", # fp64_OOM
"hf_BigBird", # fp64_OOM
"hf_T5_base", # fp64_OOM
"llama", # Accuracy failed: allclose not within tol=0.001
"vision_maskrcnn", # The size of tensor a (29) must match the size of tensor b (33) (doesn't repro)
# Huggingface
"XGLMForCausalLM", # OOM
# TIMM
"cait_m36_384", # fp64_OOM
"convit_base", # fp64_OOM
"mobilenetv2_100", # accuracy
"xcit_large_24_p8_224", # fp64_OOM,
]
CI_SKIP[CI("aot_eager", training=False)] = [
*CI_SKIP[CI("eager", training=False)],
# all dynamic shapes errors for detectron variants
"demucs", # OOM
"detectron2_fasterrcnn_r_101_c4",
"detectron2_fasterrcnn_r_101_dc5",
"detectron2_fasterrcnn_r_101_fpn",
"detectron2_fasterrcnn_r_50_c4",
"detectron2_fasterrcnn_r_50_dc5",
"detectron2_fasterrcnn_r_50_fpn",
"detectron2_fcos_r_50_fpn",
"detectron2_maskrcnn_r_101_c4",
"detectron2_maskrcnn_r_101_fpn",
"detectron2_maskrcnn_r_50_c4",
"detectron2_maskrcnn_r_50_fpn",
"hf_BigBird", # OOM
"tacotron2", # AssertionError: Deduped args out of bounds
# Huggingface
"BartForConditionalGeneration", # OOM
"DebertaV2ForQuestionAnswering", # OOM
# Torchbench
"speech_transformer", # https://github.com/pytorch/pytorch/issues/99893
"pyhpc_isoneutral_mixing", # https://github.com/pytorch/pytorch/issues/99893
"pyhpc_turbulent_kinetic_energy", # https://github.com/pytorch/pytorch/issues/99893
]
CI_SKIP[CI("aot_eager", training=True)] = [
*CI_SKIP[CI("aot_eager", training=False)],
# TorchBench
"Background_Matting", # fp64_OOM
"hf_T5_base", # fp64_OOM
"mobilenet_v2_quantized_qat", # fp64_OOM
"resnet50_quantized_qat", # fp64_OOM
"pytorch_struct",
# Huggingface
"MBartForConditionalGeneration", # OOM
"M2M100ForConditionalGeneration", # OOM
"XGLMForCausalLM", # OOM
# TIMM
"cait_m36_384", # fp64_OOM
"convit_base", # fp64_OOM
"fbnetv3_b", # Accuracy (blocks.2.2.bn1.weight.grad)
"levit_128", # Accuracy (patch_embed.0.c.weight.grad)
"lcnet_050", # Accuracy (blocks.1.0.bn2.weight.grad)
"sebotnet33ts_256", # Accuracy (stem.conv1.conv.weight.grad)
"xcit_large_24_p8_224", # fp64_OOM,
]
CI_SKIP[CI("inductor", training=False)] = [
# TorchBench
"DALLE2_pytorch", # AttributeError: text_encodings
"demucs", # OOM
"detectron2_fasterrcnn_r_101_c4",
"detectron2_fasterrcnn_r_101_dc5",
"detectron2_fasterrcnn_r_101_fpn",
"detectron2_fasterrcnn_r_50_c4",
"detectron2_fasterrcnn_r_50_dc5",
"detectron2_fasterrcnn_r_50_fpn",
"detectron2_fcos_r_50_fpn",
"detectron2_maskrcnn_r_101_c4",
"detectron2_maskrcnn_r_101_fpn",
"detectron2_maskrcnn_r_50_c4",
"detectron2_maskrcnn_r_50_fpn",
# TorchBench
"detectron2",
"densenet121", # flaky accuracy
"hf_T5", # accuracy
"hf_BigBird", # accuracy
"hf_GPT2_large", # OOM
"maml", # accuracy
"mobilenet_v2_quantized_qat", # The eval test only supports CPU
"pytorch_struct", # Test eval is not implemented
"pyhpc_equation_of_state", # Accuracy
"pyhpc_turbulent_kinetic_energy", # Accuracy
"tacotron2",
]
CI_SKIP[CI("inductor", training=False, device="cpu")] = [
# TorchBench
"drq", # Need to update torchbench
"detectron2_fasterrcnn_r_101_c4",
"detectron2_fasterrcnn_r_101_dc5",
"detectron2_fasterrcnn_r_101_fpn",
"detectron2_fasterrcnn_r_50_c4",
"detectron2_fasterrcnn_r_50_dc5",
"detectron2_fasterrcnn_r_50_fpn",
"detectron2_fcos_r_50_fpn",
"detectron2_maskrcnn_r_101_c4",
"detectron2_maskrcnn_r_101_fpn",
"detectron2_maskrcnn_r_50_c4",
"detectron2_maskrcnn_r_50_fpn",
"doctr_det_predictor", # requires newer gcc
"doctr_reco_predictor", # requires newer gcc
"gat", # does not work with fp32
"gcn", # does not work with fp32
"hf_Bert_large", # OOM
"hf_GPT2_large", # Intermittent failure on CI
"hf_T5_base", # OOM
"mobilenet_v2_quantized_qat",
"pyhpc_turbulent_kinetic_energy",
"resnet50_quantized_qat", # Eager model failed to run(Quantize only works on Float Tensor, got Double)
"sage", # does not work with fp32
# Huggingface
"MBartForConditionalGeneration", # Accuracy https://github.com/pytorch/pytorch/issues/94793
"PLBartForConditionalGeneration", # Accuracy https://github.com/pytorch/pytorch/issues/94794
# TIMM
"cait_m36_384", # Accuracy
"pnasnet5large", # OOM
"xcit_large_24_p8_224", # OOM https://github.com/pytorch/pytorch/issues/95984
"opacus_cifar10", # Fails to run https://github.com/pytorch/pytorch/issues/99201
]
CI_SKIP[CI("inductor", training=True)] = [
*CI_SKIP[CI("inductor", training=False)],
# TorchBench
"Background_Matting", # fp64_OOM
"hf_T5_base", # accuracy
"mobilenet_v3_large", # accuracy
"resnet50_quantized_qat", # Eager model failed to run
"AlbertForQuestionAnswering", # accuracy
"crossvit_9_240", # fails to run on timm 0.8.22 with cudagraphs, mempools
"deit_base_distilled_patch16_224", # fails to run in timm 0.8.22, cudagraphs
"mobilevit_s",
"pit_b_224",
"twins_pcpvt_base",
"visformer_small",
"vit_base_patch16_224",
"xcit_large_24_p8_224",
]
# Skips for dynamic=True
CI_SKIP[CI("aot_eager", training=False, dynamic=True)] = [
*CI_SKIP[CI("aot_eager", training=False)],
"vision_maskrcnn", # accuracy failure on boxes, after https://github.com/pytorch/pytorch/issues/101093
# https://github.com/pytorch/pytorch/issues/103760
"hf_T5_generate",
"hf_Bert", # Error: RelaxedUnspecConstraint(L['input_ids'].size()[0]) - inferred constant (4)
]
CI_SKIP[CI("aot_eager", training=True, dynamic=True)] = [
*CI_SKIP[CI("aot_eager", training=True)],
*CI_SKIP[CI("aot_eager", training=False, dynamic=True)],
"llama", # AssertionError: cannot compute free_symbols of True
"torchrec_dlrm", # RuntimeError: mat1 and mat2 must have the same dtype, but got Float and BFloat16
]
CI_SKIP[CI("inductor", training=False, dynamic=True)] = [
*CI_SKIP[CI("aot_eager", training=False, dynamic=True)],
*CI_SKIP[CI("inductor", training=False)],
"nanogpt_generate", # Assertion `index out of bounds: 0 <= tmp0 < 64` failed.
]
CI_SKIP[CI("inductor", training=True, dynamic=True)] = [
# NB: Intentionally omitting for symmetry with dynamic=False
# *CI_SKIP[CI("aot_eager", training=True, dynamic=True)],
*CI_SKIP[CI("inductor", training=False, dynamic=True)],
*CI_SKIP[CI("inductor", training=True)],
"levit_128", # Accuracy fails on A10G, passes on A100
"sebotnet33ts_256", # Flaky accuracy failed
]
CI_SKIP[CI("inductor", training=False, dynamic=True, device="cpu")] = [
*CI_SKIP[CI("inductor", training=False, device="cpu")],
"pyhpc_isoneutral_mixing",
"dpn107",
]
CI_SKIP_OPTIMIZER = {
# TIMM
"convmixer_768_32", # accuracy
"hrnet_w18", # Stack issue in fx
# HF
"pnasnet5large", # Stack issue in fx
"MobileBertForMaskedLM", # Stack issue in fx
"MobileBertForQuestionAnswering", # Stack issue in fx
"PegasusForConditionalGeneration", # OOM
}
CI_SKIP_DYNAMIC_BATCH_ONLY = {
"sam",
# See https://github.com/mindee/doctr/blob/f2114758d529ed8d3d0030581638f0520b6b98d8/doctr/models/detection/core.py#L89
# It iterates over the batch, which is dynamic, and dynamo chokes
# We should be able to graphbreak there.
"doctr_det_predictor",
"dlrm",
}
def model_specified_by_path(path_and_class_str):
return ":" in path_and_class_str
def load_model_from_path(path_and_class_str):
configs = {}
for kvstr in path_and_class_str.split(","):
k, v = kvstr.split(":")
configs[k] = v
for name in ["path", "class"]:
if name not in configs:
raise RuntimeError(
"Invalid --only arguments. Check help message for the correct format"
)
path = configs["path"]
class_name = configs["class"]
if path[:1] != "/":
raise RuntimeError(
"Use absolute path since dynamo may change the current working directory which makes using relative path tricky"
)
spec = importlib.util.spec_from_file_location("module_name", path)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
model_class = getattr(module, class_name)
assert issubclass(model_class, torch.nn.Module)
model = model_class()
assert hasattr(model, "get_example_inputs")
inputs = model.get_example_inputs()
return model, inputs
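# Hedged usage sketch (path and class name illustrative): a model specified by path is
# passed to --only as "path:/abs/dir/my_model.py,class:MyModel", where my_model.py defines
# MyModel(torch.nn.Module) exposing a get_example_inputs() method.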
def output_csv(filename, headers, row):
if os.path.exists(filename):
with open(filename) as fd:
lines = list(csv.reader(fd)) or [[]]
if headers and len(headers) > len(lines[0]):
# if prior results failed the header might not be filled in yet
lines[0] = headers
else:
headers = lines[0]
else:
lines = [headers]
lines.append([(f"{x:.6f}" if isinstance(x, float) else x) for x in row])
with open(filename, "w") as fd:
writer = csv.writer(fd, lineterminator="\n")
for line in lines:
writer.writerow(list(line) + ["0"] * (len(headers) - len(line)))
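# Illustrative call (filename and values made up): output_csv("speedup.csv",
# ["dev", "name", "speedup"], ["cuda", "resnet50", 1.5]) rewrites the whole file, appending
# one row with floats formatted to 6 decimal places and any missing trailing columns padded
# with "0".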
def nothing(f):
return f
@functools.lru_cache(None)
def patch_torch_manual_seed():
"""Make torch manual seed deterministic. Helps with accuracy testing."""
def deterministic_torch_manual_seed(*args, **kwargs):
from torch._C import default_generator
seed = 1337
import torch.cuda
if not torch.cuda._is_in_bad_fork():
torch.cuda.manual_seed_all(seed)
return default_generator.manual_seed(seed)
torch.manual_seed = deterministic_torch_manual_seed
def synchronize():
pass
def summarize_graph_break(filename):
"""
Sorts and de-dupes the graphs breaks on the reason string. Note that this
function is just a best effort to reduce the logging information. We could
miss some graph breaks because of de-duping. We can further refine this
function as need arises.
"""
log_file = f"{filename.rstrip('.csv')}_graph_breaks.csv"
if os.path.exists(log_file):
df = pd.read_csv(log_file)
df = df.sort_values("reason").drop_duplicates(subset="reason")
# Specialize for multi tensor sgd as reason is not identical
multi_tensor_sgd_row = df.loc[df["reason"].str.contains("_multi_tensor_sgd")]
if len(multi_tensor_sgd_row):
df = df[
~df["reason"].str.contains("_multi_tensor_sgd")
] # Drop all sgd rows
df = pd.concat(
[df, pd.DataFrame([multi_tensor_sgd_row.iloc[0]])], axis=0
) # Add back a single row
df.to_csv(f"{log_file.rstrip('.csv')}_deduped.csv", index=False)
def print_summary(filename, print_dataframe=False):
if not (filename and os.path.exists(filename)):
return
data = pd.read_csv(filename)
if "tag" in data.columns:
for tag in data.tag.unique():
if tag == "0.0000":
continue # This happens for failed runs
print(f"\nSummary for tag={tag}:")
print_summary_table(data[data.tag == tag], print_dataframe=print_dataframe)
else:
print_summary_table(data, print_dataframe=print_dataframe)
summarize_graph_break(filename)
def print_summary_table(data, print_dataframe=False):
if print_dataframe:
pd.options.display.max_rows = 1000
pd.options.display.max_columns = 1000
pd.options.display.width = 2000
print(data)
width = max(map(len, data.columns))
for col in data.columns:
try:
if col in ("dev", "name", "batch_size", "tag"):
continue
elif col in ("pct_ops", "pct_time"):
print(col.ljust(width), f"{data[col].mean():.3%}")
elif col in ("graphs", "graph_calls", "captured_ops", "total_ops"):
print(col.ljust(width), f"{data[col].mean():.3f}")
elif col in ("compilation_latency"):
print(col.ljust(width), f"mean={data[col].mean():.3f} seconds")
elif col in ("compression_ratio"):
print(col.ljust(width), f"mean={data[col].mean():.3f}x")
elif col in ("accuracy"):
pass_rate = (data[col] == "pass").mean()
print(col.ljust(width), f"pass_rate={100*pass_rate:.2f}%")
else:
cdata = data[col]
print(
col.ljust(width),
f"gmean={gmean(cdata):.2f}x mean={cdata.mean():.3f}x",
)
        except Exception:
            pass
def tensor_is_on_xla(tensors):
def visit(x: torch.Tensor):
nonlocal result
if x.device.type == "xla":
result = True
result = False
tree_map_only(torch.Tensor, visit, tensors)
return result
def timed(
model,
model_iter_fn,
example_inputs,
times=1,
return_result=False,
collect_outputs=False,
):
use_xla = tensor_is_on_xla(example_inputs)
synchronize()
if use_xla:
xm.mark_step()
xm.wait_device_ops()
time_total = 0
# Dont collect outputs to correctly measure timing
for _ in range(times):
# Put this call inside the loop to reset the seed for each iteration.
# Don't include reset_rng_state() to correctly measure timing
reset_rng_state(use_xla)
t_iter_begin = time.perf_counter()
result = model_iter_fn(model, example_inputs, collect_outputs=collect_outputs)
# instead of calling sync on result_list, we should call mark_step.
# In training case, result_list may be empty, but we want to
# send all the pending graphs for compilation.
if use_xla:
# For the model running on regular torchxla (baseline), we need the
# mark step to send the accumulated graph for compilation.
#
# For the model running with dynamo/torchxla bridge, in training case,
# we need the mark step to send the optimizer graph out for
# compilation.
xm.mark_step()
t_iter_end = time.perf_counter()
time_total += t_iter_end - t_iter_begin
t_0 = time.perf_counter()
if use_xla:
xm.wait_device_ops()
synchronize()
t_1 = time.perf_counter()
time_total += t_1 - t_0
return (time_total, result) if return_result else time_total
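# Hedged usage sketch for timed(): the model and iteration function below are
# hypothetical stand-ins for the suite's real model_iter_fn (forward_pass etc.);
# this helper is defined for illustration only and never called by the suite.
def _example_timed_usage():
    import torch

    model = torch.nn.Linear(8, 8)
    example_inputs = (torch.randn(4, 8),)

    def forward_only(mod, inputs, collect_outputs=False):
        return mod(*inputs)

    # Returns total wall time over 3 iterations; pass return_result=True to also
    # get the last iteration's output.
    return timed(model, forward_only, example_inputs, times=3)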
def _normalize_bench_inputs(example_inputs) -> Tuple[Tuple[Any], Mapping[str, Any]]:
# NOTE(bowbao): For huggingface benchmark, example_inputs are formatted as dictionary,
# and consumed like `model(**example_inputs)`.
# For other benchmarks, example_inputs are formatted as tuple and consumed
# like `model(*example_inputs)`.
if isinstance(example_inputs, dict):
return (), example_inputs
else:
return tuple(example_inputs), {}
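# Hedged illustration of _normalize_bench_inputs: huggingface-style dict inputs become
# kwargs and everything else becomes positional args, so callers can always invoke
# `model(*args, **kwargs)` uniformly (names below are illustrative):
#
#     args, kwargs = _normalize_bench_inputs({"input_ids": ids, "attention_mask": mask})
#     # -> (), {"input_ids": ids, "attention_mask": mask}
#     args, kwargs = _normalize_bench_inputs((x, y))
#     # -> (x, y), {}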
def _register_dataclass_output_as_pytree(example_outputs) -> None:
# NOTE(angelayi): For huggingface benchmark, some example outputs are
# formatted as a dataclass which pytree cannot consume. So we want
# to register the pytree implementation here
example_outputs_flat, _ = pytree.tree_flatten(example_outputs)
output_dataclass_types = [
type(out) for out in example_outputs_flat if dataclasses.is_dataclass(type(out))
]
for output_type in output_dataclass_types:
from torch._export.utils import register_dataclass_as_pytree_node
register_dataclass_as_pytree_node(output_type)
class Stats:
totals = collections.defaultdict(collections.Counter)
@classmethod
def reset_counters(cls):
for k, v in torch._dynamo.utils.counters.items():
cls.totals[k].update(v)
ok = torch._dynamo.utils.counters["frames"]["ok"]
total = torch._dynamo.utils.counters["frames"]["total"]
torch._dynamo.utils.counters.clear()
return ok, total
@classmethod
def print_summary(cls):
for k, v in sorted(cls.totals.items()):
lines = "\n ".join(map(str, v.most_common(50)))
print(f"STATS {k}\n {lines}")
@classmethod
def aot_summary(cls):
return [cls.totals["aot_autograd"]["total"], cls.totals["aot_autograd"]["ok"]]
def coverage_experiment(args, model_iter_fn, model, example_inputs):
"""
Test operator/model coverage of TorchDynamo and record statistics
taken from a profiler. This target is mainly intended to check
correctness.
Writes to ./coverage.csv
"""
profiler = Profiler()
frozen_model_iter_fn = torch._dynamo.run(model_iter_fn)
with profiler.prof:
frozen_model_iter_fn(model, example_inputs)
coverage_result = profiler.results()
output_csv(
output_filename,
(
"dev",
"name",
"batch_size",
"graphs",
"graph_calls",
"captured_ops",
"total_ops",
"pct_ops",
"pct_time",
),
[
current_device,
current_name,
current_batch_size,
]
+ coverage_result.tocsv(),
)
return coverage_result
def speedup_experiment_fx2trt(args, model_iter_fn, model, example_inputs):
"""
    Measure speedups over eager using the TRT inference backend. The TRT backend consumes
    the FX graph generated by torch._dynamo.
Writes to ./speedups_fx2trt.csv
"""
return speedup_experiment(args, model_iter_fn, model, example_inputs)
def recompile_profiler_experiment(args, model_iter_fn, model, example_inputs):
with torch._dynamo.utils.CompileProfiler() as prof:
opt_model_iter_fn = torch._dynamo.optimize(prof, nopython=args.nopython)(
model_iter_fn
)
opt_model_iter_fn(model, example_inputs)
output_csv(
output_filename, ["model", "profiler report"], [current_name, prof.report()]
)
met = prof.get_metrics()
guard_failures = len(met["guard_failures"])
return [guard_failures]
def randomize_input(inputs):
if isinstance(inputs, (list, tuple)):
return type(inputs)([randomize_input(x) for x in inputs])
elif isinstance(inputs, torch.Tensor):
if inputs.dtype in (torch.float32, torch.float64):
torch._dynamo.utils.counters["randomize_input"]["times"] += 1
return torch.randn_like(inputs)
elif inputs.dtype == torch.int64:
            # Note: we cannot simply randomize integer tensors as follows
            #   `return torch.randint_like(inputs, high=inputs.max().item())`
            # This may break some invariants between tensors.
            # E.g. in an embedding lookup case, one tensor is the length
            # and another is an indices tensor.
return inputs
else:
raise RuntimeError(
f"randomize_input need support tensor of type {inputs.dtype}"
)
else:
raise RuntimeError(
f"randomize_input can not handle input of type {type(inputs)}"
)
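# Hedged example of randomize_input semantics: the input structure and dtypes are
# preserved, floating-point tensors get fresh random values, and int64 tensors are
# returned unchanged (tensor names below are illustrative):
#
#     inputs = (torch.rand(2, 3), torch.arange(4))
#     new_inputs = randomize_input(inputs)
#     # new_inputs[0] is a fresh float tensor; new_inputs[1] (int64) is unchanged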
def maybe_mark_step(args):
if args.trace_on_xla:
xm.mark_step()
def speedup_experiment(args, model_iter_fn, model, example_inputs, **kwargs):
"""
Measure speedups over eager.
Writes to ./speedups.csv
"""
# if args.dynamic_shapes:
# return speedup_experiment_ds(args, model_iter_fn, model, example_inputs)
timings = np.zeros((args.repeat, 2), np.float64)
    # if we randomize the input, we should also check the result is correct
    should_check_result = should_randomize_input = args.randomize_input
    is_correct = True
import contextlib
from torch._inductor.utils import maybe_profile
@contextlib.contextmanager
def maybe_mark_profile(*args, **kwargs):
prof: torch.profiler.profile = kwargs.pop("p", None)
mark = kwargs.pop("mark", None)
if prof:
with torch.profiler.record_function(mark):
yield
else:
yield
times = args.iterations_per_run
    # Use a higher tolerance for XLA since XLA causes numerical instability when
    # the graph size changes
tolerance = args.xla_tolerance if args.trace_on_xla else 1e-4
torch._dynamo.config.repro_tolerance = tolerance
with maybe_profile(args.export_profiler_trace) as p:
if args.export_aot_inductor:
frozen_model_iter_fn = export_aot_inductor(model_iter_fn)
else:
frozen_model_iter_fn = torch._dynamo.run(model_iter_fn)
for rep in trange(args.repeat, desc="running benchmark"):
inputs = (
randomize_input(copy.deepcopy(example_inputs))
if should_randomize_input
else example_inputs
)
        # We need to call mark_step to perform the computation
        # on randomize_input. Otherwise the first call using the
        # inputs will incur a higher penalty than the later ones.
maybe_mark_step(args)
# interleave the runs to handle frequency scaling and load changes
with maybe_mark_profile(p=p, mark="expected"):
timings[rep, 0], expected_output = timed(
model,
model_iter_fn,
inputs,
return_result=True,
times=times,
collect_outputs=args.collect_outputs,
)
# call mark_step between the 2 calls to make the comparison fair.
maybe_mark_step(args)
with maybe_mark_profile(p=p, mark="actual"):
timings[rep, 1], actual_output = timed(
model,
frozen_model_iter_fn,
inputs,
return_result=True,
times=times,
collect_outputs=args.collect_outputs,
)
if should_check_result:
is_correct = is_correct and same(
expected_output, actual_output, tol=tolerance
)
if args.export_profiler_trace:
name = args.profiler_trace_name + "_" + model.name + ".json"
name = os.path.join(torch._dynamo.config.base_dir, name)
p.export_chrome_trace(name)
median = np.median(timings, axis=0)
speedup = median[0] / median[1]
if args.dump_raw_metrics:
np.save(
f"{output_filename[:-4]}-raw_timings-{current_name}-{current_device}.npy",
timings,
)
first_headers = ["dev", "name", "batch_size"]
first_fields = [current_device, current_name, current_batch_size]
if "tag" in kwargs:
first_headers.append("tag")
first_fields.append(kwargs["tag"])
headers = first_headers + ["speedup", "abs_latency"]
row = first_fields + [float(speedup), median[1] * 1000]
msg = f"{speedup:.3f}x"
if args.baseline:
headers.extend(
[
"baseline",
"speedup_vs_baseline",
]
)
df = pd.read_csv(args.baseline)
try:
baseline_speedup = df[df["name"] == current_name]["speedup"].item()
row.extend([baseline_speedup, speedup / baseline_speedup])
msg = f"{baseline_speedup:.3f}x -> {speedup:.3f}x [{speedup / baseline_speedup:.3f}x]"
except (KeyError, ZeroDivisionError):
row.extend(
[
0.0,
0.0,
]
)
if "compilation_latency" in kwargs:
headers += [
"compilation_latency",
"compression_ratio",
"eager_peak_mem",
"dynamo_peak_mem",
]
row.append(kwargs["compilation_latency"])
row.append(kwargs["compression_ratio"])
row.append(kwargs["eager_peak_mem"])
row.append(kwargs["dynamo_peak_mem"])
if "dynamo_stats" in kwargs:
for k, v in kwargs["dynamo_stats"].items():
headers.append(k)
row.append(v)
output_csv(
output_filename,
headers,
row,
)
headers, data = torch._dynamo.utils.compile_times(repr="csv", aggregate=True)
assert (
output_filename.find(".csv") > 0
), f"expected output_filename to be a .csv, but got {output_filename}"
output_csv(
output_filename[:-4] + "_compilation_metrics.csv",
first_headers + headers,
first_fields + data,
)
return msg
def speedup_experiment_ds(args, model_iter_fn, model, example_inputs):
"""
Run dynamic shapes benchmarks.
Requires dynamic shape compatible models, which provide a list of example inputs.
Warms up using the first input example and then iterates the inputs,
measuring (and expecting minimal) variance between the runtime for different examples.
"""
timings = np.zeros((args.repeat, len(example_inputs), 2), np.float64)
if args.repeat > 5:
print(
f"\ndynamic shapes experiments are slow, consider setting --repeat less than {args.repeat}\n"
)
nwarmup = 4
for rep in range(args.repeat):
# Start each rep fresh, e.g. only warmup on example 0
torch._dynamo.reset()
optimized_model_iter_fn = optimize_ctx(model_iter_fn)
for _ in range(nwarmup):
optimized_model_iter_fn(model, example_inputs[0])
for input_idx, inputs in enumerate(example_inputs):
# interleave the runs to handle frequency scaling and load changes
timings[rep, input_idx, 0] = timed(
model, model_iter_fn, inputs, return_result=False
)
# different from regular speedup_experiment, we _DO_ want to allow recompilation
timings[rep, input_idx, 1] = timed(
model, optimized_model_iter_fn, inputs, return_result=False
)
medians = np.median(timings, axis=0)
speedups = list(medians[:, 0] / medians[:, 1])
speedups_mean = np.mean(speedups)
speedups_median = np.median(speedups)
speedups_var = np.var(speedups)
# TODO this x[0] is not going to work in general but bert only has 1 input
shapes = [x[0].shape for x in example_inputs]
shape_keys = sorted(set(shapes))
shape_speedups = {
shape: [
it[1] for it in filter(lambda it: it[0] == shape, zip(shapes, speedups))
]
for shape in shape_keys
}
output_str = (
f"mean: {speedups_mean:.3f}, median: {speedups_median:.3f}, var: {speedups_var:.3f}"
+ "\nSpeedups by shape: "
+ "\n".join(
[
f"{shape}: "
+ ", ".join([f"{speedup: .3g}" for speedup in shape_speedups[shape]])
for shape in shape_keys
]
)
)
output_csv(
output_filename,
("dev", "name", "batch_size", "speedup mean", "speedup median", "speedup var"),
[
current_device,
current_name,
current_batch_size,
speedups_mean,
speedups_median,
speedups_var,
],
)
return output_str
def speedup_experiment_onnx(
onnx_model_cls: Type[OnnxModelFromTorchScript],
args,
model_iter_fn,
model,
example_inputs,
**kwargs,
):
"""
Measure speedups over eager.
This function is responsible for the following:
1. Creation of OnnxModel, which handles export, ort initialization.
2. Creating iobinding with OnnxModel if device is CUDA, which is essential for perf measurement.
3. Running ORT with OnnxModel.
Writes to ./{output_filename}, which should be
        `pathlib.Path(self.output_dir) / f"{self.compiler}_{suite}_{self.dtype}_{self.mode}_{self.device}_{self.testing}.csv"`.
TODO(bowbao): Record export time and export peak memory usage.
"""
timings = np.zeros((args.repeat, 2), np.float64)
is_correct = True
should_randomize_input = args.randomize_input
times = args.iterations_per_run
onnx_model = onnx_model_cls(
args.output_directory or ".", model, copy.deepcopy(example_inputs)
)
def create_onnx_input_binded_fn(
onnx_model: OnnxModelFromTorchScript, pt_inputs, example_outputs
):
# Goal is to move the iobinding creation outside of the timer function.
iobinding, outputs = onnx_model.create_iobinding(pt_inputs, example_outputs)
def onnxrt_model_iter_fn(model, inputs, collect_outputs=True):
onnx_model.run_with_iobinding(iobinding, outputs)
if collect_outputs:
return outputs
return onnxrt_model_iter_fn
def create_onnx_fn(onnx_model: OnnxModelFromTorchScript, pt_inputs):
def onnxrt_model_iter_fn(model, inputs, collect_outputs=True):
return onnx_model.run(pt_inputs)
return onnxrt_model_iter_fn
for rep in range(args.repeat):
inputs = (
randomize_input(copy.deepcopy(example_inputs))
if should_randomize_input
else example_inputs
)
timings[rep, 0], expected_output = timed(
model,
model_iter_fn,
inputs,
return_result=True,
times=times,
collect_outputs=args.collect_outputs,
)
if current_device == "cpu":
onnxrt_model_iter_fn = create_onnx_fn(onnx_model, inputs)
else:
onnxrt_model_iter_fn = create_onnx_input_binded_fn(
onnx_model, inputs, expected_output
)
timings[rep, 1], actual_output = timed(
model,
onnxrt_model_iter_fn,
inputs,
return_result=True,
times=times,
collect_outputs=args.collect_outputs,
)
pvalue = ttest_ind(timings[:, 0], timings[:, 1]).pvalue
median = np.median(timings, axis=0)
speedup = median[0] / median[1]
if args.dump_raw_metrics:
np.save(
f"{output_filename[:-4]}-raw_timings-{current_name}-{current_device}.npy",
timings,
)
headers = ["dev", "name", "batch_size", "speedup", "abs_latency"]
row = [
current_device,
current_name,
current_batch_size,
float(speedup),
median[1] * 1000,
]
if "compilation_latency" in kwargs:
headers = headers + ["compilation_latency", "compression_ratio"]
row.append(kwargs["compilation_latency"])
row.append(kwargs["compression_ratio"])
output_csv(
output_filename,
headers,
row,
)
headers, data = torch._dynamo.utils.compile_times(repr="csv", aggregate=True)
assert (
output_filename.find(".csv") > 0
), f"expected output_filename to be a .csv, but got {output_filename}"
output_csv(
output_filename[:-4] + "_compilation_metrics.csv",
["dev", "name", "batch_size"] + headers,
[current_device, current_name, current_batch_size] + data,
)
return format_speedup(speedup, pvalue, is_correct=is_correct)
def overhead_experiment(*args, model_iter_fn):
"""
Measure overheads of TorchDynamo by running with no backend (only
eager+FX), and reporting speedup/slowdown over eager.
Writes to ./overheads.csv
"""
return speedup_experiment(*args, model_iter_fn)
def print_fx(gm, example_inputs):
print(gm.graph)
return gm
def print_aten_ops(gm, example_inputs):
from functorch.compile import aot_module
def trace_printer(gm, _):
print(gm.graph)
return gm
return aot_module(gm, fw_compiler=trace_printer, bw_compiler=trace_printer)
def baselines(models, model_iter_fn, example_inputs, args):
"""
Common measurement code across all baseline experiments.
"""
models = list(models)
for idx, (name, model) in enumerate(models):
if idx == 0:
result0 = model_iter_fn(model, example_inputs)
elif model is not None:
try:
result = model_iter_fn(model, example_inputs)
if same(result0, result):
continue
print(name, "is INCORRECT")
except Exception:
log.exception("error checking %s", name)
models[idx] = (name, None)
timings = np.zeros((args.repeat, len(models)), np.float64)
timings.fill(1.0e10)
for rep in range(args.repeat):
for idx, (name, model) in enumerate(models):
if model is not None:
try:
timings[rep, idx] = timed(model, model_iter_fn, example_inputs)
except Exception:
pass
pvalue = [
ttest_ind(timings[:, 0], timings[:, i]).pvalue
for i in range(1, timings.shape[1])
]
median = np.median(timings, axis=0)
speedup = median[0] / median[1:]
for idx, (name, model) in enumerate(models[1:]):
if model is None:
speedup[idx] = 0.0
result = " ".join(
[
format_speedup(s, p, m is not None)
for s, p, m in zip(speedup, pvalue, [m for n, m in models[1:]])
]
)
output_csv(
output_filename,
("dev", "name", "batch_size") + tuple(n for n, m in models[1:]),
[current_device, current_name, current_batch_size]
+ [f"{x:.4f}" for x in speedup],
)
return result
def xla(args, model_iter_fn, model, example_inputs):
xla_dev = xm.xla_device(devkind=current_device)
model_xla = copy.deepcopy(model).to("cpu").to(device=xla_dev)
example_inputs_xla = tree_map_only(
torch.Tensor, lambda x: x.to("cpu").to(device=xla_dev), example_inputs
)
for _ in range(3): # warmup
timed(model, model_iter_fn, example_inputs)
timed(model_xla, model_iter_fn, example_inputs_xla)
timings = np.zeros((args.repeat, 2), np.float64)
timings.fill(1.0e10)
for rep in range(args.repeat):
timings[rep, 0] = timed(model, model_iter_fn, example_inputs)
timings[rep, 1] = timed(model_xla, model_iter_fn, example_inputs_xla)
pvalue = ttest_ind(timings[:, 0], timings[:, 1]).pvalue
time_baseline, time_xla = np.median(timings, axis=0)
speedup = time_baseline / time_xla
output_csv(
output_filename,
("dev", "name", "batch_size", "speedup", "time_baseline", "time_xla"),
[
current_device,
current_name,
current_batch_size,
speedup,
time_baseline,
time_xla,
],
)
return format_speedup(speedup, pvalue)
def try_script(model, example_inputs):
try:
return torch.jit.script(model)
except Exception:
return None
class AOTInductorModelCache:
cache = dict()
@classmethod
def load(cls, model, example_inputs, eager_forward):
key = id(model)
if key not in cls.cache:
# Register the output dataclass to pytree
example_outputs = eager_forward(
copy.deepcopy(model), clone_inputs(example_inputs)
)
_register_dataclass_output_as_pytree(example_outputs)
example_args, example_kwargs = _normalize_bench_inputs(example_inputs)
example_inputs = torch._export.combine_args_kwargs(
example_args, example_kwargs
)
so_path, exported = torch._export.aot_compile(
model, example_args, example_kwargs
)
output_node = list(exported.graph.nodes)[-1]
output_tensors = [
torch.empty(
node.meta["val"].size(),
dtype=node.meta["val"].dtype,
layout=node.meta["val"].layout,
device=node.meta["val"].device,
)
for node in output_node.args[0]
]
# Use a utility function for easier benchmarking
source = """
#include <torch/csrc/inductor/aot_runtime/model.h>
torch::aot_inductor::AOTInductorModel model;
void run(
const std::vector<at::Tensor>& input_tensors,
std::vector<at::Tensor>& output_tensors) {
model.run(input_tensors, output_tensors, at::cuda::getCurrentCUDAStream());
}
"""
module = torch.utils.cpp_extension.load_inline(
name="aot_inductor",
cpp_sources=[source],
functions=["run"],
extra_ldflags=[so_path],
with_cuda=True,
)
value = {
"module": module,
"exported": exported,
"output_tensors": output_tensors,
"output_spec": exported.call_spec.out_spec,
}
cls.cache[key] = value
return (
cls.cache[key]["module"],
cls.cache[key]["exported"],
cls.cache[key]["output_tensors"],
cls.cache[key]["output_spec"],
)
def export_aot_inductor(forward: Callable):
eager_forward = forward
def opt_aot_inductor(model, example_inputs, collect_outputs=False):
module, exported, output_tensors, output_spec = AOTInductorModelCache.load(
model, example_inputs, eager_forward
)
param_buffer_values = list(exported.state_dict.values())
example_args, example_kwargs = _normalize_bench_inputs(example_inputs)
example_inputs = torch._export.combine_args_kwargs(example_args, example_kwargs)
flat_example_inputs = fx_pytree.tree_flatten_spec(
example_inputs, exported.call_spec.in_spec
)
all_args = (*param_buffer_values, *flat_example_inputs)
module.run(all_args, output_tensors)
return pytree.tree_unflatten(output_tensors, output_spec)
return opt_aot_inductor
def download_retry_decorator(download_fn):
"""
Decorator function for applying retry logic to a download function.
    The wrapped function is retried up to MAX_DOWNLOAD_ATTEMPTS times after the initial attempt, and an exception is raised if every attempt fails.
    After each unsuccessful attempt there is a delay before the next one, which grows linearly with the number of tries.
Usage:
@download_retry_decorator
def download_function(model_name: str):
# download logic goes here
"""
@functools.wraps(download_fn)
def wrapper(self, *args, **kwargs) -> Any:
tries = 0
total_allowed_tries = MAX_DOWNLOAD_ATTEMPTS
while tries <= total_allowed_tries:
try:
model = download_fn(self, *args, **kwargs)
return model
except Exception as e:
tries += 1
if tries <= total_allowed_tries:
wait = tries * 30
print(
f"Failed to load model: {e}. Trying again ({tries}/{total_allowed_tries}) after {wait}s"
)
time.sleep(wait)
else:
raise RuntimeError(
f"Failed to load model '{args}' with following error(s): {str(e)}."
)
return wrapper
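# Hedged note on download_retry_decorator: the wrapper forwards `self`, so it is
# intended for methods on the benchmark runner classes, e.g. (names are hypothetical):
#
#     class MySuite:
#         @download_retry_decorator
#         def _download_model(self, model_name):
#             return fetch_weights(model_name)  # may fail transiently
#
# Each failed attempt sleeps `tries * 30` seconds before retrying, with the attempt
# budget bounded by MAX_DOWNLOAD_ATTEMPTS; the last error is re-raised once the
# budget is exhausted.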
class OnnxModelFromTorchScript:
"""TorchScript based onnx export. `torch.onnx.export`
TODO(bowbao):
* large model export failed.
        Onnx Model is larger than 2GB, but the exporter makes the decision based on the pt model size, which is
        smaller than 2GB.
    * OOM on slightly larger model.
        Both the pt model and the ort inference session are on gpu. An attempt has been made to move ORT to
        cuda:1, however ORT perf drops significantly.
        For now everything runs with batch_size 1, set in the launch script.
"""
TORCH_TO_NUMPY_DTYPE = {
torch.float16: np.float16,
torch.float32: np.float32,
torch.float64: np.float64,
torch.uint8: np.uint8,
torch.int8: np.int8,
torch.int16: np.int16,
torch.int32: np.int32,
torch.int64: np.longlong,
torch.bool: np.bool_,
}
def __init__(self, output_directory, model, example_inputs):
self.model_path = self._generate_onnx_model_path(output_directory)
self._export(
model,
example_inputs,
self.model_path,
opset_version=17,
do_constant_folding=False,
verbose=False,
)
self.onnx_session = self._init_ort_session(self.model_path)
def _generate_onnx_model_path(
self, output_directory: str, onnx_model_folder_name: str = "bench_onnx_models"
) -> str:
# Hack to get model name.
from torch._functorch import aot_autograd
model_name = aot_autograd.model_name
model_path = pathlib.Path(output_directory, onnx_model_folder_name, model_name)
if model_path.exists() and model_path.is_dir():
shutil.rmtree(model_path)
model_path.mkdir(parents=True, exist_ok=True)
return str(model_path / "model.onnx")
def _export(self, model, example_inputs, output_path: str, /, **kwargs) -> None:
# Hack for huggingface models (kwargs only).
if isinstance(example_inputs, dict):
class WrapperModel(torch.nn.Module):
def __init__(self, model, keys):
super().__init__()
self.model = model
self.keys = keys
def forward(self, *args):
return self.model(**dict(zip(self.keys, args)))
model = WrapperModel(model, list(example_inputs.keys()))
torch.onnx.export(
model,
self.format_pt_inputs(example_inputs),
output_path,
**kwargs,
)
def _init_ort_session(self, model_path: str):
import onnxruntime
if current_device == "cpu":
ort_providers = ["CPUExecutionProvider"]
else:
# NOTE(bowbao): Reduce OOM by running ORT on another gpu.
# TODO(bowbao): This works to avoid OOM, but performance is surprisingly very bad.
# cuda_provider_options = {
# "device_id": 1 if torch.cuda.device_count() > 1 else 0,
# }
# ort_providers = [("CUDAExecutionProvider", cuda_provider_options)]
ort_providers = ["CUDAExecutionProvider"]
ort_session = onnxruntime.InferenceSession(
self.model_path,
providers=ort_providers,
)
return ort_session
def format_pt_inputs(self, pt_inputs):
# NOTE(bowbao): For huggingface benchmark, pt_inputs are formatted as dictionary,
# and consumed like `model(**pt_inputs)`.
# For other benchmarks, pt_inputs are formatted as tuple and consumed
# like `model(*pt_inputs)`.
if isinstance(pt_inputs, dict):
pt_inputs = list(pt_inputs.values())
if isinstance(pt_inputs, torch.Tensor):
pt_inputs = (pt_inputs,)
return tuple(arg.contiguous() for arg in pt_inputs)
def format_pt_outputs(self, pt_outputs):
if isinstance(pt_outputs, torch.Tensor):
pt_outputs = (pt_outputs,)
pt_outputs, _ = pytree.tree_flatten(pt_outputs)
# Hack for huggingface model outputs
try:
from transformers import modeling_outputs
except ImportError:
pass
else:
def _to_tuple(x):
if isinstance(x, modeling_outputs.ModelOutput):
return x.to_tuple()
return x
pt_outputs = pytree.tree_map(_to_tuple, pt_outputs)
pt_outputs, _ = pytree.tree_flatten(pt_outputs)
return pt_outputs
def create_outputs(self, *example_outputs):
return tuple(torch.empty_like(x) for x in example_outputs)
def create_iobinding(self, pt_inputs, example_outputs):
pt_inputs = self.format_pt_inputs(pt_inputs)
example_outputs = self.format_pt_outputs(example_outputs)
iobinding = self.onnx_session.io_binding()
args = [arg.contiguous() for arg in pt_inputs]
for ort_input, arg in zip(self.onnx_session.get_inputs(), args):
# NOTE: Small hack to reduce OOM issue by running ORT on another device.
# Disabled due to ORT perf regression.
# if torch.cuda.device_count() > 1:
# arg = arg.detach().to("cuda:1")
device = arg.device
iobinding.bind_input(
ort_input.name,
device.type,
device.index or 0,
self.TORCH_TO_NUMPY_DTYPE[arg.dtype],
arg.size(),
arg.data_ptr(),
)
outputs = self.create_outputs(*example_outputs)
for ort_output, output in zip(self.onnx_session.get_outputs(), outputs):
# if torch.cuda.device_count() > 1:
# output = output.detach().to("cuda:1")
device = output.device
iobinding.bind_output(
ort_output.name,
device.type,
device.index or 0,
self.TORCH_TO_NUMPY_DTYPE[output.dtype],
output.size(),
output.data_ptr(),
)
return iobinding, outputs
def run_with_iobinding(self, iobinding, outputs):
        # 'outputs' are torch empty tensors bound to 'iobinding'.
self.onnx_session.run_with_iobinding(iobinding)
return outputs
def run(self, pt_inputs):
# NOTE: For CUDA performance testing, use `run_with_iobinding` to exclude memory
# copying overhead for inputs/outputs between cpu and gpu.
# Otherwise perf number is inaccurate.
pt_inputs = self.format_pt_inputs(pt_inputs)
onnx_inputs = {
ort_input.name: pt_input.cpu().numpy()
for ort_input, pt_input in zip(self.onnx_session.get_inputs(), pt_inputs)
}
ort_outputs = self.onnx_session.run(None, onnx_inputs)
pt_outputs = [
torch.from_numpy(ort_output).to(current_device)
for ort_output in ort_outputs
]
if len(pt_outputs) == 1:
return pt_outputs[0]
return pt_outputs
class OnnxModelFromDynamo(OnnxModelFromTorchScript):
"""Dynamo and Fx based export. `torch.onnx.dynamo_export`."""
def __init__(self, output_directory, model, example_inputs):
self.model_path = self._generate_onnx_model_path(
output_directory, "bench_dynamo_onnx_model"
)
self._export_output = self._export(model, example_inputs, self.model_path)
self.onnx_session = self._init_ort_session(self.model_path)
def _export(
self, model, example_inputs, output_path: str
) -> torch.onnx.ExportOutput:
example_args, example_kwargs = _normalize_bench_inputs(example_inputs)
options = torch.onnx.ExportOptions()
export_output = torch.onnx.dynamo_export(
model, *example_args, **example_kwargs, export_options=options
)
export_output.save(output_path)
return export_output
def format_pt_inputs(self, pt_inputs):
pt_args, pt_kwargs = _normalize_bench_inputs(pt_inputs)
return self._export_output.adapt_torch_inputs_to_onnx(*pt_args, **pt_kwargs)
def format_pt_outputs(self, pt_outputs):
return self._export_output.adapt_torch_outputs_to_onnx(pt_outputs)
def optimize_onnx_ctx(
output_directory: str,
onnx_model_cls: Type[OnnxModelFromTorchScript],
run_n_iterations: Callable,
) -> Callable:
# NOTE(bowbao): This function creates and returns the onnx version of 'run_n_iterations',
# which does the following:
# 1. Export and cache model.
# 2. Create iobinding for ORT.
# 3. Run ORT for n iterations.
onnx_model: Optional[OnnxModelFromTorchScript] = None
def run_n_iterations_onnx(model, inputs, n=2):
from _onnx import reporter
from torch.onnx._internal import exporter
from torch.onnx._internal.fx import diagnostics
# NOTE(bowbao): Capture all export & ort errors and diagnostics.
# Serialize to csv, to be parsed and summarized later by '._onnx/reporter.py'.
# TODO: Accuracy mismatch is not reported here in csv.
assert (
output_filename.find(".csv") > 0
), f"expected output_filename to be a .csv, but got {output_filename}"
output_error_filename = output_filename[:-4] + "_export_error.csv"
parser = reporter.ExportErrorParser(
current_device, current_name, current_batch_size
)
try:
nonlocal onnx_model
if onnx_model is None:
onnx_model = onnx_model_cls(
output_directory, model, copy.deepcopy(inputs)
)
for _ in range(n - 1):
onnx_model.run(inputs)
return onnx_model.run(inputs)
except exporter.OnnxExporterError as e:
# `torch.onnx.dynamo_export` raises error that encloses diagnostics.
diagnostic_context = e.diagnostic_context
for parsed_error in parser.parse_diagnostic_context(diagnostic_context):
output_csv(
output_error_filename, parsed_error.headers, parsed_error.row
)
# Check also the raw exception that caused export failure.
# Skip if it is already analyzed by diagnostics.
cause_of_exception = e.__cause__
if not isinstance(
cause_of_exception, diagnostics.RuntimeErrorWithDiagnostic
):
parsed_error = parser.parse_exception(cause_of_exception)
output_csv(
output_error_filename, parsed_error.headers, parsed_error.row
)
raise
except Exception as e:
# `torch.onnx.export` errors.
# ORT errors.
parsed_error = parser.parse_exception(e)
output_csv(output_error_filename, parsed_error.headers, parsed_error.row)
raise
return run_n_iterations_onnx
def read_batch_size_from_file(args, filename, model_name):
batch_size = None
if os.path.exists("benchmarks"):
filename = os.path.join("benchmarks", filename)
assert os.path.exists(filename), filename
with open(filename) as f:
lines = f.readlines()
lines = [i.split(",") for i in lines if len(i.strip()) > 0]
for val in lines:
cur_name, b = val
if model_name == cur_name:
batch_size = int(b)
if batch_size is None:
log.warning("Could not find batch size for %s", model_name)
elif batch_size == -1:
raise RuntimeError(
f"Batch size is unset for {model_name} in {args.batch_size_file}"
)
print(f"batch size: {batch_size}")
return batch_size
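# Hedged sketch of the batch-size file format consumed above: one
# "<model_name>,<batch_size>" pair per line (model names below are illustrative):
#
#     toy_model_a,32
#     toy_model_b,16
#
# A batch size of -1 means the value is unset and triggers a RuntimeError above.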
class TimeOutException(Exception):
pass
def alarm_handler(signum, frame):
raise TimeOutException()
def exit_after(s):
"""
    Decorator to raise TimeOutException if the fn takes more than s seconds
    to run.
"""
def outer(fn):
def inner(*args, **kwargs):
signal.signal(signal.SIGALRM, alarm_handler)
signal.alarm(s)
try:
result = fn(*args, **kwargs)
finally:
signal.alarm(0)
return result
return inner
return outer
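# Hedged usage sketch for exit_after (SIGALRM-based, so it only works on Unix and
# in the main thread); the decorated function below is hypothetical:
#
#     @exit_after(60)
#     def run_single_benchmark():
#         ...  # raises TimeOutException if it runs for more than 60 seconds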
def get_peak_memory():
return torch.cuda.max_memory_allocated() / 10**9
def null_experiment(args, model_iter_fn, model, example_inputs):
"""
    A no-op experiment useful for making sure TorchBenchmark alone works properly.
"""
return []
def cast_to(dtype, model, inputs):
    # cast model and inputs to the given dtype
if dtype == torch.float16:
model = model.half()
else:
model = model.to(dtype)
inputs = tree_map(
lambda x: x.to(dtype)
if isinstance(x, torch.Tensor) and x.is_floating_point()
else x,
inputs,
)
return model, inputs
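# Hedged example for cast_to: both the module parameters and any floating-point
# tensors in the inputs are moved to the requested dtype, while integer tensors
# (e.g. token ids) keep their dtype (names below are illustrative):
#
#     model, inputs = cast_to(torch.bfloat16, model, (x_float, token_ids_int64))
#     # x_float -> bfloat16, token_ids_int64 stays int64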
def cast_to_bf16(model, inputs):
return cast_to(torch.bfloat16, model, inputs)
def cast_to_fp16(model, inputs):
return cast_to(torch.float16, model, inputs)
def cast_to_fp64(model, inputs):
return cast_to(torch.float64, model, inputs)
def cast_to_fp32(model, inputs):
return cast_to(torch.float32, model, inputs)
def reset_rng_state(use_xla=False):
torch.manual_seed(1337)
random.seed(1337)
np.random.seed(1337)
if use_xla:
xm.set_rng_state(1337, str(xm.xla_device()))
class DummyGradScaler:
def scale(self, loss):
return loss
def get_dynamo_stats():
# TODO: consider deepcopy'ing the entire counters struct and
# adding a helper to do subtraction on it
return collections.Counter(
{
"calls_captured": torch._dynamo.utils.counters["stats"]["calls_captured"],
"unique_graphs": torch._dynamo.utils.counters["stats"]["unique_graphs"],
"graph_breaks": sum(torch._dynamo.utils.counters["graph_break"].values()),
# NB: The plus removes zero counts
"unique_graph_breaks": len(+torch._dynamo.utils.counters["graph_break"]),
}
)
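# Hedged illustration of how get_dynamo_stats is used elsewhere in this file:
# snapshot the counters before a run and subtract afterwards to get per-run deltas.
#
#     start = get_dynamo_stats()
#     ...  # run the model under dynamo
#     delta = get_dynamo_stats()
#     delta.subtract(start)  # collections.Counter supports in-place subtraction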
def maybe_fresh_cache(fn, is_cold_start):
def inner(*args, **kwargs):
cache_minder = contextlib.nullcontext()
if is_cold_start:
cache_entries = {}
cache_minder = fresh_inductor_cache(cache_entries)
try:
with cache_minder:
return fn(*args, **kwargs)
finally:
dump_cache = False
if dump_cache and is_cold_start:
output_csv(
output_filename[:-4] + "_triton_cache.csv",
["dev", "name", "batch_size", "triton_cache"],
[
current_device,
current_name,
current_batch_size,
cache_entries,
],
)
return inner
@contextmanager
def maybe_init_distributed(should_init_distributed, rank, world_size, port="6789"):
try:
if should_init_distributed:
torch.cuda.set_device(rank)
os.environ["MASTER_ADDR"] = "localhost"
os.environ["MASTER_PORT"] = port
torch.distributed.init_process_group(
"nccl", rank=rank, world_size=world_size
)
yield
finally:
if should_init_distributed:
torch.distributed.destroy_process_group()
class BenchmarkRunner:
def __init__(self):
self.model_iter_fn = None
self.grad_scaler = DummyGradScaler()
self.autocast = contextlib.nullcontext
self.optimizer = None
self._args = None
def setup_amp(self):
if self.args.only in self.fp32_only_models:
return
if self.args.amp and self.args.devices == ["cuda"]:
            # AMP training can lead to small loss values which can underflow
            # gradients, resulting in zero gradients. To solve this
            # problem, PyTorch introduces GradScaler. GradScaler is a stateful
            # structure that scales the loss values to prevent underflow. Loss
            # values are big at the beginning of training (therefore not
            # requiring scaling), while loss values tend to be small as the network
            # gets better (requiring scaling). GradScaler manages all
            # of this fine-tuning, checking whether the gradients are turning to inf and
            # discarding such batches.
            # Since we are not running long iterations, the default
            # init_scale of 65536 is going to turn all gradients to inf. Therefore,
            # we just use an init_scale of 2.0 for benchmarking purposes.
# Disabling Gradscaler because
# 1) Benchmark setup runs 2 iterations of fwd-bwd. So, not useful.
# 2) Current setup shares grad_scaler for eager and dynamo model,
# which is bad as Gradscaler has state and can adjust the scaling
# factor between eager and dynamo run, making accuracy check
# harder.
# self.grad_scaler = torch.cuda.amp.GradScaler(init_scale=2.0)
self.autocast = torch.cuda.amp.autocast
elif (self.args.bfloat16 or self.args.amp) and self.args.devices == ["cpu"]:
self.autocast = torch.cpu.amp.autocast
def init_optimizer(self, name, device, params):
if device == "cuda" and self.args.training and name not in CI_SKIP_OPTIMIZER:
self.optimizer = torch.optim.SGD(params, lr=0.01, foreach=True)
else:
self.optimizer = None
@property
def args(self):
return self._args
@args.setter
def args(self, args):
self._args = args
@property
def skip_models(self):
return set()
@property
def skip_models_for_cuda(self):
return set()
@property
def skip_models_for_cpu(self):
return set()
@property
def slow_models(self):
return set()
@property
def very_slow_models(self):
return set()
@property
def non_deterministic_models(self):
return set()
@property
def fp32_only_models(self):
return set()
@property
def force_amp_for_fp16_bf16_models(self):
return set()
@property
def skip_not_suitable_for_training_models(self):
return set()
@property
def failing_torchinductor_models(self):
return set()
@property
def failing_fx2trt_models(self):
return set()
@property
def skip_accuracy_checks_large_models_dashboard(self):
return set()
@property
def skip_accuracy_check_as_eager_non_deterministic(self):
return set()
def get_tolerance_and_cosine_flag(self, is_training, current_device, name):
raise NotImplementedError()
@property
def equal_nan(self):
equal_nan = True
if self.args.float32:
equal_nan = False
return equal_nan
def iter_models(self, args):
for model_name in self.iter_model_names(args):
for device in args.devices:
try:
yield self.load_model(
device,
model_name,
batch_size=args.batch_size,
)
except NotImplementedError:
continue # bad benchmark implementation
def deepcopy_model(self, model):
return copy.deepcopy(model)
def cast_based_on_args(self, model, example_inputs):
if self.args.float32 or self.args.only in self.fp32_only_models:
if not self.args.float32:
log.warning("Model %s supports float32 only", self.args.only)
model, example_inputs = cast_to_fp32(model, example_inputs)
elif self.args.float16:
if self.args.only in self.force_amp_for_fp16_bf16_models:
log.warning(
"Model %s does not support float16, running with amp instead",
self.args.only,
)
self.args.amp = True
self.setup_amp()
else:
model, example_inputs = cast_to_fp16(model, example_inputs)
elif self.args.bfloat16:
if self.args.only in self.force_amp_for_fp16_bf16_models:
log.warning(
"Model %s does not support bfloat16, running with amp instead",
self.args.only,
)
self.args.amp = True
self.setup_amp()
else:
model, example_inputs = cast_to_bf16(model, example_inputs)
return model, example_inputs
def validate_model(self, model, example_inputs):
"""
Runs the eager model with example inputs to ensure that eager passes.
"""
model = self.deepcopy_model(model)
example_inputs = clone_inputs(example_inputs)
model, example_inputs = self.cast_based_on_args(model, example_inputs)
try:
self.model_iter_fn(model, example_inputs)
except Exception as e:
raise NotImplementedError("Eager model failed to run") from e
def maybe_cast(self, model, example_inputs):
model = self.deepcopy_model(model)
example_inputs = clone_inputs(example_inputs)
model, example_inputs = self.cast_based_on_args(model, example_inputs)
return model, example_inputs
def decay_batch_exp(self, batch_size, factor=0.5, divisor=2):
out_batch_size = batch_size * factor
if out_batch_size > divisor:
out_batch_size = (out_batch_size + 1) // divisor * divisor
else:
out_batch_size = batch_size - 1
return max(0, int(out_batch_size))
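    # Hedged example of the decay schedule above with the defaults
    # (factor=0.5, divisor=2): 1024 -> 512 -> 256 -> ... -> 8 -> 4 -> 3 -> 2 -> 1 -> 0,
    # i.e. roughly halving while the batch size is large, then stepping down by one.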
def batch_size_finder(self, device, model_name, initial_batch_size=1024):
batch_size = initial_batch_size
while batch_size >= 1:
torch.cuda.empty_cache()
try:
device, name, model, example_inputs, _ = self.load_model(
device,
model_name,
batch_size,
)
self.model_iter_fn(model, example_inputs)
return batch_size
except RuntimeError as e:
error_str = str(e)
if "channels_last" in error_str:
break
batch_size = self.decay_batch_exp(batch_size)
return 1
def run_n_iterations(self, mod, inputs):
n = self.args.iterations
for _ in range(n - 1):
self.model_iter_fn(mod, inputs, collect_outputs=False)
return self.model_iter_fn(mod, inputs, collect_outputs=True)
def optimizer_zero_grad(self, mod):
if self.optimizer is not None:
self.optimizer.zero_grad(True)
else:
mod.zero_grad(True)
def optimizer_step(self):
if self.optimizer is not None:
self.optimizer.step()
def get_benchmark_indices(self, length):
start = self._args.partition_id * (length // self._args.total_partitions)
end = (
(self._args.partition_id + 1) * (length // self._args.total_partitions)
if self._args.partition_id < self._args.total_partitions - 1
else length
)
return start, end
def deepcopy_and_maybe_ddp(self, model):
model = self.deepcopy_model(model)
if self.args.ddp:
assert (
torch.distributed.is_available()
), "Can't use DDP without a distributed enabled build"
from torch.nn.parallel import DistributedDataParallel as DDP
model = DDP(model, find_unused_parameters=True)
elif self.args.fsdp:
assert (
torch.distributed.is_available()
), "Can't use FSDP without a distributed enabled build"
from torch.distributed.fsdp import (
FullyShardedDataParallel as FSDP,
MixedPrecision,
)
from torch.distributed.fsdp.wrap import size_based_auto_wrap_policy
if self.args.float16:
dtype = torch.float16
elif self.args.bfloat16:
dtype = torch.bfloat16
else:
dtype = torch.float32
mp_policy = MixedPrecision(
param_dtype=dtype,
# Gradient communication precision.
reduce_dtype=dtype,
# Buffer precision.
buffer_dtype=dtype,
)
my_auto_wrap_policy = functools.partial(
size_based_auto_wrap_policy, recurse=True, min_num_params=int(1e5)
)
model = FSDP(
model,
use_orig_params=True,
device_id=torch.cuda.current_device()
if self.args.devices[-1] == "cuda"
else None,
mixed_precision=mp_policy,
limit_all_gathers=True,
auto_wrap_policy=my_auto_wrap_policy,
)
if torch._inductor.config.triton.cudagraphs:
log.warning("Disabling cudagraphs for FSDP compatibility")
torch._inductor.config.triton.cudagraphs = False
return model
def check_accuracy(
self, name, model, example_inputs, optimize_ctx, experiment, tag
):
"""
Checks accuracy.
1) Collect the outputs with fp64 datatype. This is useful for error checking.
2) Checks if eager itself has variations.
"""
start_stats = get_dynamo_stats()
def record_status(accuracy_status, dynamo_start_stats):
"""
Records the status in the csv file
"""
if current_name in self.non_deterministic_models:
if accuracy_status in (
"pass",
"eager_two_runs_differ",
"fail_accuracy",
):
accuracy_status = "pass"
headers = ["dev", "name", "batch_size", "accuracy"]
fields = [current_device, current_name, current_batch_size, accuracy_status]
if tag is not None:
headers.insert(3, "tag")
fields.insert(3, tag)
dynamo_stats = get_dynamo_stats()
dynamo_stats.subtract(dynamo_start_stats)
for k, v in dynamo_stats.items():
headers.append(k)
fields.append(v)
output_csv(output_filename, headers, fields)
return accuracy_status
if name in self.skip_accuracy_checks_large_models_dashboard:
return record_status("pass_due_to_skip", dynamo_start_stats=start_stats)
# Collect the fp64 reference outputs to be used later for accuracy checking.
fp64_outputs = None
try:
model_fp64, inputs_fp64 = cast_to_fp64(
self.deepcopy_and_maybe_ddp(model),
clone_inputs(example_inputs),
)
self.init_optimizer(name, current_device, model_fp64.parameters())
fp64_outputs = self.run_n_iterations(model_fp64, inputs_fp64)
except Exception:
log.warning(
"fp64 golden ref were not generated for %s. Setting accuracy check to cosine",
name,
)
self.args.cosine = True
fp64_outputs = None
tolerance, cos_similarity = self.get_tolerance_and_cosine_flag(
self.args.training, current_device, name
)
# Cast the model to float16/float32 as necessary
model, example_inputs = self.maybe_cast(model, example_inputs)
accuracy_status = "pass"
with self.pick_grad(name, self.args.training):
# Get results of native pytorch
reset_rng_state()
try:
model_copy = self.deepcopy_and_maybe_ddp(model)
self.init_optimizer(name, current_device, model_copy.parameters())
correct_result = self.run_n_iterations(
model_copy, clone_inputs(example_inputs)
)
except Exception as e:
accuracy_status = (
"eager_1st_run_OOM"
if isinstance(e, torch.cuda.OutOfMemoryError)
else "eager_1st_run_fail"
)
log.exception(e)
return record_status(accuracy_status, dynamo_start_stats=start_stats)
# Rerun native pytorch
reset_rng_state()
try:
model_copy = self.deepcopy_and_maybe_ddp(model)
self.init_optimizer(name, current_device, model_copy.parameters())
correct_rerun_result = self.run_n_iterations(
model_copy, clone_inputs(example_inputs)
)
except Exception as e:
accuracy_status = (
"eager_2nd_run_OOM"
if isinstance(e, torch.cuda.OutOfMemoryError)
else "eager_2nd_run_fail"
)
return record_status(accuracy_status, dynamo_start_stats=start_stats)
# Two eager runs should have exactly same result
is_same = True
try:
if (
name not in self.skip_accuracy_check_as_eager_non_deterministic
and not same(
correct_result,
correct_rerun_result,
fp64_ref=None,
cos_similarity=False,
tol=0,
equal_nan=self.equal_nan,
)
):
is_same = False
except Exception as e:
# Sometimes torch.allclose may throw RuntimeError
is_same = False
if not is_same:
accuracy_status = "eager_two_runs_differ"
return record_status(accuracy_status, dynamo_start_stats=start_stats)
correct_rerun_result = None
# Run with Dynamo
reset_rng_state()
torch._dynamo.reset()
try:
model_copy = self.deepcopy_and_maybe_ddp(model)
self.init_optimizer(name, current_device, model_copy.parameters())
if self.args.export:
# TB and TIMM use list example_inputs
# HF use dict example_inputs
example_args, example_kwargs = _normalize_bench_inputs(
example_inputs
)
# Register the output dataclass to pytree
example_outputs = model_copy(*example_args, **example_kwargs)
_register_dataclass_output_as_pytree(example_outputs)
# apply export on module directly
# no need for n iterations
# the logic should be the same to self.model_iter_fn (forward_pass)
with self.autocast():
optimized_model_iter_fn = optimize_ctx(
model_copy, example_args, example_kwargs
)
new_result = optimized_model_iter_fn(
*example_args, **example_kwargs
)
else:
optimized_model_iter_fn = optimize_ctx(self.run_n_iterations)
new_result = optimized_model_iter_fn(model_copy, example_inputs)
except Exception as e:
log.exception(e)
print(
"TorchDynamo optimized model failed to run because of following error"
)
accuracy_status = (
"OOM"
if isinstance(e, torch.cuda.OutOfMemoryError)
else "fail_to_run"
)
return record_status(accuracy_status, dynamo_start_stats=start_stats)
if name in self.skip_accuracy_check_as_eager_non_deterministic:
return record_status("pass_due_to_skip", dynamo_start_stats=start_stats)
# Workaround for ONNX for non-tensor outputs
if (
current_onnx_compiler == "torchscript"
or current_onnx_compiler == "dynamo"
):
from _onnx import patch
(
correct_result,
new_result,
fp64_outputs,
) = patch.patch_non_tensor_outputs(
correct_result, new_result, fp64_outputs
)
try:
if not same(
correct_result,
new_result,
fp64_outputs,
equal_nan=self.equal_nan,
cos_similarity=cos_similarity,
tol=tolerance,
):
is_same = False
except Exception as e:
# Sometimes torch.allclose may throw RuntimeError
is_same = False
if not is_same:
if self.args.skip_accuracy_check:
accuracy_status = "pass_due_to_skip"
else:
accuracy_status = "fail_accuracy"
return record_status(accuracy_status, dynamo_start_stats=start_stats)
return record_status(accuracy_status, dynamo_start_stats=start_stats)
def check_tolerance(
self, name, model, example_inputs, optimize_ctx, base_device="cpu"
):
"""
Checks tolerance based on https://pytorch.org/docs/stable/generated/torch.allclose.html.
"""
tolerance_status = "pass"
if name in self.skip_accuracy_checks_large_models_dashboard:
tolerance_status = "pass_due_to_skip"
return tolerance_status
# Cast the model to float16/float32 as necessary
model, example_inputs = self.maybe_cast(model, example_inputs)
with self.pick_grad(name, self.args.training):
# Get results of native pytorch
reset_rng_state()
model_copy = copy.deepcopy(model)
model_copy = model_copy.to(base_device)
example_inputs_copy = copy.deepcopy(example_inputs)
example_inputs_copy = tree_map(
lambda x: x.to(base_device), example_inputs_copy
)
self.init_optimizer(name, base_device, model_copy.parameters())
correct_result = self.run_n_iterations(model_copy, example_inputs_copy)
# Run with Dynamo
# Sometime CI fails with random triton compilation failure which will be skipped for now
# TODO: revisit this after switching to new Triton runtime
reset_rng_state()
torch._dynamo.reset()
try:
self.init_optimizer(name, current_device, model.parameters())
optimized_model_iter_fn = optimize_ctx(self.run_n_iterations)
new_result = optimized_model_iter_fn(model, example_inputs)
except Exception as e:
log.exception(e)
if (
self.args.ci
and isinstance(e, BackendCompilerFailed)
and (
"Internal Triton PTX codegen error" in str(e)
or "cubin" in str(e)
)
):
return "pass_due_to_skip"
else:
print(
"TorchDynamo optimized model failed to run because of following error"
)
return "fail_to_run"
def dump_max_mean_values(tol, ref, res):
if isinstance(ref, (list, tuple, torch.nn.ParameterList, torch.Size)):
for refi, resi in zip(ref, res):
dump_max_mean_values(tol, refi, resi)
elif isinstance(ref, dict):
for k in ref.keys():
dump_max_mean_values(tol, ref[k], res[k])
elif isinstance(ref, torch.Tensor):
res = res.to(base_device)
t = torch.abs(ref - res) / (1 + torch.abs(ref))
tol.append(t.flatten().to(torch.float32))
return tol
tol = []
dump_max_mean_values(tol, correct_result, new_result)
            tol = torch.cat(tol)
max = torch.max(tol)
mean = torch.mean(tol)
div = torch.std(tol)
headers = ["dev", "name", "batch_size", "max", "mean", "std"]
fields = [
current_device,
current_name,
current_batch_size,
max.item(),
mean.item(),
div.item(),
]
output_csv(output_filename, headers, fields)
return tolerance_status
def run_performance_test(
self, name, model, example_inputs, optimize_ctx, experiment, tag=None
):
if self.args.xla:
with self.pick_grad(name, self.args.training):
return experiment(*self.maybe_cast(model, example_inputs))
def warmup(fn, model, example_inputs, mode, niters=5):
peak_mem = 0
start_stats = get_dynamo_stats()
try:
if current_device == "cuda":
torch.cuda.reset_peak_memory_stats()
torch.cuda.empty_cache()
t0 = time.perf_counter()
for _ in range(niters):
fn(model, example_inputs)
t1 = time.perf_counter()
latency = t1 - t0
if current_device == "cuda":
peak_mem = get_peak_memory()
elif current_device == "cpu":
total = psutil.virtual_memory().total
percentage = psutil.Process(os.getpid()).memory_percent()
peak_mem = percentage * total / 10**9
except Exception:
log.exception("Backend %s failed in warmup()", mode)
return sys.exit(-1)
dynamo_stats = get_dynamo_stats()
dynamo_stats.subtract(start_stats)
return latency, peak_mem, dynamo_stats
# Cast the model to float16/float32 as necessary
model, example_inputs = self.maybe_cast(model, example_inputs)
# Use distributed wrapping as necessary
model = self.deepcopy_and_maybe_ddp(model)
self.init_optimizer(name, current_device, model.parameters())
with self.pick_grad(name, self.args.training):
ok, total = Stats.reset_counters()
experiment_kwargs = {}
if tag is not None:
experiment_kwargs["tag"] = tag
results = []
eager_latency, eager_peak_mem, _ = warmup(
self.model_iter_fn, model, example_inputs, "eager"
)
optimized_model_iter_fn = optimize_ctx(self.model_iter_fn)
dynamo_latency, dynamo_peak_mem, dynamo_stats = warmup(
optimized_model_iter_fn, model, example_inputs, "dynamo"
)
compilation_time = dynamo_latency - eager_latency
compression_ratio = (
eager_peak_mem / dynamo_peak_mem if dynamo_peak_mem else 0.0
)
if self.args.print_memory:
print(
f"memory: eager: {eager_peak_mem:.2f} GB, "
f"dynamo: {dynamo_peak_mem:.2f} GB, "
f"ratio: {compression_ratio:.2f}"
)
if experiment.func is speedup_experiment:
experiment_kwargs["compilation_latency"] = compilation_time
experiment_kwargs["compression_ratio"] = compression_ratio
experiment_kwargs["eager_peak_mem"] = eager_peak_mem
experiment_kwargs["dynamo_peak_mem"] = dynamo_peak_mem
experiment_kwargs["dynamo_stats"] = dynamo_stats
if experiment.func is coverage_experiment:
ok, total = Stats.reset_counters()
results = []
# run with torch._dynamo few times to populate the cache
for _ in range(3):
optimized_model_iter_fn(model, example_inputs)
_, frames_second_pass = Stats.reset_counters() # should be 0
if frames_second_pass > 0:
optimized_model_iter_fn(model, example_inputs)
_, frames_third_pass = Stats.reset_counters() # should be 0
else:
frames_third_pass = 0
results.append(
f"{ok:3}/{total:3} +{frames_third_pass} frames {compilation_time:3.0f}s"
)
            if not hasattr(model, "name"):
model.name = name
results.append(experiment(model, example_inputs, **experiment_kwargs))
return " ".join(map(str, results))
def minify_model(
self,
name,
model,
example_inputs,
optimize_ctx,
experiment,
tag,
):
logging.info("Minifying %s...", name)
os.environ["TORCH_COMPILE_DEBUG"] = "1"
os.environ["TORCHDYNAMO_REPRO_AFTER"] = "dynamo"
os.environ["TORCHDYNAMO_REPRO_LEVEL"] = "4"
self.check_accuracy(name, model, example_inputs, optimize_ctx, experiment, tag)
if self.args.output_directory:
repro_dir = self.args.output_directory
else:
repro_dir = torch._dynamo.config.base_dir
try:
shutil.move("repro.py", f"{repro_dir}/{name}_repro.py")
except OSError as e:
logging.error("Could not find repro script for model %s", name)
else:
logging.info(
"Repro script for model %s with minified graph saved to %s",
name,
repro_dir,
)
def run_one_model(
self,
name,
model,
example_inputs,
optimize_ctx,
experiment,
explain=False,
tag=None,
):
mode = "train" if self.args.training else "eval"
msg = f"{current_device:4} {mode:5} {current_name:34} "
if tag:
msg += f" {tag:26}"
print(msg, flush=True)
start_stats = get_dynamo_stats()
if self.args.accuracy:
status = self.check_accuracy(
name, model, example_inputs, optimize_ctx, experiment, tag
)
print(status)
if status == "fail_accuracy" and self.args.minify:
self.minify_model(
name, model, example_inputs, optimize_ctx, experiment, tag
)
elif self.args.tolerance:
status = self.check_tolerance(name, model, example_inputs, optimize_ctx)
print(status)
elif self.args.performance:
status = self.run_performance_test(
name, model, example_inputs, optimize_ctx, experiment, tag
)
print(status)
if self.args.timing:
from torch._dynamo.utils import op_count, print_time_report
from torch.utils._stats import simple_call_counter
print_time_report()
stats = "STATS: "
stats = stats + " | ".join(
itertools.chain(
[f"call_* op count: {op_count}"],
(f"{key}:{value}" for key, value in simple_call_counter.items()),
)
)
print(stats)
stats = get_dynamo_stats()
stats.subtract(start_stats)
if explain:
print(
f"Dynamo produced {stats['unique_graphs']} graphs "
f"covering {stats['calls_captured']} ops with "
f"{stats['graph_breaks']} graph breaks ({stats['unique_graph_breaks']} unique)"
)
if explain or self.args.log_graph_breaks or self.args.print_graph_breaks:
filename = f"{output_filename.rstrip('.csv')}_graph_breaks.csv"
def add_double_quotes(x):
# Delimiter because reason could have comma
return f'"{x}"'
for graph_break in graph_break_reasons:
reason = add_double_quotes(graph_break.reason)
user_stack = add_double_quotes(
", ".join([str(x) for x in graph_break.user_stack])
)
output_csv(
filename,
["model", "reason", "user_stack"],
[current_name, reason, user_stack],
)
if self.args.stats:
Stats.print_summary()
def help(fn):
return fn.__doc__
diff_branch_default = "DIFF-BRANCH-DEFAULT"
def should_diff_branch(args):
return args.diff_branch != diff_branch_default
def parse_args(args=None):
parser = argparse.ArgumentParser()
parser.add_argument(
"--filter", "-k", action="append", help="filter benchmarks with regexp"
)
parser.add_argument(
"--exclude", "-x", action="append", help="filter benchmarks with regexp"
)
parser.add_argument(
"--exclude-exact", action="append", help="filter benchmarks with exact match"
)
parser.add_argument(
"--total-partitions",
type=int,
default=1,
choices=range(1, 10),
help="Total number of partitions we want to divide the benchmark suite into",
)
parser.add_argument(
"--partition-id",
type=int,
default=0,
help="ID of the benchmark suite partition to be run. Used to divide CI tasks",
)
parser.add_argument(
"--devices", "--device", "-d", action="append", help="cpu or cuda"
)
parser.add_argument("--device-index", help="CUDA device index")
parser.add_argument(
"--repeat", "-n", type=int, default=30, help="number of timing runs"
)
iterations_per_run_help = """
        Run this many iterations for each time measurement. This is mainly used for
        XLA training. We want to run multiple iterations per measurement so the
        tracing and computation for different iterations can overlap with each
        other. This makes sure we have an accurate XLA baseline.
"""
parser.add_argument(
"--iterations-per-run", type=int, default=1, help=iterations_per_run_help
)
parser.add_argument(
"--randomize-input",
action="store_true",
help="Whether to randomize the input values. Dimensions will be kept the same.",
)
parser.add_argument(
"--threads",
"-t",
type=int,
help="number of threads to use for eager and inductor",
)
parser.add_argument(
"--nopython", action="store_true", help="Turn graph breaks into errors"
)
parser.add_argument(
"--no-skip",
action="store_true",
help="run models that are in the global SKIP list",
)
parser.add_argument(
"--prims-nvfuser", action="store_true", help="user prims + nvfuser backend"
)
parser.add_argument(
"--dump-raw-metrics",
action="store_true",
help="dump raw timing metrics from speedup experiment",
)
parser.add_argument(
"--log-operator-inputs",
action="store_true",
default=False,
)
parser.add_argument(
"--channels-last",
action="store_true",
default=False,
help="use channels last format",
)
parser.add_argument(
"--batch-size", "--batch_size", type=int, help="batch size for benchmarking"
)
parser.add_argument(
"--iterations", type=int, default=2, help="how many iterations to run"
)
parser.add_argument(
"--batch-size-file", type=str, help="String to load batch size from"
)
parser.add_argument("--cosine", action="store_true", help="use cosine similarity")
parser.add_argument(
"--cpp-wrapper", action="store_true", help="turn on cpp/cuda wrapper codegen"
)
parser.add_argument(
"--freezing", action="store_true", help="turn on freezing", default=False
)
parser.add_argument(
"--ci", action="store_true", help="Flag to tell that its a CI run"
)
parser.add_argument(
"--dynamic-ci-skips-only",
action="store_true",
help=(
"Run only the models that would have been skipped in CI "
"if dynamic-shapes, compared to running without dynamic-shapes. "
"This is useful for checking if more models are now "
"successfully passing with dynamic shapes. "
"Implies --dynamic-shapes and --ci"
),
)
parser.add_argument(
"--dashboard", action="store_true", help="Flag to tell that its a Dashboard run"
)
parser.add_argument(
"--skip-fp64-check", action="store_true", help="skip accuracy check using fp64"
)
parser.add_argument(
"--fast", "-f", action="store_true", help="skip slow benchmarks"
)
parser.add_argument(
"--only",
help="""Run just one model from torchbench. Or
specify the path and class name of the model in format like:
--only=path:<MODEL_FILE_PATH>,class:<CLASS_NAME>
Due to the fact that dynamo changes current working directory,
the path should be an absolute path.
The class should have a method get_example_inputs to return the inputs
for the model. An example looks like
```
class LinearModel(nn.Module):
def __init__(self):
super().__init__()
self.linear = nn.Linear(10, 10)
def forward(self, x):
return self.linear(x)
def get_example_inputs(self):
return (torch.randn(2, 10),)
```
""",
)
parser.add_argument(
"--multiprocess",
action="store_true",
help="Create n processes based on the number of devices (distributed use case).",
)
parser.add_argument(
"--ddp",
action="store_true",
help="Wraps model in DDP before running it, and uses dynamo DDPOptmizer (graph breaks) by default.",
)
parser.add_argument(
"--fsdp",
action="store_true",
help="""Wraps model in FSDP before running it. Disables cudagraphs by default.
Doesn't recursively wrap, mainly useful for checking dynamo UnspecNNModule compatibility
""",
)
parser.add_argument(
"--no-optimize-ddp",
action="store_true",
help="Disables dynamo DDPOptimizer (graph breaks). (Applies only when using --ddp benchmark mode).",
)
parser.add_argument(
"--distributed-master-port",
default="6789",
help="Port to bind for for torch.distributed. Use the default unless it's conflicting with another user",
)
parser.add_argument(
"--dynamic-shapes",
action="store_true",
help="Runs a dynamic shapes version of the benchmark, if available.",
)
parser.add_argument(
"--dynamic-batch-only",
action="store_true",
help="Only assume batch dimension is dynamic. Implies --dynamic-shapes",
)
parser.add_argument(
"--specialize-int", action="store_true", help="Run with specialize_int=True."
)
parser.add_argument(
"--use-eval-mode",
action="store_true",
help="sets model.eval() to reduce randomness",
)
parser.add_argument(
"--skip-accuracy-check",
action="store_true",
help="keeps running even when accuracy fails",
)
parser.add_argument(
"--generate-aot-autograd-stats",
action="store_true",
help="Generates AOT Autograd stats like how mnay graphs are sent to AOT",
)
parser.add_argument(
"--inductor-settings",
action="store_true",
help="Use same settings as --inductor for baseline comparisons",
)
parser.add_argument(
"--suppress-errors",
action="store_true",
help="Suppress errors instead of raising them",
)
parser.add_argument(
"--output",
help="Overrides the output filename",
)
parser.add_argument(
"--output-directory",
help="Overrides the directory to place output files.",
)
parser.add_argument(
"--baseline",
help="Compare with a prior --output",
)
parser.add_argument(
"--part",
default=None,
help="Specify the part of the model to run.",
)
parser.add_argument(
"--export-profiler-trace",
action="store_true",
help="exports trace of kineto profiler",
)
parser.add_argument(
"--profiler-trace-name",
"--profiler_trace_name",
help="Overwrites exported trace name",
)
parser.add_argument(
"--diff-branch",
default=diff_branch_default,
help="delta current branch against given branch.",
)
parser.add_argument(
"--tag", default=None, help="Specify a tag to be included in csv files."
)
parser.add_argument(
"--explain",
action="store_true",
help="print some graph/op statistics during the run, similar to .explain()",
)
parser.add_argument(
"--stats",
action="store_true",
help="print graph counter stats",
)
parser.add_argument(
"--print-memory",
action="store_true",
help="print extra memory statistics",
)
parser.add_argument(
"--print-dataframe-summary",
action="store_true",
help="print dataframe result used for calculating accuracy",
)
parser.add_argument(
"--cold-start-latency",
"--cold_start_latency",
action="store_true",
help="Use a fresh triton cachedir when running each model, to force cold-start compile.",
)
parser.add_argument(
"--disable-cudagraphs",
action="store_true",
help="Disables cudagraphs for Inductor",
)
parser.add_argument(
"--disable-split-reductions",
action="store_true",
help="Disables split reductions for Inductor",
)
parser.add_argument(
"--disable-persistent-reductions",
action="store_true",
help="Disables split reductions for Inductor",
)
parser.add_argument(
"--disable-divisible-by-16",
action="store_true",
help="Disables divisible by 16 hint to Triton for Inductor",
)
parser.add_argument(
"--inductor-compile-mode",
default=None,
help="torch.compile mode argument for inductor runs.",
)
parser.add_argument(
"--print-graph-breaks",
action="store_true",
help="Show a warning whenever graph break",
)
parser.add_argument(
"--log-graph-breaks",
action="store_true",
help="log graph breaks in a file",
)
parser.add_argument(
"--trace-on-xla",
action="store_true",
help="Whether to trace the model on XLA or on eager device",
)
parser.add_argument(
"--xla-tolerance",
type=float,
default=1e-2,
help="XLA needs a loose tolerance to pass the correctness check",
)
parser.add_argument(
"--collect-outputs",
action="store_true",
help="""Whether to collect outputs for training. Set this to true if we
want to verify the numerical correctness of graidents. But that may
cause time measurement not accurate""",
)
parser.add_argument(
"--enable-activation-checkpointing",
action="store_true",
help="Enables activation checkpointing for HF models",
)
parser.add_argument("--timing", action="store_true", help="Emits phase timing")
parser.add_argument(
"--progress",
action="store_true",
help="Print n/k models message between each model run.",
)
parser.add_argument(
"--timeout",
type=int,
default=2000,
help="timeout (second) for benchmarking.",
)
parser.add_argument(
"--per_process_memory_fraction",
type=float,
default=1,
help="Set per-process GPU memory fraction (limit) for reducing usable size and reproducing OOMs",
)
parser.add_argument(
"--no-translation-validation",
action="store_true",
help="Disable translation validation for accuracy builds.",
)
parser.add_argument(
"--minify",
action="store_true",
help="Enable minification when failure is below tolerance. Save repro script for each model.",
)
group_fuser = parser.add_mutually_exclusive_group()
# --nvfuser is now the default, keep the option to not break scripts
group_fuser.add_argument("--nvfuser", action="store_true", help=argparse.SUPPRESS)
group_fuser.add_argument("--nnc", action="store_true", help="enable NNC for GPUs")
group_prec = parser.add_mutually_exclusive_group()
group_prec.add_argument("--float16", action="store_true", help="cast model to fp16")
group_prec.add_argument(
"--bfloat16", action="store_true", help="cast model to bf16"
)
group_prec.add_argument("--float32", action="store_true", help="cast model to fp32")
group_prec.add_argument(
"--amp", action="store_true", help="use automatic mixed precision"
)
group_printout = parser.add_mutually_exclusive_group()
group_printout.add_argument(
"--verbose", "-v", action="store_true", help="enable verbose debug printouts"
)
group_printout.add_argument(
"--quiet", "-q", action="store_true", help="suppress debug printouts"
)
group = parser.add_mutually_exclusive_group()
group.add_argument(
"--coverage", action="store_true", help="(default) " + help(coverage_experiment)
)
group.add_argument(
"--overhead", action="store_true", help=help(overhead_experiment)
)
group.add_argument(
"--speedup-dynamo-ts",
action="store_true",
help="TorchDynamo frontend with torchscript backend",
)
group.add_argument(
"--speedup-fx2trt", action="store_true", help=help(speedup_experiment_fx2trt)
)
group.add_argument(
"--speedup-fx2trt-fp16",
action="store_true",
help=help(speedup_experiment_fx2trt),
)
group.add_argument(
"--print-fx",
action="store_true",
help="Print fx traces captured from model",
)
group.add_argument(
"--print-aten-ops",
action="store_true",
help="Print traces of aten ops captured by AOT autograd",
)
group.add_argument(
"--inductor",
action="store_true",
help="Measure speedup with TorchInductor",
)
group.add_argument(
"--export",
action="store_true",
help="Measure pass rate with export",
)
group.add_argument(
"--export-aot-inductor",
action="store_true",
help="Measure pass rate with Export+AOTInductor",
)
group.add_argument(
"--xla", action="store_true", help="Compare TorchXLA to eager PyTorch"
)
group.add_argument(
"--torchscript-onnx",
"--torchscript_onnx",
action="store_true",
help="Measure speedup with TorchScript ONNX, i.e. `torch.onnx.export`",
)
group.add_argument(
"--dynamo-onnx",
"--dynamo_onnx",
action="store_true",
help="Measure speedup with Dynamo ONNX, i.e. `torch.onnx.dynamo_export`",
)
group.add_argument(
"--backend",
choices=torch._dynamo.list_backends(exclude_tags=None),
help="measure speedup with a given backend",
)
group.add_argument("--nothing", action="store_true", help=help(null_experiment))
group.add_argument(
"--log-conv-args",
action="store_true",
help="Dump convolution input/weight/bias's shape/stride/dtype and other options to json",
)
group.add_argument(
"--recompile-profiler",
"--recompile_profiler",
action="store_true",
help="Run the dynamo recompilation profiler on each model.",
)
group.add_argument(
"--find-batch-sizes",
action="store_true",
help="finds the largest batch size that could fit on GPUs",
)
mode_group = parser.add_mutually_exclusive_group(required=True)
mode_group.add_argument(
"--accuracy",
action="store_true",
help="Checks accuracy with small batch size and eval mode",
)
mode_group.add_argument(
"--performance", action="store_true", help="Measures performance speedup"
)
mode_group.add_argument(
"--tolerance",
action="store_true",
help="extracts the tolerance for each model with small batch size and eval mode",
)
run_mode_group = parser.add_mutually_exclusive_group(required=True)
run_mode_group.add_argument(
"--training",
action="store_true",
help="Performs training",
)
run_mode_group.add_argument(
"--inference", action="store_true", help="Performs inference"
)
return parser.parse_args(args)
def process_entry(rank, runner, original_dir, args):
args.rank = rank
with maybe_init_distributed(
args.use_distributed,
rank=rank,
world_size=args.world_size,
port=args.distributed_master_port,
):
return maybe_fresh_cache(
run, (args.cold_start_latency and args.only) or args.ci
)(runner, args, original_dir)
def main(runner, original_dir=None, args=None):
if original_dir:
os.chdir(original_dir)
args = parse_args(args)
if args.baseline:
args.baseline = os.path.abspath(args.baseline)
if should_diff_branch(args):
import git
# We do this here so we error out earlier if there's an issue
repo = git.Repo()
if repo.is_dirty():
raise RuntimeError(
"--diff-branch called on dirty branch. Commit, stash, or reset."
)
main_branch = repo.active_branch.name
if main_branch == args.diff_branch:
raise RuntimeError(
f"--diff-branch: current branch is same as {args.diff_branch} branch, what are you diffing?"
)
device_count = torch.cuda.device_count()
args.use_distributed = (args.ddp or args.fsdp) and args.only
if args.multiprocess:
if device_count <= 1:
log.warning(
"The use multiprocess flag is set but there are <= 1 devices available."
)
# multiprocess path
args.world_size = device_count
mp.spawn(process_entry, args=(runner, original_dir, args), nprocs=device_count)
else:
# single process path just uses the main process
args.world_size = 1
process_entry(0, runner, original_dir, args)
def run(runner, args, original_dir=None):
# Pass the parsed args object to benchmark runner object
runner.args = args
args.filter = args.filter or [r"."]
args.exclude = args.exclude or [r"^$"]
args.exclude_exact = args.exclude_exact or []
if args.inductor:
assert args.backend is None
args.backend = "inductor"
if args.dynamic_ci_skips_only:
args.dynamic_shapes = True
args.ci = True
if args.dynamic_batch_only:
args.dynamic_shapes = True
torch._dynamo.config.assume_static_by_default = True
if args.dynamic_shapes:
if not args.dynamic_batch_only:
torch._dynamo.config.assume_static_by_default = False
if args.specialize_int:
torch._dynamo.config.specialize_int = True
if args.ci:
if args.accuracy:
# Run fewer iterations when checking accuracy
args.repeat = 2
# Set translation validation on by default on CI accuracy runs.
torch._dynamo.config.translation_validation = True
if args.dynamic_ci_skips_only:
            # Test only the incremental set of jobs whose skip was
            # caused solely by turning on dynamic shapes
assert args.dynamic_shapes
ci = functools.partial(CI, args.backend, training=args.training)
args.filter = list(
set(CI_SKIP[ci(dynamic=True)]) - set(CI_SKIP[ci(dynamic=False)])
)
else:
ci = functools.partial(
CI, args.backend, training=args.training, dynamic=args.dynamic_shapes
)
for device in args.devices:
args.exclude_exact.extend(CI_SKIP[ci(device=device)])
if args.ddp:
# TODO: we could also hook DDP bench up to --speedup bench, _not_ for mgpu e2e perf,
# but just to measure impact on singlenode of performing graph-breaks.
# Left it as a follow up to keep this PR isolated.
assert (
args.accuracy
), "DDP benchmark is currently only hooked up to --accuracy bench"
assert args.training, "DDP benchmark requires --training mode"
if args.no_optimize_ddp:
torch._dynamo.config.optimize_ddp = False
else:
# TODO(whc) after enabling DDPOptimizer by default this could be removed or assert
torch._dynamo.config.optimize_ddp = True
if args.only == "dlrm":
log.error(
"DLRM+DDP is unsupported as it requires sharding the embedding layer separately from DDP"
)
return sys.exit(-1)
if args.accuracy:
# Use small batch size. We use >1 batch size to ensure we test
# batch_norm type of operators that work on batch dims.
# TODO - Go through the failures for batch size = 2
if args.batch_size is None:
if runner.suite_name == "huggingface":
args.batch_size = 1
elif runner.suite_name == "torchbench":
args.batch_size = 4
else:
                # Larger batch size for TIMM models to have stable batch_norm
assert runner.suite_name == "timm_models"
args.batch_size = 8
# Remove sources of randomness
if runner.suite_name not in ("timm_models", "huggingface"):
# TODO - Using train mode for timm_models and HF models. Move to train mode for Torchbench as well.
args.use_eval_mode = True
inductor_config.fallback_random = True
if args.only is not None and args.only not in {
"alexnet",
"Background_Matting",
"pytorch_CycleGAN_and_pix2pix",
"pytorch_unet",
"Super_SloMo",
"vgg16",
# https://github.com/pytorch/pytorch/issues/96724
"Wav2Vec2ForCTC",
"Wav2Vec2ForPreTraining",
"sam",
}:
# some of the models do not support use_deterministic_algorithms
torch.use_deterministic_algorithms(True)
os.environ["CUBLAS_WORKSPACE_CONFIG"] = ":4096:8"
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.allow_tf32 = False
torch.backends.cudnn.benchmark = False
torch.backends.cuda.matmul.allow_tf32 = False
            # Remove randomness when torch.manual_seed is called
patch_torch_manual_seed()
        # Some models, e.g. yolov3, assert the batch size based on the number of GPUs
if "CUDA_VISIBLE_DEVICES" not in os.environ:
args.device_index = "0"
# Stricter check to disable fallbacks
args.suppress_errors = False
if args.device_index is not None:
os.environ["CUDA_VISIBLE_DEVICES"] = args.device_index
elif args.performance:
# Ensure that we test on real scenarios
args.use_eval_mode = False
if args.partition_id > args.total_partitions or args.partition_id < 0:
print("Invalid partition id")
return sys.exit(-1)
if not args.devices:
if torch.cuda.is_available():
args.devices = ["cuda"]
else:
log.warning("torch.cuda.is_available() == False, using CPU")
args.devices = ["cpu"]
if args.devices != ["cpu"] and torch.cuda.is_available():
global synchronize
synchronize = torch.cuda.synchronize
if (
args.devices == ["cuda"]
and torch.cuda.get_device_properties(0).total_memory < 25 * 2**30
):
        # OOM errors on an RTX 3090 with 24GB of memory
runner.skip_models.update(
{
# torchbench
"hf_Longformer",
"timm_nfnet",
"timm_efficientdet",
}
)
if args.training:
runner.skip_models.add("hf_T5")
if args.nnc:
torch._C._jit_override_can_fuse_on_cpu(True)
torch._C._jit_override_can_fuse_on_gpu(True)
torch._C._jit_set_texpr_fuser_enabled(True)
torch._C._jit_set_nvfuser_enabled(False)
if args.threads:
torch.set_num_threads(args.threads)
if args.verbose:
torch._logging.set_logs(dynamo=logging.DEBUG)
if args.print_graph_breaks:
torch._dynamo.config.print_graph_breaks = True
if args.quiet:
torch._logging.set_logs(dynamo=logging.ERROR)
torch._dynamo.config.suppress_errors = args.suppress_errors
if args.training:
runner.model_iter_fn = runner.forward_and_backward_pass
runner.skip_models.update(runner.skip_not_suitable_for_training_models)
else:
runner.model_iter_fn = runner.forward_pass
if args.fast:
runner.skip_models.update(runner.slow_models)
if args.devices == ["cpu"]:
runner.skip_models.update(runner.very_slow_models)
runner.skip_models.update(runner.skip_models_for_cpu)
elif args.devices == ["cuda"]:
runner.skip_models.update(runner.skip_models_for_cuda)
if args.no_skip:
runner.skip_models.clear()
experiment = null_experiment
global current_name, current_device, current_batch_size, output_filename, optimize_ctx, current_onnx_compiler
optimize_ctx = contextlib.nullcontext()
if args.overhead:
optimize_ctx = torch._dynamo.optimize(dummy_fx_compile, nopython=args.nopython)
experiment = speedup_experiment
output_filename = "overheads.csv"
elif args.inductor:
inductor_config.debug = args.verbose
if args.threads:
inductor_config.cpp.threads = args.threads
optimize_ctx = functools.partial(
torch.compile,
backend="inductor",
fullgraph=args.nopython,
mode=args.inductor_compile_mode,
)
experiment = speedup_experiment
output_filename = "inductor.csv"
elif args.export:
optimize_ctx = torch._export.export
experiment = speedup_experiment
output_filename = "export.csv"
elif args.xla:
(dev,) = args.devices
os.environ["PJRT_DEVICE"] = {"cuda": "GPU", "cpu": "CPU"}[dev]
torch._dynamo.mark_dynamic = MagicMock()
experiment = xla
output_filename = "xla.csv"
elif args.torchscript_onnx:
optimize_ctx = functools.partial(
optimize_onnx_ctx, args.output_directory or ".", OnnxModelFromTorchScript
)
experiment = functools.partial(
speedup_experiment_onnx, OnnxModelFromTorchScript
)
output_filename = "torchscript_onnx.csv"
current_onnx_compiler = "torchscript"
elif args.dynamo_onnx:
optimize_ctx = functools.partial(
optimize_onnx_ctx, args.output_directory or ".", OnnxModelFromDynamo
)
experiment = functools.partial(speedup_experiment_onnx, OnnxModelFromDynamo)
output_filename = "dynamo_onnx.csv"
current_onnx_compiler = "dynamo"
elif args.speedup_dynamo_ts:
optimize_ctx = torch._dynamo.optimize("ts", nopython=args.nopython)
experiment = speedup_experiment
output_filename = "speedup_dynamo_ts.csv"
elif args.prims_nvfuser:
optimize_ctx = torch._dynamo.optimize("prims_nvfuser", nopython=args.nopython)
experiment = speedup_experiment
backend_str = "prims_nvfuser"
output_filename = f"accuracy_aot_{backend_str}.csv"
elif args.print_fx:
optimize_ctx = torch._dynamo.optimize(
print_fx,
nopython=args.nopython,
)
elif args.print_aten_ops:
optimize_ctx = torch._dynamo.optimize(
print_aten_ops,
nopython=args.nopython,
)
elif args.nothing:
optimize_ctx = nothing
experiment = speedup_experiment
output_filename = "nothing.csv"
elif args.backend or args.export_aot_inductor:
if args.export_aot_inductor:
assert not args.training, "AOTInductor only supports inference"
assert args.devices == ["cuda"], "AOTInductor only tested for CUDA"
optimize_ctx = export_aot_inductor
else:
optimize_ctx = torch._dynamo.optimize(args.backend, nopython=args.nopython)
experiment = speedup_experiment
if args.accuracy:
output_filename = f"accuracy_{args.backend}.csv"
elif args.tolerance:
output_filename = f"tolerance_{args.backend}.csv"
else:
output_filename = f"speedup_{args.backend}.csv"
elif args.recompile_profiler:
output_filename = "recompile_profiler_log.csv"
experiment = recompile_profiler_experiment
else:
optimize_ctx = torch._dynamo.optimize(
fx_insert_profiling, nopython=args.nopython
)
experiment = coverage_experiment
output_filename = "coverage.csv"
if args.inductor or args.backend == "inductor" or args.export_aot_inductor:
inductor_config.triton.cudagraphs = not args.disable_cudagraphs
inductor_config.triton.persistent_reductions = (
not args.disable_persistent_reductions
)
inductor_config.split_reductions = not args.disable_split_reductions
inductor_config.triton.divisible_by_16 = not args.disable_divisible_by_16
inductor_config.cpp_wrapper = args.cpp_wrapper
if args.inference:
inductor_config.freezing = args.freezing
runner.setup_amp()
if args.output:
output_filename = args.output
if output_filename:
if args.output_directory:
output_filename = os.path.join(args.output_directory, output_filename)
else:
output_filename = os.path.join(
torch._dynamo.config.base_dir, output_filename
)
if args.find_batch_sizes and args.only:
for device in args.devices:
batch_size = runner.batch_size_finder(device, args.only)
print(args.only, batch_size)
output_csv(output_filename, [], [args.only, batch_size])
return
if args.export_profiler_trace:
if args.profiler_trace_name is None:
if args.backend:
args.profiler_trace_name = args.backend
elif args.inductor:
args.profiler_trace_name = "inductor"
else:
args.profiler_trace_name = "profile"
else:
args.profiler_trace_name = args.profiler_trace_name
if args.no_translation_validation:
# Overwrite 'translation_validation' config, if specified.
torch._dynamo.config.translation_validation = False
experiment = functools.partial(experiment, args, runner.model_iter_fn)
if args.only and should_diff_branch(args):
import git
repo = git.Repo()
main_branch = repo.active_branch.name
try:
# Adding diff-branch again to the args will override previous value
call_args = (
[sys.executable] + sys.argv + [f"--diff-branch={diff_branch_default}"]
)
# Run for main branch
subprocess.check_call(call_args + [f"--tag={main_branch}"])
# Run for comparison branch
repo.git.checkout(args.diff_branch)
subprocess.check_call(call_args + [f"--tag={args.diff_branch}"])
finally:
# Go back to main branch
repo.git.checkout(main_branch)
elif args.only:
model_name = args.only
for device in args.devices:
batch_size = args.batch_size
if args.batch_size_file:
batch_size = read_batch_size_from_file(
args, args.batch_size_file, model_name
)
if model_specified_by_path(args.only):
model, example_inputs = load_model_from_path(args.only)
name = model.__class__.__name__
model = model.to(device=device)
example_inputs = tree_map_only(
torch.Tensor, lambda x: x.to(device=device), example_inputs
)
else:
try:
with tqdm(desc="loading model"):
if args.part:
(
device,
name,
model,
example_inputs,
batch_size,
) = runner.load_model(
device,
model_name,
batch_size=batch_size,
part=args.part,
)
else:
if args.fsdp:
# Always load model on cpu for fsdp
# When initializing FSDP, we will use the cuda device if args.cuda is set
(
_,
name,
model,
example_inputs,
batch_size,
) = runner.load_model(
"cpu", model_name, batch_size=batch_size
)
else:
(
device,
name,
model,
example_inputs,
batch_size,
) = runner.load_model(
device, model_name, batch_size=batch_size
)
except NotImplementedError as e:
print(e)
import traceback
print(traceback.format_exc())
logging.warning("%s failed to load", args.only)
continue # bad benchmark implementation
if args.trace_on_xla:
xla_dev = xm.xla_device()
model = model.to(device=xla_dev)
example_inputs = tree_map_only(
torch.Tensor, lambda x: x.to(device=xla_dev), example_inputs
)
current_name = name
current_device = device
current_batch_size = batch_size
set_model_name(name)
            # Look for anything that looks like a batch size and mark it dynamic.
            # A better approach would be to integrate directly with the benchmark
            # suite, but that is not convenient to do here.
            # NB: This must be done late enough that we don't do any more
            # conversions on the inputs.
            # NB: Assumes only the first batch-like dimension is the batch.
marked = False
def detect_and_mark_batch(t):
nonlocal marked
for i, s in enumerate(t.size()):
if s == batch_size:
torch._dynamo.mark_dynamic(t, i)
marked = True
break
if (
args.dynamic_batch_only
and batch_size > 1
and model_name not in CI_SKIP_DYNAMIC_BATCH_ONLY
):
tree_map_only(torch.Tensor, detect_and_mark_batch, example_inputs)
assert marked, f"nothing in example_inputs had a dim with {batch_size}"
if args.log_operator_inputs:
log_operator_inputs(
model, example_inputs, runner.model_iter_fn, name, args
)
continue
if args.per_process_memory_fraction != 1:
torch.cuda.set_per_process_memory_fraction(
args.per_process_memory_fraction
)
model, example_inputs = runner.cast_based_on_args(model, example_inputs)
runner.run_one_model(
name,
model,
example_inputs,
optimize_ctx,
experiment,
explain=args.explain,
tag=args.tag,
)
if args.generate_aot_autograd_stats:
stats_file = output_filename.split(".csv")[0] + "_stats.csv"
output_csv(
stats_file,
("dev", "name", "batch_size", "total_aot_graphs", "ok_aot_graphs"),
[
current_device,
current_name,
current_batch_size,
*Stats.aot_summary(),
],
)
else:
if output_filename and os.path.exists(output_filename):
os.unlink(output_filename)
if original_dir:
os.chdir(original_dir)
model_names = list(runner.iter_model_names(args))
nmodels = len(model_names)
for i, name in enumerate(model_names):
current_name = name
placeholder_batch_size = 0
if args.progress:
print(f"Running model {i+1}/{nmodels}", flush=True)
def write_csv(status):
if args.accuracy:
headers = ["dev", "name", "batch_size", "accuracy"]
rows = [
[device, name, placeholder_batch_size, status]
for device in args.devices
]
elif args.performance:
headers = ["dev", "name", "batch_size", "speedup", "abs_latency"]
rows = [
[device, name, placeholder_batch_size, 0.0, 0.0]
for device in args.devices
]
else:
headers = []
rows = [
[device, name, placeholder_batch_size, 0.0]
for device in args.devices
]
for row in rows:
output_csv(output_filename, headers, row)
try:
timeout = args.timeout
if should_diff_branch(args):
timeout *= 2
subprocess.check_call(
[sys.executable] + sys.argv + [f"--only={name}"], timeout=timeout
)
except subprocess.TimeoutExpired:
print("TIMEOUT", file=sys.stderr)
write_csv("timeout")
except subprocess.SubprocessError:
print("ERROR", file=sys.stderr)
write_csv("infra_error")
print_summary(output_filename, print_dataframe=args.print_dataframe_summary)
def log_operator_inputs(model, example_inputs, model_iter_fn, name, args):
mode = "training" if args.training else "eval"
output = os.path.join(os.path.dirname(args.output), f"{name}_{mode}.txt")
# TODO - add option for coalescing inputs over multiple runs
if os.path.exists(output):
print(f"Skipping {name}, {output} already exists")
return
print(f"Running {name}")
operator_mode = OperatorInputsMode()
fake_tensor_mode = FakeTensorMode()
with torch._subclasses.fake_tensor.FakeCopyMode(fake_tensor_mode):
model_fake = copy.deepcopy(model)
example_inputs_fake = copy.deepcopy(example_inputs)
try:
with fake_tensor_mode, operator_mode:
model_iter_fn(model_fake, example_inputs_fake, collect_outputs=False)
except Exception as e:
print(f"{name} failed to run with fake tensors, trying real. Exception: {e}")
operator_mode = OperatorInputsMode()
try:
with operator_mode:
model_iter_fn(model, example_inputs, collect_outputs=False)
except Exception as e2:
print(f"{name} failed to run with real. Exception: {e2}")
raise
print(f"Writing output to {output}")
operator_mode.log_to_file(output)
if __name__ == "__main__":
raise RuntimeError(
f"You shouldn't run {sys.argv[0]} directly, instead try timm_model.py, torchbench.py or hugginface.py"
)
|
import atexit
import collections
import contextlib
import copy
import cProfile
import dataclasses
import datetime
import dis
import enum
import functools
import gc
import inspect
import itertools
import linecache
import logging
import math
import operator
import os
import pstats
import sys
import textwrap
import time
import types
import typing
import weakref
from contextlib import contextmanager
from functools import lru_cache, wraps
from typing import Any, Dict, Optional, Tuple, Union
import numpy as np
# import torch._logging
# import torch._numpy as tnp
# from torch._guards import detect_fake_mode # noqa: F401
from torch._dynamo import config
# NOTE: Make sure `NP_SUPPORTED_MODULES` and `NP_TO_TNP_MODULE` are in sync.
NP_SUPPORTED_MODULES = (np, np.fft, np.linalg, np.random)
# NP_TO_TNP_MODULE = {
# np: tnp,
# np.fft: tnp.fft,
# np.linalg: tnp.linalg,
# np.random: tnp.random,
# }
import importlib
import torch
import torch._functorch.config
import torch.fx.experimental.symbolic_shapes
from torch import fx
from torch._dispatch.python import enable_python_dispatcher
from torch._subclasses.fake_tensor import FakeTensor
from torch.nn.modules.lazy import LazyModuleMixin
from torch.utils._pytree import tree_map
counters = collections.defaultdict(collections.Counter)
troubleshooting_url = "https://pytorch.org/docs/master/compile/troubleshooting.html"
nnmodule_doc_url = "https://pytorch.org/docs/master/compile/nn-module.html"
nnmodule_doc_url_msg = f"See {nnmodule_doc_url} for more information and limitations."
log = logging.getLogger(__name__)
# profiling compilation time by function
compilation_time_metrics = collections.OrderedDict()
# profiling compilation time by frame phase
frame_phase_timing = collections.OrderedDict()
timer_counter = itertools.count()
def tabulate(rows, headers):
try:
import tabulate
return tabulate.tabulate(rows, headers=headers)
except ImportError:
return "\n".join(
", ".join(map(str, row)) for row in itertools.chain([headers], rows)
)
def dynamo_profiled(func):
@wraps(func)
def profile_wrapper(*args, **kwargs):
global timer_counter
        profile_id = next(timer_counter)
        datafn = (
            func.__name__ + f"{profile_id}.profile"
        )  # Name the data file sensibly
prof = cProfile.Profile()
prof.enable()
retval = prof.runcall(func, *args, **kwargs)
prof.disable()
print(f"### Cprofile for {func.__name__} iter {next(timer_counter)} ###")
ps = pstats.Stats(prof)
ps.sort_stats(pstats.SortKey.TIME).print_stats(20)
ps.sort_stats(pstats.SortKey.CUMULATIVE).print_stats(20)
prof.dump_stats(datafn)
return retval
return profile_wrapper
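# Illustrative usage sketch: `dynamo_profiled` wraps an arbitrary function with
# cProfile, prints the top entries by time and cumulative time, and dumps a
# `<name><counter>.profile` file. The helper below is a hypothetical example,
# not part of this module; calling it writes a profile dump to the current
# working directory.
def _dynamo_profiled_usage_example():
    @dynamo_profiled
    def _work():
        return sorted(range(10_000), reverse=True)

    return _work()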
curr_frame = 0
# Note: Called for you by dynamo - you almost never ever want to invoke this yourself.
def increment_frame():
global curr_frame
curr_frame = curr_frame + 1
# Note: Called for you by dynamo - you almost never ever want to invoke this yourself.
def reset_frame_count():
global curr_frame
frame_phase_timing.clear()
compilation_time_metrics.clear()
curr_frame = 0
op_count = 0
def increment_op_count(cnt):
global op_count
op_count += cnt
# Print a report of time spent so far
# Ex:
# TIMING:
# entire_frame_compile:8.574629999999999
# backend_compile:5.26806
def print_time_report():
total = 0
total_by_key = {}
for timings in frame_phase_timing.values():
for key, timing in timings.items():
total += timing
if key not in total_by_key:
total_by_key[key] = timing
else:
total_by_key[key] += timing
out = "TIMING:"
for key, value in total_by_key.items():
out = f"{out} {key}:{round(value, 5)}"
print(out)
# dynamo_timed API works as a function decorator
# By wrapping a function in dynamo_timed, we can store a record in compilation_time_metrics
# where the key is the function's name.
# For example:
#
# @dynamo_timed
# def _foo(...):
#
# Would show up as an entry in our timing dict:
# OrderedDict([('bar.<locals>._foo', [0.083690, 0.23949, 3.1425e-05])])
# This is extremely useful for granular debugging.
#
# For a higher-level mode, pass a phase_name into dynamo_timed
# a phase_name adds an extra record to a separate compilation timing structure,
# one keyed on frame+name rather than function.
# The frame is incremented outside of this function, in def increment_frame() above.
def dynamo_timed(original_function=None, phase_name=None):
def dynamo_timed_inner(func):
@wraps(func)
def time_wrapper(*args, **kwargs):
key = func.__qualname__
if key not in compilation_time_metrics:
compilation_time_metrics[key] = []
with torch.profiler.record_function(f"{key} (dynamo_timed)"):
t0 = time.time()
r = func(*args, **kwargs)
time_spent = time.time() - t0
compilation_time_metrics[key].append(time_spent)
if phase_name:
frame_key = str(curr_frame)
if frame_key not in frame_phase_timing:
frame_phase_timing[frame_key] = {}
assert (
phase_name not in frame_phase_timing[frame_key]
), f"Duplicate phase name {phase_name} for frame {frame_key}"
frame_phase_timing[frame_key][phase_name] = time_spent
return r
return time_wrapper
if original_function:
return dynamo_timed_inner(original_function)
return dynamo_timed_inner
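# Illustrative usage sketch: `dynamo_timed` can be applied as a bare decorator
# or with a `phase_name`, as described in the comment above. The helper and the
# phase name "example_phase" below are hypothetical, not part of this module.
def _dynamo_timed_usage_example():
    @dynamo_timed
    def _plain_step():
        return sum(range(10))

    @dynamo_timed(phase_name="example_phase")
    def _phased_step():
        return sum(range(10))

    _plain_step()
    _phased_step()
    # Timings are now recorded under the functions' qualified names in
    # `compilation_time_metrics`, and the phased call is also recorded under
    # the current frame in `frame_phase_timing`.
    return compilation_time_metrics, frame_phase_timing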
def compile_times(repr="str", aggregate=False):
"""
Get metrics about torchdynamo frontend/backend compilation times.
Accumulates information from functions tagged with `@dynamo_timed`.
repr='str' returns a printable string for user interaction, and 'csv'
    returns (headers, rows), which can be logged for output.
aggregate causes values from multiple compilations (e.g. split graphs)
to be accumulated into one value. If false, expect more than one value
per metric.
"""
def fmt_fn(values, item_fn=lambda x: x):
if aggregate:
return item_fn(sum(values))
return ", ".join(map(item_fn, values))
if repr == "str":
rows = [
(k, fmt_fn(compilation_time_metrics[k], item_fn=lambda x: f"{x:.4f}"))
for k in compilation_time_metrics
]
out = "TorchDynamo compilation metrics:\n"
out += tabulate(rows, headers=("Function", "Runtimes (s)"))
return out
elif repr == "csv":
values = [
fmt_fn(v, item_fn=lambda x: f"{x:.6f}")
for v in compilation_time_metrics.values()
]
headers = list(compilation_time_metrics.keys())
return headers, values
@atexit.register
def dump_compile_times():
log.info(compile_times(repr="str", aggregate=True))
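# Illustrative usage sketch: the "csv" form of `compile_times` returns
# (headers, values) that can be written with the stdlib csv module. The helper
# and the output path below are hypothetical, not part of this module.
def _compile_times_csv_example(path="compile_times.csv"):
    import csv

    headers, values = compile_times(repr="csv", aggregate=True)
    with open(path, "w", newline="") as f:
        writer = csv.writer(f)
        writer.writerow(headers)
        writer.writerow(values)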
tensortype_to_dtype = {
torch.FloatTensor: (torch.float32, torch.float),
torch.DoubleTensor: (torch.float64, torch.double),
torch.HalfTensor: (torch.float16, torch.half),
torch.BFloat16Tensor: (torch.bfloat16,),
torch.ByteTensor: (torch.uint8,),
torch.CharTensor: (torch.int8,),
torch.LongTensor: (torch.int64, torch.long),
torch.IntTensor: (torch.int32, torch.int),
torch.ShortTensor: (torch.int16, torch.short),
torch.BoolTensor: (torch.bool,),
}
class DuplicateWarningChecker:
def __init__(self, maxsize=4096):
self.maxsize = maxsize
self.reset()
def reset(self):
self.set = collections.OrderedDict()
def add(self, key):
if key in self.set:
self.set.move_to_end(key, last=True)
if not config.verbose:
return False
else:
self.set[key] = None
while len(self.set) > self.maxsize:
self.set.popitem(last=False)
return True
graph_break_dup_warning_checker = DuplicateWarningChecker()
def setup_compile_debug():
compile_debug = os.environ.get("TORCH_COMPILE_DEBUG", "0") == "1"
if compile_debug:
torch._logging.set_logs(
dynamo=logging.DEBUG,
aot=logging.DEBUG,
inductor=logging.DEBUG,
output_code=True, # this is off by default
)
return add_file_handler()
return contextlib.ExitStack()
def reset_graph_break_dup_checker():
graph_break_dup_warning_checker.reset()
def add_file_handler():
log_path = os.path.join(get_debug_dir(), "torchdynamo")
if not os.path.exists(log_path):
os.makedirs(log_path)
log_file_handler = logging.FileHandler(os.path.join(log_path, "debug.log"))
logger = logging.getLogger("torch._dynamo")
logger.addHandler(log_file_handler)
exitstack = contextlib.ExitStack()
exitstack.callback(lambda: logger.removeHandler(log_file_handler))
return exitstack
def setup_log_file():
exitstack = contextlib.ExitStack()
if config.log_file_name is not None:
log_file_handler = logging.FileHandler(config.log_file_name)
for logger in logging.get_loggers():
logger.addHandler(log_file_handler)
exitstack.callback(lambda: logger.removeHandler(log_file_handler))
return exitstack
return exitstack
def gen_record_file_name(exc, code):
return f"{get_debug_dir()}/error_recordings/\
{code.co_name}_{type(exc).__name__}_{code.co_firstlineno}.rec"
def write_record_to_file(filename, exec_record):
try:
if os.path.exists(filename):
log.warning(
"Unable to write execution record %s; file already exists.", filename
)
else:
os.makedirs(os.path.dirname(filename), exist_ok=True)
with open(filename, "wb") as f:
exec_record.dump(f)
except Exception:
log.error("Unable to write execution record %s", filename, exc_info=1)
def count_calls(g: fx.Graph):
c = 0
for n in g.nodes:
if "call" in n.op:
c += 1
return c
def identity(x):
return x
def nothing(*args, **kwargs):
pass
class ExactWeakKeyDictionary:
"""Similar to weakref.WeakKeyDictionary, but use `is`/`id` rather than `==` to compare equality"""
def __init__(self):
self.values = dict()
self.refs = dict()
def __getitem__(self, key):
return self.values[id(key)]
def get(self, key, default=None):
return self.values.get(id(key), default)
def __contains__(self, key):
return id(key) in self.values
def __setitem__(self, key, value):
idx = id(key)
if idx not in self.refs:
self.refs[idx] = weakref.ref(key, lambda ref: self._remove_id(idx))
self.values[idx] = value
def _remove_id(self, idx):
if idx in self.values:
del self.values[idx]
if idx in self.refs:
del self.refs[idx]
def clear(self):
self.refs.clear()
self.values.clear()
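# Illustrative usage sketch: ExactWeakKeyDictionary keys entries by object
# identity and drops them once the key object is garbage collected. The helper
# below is hypothetical, not part of this module (it assumes CPython's
# refcounting collects `k` as soon as the last strong reference is deleted).
def _exact_weak_key_dictionary_example():
    class Key:
        pass

    d = ExactWeakKeyDictionary()
    k = Key()
    d[k] = "value"
    assert k in d and d[k] == "value"
    del k  # the weakref callback removes the entry once the key is collected
    return len(d.values) == 0  # expected to be True under CPython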
def istype(obj, allowed_types):
"""isinstance() without subclasses"""
if isinstance(allowed_types, (tuple, list, set)):
return type(obj) in allowed_types
return type(obj) is allowed_types
def is_typing(value):
if sys.version_info < (3, 9):
return isinstance(value, typing._GenericAlias)
else:
return isinstance(
value, (typing._SpecialGenericAlias, typing._UnionGenericAlias)
)
def is_numpy_int_type(value):
return istype(
value,
(
np.int8,
np.int16,
np.int32,
np.int64,
np.uint8,
np.uint16,
np.uint32,
np.uint64,
),
)
def is_numpy_float_type(value):
return istype(
value,
(
np.float16,
np.float32,
np.float64,
),
)
def is_numpy_ndarray(value):
return istype(value, np.ndarray)
def istensor(obj):
"""Check of obj is a tensor"""
tensor_list = (
torch.Tensor,
torch.nn.Parameter,
*config.traceable_tensor_subclasses,
)
tensor_list = tensor_list + (torch._subclasses.FakeTensor,)
return istype(obj, tensor_list)
def is_lazy_module(mod):
return isinstance(mod, LazyModuleMixin)
@functools.lru_cache(4096)
def print_once(*args):
print(*args)
def make_cell(val=None):
"""Some black magic to create a cell object that usually only exists in a closure"""
x = val
def f():
return x
assert len(f.__closure__) == 1
return f.__closure__[0]
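# Illustrative usage sketch: the object returned by `make_cell` behaves like a
# real closure cell and exposes its value via `cell_contents`. The helper below
# is hypothetical, not part of this module.
def _make_cell_example():
    cell = make_cell(42)
    assert cell.cell_contents == 42
    return cell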
def proxy_args_kwargs(args, kwargs):
try:
proxy_args = tuple(arg.as_proxy() for arg in args)
proxy_kwargs = {key: arg.as_proxy() for key, arg in kwargs.items()}
return proxy_args, proxy_kwargs
except NotImplementedError as e:
from .exc import unimplemented
from .variables.base import typestr
raise unimplemented(
f"call_function args: {typestr(*args)} {typestr(*list(kwargs.values()))}"
) from e
@dataclasses.dataclass
class CompilationMetrics:
frame_key: str
co_name: str
co_filename: str
co_firstlineno: int
cache_size: int
guard_count: Optional[int]
graph_op_count: Optional[int]
graph_node_count: Optional[int]
graph_input_count: Optional[int]
entire_frame_compile_time_s: Optional[float]
backend_compile_time_s: Optional[float]
fail_reason: Optional[str]
@dataclasses.dataclass
class CleanupHook:
"""Remove a global variable when hook is called"""
scope: Dict[str, Any]
name: str
def __call__(self, *args):
CleanupManager.count -= 1
del self.scope[self.name]
@staticmethod
def create(scope, name, val):
assert name not in scope
CleanupManager.count += 1
scope[name] = val
return CleanupHook(scope, name)
class CleanupManager(ExactWeakKeyDictionary):
count = 0
def _remove_id(self, idx):
for hook in self.values[idx]:
hook()
super()._remove_id(idx)
CleanupManager.instance = CleanupManager()
def clone_tensor(x):
"""Clone the tensor and its gradient"""
y = x.clone().requires_grad_(x.requires_grad)
if x.is_leaf and x.grad is not None:
y.grad = x.grad.clone()
return y
def clone_input(x, *, dtype=None):
"""copy while preserving strides"""
# TODO: this is questionable
if isinstance(x, torch._subclasses.FakeTensor):
# this func fails on fake tensors in __torch_dispatch__
return x
def torch_clone(x):
y = torch.clone(x)
if x.is_leaf:
y.requires_grad_(x.requires_grad)
if x.is_leaf and x.grad is not None:
y.grad = clone_input(x.grad, dtype=dtype)
if hasattr(x, "_dynamo_dynamic_indices"):
y._dynamo_dynamic_indices = x._dynamo_dynamic_indices.copy()
return y
with torch.no_grad():
if x.device.type == "xla":
            # Accessing data_ptr() on an XLA tensor will cause a crash
return torch_clone(x)
needed_size = sum(
(shape - 1) * stride for shape, stride in zip(x.size(), x.stride())
)
if x.is_quantized:
result = torch.empty_quantized((needed_size + 32,), x)
else:
result = torch.empty(
needed_size + 32, dtype=dtype or x.dtype, device=x.device
)
cache_line_offset = (
(x.data_ptr() - result.data_ptr()) % 32
) // x.element_size()
result.as_strided_(x.size(), x.stride(), cache_line_offset)
try:
result.copy_(x.clone())
if x.is_leaf:
result.requires_grad_(x.requires_grad)
if x.is_leaf and x.grad is not None:
result.grad = clone_input(x.grad, dtype=dtype)
except RuntimeError:
# RuntimeError: unsupported operation: more than one element of the written-to
# tensor refers to a single memory location. Please clone() the tensor before
# performing the operation.
return torch_clone(x)
if hasattr(x, "_dynamo_dynamic_indices"):
result._dynamo_dynamic_indices = x._dynamo_dynamic_indices.copy()
return result
def clone_inputs(example_inputs):
if type(example_inputs) is dict:
res = dict(example_inputs)
for key, value in res.items():
if isinstance(value, tuple):
res[key] = clone_inputs(value)
else:
assert isinstance(value, torch.Tensor), type(value)
res[key] = clone_input(value)
return res
res = list(example_inputs)
for i in range(len(res)):
if isinstance(res[i], torch.Tensor):
res[i] = clone_input(res[i])
return res
@contextmanager
def preserve_rng_state():
with torch.utils._python_dispatch._disable_current_modes():
rng_state = torch.clone(torch.random.get_rng_state())
if torch.cuda.is_available():
cuda_rng_state = torch.clone(torch.cuda.get_rng_state())
try:
yield
finally:
with torch.utils._python_dispatch._disable_current_modes():
torch.random.set_rng_state(rng_state)
if torch.cuda.is_available():
torch.cuda.set_rng_state(cuda_rng_state)
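# Illustrative usage sketch: code run under `preserve_rng_state` can reseed or
# consume the RNG freely without perturbing the caller's random state. The
# helper below is hypothetical, not part of this module.
def _preserve_rng_state_example():
    before = torch.random.get_rng_state()
    with preserve_rng_state():
        torch.manual_seed(0)
        torch.randn(4)  # consumes RNG inside the guarded block
    after = torch.random.get_rng_state()
    return torch.equal(before, after)  # expected to be True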
def is_jit_model(model0):
return isinstance(
model0,
(
torch.jit._trace.TopLevelTracedModule,
torch.jit._script.RecursiveScriptModule,
torch.jit.ScriptFunction,
torch.jit.ScriptModule,
),
)
def torchscript(model, example_inputs, verbose=False):
if is_jit_model(model):
# already done?
return model
try:
return torch.jit.trace(model, example_inputs)
except Exception:
try:
return torch.jit.script(model)
except Exception:
if verbose:
log.exception("jit error")
else:
log.error("Both torch.jit.trace and torch.jit.script failed")
return None
def getfile(obj):
try:
return inspect.getfile(obj)
except TypeError:
return None
def is_namedtuple(obj):
"""Test if an object is a namedtuple or a torch.return_types.* quasi-namedtuple"""
return is_namedtuple_cls(type(obj))
def is_namedtuple_cls(cls):
"""Test if an object is a namedtuple or a torch.return_types.* quasi-namedtuple"""
try:
if issubclass(cls, tuple):
bases = getattr(cls, "__bases__", []) or [None]
module = getattr(cls, "__module__", None)
return module == "torch.return_types" or (
bases[0] is tuple and hasattr(cls, "_make") and hasattr(cls, "_fields")
)
except TypeError:
pass
return False
@functools.lru_cache(1)
def namedtuple_fields(cls):
"""Get the fields of a namedtuple or a torch.return_types.* quasi-namedtuple"""
if cls is slice:
return ["start", "stop", "step"]
assert issubclass(cls, tuple)
if hasattr(cls, "_fields"):
# normal namedtuples
return cls._fields
@dataclasses.dataclass
class Marker:
index: int
# frustrating ones e.g. torch.return_types.max
assert cls.__module__ == "torch.return_types"
obj = cls(map(Marker, range(cls.n_fields)))
fields = [None] * cls.n_fields
for name in dir(obj):
if name[0] != "_" and isinstance(getattr(obj, name), Marker):
fields[getattr(obj, name).index] = name
return fields
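# Illustrative usage sketch: `namedtuple_fields` works both for regular
# namedtuples and for torch.return_types results such as torch.max(..., dim=...).
# The helper below is hypothetical, not part of this module.
def _namedtuple_fields_example():
    max_result_cls = type(torch.max(torch.randn(3), dim=0))
    # Expected field names: "values" and "indices"
    return namedtuple_fields(max_result_cls)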
def checkpoint_params(gm):
with torch.no_grad():
rng_state = torch.clone(torch.random.get_rng_state())
if torch.cuda.is_available():
cuda_rng_state = torch.clone(torch.cuda.get_rng_state())
saved_state = []
for param in itertools.chain(gm.parameters(), gm.buffers()):
saved_state.append((param, param._version, torch.clone(param)))
def restore():
with torch.no_grad():
torch.random.set_rng_state(rng_state)
if torch.cuda.is_available():
torch.cuda.set_rng_state(cuda_rng_state)
for param, version, original_value in saved_state:
if param._version != version:
param.copy_(original_value)
return restore
def timed(model, example_inputs, times=1):
if torch.cuda.is_available():
synchronize = torch.cuda.synchronize
else:
synchronize = nothing
synchronize()
gc.collect()
torch.manual_seed(1337)
t0 = time.perf_counter()
for _ in range(times):
result = model(*example_inputs)
synchronize()
t1 = time.perf_counter()
return result, t1 - t0
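# Illustrative usage sketch: `timed` returns the last result together with the
# wall-clock seconds spent over `times` iterations. The toy model below is
# hypothetical, not part of this module.
def _timed_example():
    model = torch.nn.Linear(8, 8)
    example_inputs = (torch.randn(2, 8),)
    result, seconds = timed(model, example_inputs, times=5)
    return result.shape, seconds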
def check_is_cuda(gm, example_inputs):
return all(x.is_cuda for x in itertools.chain(example_inputs, gm.parameters(True)))
@lru_cache(32)
def rot_n_helper(n):
assert n > 1
vars = [f"v{i}" for i in range(n)]
rotated = reversed(vars[-1:] + vars[:-1])
fn = eval(f"lambda {','.join(vars)}: ({','.join(rotated)})")
fn.__name__ = f"rot_{n}_helper"
return fn
def is_safe_constant(v):
if istype(v, (tuple, frozenset)):
return all(map(is_safe_constant, v))
return isinstance(v, (enum.Enum, type)) or istype(
v,
(
types.CodeType,
int,
float,
bool,
str,
bytes,
type(None),
slice,
type(type),
torch.device,
torch.dtype,
),
)
def guard_if_dyn(arg):
from .variables import ConstantVariable, SymNodeVariable
if isinstance(arg, SymNodeVariable):
# This is because SymNodeVariable intentionally doesn't define
# as_python_constant to avoid shunting down some codepaths
# that expect consts. In this case, we know we definitely
# want to specialize though.
return arg.evaluate_expr()
elif isinstance(arg, ConstantVariable):
return arg.as_python_constant()
return arg
def check_constant_args(args, kwargs):
return all(x.is_python_constant() for x in itertools.chain(args, kwargs.values()))
def check_unspec_python_args(args, kwargs):
from torch._dynamo.variables.constant import ConstantVariable
from torch._dynamo.variables.tensor import UnspecializedPythonVariable
unspec_count = 0
for x in itertools.chain(args, kwargs.values()):
if isinstance(x, UnspecializedPythonVariable):
unspec_count += 1
elif not isinstance(x, (UnspecializedPythonVariable, ConstantVariable)):
return False
else:
pass
return unspec_count > 0
def check_numpy_ndarray_args(args, kwargs):
from torch._dynamo.variables.tensor import NumpyNdarrayVariable
return any(
isinstance(x, NumpyNdarrayVariable)
for x in itertools.chain(args, kwargs.values())
)
def specialize_args_kwargs(tx, args, kwargs):
specialized_args = []
specialized_kwargs = {}
for x in args:
specialized_args.append(x.as_specialized(tx))
for k, v in kwargs.items():
specialized_kwargs.update({k: v.as_specialized(tx)})
return specialized_args, specialized_kwargs
dict_values = type(dict().values())
odict_values = type(collections.OrderedDict().values())
tuple_iterator = type(iter(tuple()))
tuple_iterator_len = tuple_iterator.__length_hint__
object_new = object.__new__
def nn_module_new(cls):
obj = object_new(cls)
torch.nn.Module.__init__(obj)
return obj
def product(it):
return functools.reduce(operator.mul, it, 1)
def tuple_iterator_getitem(it, index):
_, (obj,), start = it.__reduce__()
return obj[start + index]
def enum_repr(value, local):
# enum class can override __str__ method. Use __class__ and name attribute
# to extract the class name and key name.
name = value.__class__.__name__
val = value.name
scope = "L" if local else "G"
local_name = f'{scope}["{name}"].{val}'
return local_name
def dict_param_key_ids(value):
return {
id(k) for k in value.keys() if isinstance(k, (torch.nn.Parameter, torch.Tensor))
}
def dict_const_keys(value):
return {
k for k in value.keys() if not isinstance(k, (torch.nn.Parameter, torch.Tensor))
}
def dict_const_keys_repr(const_keys, *, local):
if any(isinstance(k, enum.Enum) for k in const_keys):
        # To work around repr(Enum) returning an invalid global reference before Python 3.11,
        # call enum_repr and remove the quotes to render the enum in guard code.
const_keys_str = f"{ {enum_repr(k, local=local) if isinstance(k, enum.Enum) else repr(k) for k in const_keys} }".replace(
"'", ""
)
else:
const_keys_str = f"{const_keys!r}"
return const_keys_str
def global_key_name(key):
return f"__dict_key_{id(key)}"
from torch._subclasses import ( # noqa: F401
FakeTensorMode,
UnsupportedFakeTensorException,
)
def wrap_fake_exception(fn):
try:
return fn()
except UnsupportedFakeTensorException as e:
from .exc import unimplemented
msg = f"Unsupported: {e.reason} with fake tensor propagation."
log.warning(msg)
raise unimplemented(msg) from e
def deepcopy_to_fake_tensor(obj, fake_mode):
with torch._subclasses.fake_tensor.FakeCopyMode(fake_mode):
return wrap_fake_exception(lambda: copy.deepcopy(obj))
def rmse(ref, res):
"""
Calculate root mean squared error
"""
return torch.sqrt(torch.mean(torch.square(ref - res)))
def same(
ref,
res,
fp64_ref=None,
cos_similarity=False,
tol=1e-4,
equal_nan=False,
exact_dtype=True,
relax_numpy_equality=False,
ignore_non_fp=False,
log_error=log.error,
):
"""Check correctness to see if ref and res match"""
if fp64_ref is None:
fp64_ref = ref
if isinstance(ref, (list, tuple, torch.nn.ParameterList, torch.Size)):
assert isinstance(res, (list, tuple)), f"type mismatch {type(ref)} {type(res)}"
if len(ref) != len(res):
log_error("Length mismatch")
return False
return len(ref) == len(res) and all(
same(
ai,
bi,
fp64_refi,
cos_similarity,
tol,
equal_nan,
exact_dtype,
relax_numpy_equality,
ignore_non_fp,
log_error=log_error,
)
for ai, bi, fp64_refi in zip(ref, res, fp64_ref)
)
elif isinstance(ref, dict):
assert isinstance(res, dict)
assert set(ref.keys()) == set(
res.keys()
), f"keys mismatch {set(ref.keys())} == {set(res.keys())}"
for k in sorted(ref.keys()):
if not (
same(
ref[k],
res[k],
fp64_ref[k],
cos_similarity=cos_similarity,
tol=tol,
equal_nan=equal_nan,
exact_dtype=exact_dtype,
relax_numpy_equality=relax_numpy_equality,
ignore_non_fp=ignore_non_fp,
log_error=log_error,
)
):
log_error("Accuracy failed for key name %s", k)
return False
return True
elif isinstance(ref, torch.Tensor):
assert not isinstance(ref, torch._subclasses.FakeTensor)
assert not isinstance(res, torch._subclasses.FakeTensor)
if ref.is_sparse:
assert res.is_sparse
ref = ref.to_dense()
res = res.to_dense()
assert isinstance(res, torch.Tensor), f"type mismatch {type(ref)} {type(res)}"
if exact_dtype:
if ref.dtype != res.dtype:
log_error("dtype mismatch %s, %s", ref.dtype, res.dtype)
return False
if ref.dtype == torch.bool:
if ignore_non_fp:
return True
# triton stores bool as int8, so add this for more accurate checking
r = torch.allclose(
ref.to(dtype=torch.uint8),
res.to(dtype=torch.uint8),
atol=tol,
rtol=tol,
equal_nan=equal_nan,
)
if not r:
log_error("Accuracy failed: uint8 tensor did not match")
return r
if cos_similarity:
ref = ref.flatten().to(torch.float32)
res = res.flatten().to(torch.float32)
if torch.allclose(ref, res, atol=tol, rtol=tol, equal_nan=True):
# early exit that handles zero/nan better
# cosine_similarity(zeros(10), zeros(10), dim=0) is 0
return True
score = torch.nn.functional.cosine_similarity(ref, res, dim=0, eps=1e-6)
if score < 0.99:
log.warning("Similarity score=%s", score.cpu().detach().item())
return score >= 0.99
else:
if not exact_dtype:
ref = ref.to(res.dtype)
# First try usual allclose
if torch.allclose(ref, res, atol=tol, rtol=tol, equal_nan=equal_nan):
return True
# Check error from fp64 version
if fp64_ref.dtype == torch.float64:
ref_error = rmse(fp64_ref, ref).item()
res_error = rmse(fp64_ref, res).item()
multiplier = 2.0
if (
fp64_ref.numel() < 1000
or (ref.ndim == 4 and ref.shape[-1] == ref.shape[-2] == 1)
# large tol means a benchmark has been specified as REQUIRE_HIGHER_TOLERANCE
or tol >= 2 * 1e-2
):
# In the presence of noise, noise might dominate our error
# metric for smaller tensors.
                    # Similarly, for 1x1 kernels, there seems to be high noise with amp.
multiplier = 3.0
passes_test = res_error <= (multiplier * ref_error + tol / 10.0)
if not passes_test:
log_error(
"RMSE (res-fp64): %.5f, (ref-fp64): %.5f and shape=%s",
res_error,
ref_error,
res.size(),
)
# import pdb; pdb.set_trace()
return passes_test
if ignore_non_fp:
return True
log_error("Accuracy failed: allclose not within tol=%s", tol)
return False
elif isinstance(ref, (str, int, type(None), bool, torch.device)):
if ignore_non_fp:
return True
r = ref == res
if not r:
log_error("Accuracy failed (%s): %s != %s", type(ref), ref, res)
return r
elif isinstance(ref, float):
r = math.isclose(ref, res, rel_tol=tol, abs_tol=tol)
if not r:
log_error(
"Accuracy failed (float): %s != %s (within tol=%s)", ref, res, tol
)
return r
elif is_numpy_int_type(ref) or is_numpy_float_type(ref):
if relax_numpy_equality and not (
is_numpy_int_type(res) or is_numpy_float_type(res)
):
ref = ref.item()
r = (type(ref) is type(res)) and (ref == res)
if not r:
log_error("Accuracy failed (numpy): %s != %s", ref, res)
return r
elif is_numpy_ndarray(ref):
return (type(ref) is type(res)) and same(
torch.as_tensor(ref),
torch.as_tensor(res),
fp64_ref,
cos_similarity=cos_similarity,
tol=tol,
equal_nan=equal_nan,
exact_dtype=exact_dtype,
relax_numpy_equality=relax_numpy_equality,
ignore_non_fp=ignore_non_fp,
log_error=log_error,
)
elif type(ref).__name__ in (
"MaskedLMOutput",
"Seq2SeqLMOutput",
"CausalLMOutputWithCrossAttentions",
"LongformerMaskedLMOutput",
"Instances",
"SquashedNormal",
"Boxes",
"Normal",
"TanhTransform",
"Foo",
"Variable",
):
assert type(ref) is type(res)
return all(
same(
getattr(ref, key),
getattr(res, key),
getattr(fp64_ref, key),
cos_similarity=cos_similarity,
tol=tol,
equal_nan=equal_nan,
exact_dtype=exact_dtype,
relax_numpy_equality=relax_numpy_equality,
ignore_non_fp=ignore_non_fp,
log_error=log_error,
)
for key in ref.__dict__.keys()
)
else:
raise RuntimeError(f"unsupported type: {type(ref).__name__}")
def format_func_info(code):
short_filename = code.co_filename.split("/")[-1]
return f"'{code.co_name}' ({short_filename}:{code.co_firstlineno})"
@contextlib.contextmanager
def disable_cache_limit():
prior = config.cache_size_limit
config.cache_size_limit = sys.maxsize
try:
yield
finally:
config.cache_size_limit = prior
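# Illustrative usage sketch: temporarily lift the dynamo cache size limit, e.g.
# around code known to recompile many times. The helper below is hypothetical,
# not part of this module.
def _disable_cache_limit_example():
    with disable_cache_limit():
        # Inside the block the limit is sys.maxsize; it is restored afterwards.
        return config.cache_size_limit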
# map from transformed code back to original user code
orig_code_map = ExactWeakKeyDictionary()
# keep a record of code_obj -> list of guard failure reasons for logging
guard_failures = collections.defaultdict(list)
# Keep a record of graph break reasons for logging
graph_break_reasons = list()
# keep record of compiled code, if we are in "error if recompile"
# to track code that dynamo has compiled previously
seen_code_map = ExactWeakKeyDictionary()
class CompileProfiler:
"""Utility for profiling how and what dynamo would compile.
Can be used for
* diagnosing recompilation issues
* determining an appropriate compile cache limit
* (TODO) confirming which functions got compiled/skipped
"""
def __init__(self):
self.frame_count = 0
self.op_count = 0
self.backend_ctx_ctor = lambda: disable_cache_limit()
def __call__(self, gm: torch.fx.GraphModule, example_inputs):
self.frame_count += 1
for node in gm.graph.nodes:
if "call" in node.op:
self.op_count += 1
return gm.forward
def __enter__(self):
self.old_report_guard_failure = config.report_guard_failures
config.report_guard_failures = True
return self
def __exit__(self, typ, val, traceback):
config.report_guard_failures = self.old_report_guard_failure
def get_metrics(self):
return {"guard_failures": guard_failures}
def report(self):
metrics = self.get_metrics()
gf = metrics["guard_failures"]
def num_recompiles(code):
return len(gf[code])
def recompile_reasons(code):
return "\n".join([str(x) for x in gf[code]])
summarized_gf = [
[format_func_info(code), num_recompiles(code), recompile_reasons(code)]
for code in gf
]
def graph_break_report():
if "graph_break" in counters:
graph_breaks = counters["graph_break"]
return tabulate(
[[msg, graph_breaks[msg]] for msg in graph_breaks],
headers=["Graph Break Reason", "Count"],
)
def recompilation_report():
if len(gf):
max_recompiles = max([num_recompiles(code) for code in gf])
recomp_table = tabulate(
summarized_gf,
headers=["Function", "Recompiles", "Recompile Reasons"],
)
return recomp_table + textwrap.dedent(
f"""
Set torch._dynamo.config.cache_size_limit to {max_recompiles} to avoid being cache limited.
"""
)
report = textwrap.dedent(
"""
Torchdynamo Profiler Report
===========================
Graph Breaks
------------
Graph breaks happen when torchdynamo encounters code it can't safely trace.
If you want to find out why breaks are happening, check below for each break reason.
You may gain additional insight by passing `fullgraph=True` to torch.compile
to stop at the first break.
"""
)
report += graph_break_report() or "No graph breaks detected."
report += textwrap.dedent(
"""
Recompilation
-------------
These subgraphs were recompiled more than once due to guard failures.
Guard failures indicate that some condition assumed to be static by the tracer changed,
making it unsafe to reuse the compiled program.
"""
)
report += recompilation_report() or "No recompilation detected.\n"
return report
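# Illustrative sketch (hedged, not exercised anywhere in this module): one plausible
# way to use CompileProfiler as a torch.compile backend so that graph breaks and
# recompiles show up in report(). The function `fn` and its inputs are hypothetical.
def _example_compile_profiler_usage():
    prof = CompileProfiler()

    def fn(x):
        return x.sin() + x.cos()

    opt_fn = torch.compile(fn, backend=prof)
    with prof:  # enables guard-failure reporting for the duration
        opt_fn(torch.randn(4))
        opt_fn(torch.randn(8))  # a different shape may trigger a recompile
    print(prof.report())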
# return same dir unless user changes config between calls
@functools.lru_cache(None)
def _get_debug_dir(root_dir):
dir_name = (
"run_"
+ datetime.datetime.now().strftime("%Y_%m_%d_%H_%M_%S_%f")
# use pid to avoid conflicts among ranks
+ "-pid_"
+ str(os.getpid())
)
return os.path.join(root_dir, dir_name)
def get_debug_dir():
debug_root = config.debug_dir_root
return _get_debug_dir(debug_root)
def get_fake_value(node, tx):
"""
Run the computation represented by `node` using fake tensors and return the result.
"""
from .exc import (
TorchRuntimeError,
unimplemented,
Unsupported,
UserError,
UserErrorType,
)
op = node.op
def fake_wrapper(e):
if isinstance(e, torch.Tensor):
assert is_fake(e)
return e
def visit(n: torch.fx.Node):
return n.meta["example_value"]
args, kwargs = torch.fx.node.map_arg((node.args, node.kwargs), visit)
args = tree_map(fake_wrapper, args)
kwargs = tree_map(fake_wrapper, kwargs)
nnmodule = None
if op == "call_method" and len(args) > 0 and isinstance(args[0], torch.nn.Module):
# If the first argument is an nn.Module, copy it to fake mode.
args = (deepcopy_to_fake_tensor(args[0], tx.fake_mode),) + tuple(args[1:])
if op == "call_module":
nnmodule = tx.output.nn_modules[node.target]
if is_lazy_module(nnmodule) and hasattr(nnmodule, "_initialize_hook"):
# In the case of a lazy module, we want to run
# the pre-hooks which initialize it.
# Afterwards, lazy module deletes its pre-hooks
# to avoid treating it as lazy on subsequent recompile.
nnmodule._infer_parameters(nnmodule, args)
# Whether or not it is a lazy module, we should copy it to fake mode.
nnmodule = deepcopy_to_fake_tensor(nnmodule, tx.fake_mode)
try:
with tx.fake_mode, enable_python_dispatcher():
return wrap_fake_exception(
lambda: run_node(tx.output, node, args, kwargs, nnmodule)
)
except Unsupported:
raise
except RuntimeError as e:
cause = e
if e.__cause__ is not None:
cause = e.__cause__
if isinstance(
cause, torch._subclasses.fake_tensor.DataDependentOutputException
):
unimplemented(f"data dependent operator: {cause.func}")
elif isinstance(
cause, torch._subclasses.fake_tensor.DynamicOutputShapeException
):
unimplemented(f"dynamic shape operator: {cause.func}")
elif isinstance(
cause, torch._subclasses.fake_tensor.UnsupportedOperatorException
):
unimplemented(
f"unsupported operator: {cause.func} (see "
"https://docs.google.com/document/d/1GgvOe7C8_NVOMLOCwDaYV1mXXyHMXY7ExoewHqooxrs/edit#heading=h.64r4npvq0w0"
" for how to fix)"
)
elif isinstance(
cause, torch.fx.experimental.symbolic_shapes.GuardOnDataDependentSymNode
):
unimplemented("guard on data-dependent symbolic int/float")
elif isinstance(cause, torch.utils._sympy.value_ranges.ValueRangeError):
raise UserError(UserErrorType.CONSTRAIN_VIOLATION, e.args[0]) from e
raise TorchRuntimeError(str(e)).with_traceback(e.__traceback__) from None
def run_node(tracer, node, args, kwargs, nnmodule):
"""
Runs a given node, with the given args and kwargs.
Behavior is dictated by a node's op.
run_node is useful for extracting real values out of nodes.
See get_real_value for more info on common usage.
Note: The tracer arg is only used for 'get_attr' ops
Note: The nnmodule arg is only used for 'call_module' ops
Nodes that are not call_function, call_method, call_module, or get_attr will
raise an AssertionError.
"""
op = node.op
try:
if op == "call_function":
return node.target(*args, **kwargs)
elif op == "call_method":
return getattr(args[0], node.target)(*args[1:], **kwargs)
elif op == "call_module":
assert nnmodule is not None
return nnmodule(*args, **kwargs)
elif op == "get_attr":
return tracer.get_submodule(node.target)
elif op == "placeholder":
assert "example_value" in node.meta
return node.meta["example_value"]
except Exception as e:
fn_str = f"Failed running {op} {node.target}(*{args}, **{kwargs}):\n"
raise RuntimeError(fn_str + str(e)).with_traceback(e.__traceback__) from e
raise AssertionError(op)
def get_real_value(node, tracer):
"""
Run the actual computation represented by `node` and return the result.
This will execute any dependent nodes in the graph as well.
"""
from .exc import TorchRuntimeError
cache = tracer.real_value_cache
if node in cache:
return cache[node]
op = node.op
args, kwargs = torch.fx.node.map_arg(
(node.args, node.kwargs),
lambda n: get_real_value(n, tracer),
)
if op == "call_module":
nn_module = tracer.output_graph.nn_modules[node.target]
if not is_lazy_module(nn_module):
nn_module = copy.deepcopy(nn_module)
else:
# In the case of a lazy module, we want to run
# the pre-hooks which initialize it
nn_module(*args, **kwargs)
else:
nn_module = None
try:
real_value = run_node(tracer, node, args, kwargs, nn_module)
cache[node] = real_value
except RuntimeError as e:
raise TorchRuntimeError(str(e)).with_traceback(e.__traceback__) from None
return real_value
def assert_no_fake_params_or_buffers(gm):
from torch._subclasses.fake_tensor import FakeTensorConfig
def stack_or_hint(t):
if FakeTensorConfig.debug:
import traceback
return f"FAKE TENSOR CREATION TRACEBACK: \n {traceback.format_list(t._debug_trace)}"
else:
return "Enable TORCH_FAKE_TENSOR_DEBUG=1 to get creation stack traces on fake tensors."
for name, buffer in gm.named_buffers():
assert not isinstance(
buffer, torch._subclasses.FakeTensor
), f"Unexpected fake buffer {name} {stack_or_hint(buffer)}"
for name, param in gm.named_parameters():
assert not isinstance(
param, torch._subclasses.FakeTensor
), f"Unexpected fake param {name} {stack_or_hint(param)}"
def fqn(obj: Any):
"""
Returns the fully qualified name of the object.
"""
return f"{obj.__module__}.{obj.__qualname__}"
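# Hedged example of fqn(): for a well-known class the result is its defining module
# plus qualified name. The exact module path below is an assumption about how torch
# lays out its submodules, not something this file asserts.
def _example_fqn():
    assert fqn(torch.nn.Linear) == "torch.nn.modules.linear.Linear"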
def ifdynstaticdefault(count1, count2):
if torch._dynamo.config.assume_static_by_default:
return count1
else:
return count2
def import_submodule(mod: types.ModuleType):
"""
Ensure all the files in a given submodule are imported
"""
for filename in sorted(os.listdir(os.path.dirname(mod.__file__))):
if filename.endswith(".py") and filename[0] != "_":
importlib.import_module(f"{mod.__name__}.{filename[:-3]}")
def object_has_getattribute(value: Any):
try:
if isinstance(
inspect.getattr_static(type(value), "__getattribute__"),
types.FunctionType,
):
return True
except AttributeError:
pass
return False
def get_custom_getattr(value: Any):
try:
getattr_fn = inspect.getattr_static(type(value), "__getattr__")
except AttributeError:
getattr_fn = None
if getattr_fn is torch.nn.Module.__getattr__:
# ignore this case of getattr
getattr_fn = None
return getattr_fn
class TensorStaticReason(enum.Enum):
PARAMETER = 2
NOT_TENSOR = 4
NN_MODULE_PROPERTY = 5
def tensor_static_reason_to_message(reason: TensorStaticReason):
if reason == TensorStaticReason.PARAMETER:
return "mark_dynamic on parameter, parameters are always static today."
if reason == TensorStaticReason.NOT_TENSOR:
return "mark_dynamic on a non tensor, how did this happen?"
if reason == TensorStaticReason.NN_MODULE_PROPERTY:
return "tensor is static because it is nn module associated."
raise AssertionError(f"Illegal reason {reason}")
def tensor_always_has_static_shape(
tensor: Union[torch.Tensor, Any], is_tensor: bool, guard_source: "GuardSource"
) -> Tuple[bool, TensorStaticReason]:
"""
Given a tensor, source, and is_tensor flag, determine if a shape should be static.
Args:
tensor - the real tensor to evaluate, parameters force a static shape.
is_tensor - internal dynamo check, essentially "is_tensor": target_cls is TensorVariable;
tensors not in a TensorVariable for whatever reason are forced static.
Returns a tuple, where the first element is the bool of whether or not this tensor should have a static shape.
The second element is a TensorStaticReason, useful for passing to tensor_static_reason_to_message if needed.
"""
if guard_source.is_nn_module() and config.force_nn_module_property_static_shapes:
return True, TensorStaticReason.NN_MODULE_PROPERTY
if type(tensor) is torch.nn.Parameter and config.force_parameter_static_shapes:
return True, TensorStaticReason.PARAMETER
if not is_tensor:
return True, TensorStaticReason.NOT_TENSOR
return False, None
class LazyString:
def __init__(self, func, *args, **kwargs):
self.func = func
self.args = args
self.kwargs = kwargs
def __str__(self):
return self.func(*self.args, **self.kwargs)
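# Hedged sketch: LazyString defers formatting until the string is actually needed,
# so an expensive repr is skipped when the log level filters the message out.
# The lambda below is purely illustrative.
def _example_lazy_string():
    expensive = LazyString(lambda: ", ".join(str(i) for i in range(10_000)))
    log.debug("big payload: %s", expensive)  # formatted only if DEBUG is enabled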
def lazy_format_graph_code(name, gm, maybe_id=None):
def format_name():
if maybe_id is not None:
return f"{name} {maybe_id}"
else:
return name
return LazyString(
lambda: _format_graph_code(
f"===== {format_name()} =====\n",
gm.forward.__code__.co_filename,
gm.print_readable(print_output=False),
)
)
def _format_graph_code(name, filename, graph_str):
return f"TRACED GRAPH\n {name} {filename} {graph_str}\n"
def lazy_format_graph_tabular(fn_name, gm):
def inner():
try:
from tabulate import tabulate # TODO: Check that this is installed
except ImportError:
return (
"Tabulate module missing, please install tabulate to log the graph in tabular format, logging code instead:\n"
+ str(lazy_format_graph_code(fn_name, gm))
)
node_specs = [
[n.op, n.name, n.target, n.args, n.kwargs] for n in gm.graph.nodes
]
graph_str = tabulate(
node_specs, headers=["opcode", "name", "target", "args", "kwargs"]
)
return _format_graph_code(fn_name, gm.forward.__code__.co_filename, graph_str)
return LazyString(inner)
def format_bytecode(prefix, name, filename, line_no, code):
return f"{prefix} {name} {filename} line {line_no} \n{dis.Bytecode(code).dis()}\n"
forward_hook_names = ["_forward_pre_hooks", "_forward_hooks"]
backward_hook_names = ["_backward_pre_hooks", "_backward_hooks"]
state_dict_hook_names = [
"_state_dict_pre_hooks",
"_state_dict_hooks",
"_load_state_dict_pre_hooks",
"_load_state_dict_post_hooks",
]
all_hook_names = forward_hook_names + backward_hook_names + state_dict_hook_names
def nn_module_get_all_hooks(
mod,
check_forward_hooks=False,
check_backward_hooks=False,
check_state_dict_hooks=False,
):
"""
Sometimes it's useful to differentiate between types of hooks such as forward/backward/pre
hooks executed during module.__call__, and state_dict hooks which are executed separately.
"""
hook_dicts_to_check = []
check_all_hooks = (
not check_forward_hooks
and not check_backward_hooks
and not check_state_dict_hooks
)
if check_forward_hooks or check_all_hooks:
hook_dicts_to_check.extend(forward_hook_names)
if check_backward_hooks or check_all_hooks:
hook_dicts_to_check.extend(backward_hook_names)
if check_state_dict_hooks:
hook_dicts_to_check.extend(state_dict_hook_names)
all_hooks = []
for hook_dict_name in hook_dicts_to_check:
hooks = getattr(mod, hook_dict_name, [])
for hook_name in hooks:
hook = hooks[hook_name]
all_hooks.append(hook)
return all_hooks
def nnmodule_has_hooks(
mod,
check_forward_hooks=False,
check_backward_hooks=False,
check_state_dict_hooks=False,
):
"""
Helper function to check if a module has any hooks attached to it.
"""
hooks = nn_module_get_all_hooks(
mod,
check_forward_hooks=check_forward_hooks,
check_backward_hooks=check_backward_hooks,
check_state_dict_hooks=check_state_dict_hooks,
)
return bool(hooks)
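# Hedged example: nnmodule_has_hooks() on a plain nn.Module before and after
# registering a forward hook.
def _example_nnmodule_has_hooks():
    mod = torch.nn.Linear(2, 2)
    assert not nnmodule_has_hooks(mod)
    mod.register_forward_hook(lambda m, inputs, output: None)
    assert nnmodule_has_hooks(mod, check_forward_hooks=True)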
def to_numpy_helper(value):
"""Convert tensor and tnp.ndarray to numpy.ndarray."""
if isinstance(value, tnp.ndarray):
return to_numpy_helper(value.tensor)
elif isinstance(value, torch.Tensor):
return value.cpu().numpy()
elif isinstance(value, (tuple, list)):
return type(value)(to_numpy_helper(obj) for obj in value)
else:
return value
def numpy_to_tensor(value):
"""Convert tnp.ndarray to tensor, leave other types intact. If a list/tuple, loop through it to convert."""
if isinstance(value, np.ndarray):
return torch.as_tensor(value)
if isinstance(value, tnp.ndarray):
return value.tensor
elif isinstance(value, (tuple, list)):
return type(value)(numpy_to_tensor(obj) for obj in value)
else:
return value
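# Hedged round-trip example for the helpers above: a torch.Tensor is lowered to a
# numpy.ndarray and converted back unchanged.
def _example_numpy_tensor_roundtrip():
    t = torch.arange(3)
    arr = to_numpy_helper(t)
    assert isinstance(arr, np.ndarray)
    assert torch.equal(numpy_to_tensor(arr), t)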
class numpy_to_tensor_wrapper:
def __init__(self, f):
self.f = f
self.__name__ = "wrapped_" + self.f.__name__
def __repr__(self):
return f"<Wrapped function <original {self.f.__name__}>>"
def __call__(self, *args, **kwargs):
out = self.f(*args, **kwargs)
return numpy_to_tensor(out)
def numpy_attr_wrapper(obj, name):
if isinstance(obj, tnp.ndarray):
out = getattr(obj, name)
return numpy_to_tensor(out)
elif isinstance(obj, torch.Tensor):
out = getattr(tnp.ndarray(obj), name)
return numpy_to_tensor(out)
class numpy_method_wrapper:
"""Convert obj from torch.Tensor to tnp.ndarray and call method. Then convert result back to torch.Tensor."""
def __init__(self, method: str):
self.method = method
self.__name__ = "wrapped_" + self.method
def __repr__(self):
return f"<Wrapped method <original {self.method}>>"
def __call__(self, *args, **kwargs):
obj = args[0]
if isinstance(obj, torch.Tensor):
obj = tnp.ndarray(obj)
method_callable = getattr(obj, self.method)
out = method_callable(*args[1:], **kwargs)
return numpy_to_tensor(out)
def defake(x):
if not isinstance(x, FakeTensor):
return x
if x._has_symbolic_sizes_strides:
size = [
s.node.shape_env.size_hint(s.node.expr)
if isinstance(s, torch.SymInt)
else s
for s in x.size()
]
stride = [
s.node.shape_env.size_hint(s.node.expr)
if isinstance(s, torch.SymInt)
else s
for s in x.stride()
]
else:
size = x.size()
stride = x.stride()
y = torch.empty_strided(
size,
stride,
dtype=x.dtype,
device=x.device,
requires_grad=x.requires_grad,
)
y.zero_()
return y
def is_utils_checkpoint(obj):
# Lazy import to avoid circular dependencies
import torch.utils.checkpoint
return obj is torch.utils.checkpoint.checkpoint
def build_checkpoint_variable(**options):
import torch._higher_order_ops.wrap as higher_order_ops
from .variables.higher_order_ops import TorchHigherOrderOperatorVariable
# TODO - This is a temporary situation where we have two versions of the
# checkpointing implementation. We will converge on one and remove the other.
activation_checkpoint_op = higher_order_ops.tag_activation_checkpoint
if torch._functorch.config.functionalize_rng_ops:
activation_checkpoint_op = higher_order_ops.wrap_activation_checkpoint
return TorchHigherOrderOperatorVariable.make(
activation_checkpoint_op,
**options,
)
def is_compile_supported(device_type):
from .eval_frame import is_dynamo_supported
compile_supported = is_dynamo_supported()
if device_type == "cpu":
pass
elif device_type == "cuda" and compile_supported:
from torch._inductor.utils import has_triton
compile_supported = has_triton()
else:
compile_supported = False
return compile_supported
# The following 3.11 source code functions are adapted from
# https://github.com/python/cpython/blob/v3.11.4/Lib/traceback.py
# in order to output source code corresponding to bytecode in 3.11+.
# We need our own versions since we want to support multiline expressions.
def _fix_offset(str: str, offset: int) -> int:
"""
Convert byte offset `offset` of `str` into character offset.
Byte offset is used for 3.11+ instruction column data.
Takes things like unicode characters into consideration.
Unchanged from CPython implementation.
"""
as_utf8 = str.encode("utf-8")
return len(as_utf8[:offset].decode("utf-8", errors="replace"))
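# Hedged example: 'é' occupies two bytes in UTF-8, so a byte offset of 3 corresponds
# to character offset 2.
def _example_fix_offset():
    assert _fix_offset("aé+b", 3) == 2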
@dataclasses.dataclass
class _Anchors:
# inclusive
left_end_lineno: int
left_end_offset: int
right_start_lineno: int
# exclusive
right_start_offset: int
def _extract_anchors_from_expr(segment: str) -> Optional[_Anchors]:
"""
Given source code `segment` corresponding to a bytecode
instruction, determine:
- for binary ops, the location of the binary op
- for indexing, the location of the brackets.
`segment` is expected to be a valid Python expression
"""
assert sys.version_info >= (3, 11)
import ast
try:
# Without brackets, `segment` is parsed as a statement.
# We expect an expression, so wrap `segment` in
# brackets to handle multi-line expressions.
tree = ast.parse("(\n" + segment + "\n)")
except SyntaxError:
return None
if len(tree.body) != 1:
return None
lines = segment.split("\n")
# get character index given byte offset
def normalize(lineno, offset):
return _fix_offset(lines[lineno], offset)
# Gets the next valid character index in `lines`, if
# the current location is not valid. Handles empty lines.
def next_valid_char(lineno, col):
while lineno < len(lines) and col >= len(lines[lineno]):
col = 0
lineno += 1
assert lineno < len(lines) and col < len(lines[lineno])
return lineno, col
# Get the next valid character index in `lines`.
def increment(lineno, col):
col += 1
lineno, col = next_valid_char(lineno, col)
assert lineno < len(lines) and col < len(lines[lineno])
return lineno, col
# Get the next valid character at least on the next line
def nextline(lineno, col):
col = 0
lineno += 1
lineno, col = next_valid_char(lineno, col)
assert lineno < len(lines) and col < len(lines[lineno])
return lineno, col
statement = tree.body[0]
if isinstance(statement, ast.Expr):
expr = statement.value
if isinstance(expr, ast.BinOp):
# ast gives locations for BinOp subexpressions, e.g.
# ( left_expr ) + ( right_expr )
# left^^^^^ right^^^^^
# -2 since end_lineno is 1-indexed and because we added an extra
# bracket to `segment` when calling ast.parse
cur_lineno = expr.left.end_lineno - 2
cur_col = normalize(cur_lineno, expr.left.end_col_offset)
cur_lineno, cur_col = next_valid_char(cur_lineno, cur_col)
# Heuristic to find the operator character.
# The original CPython implementation did not look for ), \, or #,
# leading to incorrect anchor location, e.g.
# (x) + (y)
# ~~^~~~~~~
while (ch := lines[cur_lineno][cur_col]).isspace() or ch in ")\\#":
if ch in "\\#":
cur_lineno, cur_col = nextline(cur_lineno, cur_col)
else:
cur_lineno, cur_col = increment(cur_lineno, cur_col)
# binary op is 1 or 2 characters long, on the same line
right_col = cur_col + 1
if (
right_col < len(lines[cur_lineno])
and not (ch := lines[cur_lineno][right_col]).isspace()
and ch not in "\\#"
):
right_col += 1
# right_col can be invalid since it is exclusive
return _Anchors(cur_lineno, cur_col, cur_lineno, right_col)
elif isinstance(expr, ast.Subscript):
# ast gives locations for value and slice subexpressions, e.g.
# ( value_expr ) [ slice_expr ]
# value^^^^^ slice^^^^^
# subscript^^^^^^^^^^^^^^^^^^^^
# find left bracket (first '[' after value)
left_lineno = expr.value.end_lineno - 2
left_col = normalize(left_lineno, expr.value.end_col_offset)
left_lineno, left_col = next_valid_char(left_lineno, left_col)
while lines[left_lineno][left_col] != "[":
left_lineno, left_col = increment(left_lineno, left_col)
# find right bracket (final character of expression)
right_lineno = expr.end_lineno - 2
right_col = normalize(right_lineno, expr.end_col_offset)
return _Anchors(left_lineno, left_col, right_lineno, right_col)
elif isinstance(expr, ast.Call):
# ( func_expr ) (args, kwargs)
# func^^^^^
# call^^^^^^^^^^^^^^^^^^^^^^^^
# find left bracket (first '(' after func)
left_lineno = expr.func.end_lineno - 2
left_col = normalize(left_lineno, expr.func.end_col_offset)
left_lineno, left_col = next_valid_char(left_lineno, left_col)
while lines[left_lineno][left_col] != "(":
left_lineno, left_col = increment(left_lineno, left_col)
# find right bracket (final character of expression)
right_lineno = expr.end_lineno - 2
right_col = normalize(right_lineno, expr.end_col_offset)
return _Anchors(left_lineno, left_col, right_lineno, right_col)
return None
def get_instruction_source_311(code: types.CodeType, inst: dis.Instruction) -> str:
"""
Python 3.11+ only. Returns lines of source code (from code object `code`)
corresponding to `inst`'s location data, and underlines relevant code to `inst`.
Example: CALL on `g`:
f(g(
^^
h(x)))
^^^^^
We need our own implementation since `format_frame_summary` in
Python's `traceback` module doesn't handle multi-line expressions
(and their anchor extraction code is not completely correct).
"""
if inst.positions.lineno is None:
return ""
# The rstrip + "\n" pattern is used throughout this function to handle
# linecache.getline errors. Error lines are treated as empty strings "", but we want
# to treat them as blank lines "\n".
first_line = linecache.getline(code.co_filename, inst.positions.lineno).rstrip()
if inst.positions.end_lineno is None:
return first_line
if inst.positions.col_offset is None or inst.positions.end_col_offset is None:
return first_line
# character index of the start of the instruction
start_offset = _fix_offset(first_line, inst.positions.col_offset)
# character index of the end of the instruction
# compute later since end may be a different line
end_offset = None
# expression corresponding to the instruction so we can get anchors
segment = ""
# underline markers to be printed - start with `~` marker and replace with `^` later
markers = []
# Compute segment and initial markers
if inst.positions.end_lineno == inst.positions.lineno:
end_offset = _fix_offset(first_line, inst.positions.end_col_offset)
segment = first_line[start_offset:end_offset]
markers.append(" " * start_offset + "~" * (end_offset - start_offset))
else:
segment = first_line[start_offset:] + "\n"
markers.append(" " * start_offset + "~" * (len(first_line) - start_offset))
last_line = linecache.getline(
code.co_filename, inst.positions.end_lineno
).rstrip()
end_offset = _fix_offset(last_line, inst.positions.end_col_offset)
for lineno in range(inst.positions.lineno + 1, inst.positions.end_lineno):
line = linecache.getline(code.co_filename, lineno).rstrip()
segment += line + "\n"
# don't underline leading spaces
num_spaces = len(line) - len(line.lstrip())
markers.append(" " * num_spaces + "~" * (len(line) - num_spaces))
segment += last_line[:end_offset]
num_spaces = len(last_line) - len(last_line.lstrip())
markers.append(" " * num_spaces + "~" * (end_offset - num_spaces))
anchors: Optional[_Anchors] = None
try:
anchors = _extract_anchors_from_expr(segment)
except AssertionError:
pass
# replace `~` markers with `^` where necessary
if anchors is None:
markers = [marker.replace("~", "^") for marker in markers]
else:
# make markers mutable
markers = [list(marker) for marker in markers]
# anchor positions do not take start_offset into account
if anchors.left_end_lineno == 0:
anchors.left_end_offset += start_offset
if anchors.right_start_lineno == 0:
anchors.right_start_offset += start_offset
# Turn `~` markers between anchors into `^`
for line in range(len(markers)):
for col in range(len(markers[line])):
if line < anchors.left_end_lineno:
continue
if line == anchors.left_end_lineno and col < anchors.left_end_offset:
continue
if (
line == anchors.right_start_lineno
and col >= anchors.right_start_offset
):
continue
if line > anchors.right_start_lineno:
continue
if markers[line][col] == "~":
markers[line][col] = "^"
# make markers into strings again
markers = ["".join(marker) for marker in markers]
result = ""
for i in range(len(markers)):
result += (
linecache.getline(code.co_filename, inst.positions.lineno + i).rstrip()
+ "\n"
)
result += markers[i] + "\n"
return result
def is_guard_failure_reporting_enabled():
return (
config.report_guard_failures
or torch._logging._internal.log_state.is_artifact_enabled("recompiles")
)
def get_static_address_type(t):
if isinstance(t, torch.Tensor):
return getattr(t, "_dynamo_static_input_type", None)
return None
|
import contextlib
import dis
import functools
import logging
import os.path
import re
import sys
import types
import unittest
from typing import Sequence, Union
from unittest.mock import patch
import torch
from torch import fx
from torch._dynamo.output_graph import OutputGraph
from torch._dynamo import config, eval_frame, optimize_assert, reset
from torch._dynamo.bytecode_transformation import (
create_instruction,
debug_checks,
is_generator,
transform_code_object,
)
from torch._dynamo.guards import CheckFunctionManager, GuardedCode
from .utils import same
unsupported = eval_frame.unsupported
three = 3
log = logging.getLogger(__name__)
def clone_me(x):
if x is None:
return None
return x.detach().clone().requires_grad_(x.requires_grad)
def skip_if_pytest(fn):
@functools.wraps(fn)
def wrapped(*args, **kwargs):
if "PYTEST_CURRENT_TEST" in os.environ:
raise unittest.SkipTest("does not work under pytest")
return fn(*args, **kwargs)
return wrapped
def named_parameters_for_optimized_module(mod):
assert isinstance(mod, eval_frame.OptimizedModule)
return mod._orig_mod.named_parameters
def named_buffers_for_optimized_module(mod):
assert isinstance(mod, eval_frame.OptimizedModule)
return mod._orig_mod.named_buffers
def remove_optimized_module_prefix(name):
return re.sub(r"^_orig_mod[.]", "", name)
def collect_results(model, prediction, loss, example_inputs):
results = []
results.append(prediction)
results.append(loss)
# if isinstance(loss, torch.Tensor) and loss.item() > 1:
# log.warning(
# f"High loss value alert - {loss:.2f}. Can result in unstable gradients."
# )
grads = dict()
params = dict()
for name, param in model.named_parameters():
if isinstance(model, eval_frame.OptimizedModule):
name = remove_optimized_module_prefix(name)
param_copy = param
grad = param.grad
# Treat None and zero grad as same
if param.grad is None:
grad = torch.zeros_like(param)
grads[name + ".grad"] = grad
params[name] = param_copy
results.append(grads)
results.append(params)
buffers = dict()
for name, buffer in model.named_buffers():
if isinstance(model, eval_frame.OptimizedModule):
name = remove_optimized_module_prefix(name)
buffers[name] = buffer
results.append(buffers)
for example in example_inputs:
if isinstance(example, (tuple, list)):
for inp in example:
if isinstance(inp, torch.Tensor):
results.append(inp.grad)
else:
if isinstance(example, torch.Tensor):
results.append(example.grad)
return results
def requires_bwd_pass(out):
if isinstance(out, torch.Tensor):
return out.requires_grad
elif isinstance(out, (list, tuple)):
return any(requires_bwd_pass(x) for x in out)
elif out is None:
return False
elif isinstance(out, int):
return False
raise NotImplementedError("Don't know how to reduce", type(out))
def reduce_to_scalar_loss(out):
"""Reduce the output of a model to get scalar loss"""
if isinstance(out, torch.Tensor):
# Mean does not work on integer tensors
return out.sum() / out.numel()
elif isinstance(out, (list, tuple)):
return sum([reduce_to_scalar_loss(x) for x in out]) / len(out)
elif type(out).__name__ in (
"MaskedLMOutput",
"Seq2SeqLMOutput",
"CausalLMOutputWithCrossAttentions",
):
return reduce_to_scalar_loss(out.logits)
elif type(out).__name__ == "SquashedNormal":
return out.mean.sum()
elif isinstance(out, dict):
return sum([reduce_to_scalar_loss(value) for value in out.values()]) / len(
out.keys()
)
raise NotImplementedError("Don't know how to reduce", type(out))
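# Hedged example: a tensor reduces to its mean, and containers are reduced
# recursively and averaged.
def _example_reduce_to_scalar_loss():
    loss = reduce_to_scalar_loss([torch.ones(4), torch.zeros(4)])
    assert loss.item() == 0.5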
def debug_dir():
path = os.path.join(os.path.dirname(__file__), "../debug")
if not os.path.exists(path):
os.mkdir(path)
return path
def debug_dump(name, code: types.CodeType, extra=""):
with open(os.path.join(debug_dir(), name), "w") as fd:
fd.write(
f"{dis.Bytecode(code).info()}\n\n{dis.Bytecode(code).dis()}\n\n{extra}\n"
)
def debug_insert_nops(frame, cache_size, hooks, _):
"""used to debug jump updates"""
def insert_nops(instructions, code_options):
instructions.insert(0, create_instruction("NOP"))
instructions.insert(0, create_instruction("NOP"))
if is_generator(frame.f_code):
return None
debug_checks(frame.f_code)
code = transform_code_object(frame.f_code, insert_nops)
graph = OutputGraph(
code_options={},
compiler_fn=None,
root_tx=None,
export=False,
export_constraints=None,
frame_state={"_id": 0},
# TODO: shouldn't this be f_locals/f_globals from frame?
local_scope=locals(),
global_scope=globals(),
f_code=frame.f_code,
)
return GuardedCode(code, CheckFunctionManager(graph).check_fn)
class CompileCounter:
def __init__(self):
self.frame_count = 0
self.op_count = 0
def __call__(self, gm: torch.fx.GraphModule, example_inputs):
self.frame_count += 1
for node in gm.graph.nodes:
if "call" in node.op:
self.op_count += 1
return gm.forward
def clear(self):
self.frame_count = 0
self.op_count = 0
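# Hedged usage sketch: CompileCounter doubles as a torch.compile backend, so tests
# can assert on how many frames and ops were captured. `fn` is hypothetical.
def _example_compile_counter_usage():
    cnt = CompileCounter()

    @torch.compile(backend=cnt)
    def fn(x):
        return x + 1

    fn(torch.randn(3))
    assert cnt.frame_count == 1 and cnt.op_count == 1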
class CompileCounterWithBackend:
def __init__(self, backend):
self.frame_count = 0
self.op_count = 0
self.backend = backend
self.graphs = []
def __call__(self, gm: torch.fx.GraphModule, example_inputs):
from .backends.registry import lookup_backend
self.frame_count += 1
for node in gm.graph.nodes:
if "call" in node.op:
self.op_count += 1
self.graphs.append(gm)
return lookup_backend(self.backend)(gm, example_inputs)
# Equivalent to backend="eager", but also records graphs that
# we can assert on
class EagerAndRecordGraphs:
def __init__(self):
self.graphs = []
def __call__(self, gm: torch.fx.GraphModule, example_inputs):
self.graphs.append(gm)
return gm
def strip_comment(code):
code = str(code)
return re.sub(r"(?m)^ *#.*\n?", "", code)
def remove_trailing_space(code):
return "\n".join([line.rstrip() for line in code.split("\n")])
def normalize_gm(gm_str):
# strip comments as comments have path to files which may differ from
# system to system.
return remove_trailing_space(strip_comment(gm_str))
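# Hedged example: comments (which embed machine-specific file paths) and trailing
# whitespace are stripped so graph dumps compare cleanly across systems.
def _example_normalize_gm():
    src = "x = 1  \n# /home/user/file.py:10\ny = 2\n"
    assert normalize_gm(src) == "x = 1\ny = 2\n"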
def standard_test(self, fn, nargs, expected_ops=None, expected_ops_dynamic=None):
if not config.assume_static_by_default and expected_ops_dynamic is not None:
expected_ops = expected_ops_dynamic
actual = CompileCounter()
if expected_ops is None:
expected = CompileCounter()
try:
gm = torch.fx.symbolic_trace(fn)
expected(gm)
print("\nfx.symbolic_trace graph:")
gm.graph.print_tabular()
expected_ops = expected.op_count
except Exception:
pass # Silently ignore FX errors (not our issue)
args1 = [torch.randn(10, 10) for _ in range(nargs)]
args2 = [torch.randn(10, 10) for _ in range(nargs)]
correct1 = fn(*args1)
correct2 = fn(*args2)
reset()
opt_fn = optimize_assert(actual)(fn)
val1a = opt_fn(*args1)
val2a = opt_fn(*args2)
val1b = opt_fn(*args1)
val2b = opt_fn(*args2)
reset()
self.assertTrue(same(val1a, correct1))
self.assertTrue(same(val1b, correct1))
self.assertTrue(same(val2a, correct2))
self.assertTrue(same(val2b, correct2))
self.assertEqual(actual.frame_count, 1)
if expected_ops is not None:
self.assertEqual(actual.op_count, expected_ops)
def dummy_fx_compile(gm: fx.GraphModule, example_inputs):
return gm.forward
def format_speedup(speedup, pvalue, is_correct=True, pvalue_threshold=0.1):
if not is_correct:
return "ERROR"
if pvalue > pvalue_threshold:
return f"{speedup:.3f}x SAME"
return f"{speedup:.3f}x p={pvalue:.2f}"
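# Hedged example of the three output forms of format_speedup().
def _example_format_speedup():
    assert format_speedup(1.25, pvalue=0.01) == "1.250x p=0.01"
    assert format_speedup(1.25, pvalue=0.5) == "1.250x SAME"
    assert format_speedup(1.25, pvalue=0.01, is_correct=False) == "ERROR"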
def rand_strided(
size: Sequence[int],
stride: Sequence[int],
dtype: torch.dtype = torch.float32,
device: Union[str, torch.device] = "cpu",
extra_size: int = 0,
):
needed_size = (
sum((shape - 1) * stride for shape, stride in zip(size, stride))
+ 1
+ extra_size
)
if dtype.is_floating_point:
buffer = torch.randn(needed_size, dtype=dtype, device=device)
else:
buffer = torch.zeros(size=[needed_size], dtype=dtype, device=device)
return torch.as_strided(buffer, size, stride)
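# Hedged example: rand_strided() can build non-contiguous layouts directly,
# e.g. a column-major 4x3 tensor.
def _example_rand_strided():
    t = rand_strided((4, 3), (1, 4))
    assert t.shape == (4, 3)
    assert t.stride() == (1, 4)
    assert not t.is_contiguous()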
def _make_fn_with_patches(fn, *patches):
@functools.wraps(fn)
def _fn(*args, **kwargs):
with contextlib.ExitStack() as stack:
for module, attr, val in patches:
stack.enter_context(patch.object(module, attr, val))
return fn(*args, **kwargs)
return _fn
def make_test_cls_with_patches(cls, cls_prefix, fn_suffix, *patches, xfail_prop=None):
class DummyTestClass(cls):
pass
DummyTestClass.__name__ = f"{cls_prefix}{cls.__name__}"
DummyTestClass.__qualname__ = DummyTestClass.__name__
for name in dir(cls):
if name.startswith("test_"):
fn = getattr(cls, name)
if not callable(fn):
continue
new_name = f"{name}{fn_suffix}"
new_fn = _make_fn_with_patches(fn, *patches)
new_fn.__name__ = new_name
if xfail_prop is not None and hasattr(fn, xfail_prop):
new_fn = unittest.expectedFailure(new_fn)
setattr(DummyTestClass, new_name, new_fn)
return DummyTestClass
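# Hedged sketch: deriving a patched copy of a test class. The patched config
# attribute is chosen only for illustration; any (module, attr, value) triple
# accepted by unittest.mock.patch.object works.
def _example_make_test_cls_with_patches():
    class _MyTests(unittest.TestCase):
        def test_flag(self):
            self.assertTrue(True)

    Patched = make_test_cls_with_patches(
        _MyTests, "Patched", "_patched", (config, "assume_static_by_default", False)
    )
    assert Patched.__name__ == "Patched_MyTests"
    assert hasattr(Patched, "test_flag_patched")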
# test Python 3.11+ specific features
def skipIfNotPy311(fn):
if sys.version_info >= (3, 11):
return fn
return unittest.skip("requires Python 3.11+")(fn)
# Controls tests generated in test/inductor/test_torchinductor_dynamic_shapes.py
# and test/dynamo/test_dynamic_shapes.py
def expectedFailureDynamic(fn):
fn._expected_failure_dynamic = True
return fn
# Controls tests generated in test/inductor/test_torchinductor_codegen_dynamic_shapes.py
def expectedFailureCodegenDynamic(fn):
fn._expected_failure_codegen_dynamic = True
return fn
# Controls test generated in test/inductor/test_cpp_wrapper.py
def expectedFailureDynamicWrapper(fn):
fn._expected_failure_dynamic_wrapper = True
return fn
|
import argparse
import csv
import functools
import gc
import io
import itertools
import logging
import numpy as np
import os
import re
import sys
import time
import torch
from torch import nn
from torch.jit import fuser, optimized_execution
from os.path import abspath
from scipy.stats import ttest_ind
import importlib
import glob
import collections
import random
import torch._lazy
import torch._lazy.metrics as metrics
import torch._lazy.ts_backend
def set_seeds(seed=1337):
torch.manual_seed(seed)
random.seed(seed)
np.random.seed(seed)
torch.backends.cudnn.deterministic = True
torch.cuda.manual_seed_all(seed)
def get_unique_suffix():
return f"{time.time()}_{os.getpid()}"
def get_benchmark_cls(model_name):
if ("Benchmark(dims=[" in model_name):
# just evaluate the model name + args
# it should create a model with the right dim
return eval(model_name)
try:
module = importlib.import_module(f'.models.{model_name}', package="torchbenchmark")
Model = getattr(module, 'Model', None)
if Model is None:
raise RuntimeError(f"{module} does not define attribute Model, skip it")
if not hasattr(Model, 'name'):
Model.name = model_name
return Model
except ModuleNotFoundError as e:
raise RuntimeError(f"Could not find dependent module {e.name} for Model {model_name}, skip it")
# from caffe2.python import workspace
# workspace.GlobalInit(['caffe2', '--caffe2_log_level=-5'])
import torch._lazy.metrics
torch._lazy.ts_backend.init()
os.environ["KALDI_ROOT"] = "/tmp" # avoids some spam
log = logging.getLogger(__name__)
# Models that are known to crash or otherwise not work with lazy tensor are
# disabled, but should be removed from these lists once fixed
SKIP = {
"densenet121": "Disabled by torchbench upstream due to OOM on T4 CI machine",
"timm_nfnet": "Disabled by torchbench upstream due to OOM on T4 CI machine",
"moco": "Distributed/ProcessGroupNCCL: Tensors must be CUDA and dense",
"tacotron2": "Disabled by torchbench upstream due to OOM on T4 CI machine",
}
SKIP_TRAIN_ONLY = {
"squeezenet1_1": "Disabled by torchbench upstream due to OOM on T4 CI machine",
"demucs": "Disabled by torchbench upstream due to OOM on T4 CI machine",
}
current_name = ""
current_device = ""
@functools.lru_cache(maxsize=None)
def output_csv(name, headers):
output = csv.writer(
io.TextIOWrapper(
open(name, "wb", buffering=0),
"utf-8",
write_through=True,
),
delimiter=",",
quotechar='"',
quoting=csv.QUOTE_MINIMAL
)
output.writerow(headers)
return output
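# Hedged usage sketch: because output_csv() is lru_cached on (name, headers),
# repeated calls for the same file reuse one writer and only emit the header once.
# The path below is hypothetical.
def _example_output_csv_usage():
    writer = output_csv("/tmp/lazy_bench_example.csv", ("name", "speedup"))
    writer.writerow(["toy_model", "1.23"])
    # same (name, headers) -> same cached writer, no duplicate header row
    assert output_csv("/tmp/lazy_bench_example.csv", ("name", "speedup")) is writer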
class HardSwishBenchmark:
def __init__(self, dims):
self.name = "HardSwishBenchmark(dims=[" + ','.join([str(d) for d in dims]) + '])'
self.dims = dims
# test and extra_args are placeholders to match TorchBench API
def __call__(self, device, test, extra_args):
return HardSwish(self.dims, device)
class HardSwish(nn.Module):
def __init__(self, dims, device='cuda'):
super(HardSwish, self).__init__()
self.name = "HardSwish[" + ','.join([str(d) for d in dims]) + ']'
self.example_inputs = (
torch.randn(*dims, device=device, dtype=torch.float32),
)
def get_module(self):
return self, self.example_inputs
def name(self):
return self.name
def forward(self, x):
return x * torch.clamp(x + 3.0, 0.0, 6.0) / 6.0
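# Hedged check: the hand-written forward above is the standard hard-swish formula,
# so it should agree with torch.nn.functional.hardswish on CPU.
def _example_hardswish_matches_functional():
    m = HardSwish(dims=[2, 3], device='cpu')
    (x,) = m.example_inputs
    assert torch.allclose(m(x), torch.nn.functional.hardswish(x))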
class DivAddMulBenchmark:
"""This wrapper helps interface with the same iterator as torchbench models
"""
def __init__(self, dims):
self.name = "DivAddMulBenchmark(dims=[" + ','.join([str(d) for d in dims]) + '])'
self.dims = dims
# test and extra_args are placeholders to match TorchBench API
def __call__(self, device, test, extra_args):
return DivAddMul(self.dims, device)
class DivAddMul(nn.Module):
def __init__(self, dims, device='cuda'):
super(DivAddMul, self).__init__()
self.attention_head_size = dims[1]
self.W = torch.ones(*dims[-2:], device=device, dtype=torch.float32)
self.name = "DivAddMul[" + ','.join([str(d) for d in dims]) + ']'
self.example_inputs = (
torch.ones(*dims, device=device, dtype=torch.float32),
torch.randn(*dims, device=device, dtype=torch.float32),
)
def get_module(self):
return self, self.example_inputs
def name(self):
return self.name
def forward(self, inputs, mask):
out3 = ((inputs / 0.1) + mask) * 2.0
out5 = out3.matmul(self.W)
out8 = ((out5 / 0.1) + mask) * 2.00
return out8
toy_models = [
HardSwishBenchmark,
DivAddMulBenchmark,
]
toy_dims = [
[1, 1, 1, 1],
[32, 16, 128, 128],
[128, 16, 128, 128],
[256, 16, 128, 128],
]
for dims in toy_dims:
# The toy benchmarks don't support training,
# and it's too late to add it inside the generator func below...
SKIP_TRAIN_ONLY["DivAddMulBenchmark(dims=[" + ','.join([str(d) for d in dims]) + '])'] = "This model has no train()"
SKIP_TRAIN_ONLY["HardSwishBenchmark(dims=[" + ','.join([str(d) for d in dims]) + '])'] = "This model has no train()"
def iter_toy_model_names():
for dims in toy_dims:
for model in toy_models:
yield model(dims=dims).name
def pick_grad(args, name):
if args.test == 'train':
return torch.enable_grad()
if name in ("maml",):
return torch.enable_grad()
else:
return torch.no_grad()
def short_name(name, limit=20):
"""Truncate a model name to limit chars"""
return name if len(name) <= limit else f"{name[:limit - 3].rstrip('_')}..."
def iter_torchbench_model_names():
from torchbenchmark import _list_model_paths
for model_path in _list_model_paths():
model_name = os.path.basename(model_path)
yield model_name
def iter_models(args, dirpath):
for name in itertools.chain(iter_toy_model_names(), iter_torchbench_model_names()):
if (
(len(args.filter) and (not re.search("|".join(args.filter), name, re.I)))
or (len(args.exclude) and re.search("|".join(args.exclude), name, re.I))
):
save_error(name, args.test, "disabled via cmdline filter/exclude", dirpath)
continue
if name in SKIP:
save_error(name, args.test, f"SKIP because {SKIP[name]}", dirpath)
continue
if name in SKIP_TRAIN_ONLY and args.test == "train":
save_error(name, args.test, f"SKIP_TRAIN_ONLY because {SKIP_TRAIN_ONLY[name]}", dirpath)
continue
yield name
def call_model_with(model, inputs):
if isinstance(inputs, tuple) or isinstance(inputs, list):
return model(*inputs)
elif isinstance(inputs, dict):
return model(**inputs)
elif isinstance(inputs, torch.Tensor):
return model(inputs)
raise RuntimeError("invalid example inputs ", inputs)
class CudaSync:
def __init__(self, sync_every_iter=False):
self.sync_every_iter = sync_every_iter
def iter_sync(self):
if self.sync_every_iter:
torch.cuda.synchronize()
def final_sync(self):
torch.cuda.synchronize()
class NoOpSync:
def __init__(self, sync_every_iter=False):
pass
def iter_sync(self):
pass
def final_sync(self):
pass
class LazySync:
def __init__(self, sync_every_iter=False, skip_final_sync=False):
self.sync_every_iter = sync_every_iter
self.skip_final_sync = skip_final_sync
def iter_sync(self):
torch._lazy.mark_step()
if self.sync_every_iter:
torch._lazy.wait_device_ops()
if current_device == 'cuda':
torch.cuda.synchronize()
def final_sync(self):
torch._lazy.mark_step()
if self.skip_final_sync:
return
torch._lazy.wait_device_ops()
if current_device == 'cuda':
torch.cuda.synchronize()
def dump_lazy_metrics(reset=False):
met = {name: int(metrics.counter_value(name)) for name in metrics.counter_names() if int(metrics.counter_value(name)) > 0}
if reset:
metrics.reset()
return met
def timed(args, benchmark, sync, times=1):
results = None
sync.final_sync()
set_seeds()
if args.test == 'eval':
model, example_inputs = benchmark.get_module()
if current_device == 'lazy':
torch.cuda.set_sync_debug_mode(2)
elif current_device == 'cuda':
torch.cuda.set_sync_debug_mode(0)
# keep the lazy tensor results alive until the final sync
t0 = time.perf_counter()
for i in range(times):
if args.test == 'eval':
results = call_model_with(model, example_inputs)
elif args.test == 'train':
benchmark.train()
# for the last i, let final_sync take care of it
if i < times - 1:
# may be just an async 'mark_step' for lazy, or no-op for cuda
sync.iter_sync()
if current_device in ['lazy', 'cuda']:
# don't assume torch.cuda present unless using cuda
torch.cuda.set_sync_debug_mode(0)
# should be a hard sync for lazy and cuda
# unless strictly measuring lazy trace overhead, then no-op
sync.final_sync()
t1 = time.perf_counter()
return results, t1 - t0
def to_device(tensors, device):
"""Handles moving tensor or tensors (in various containers) to a new device.
Used for various purposes (either correctness checking, or even as an impromptu
means of synchronization). Note: this method doesn't apply a cuda sync; do that outside.
"""
try:
import transformers.modeling_outputs
if (
isinstance(tensors, transformers.modeling_outputs.MaskedLMOutput) or
isinstance(tensors, transformers.modeling_outputs.Seq2SeqLMOutput)
):
# huggingface transformers return classes as model output with many attributes
# we don't want to sync (such as hidden states of every layer) - just sync the logits
tensors = tensors.logits
except ImportError:
pass
try:
import torchbenchmark.models.soft_actor_critic.nets
import torchbenchmark.models.drq.drqutils
if (
isinstance(tensors, torchbenchmark.models.soft_actor_critic.nets.SquashedNormal) or
isinstance(tensors, torchbenchmark.models.drq.drqutils.SquashedNormal)
):
# a SquashedNormal is a py class that holds a loc and scale torch tensor,
# so convert it to a tuple for compatibility with downstream check_results
tensors = (tensors.loc, tensors.scale)
except ImportError:
pass
if isinstance(tensors, tuple) or isinstance(tensors, list):
return tuple(to_device(i, device) for i in tensors)
elif isinstance(tensors, dict):
return {k: to_device(tensors[k], device) for k in tensors}
elif isinstance(tensors, torch.Tensor):
return tensors.to(device)
raise RuntimeError("invalid example tensors ", tensors)
def lazy_overhead_experiment(args, results, benchmark, lazy_benchmark):
timings = np.zeros((args.repeat, 2), np.float64)
ref_sync = CudaSync if current_device == 'cuda' else NoOpSync
warmup0 = time.perf_counter()
for rep in range(args.warmup):
# interleave the runs to handle frequency scaling and load changes
timed(args, benchmark, sync=ref_sync(sync_every_iter=True))
timed(args, lazy_benchmark, sync=LazySync(sync_every_iter=True))
warmup_time = time.perf_counter() - warmup0
bench0 = time.perf_counter()
dump_lazy_metrics(reset=True)
for rep in range(args.repeat):
# interleave the runs to handle frequency scaling and load changes
_, timings[rep, 0] = timed(args, benchmark, sync=ref_sync(sync_every_iter=True))
_, timings[rep, 1] = timed(args, lazy_benchmark, sync=LazySync(skip_final_sync=True))
torch._lazy.wait_device_ops()
if current_device == 'cuda':
torch.cuda.synchronize()
lazy_metrics = dump_lazy_metrics(reset=True)
bench_time = time.perf_counter() - bench0
pvalue = ttest_ind(timings[:, 0], timings[:, 1]).pvalue
median = np.median(timings, axis=0)
fallbacks = ";".join([f"{m}:{lazy_metrics[m]}" for m in lazy_metrics if "aten::" in m])
ops = int(sum([lazy_metrics[m] for m in lazy_metrics if 'lazy::' in m or 'aten::' in m]) / args.repeat)
trace_us = median[1] / 1e-6
us_per_op = trace_us / ops
overhead = median[1] / median[0]
results.append(overhead)
output_csv(
os.path.join(args.output_dir, f"lazy-overheads_{args.test}_{get_unique_suffix()}.csv"),
("dev", "name", "test", "overhead", "pvalue", "ops", "trace_us", "us_per_op", "fallbacks"),
).writerow([current_device, current_name, args.test, f"{overhead:.4f}", f"{pvalue:.4e}",
f"{ops}", f"{trace_us:.4f}", f"{us_per_op:.4f}", f"{fallbacks}"])
print(f"{short_name(current_name, limit=30):<30} {current_device:<4} {args.test:<5} "
f"{'trace overheads':<20} overhead: {overhead:.3f} pvalue: {pvalue:.2e} us_per_op {us_per_op:.3f}")
if args.verbose:
print(f"CIDEBUGOUTPUT,lazy_overhead_experiment,"
f"{current_name},{args.test},{current_device},{overhead:.4f},"
f"{pvalue:.4e},{args.warmup},{args.repeat},{warmup_time:.2f},{bench_time:.2f}")
return (overhead, pvalue)
def lazy_compute_experiment(args, experiment, results, benchmark, lazy_benchmark, sync_every_iter=False):
timings = np.zeros((args.repeat, 2), np.float64)
ref_sync = CudaSync(sync_every_iter=sync_every_iter) if current_device == 'cuda' else NoOpSync()
lazy_sync = LazySync(sync_every_iter=sync_every_iter)
# interleave the runs to handle frequency scaling and load changes
warmup0 = time.perf_counter()
for rep in range(args.warmup):
# warmup
timed(args, benchmark, sync=ref_sync)
timed(args, lazy_benchmark, sync=lazy_sync)
warmup_time = time.perf_counter() - warmup0
# fresh metrics for each timed run
dump_lazy_metrics(reset=True)
bench0 = time.perf_counter()
for rep in range(args.repeat):
# measure
_, timings[rep, 0] = timed(args, benchmark, times=args.inner_loop_repeat, sync=ref_sync)
_, timings[rep, 1] = timed(args, lazy_benchmark, times=args.inner_loop_repeat, sync=lazy_sync)
bench_time = time.perf_counter() - bench0
lazy_metrics = dump_lazy_metrics(reset=True)
if 'CachedCompile' not in lazy_metrics or lazy_metrics['CachedCompile'] != args.repeat * args.inner_loop_repeat:
print("WARNING: lazy cached compile count indicates fallbacks, or something else")
fallbacks = {k: v for (k, v) in lazy_metrics.items() if 'aten::' in k}
if len(fallbacks):
print(f"WARNING: lazy-eager fallbacks detected for [{fallbacks}]")
if args.dump_lazy_counters:
print(lazy_metrics)
pvalue = ttest_ind(timings[:, 0], timings[:, 1]).pvalue
median = np.median(timings, axis=0)
speedup = median[0] / median[1]
results.append(speedup)
output_csv(
os.path.join(args.output_dir, f"lazy-compute_{args.test}_{get_unique_suffix()}.csv"),
("name", "dev", "experiment", "test", "speedup", "pvalue"),
).writerow([current_name, current_device, experiment, args.test, f"{speedup:.4f}", f"{pvalue:.2e}"])
print(f"{short_name(current_name, limit=30):<30} {current_device:<4} "
f"{args.test:<5} {experiment:<20} speedup: {speedup:.3f} pvalue: {pvalue:.2e}")
if args.verbose:
print(f"CIDEBUGOUTPUT,lazy_compute_experiment,"
f"{current_name},{current_device},{experiment},{args.test},{speedup:.4f},"
f"{pvalue:.2e},{args.warmup},{args.repeat},{warmup_time:.2f},{bench_time:.2f}")
return (speedup, pvalue)
def check_eval_correctness(args, benchmark, lazy_benchmark, name):
try:
set_seeds()
model, example_inputs = benchmark.get_module()
model.eval()
correct_result = call_model_with(model, example_inputs)
set_seeds()
lazy_model, lazy_inputs = lazy_benchmark.get_module()
lazy_model.eval()
lazy_result = call_model_with(lazy_model, lazy_inputs)
if not check_results(correct_result, lazy_result, args.device, args.allclose_atol):
print(f"INCORRECT: {name}")
save_error(name, args.test, "Incorrect results.", args.output_dir)
return False
except Exception as e:
print(f"ERROR: {name}: {e}")
save_error(name, args.test, e, args.output_dir)
return False
return True
def just_run_once(args, lazy_benchmark):
set_seeds()
if args.test == 'eval':
model, example_inputs = lazy_benchmark.get_module()
results.append(call_model_with(model, example_inputs))
elif args.test == 'train':
lazy_benchmark.train()
torch._lazy.mark_step()
torch._lazy.wait_device_ops()
if current_device == 'cuda':
torch.cuda.synchronize()
def check_results_impl(correct_result, lazy_result, atol):
    # recursive helper for dealing with nested data structures; checks every
    # element of tuples/dicts instead of returning after the first one
    if type(correct_result) is tuple:
        return all(
            check_results_impl(c, l, atol)
            for c, l in zip(correct_result, lazy_result)
        )
    if type(correct_result) is dict:
        for k in correct_result:
            assert k in lazy_result
        return all(
            check_results_impl(correct_result[k], lazy_result[k], atol)
            for k in correct_result
        )
    assert type(correct_result) is torch.Tensor, f"Expect torch.Tensor but got {type(correct_result)}."
    ans = torch.allclose(correct_result, lazy_result, atol=atol)
    if not ans:
        print(f"correct_result:\n{correct_result}, lazy_result:\n{lazy_result}")
    return ans
def check_results(correct_result, lazy_result, device, atol):
# to_device has recursive logic and special handling for
# extracting relevant tensors from huggingface data structures
correct_result = to_device(correct_result, device)
lazy_result = to_device(lazy_result, device)
return check_results_impl(correct_result, lazy_result, atol)
def check_fuser(args):
if args.fuser == 'noopt':
return
if args.fuser is None:
args.fuser = 'fuser1' if args.device == 'cpu' else 'fuser2'
if args.device == 'cpu':
assert args.fuser in ['fuser0', 'fuser1']
if args.fuser == 'fuser1':
assert torch._C._llvm_enabled(), "Can't use fuser1 (nnc) for CPU without building torch with llvm."
if args.device == 'cuda':
assert args.fuser in ['fuser0', 'fuser1', 'fuser2']
def run_tracing_execute_noops(test, lazy_benchmark):
ltm.set_noop_execution_mode(True)
if test == 'eval':
model, example_inputs = lazy_benchmark.get_module()
# doesn't actually collect a profile, but runs just the lazy trace
# so you can use a profiler on top of the program.
# note: depends on making the backend treat computation execution as a no-op
results = []
for i in range(300):
if test == 'eval':
results.append(call_model_with(model, example_inputs))
elif test == 'train':
lazy_benchmark.train()
# we still do a mark step, to preserve the ratio of how often we split the graph
# and run through the process of 'compile and execute' (even though these are now noops)
torch._lazy.mark_step()
ltm.set_noop_execution_mode(False)
def merge_with_prefix(prefix, tmp_dir, out_dir, headers):
results = []
rfnames = glob.glob(os.path.join(tmp_dir, prefix + "*"))
for rfname in rfnames:
results.extend(open(rfname).readlines()[1:]) # skip header
# the header shouldn't require quotations and the results should already be properly
# quoted via output_csv
with open(os.path.join(out_dir, prefix + "acc.csv"), "a+") as acc_csv:
acc_csv.write(",".join(headers) + "\n")
for l in results:
acc_csv.write(l)
def merge_reformat(tmp_dir, out_dir, table):
out_dir = args.output_dir
# depending on the type of an experiment, fields can be in a different order
# `get_field` deals with all three types including `error`
def get_field(row, name, file_type):
headers = {
"error": ("name", "test", "error"),
"lazy-compute" : ("name", "dev", "experiment", "test", "speedup", "pvalue"),
"lazy-overheads" : ("dev", "name", "test", "overhead", "pvalue", "ops", "trace_us", "us_per_op", "fallbacks")
}
header = headers[file_type]
r = row[header.index(name)] if name in header else "N/A"
return r
csv_files = glob.glob(os.path.join(tmp_dir, "*.csv"))
for csvf in csv_files:
with open(csvf, "r") as csvfile:
prefix = os.path.basename(csvf).split("_")[0]
csvreader = csv.reader(csvfile, delimiter=",", quotechar='"')
# This skips the first row of the CSV file.
next(csvreader)
for r in csvreader:
key = (get_field(r, "name", prefix), get_field(r, "test", prefix))
entry = table[key]
if prefix == "error":
entry["error"] = f'{entry.get("error", "")} {get_field(r, "error", prefix)}'
elif prefix == "lazy-overheads":
entry["overhead"] = get_field(r, "overhead", prefix)
entry["ops"] = get_field(r, "ops", prefix)
entry["trace_us"] = get_field(r, "trace_us", prefix)
entry["us_per_op"] = get_field(r, "us_per_op", prefix)
entry["fallbacks"] = get_field(r, "fallbacks", prefix)
else:
entry[get_field(r, "experiment", prefix)] = get_field(r, "speedup", prefix)
amortized_header = f"amortized {args.inner_loop_repeat}x"
headers = ("name", "test", amortized_header, "unamortized", "overhead", "error", "rc",
"ops", "trace_us", "us_per_op", "fallbacks")
cw = output_csv(
os.path.join(out_dir, f"{args.test}_reformat.csv"),
headers
)
for k, v in table.items():
cw.writerow((k[0], k[1], v.get(amortized_header, 'N/A'),
v.get('unamortized', 'N/A'), v.get('overhead', 'N/A'), v.get('error', 'N/A'), v.get('rc'),
v.get('ops', 'N/A'), v.get('trace_us', 'N/A'), v.get('us_per_op', 'N/A'), v.get('fallbacks', 'N/A')))
def save_error(name, test, error, dir):
output_csv(
os.path.join(dir, f"error_{get_unique_suffix()}.csv"),
("name", "test", "error"),
).writerow([name, test, error])
if __name__ == "__main__" :
parser = argparse.ArgumentParser()
parser.add_argument("--filter", "-k", action="append", default=[], help="filter benchmarks")
parser.add_argument("--exclude", "-x", action="append", default=[], help="filter benchmarks")
parser.add_argument("--device", "-d", default='cuda', help="cpu or cuda")
parser.add_argument("--warmup", type=int, default=4, help="number of warmup runs")
parser.add_argument("--timeout", type=int, default=60 * 10, help="time allocated to each model")
parser.add_argument("--repeat", "-n", type=int, default=4, help="number of timing runs (samples)")
parser.add_argument("--inner_loop_repeat", type=int, default=10, help="repeat the computation this many times per sample")
parser.add_argument("--fuser", type=str, choices=['noopt', 'fuser0', 'fuser1', 'fuser2'], help="0=legacy, 1=nnc, 2=nvfuser")
parser.add_argument("--test", type=str, choices=['eval', 'train'], default='eval')
parser.add_argument("--verbose", action='store_true')
parser.add_argument("--torchbench_dir", type=str, help="path to torchbenchmark repo")
parser.add_argument("--output_dir", type=str, default=".", help="path to write output files")
parser.add_argument("--dump_lazy_counters", action='store_true', help="dump lazy counter values after each timing run")
parser.add_argument("--just_run_once", action="store_true")
parser.add_argument("--run_tracing_execute_noops", action='store_true',
help="Run the tracing portion only, with noop backend, useful for running under a profiler.")
parser.add_argument("--run_in_subprocess", "-s", type=str,
help="which model run in subprocess. This will ignore filter and exclude")
parser.add_argument("--allclose_atol", type=float, default=1e-4,
help="Absolute tolerance to check lazy result again the correct result")
parser.add_argument("--precision", choices=["fp32", "fp16", "amp"], default="fp32", help="enable fp16 modes from: fp32, fp16/half, or amp")
args = parser.parse_args()
results = []
check_fuser(args)
# torchbench_dir = abspath(args.torchbench_dir) if args.torchbench_dir else abspath("../../benchmark")
# assert os.path.exists(os.path.join(torchbench_dir, "torchbenchmark")), "set --torchbench_dir to installed torchbench repo"
# sys.path.append(torchbench_dir)
copy_argv = [] + sys.argv
if args.run_in_subprocess:
try:
from fastNLP.core import logger
logger.setLevel(logging.WARNING)
current_name = args.run_in_subprocess
benchmark_cls = get_benchmark_cls(args.run_in_subprocess)
current_device = args.device
if args.device == 'cuda':
assert 'LTC_TS_CUDA' in os.environ and bool(os.environ['LTC_TS_CUDA']), "set LTC_TS_CUDA for cuda device"
with pick_grad(args, current_name):
with fuser(args.fuser) if args.fuser != 'noopt' else optimized_execution(False):
if args.fuser == 'noopt':
# TODO(whc) cleaner way to configure the fusers; seems i have to set both optimized_execution(False)
# _and_ disable fusers to get no-optimization
torch._C._jit_override_can_fuse_on_cpu(False)
torch._C._jit_override_can_fuse_on_gpu(False)
torch._C._jit_set_texpr_fuser_enabled(False)
torch._C._jit_set_nvfuser_enabled(False)
if args.fuser == 'fuser2':
# special case to disable nvfuser horizontal fusion as it is currently broken
# TODO(whc) remove this once it's fixed
torch._C._jit_set_nvfuser_horizontal_mode(False)
# no try since we should've already filtered out models we can't create
set_seeds()
benchmark = benchmark_cls(test=args.test, device=args.device, extra_args=["--precision", args.precision])
set_seeds()
lazy_benchmark = benchmark_cls(test=args.test, device='lazy', extra_args=["--precision", args.precision])
# TODO: might be redundant
gc.collect()
if args.run_tracing_execute_noops:
print(f"Profiling {current_name}")
run_tracing_execute_noops(args.test, lazy_benchmark)
# when profiling, we really don't want to do anything else
exit(0)
if args.just_run_once:
just_run_once(args, lazy_benchmark)
exit(0)
if args.test == 'eval':
if not check_eval_correctness(args, benchmark, lazy_benchmark, current_name):
exit(3)
lazy_overhead_experiment(args, results, benchmark, lazy_benchmark)
lazy_compute_experiment(args, f"amortized {args.inner_loop_repeat}x", results, benchmark, lazy_benchmark)
lazy_compute_experiment(args, "unamortized", results, benchmark, lazy_benchmark, sync_every_iter=True)
except Exception as e:
print(f"ERROR: {current_name}: {e}")
save_error(current_name, args.test, e, args.output_dir)
exit(13)
exit(0)
import psutil
import subprocess
import tempfile
dirpath = tempfile.mkdtemp()
table = collections.defaultdict(dict)
for model_name in iter_models(args, dirpath):
# if `--run_in_subprocess` is specified, it will override any filters and excludes
# pass the rest of arguments intact such as device, test, repeat, etc
# note, the latest output_dir will override the original one and this is exactly what we want
# for child processes
launch_command = f"python {' '.join(copy_argv)} --run_in_subprocess '{model_name}' --output_dir={dirpath}"
env = os.environ
env["LTC_TS_CUDA"] = "1" if args.device == "cuda" else "0"
rc = 0
try:
if args.verbose:
cp = subprocess.run("nvidia-smi --query-gpu=timestamp,utilization.memory,memory.total,memory.free,memory.used"
" --format=csv,noheader",
capture_output=True, text=True, shell=True)
print(f"CIDEBUGOUTPUT,BEFORE subprocess.run,{model_name},{cp.stdout}")
proc = subprocess.Popen(launch_command,
env=env,
shell=True,
stderr=subprocess.STDOUT)
outs, errs = proc.communicate(timeout=args.timeout)
rc = proc.poll()
except subprocess.TimeoutExpired:
print(f"{model_name} timed out after {args.timeout // 60} minutes! Include it in SKIP or SKIP_TRAIN_ONLY")
save_error(model_name, args.test, "Timed out.", dirpath)
            # to make timeouts easy to spot, they will also have
            # "timed out" in the error column
rc = 17
process = psutil.Process(proc.pid)
for p in process.children(recursive=True):
p.kill()
process.kill()
if args.verbose:
cp = subprocess.run("nvidia-smi --query-gpu=timestamp,utilization.memory,memory.total,memory.free,memory.used"
" --format=csv,noheader",
capture_output=True, text=True, shell=True)
print(f"CIDEBUGOUTPUT,AFTER subprocess.run,{model_name},{args.test},{cp.stdout}")
entry = table[(model_name, args.test)]
entry["rc"] = rc
merge_with_prefix("lazy-overheads_", dirpath, args.output_dir, ("dev", "name", "test", "overhead", "pvalue"))
merge_with_prefix("lazy-compute_", dirpath, args.output_dir, ("name", "dev", "experiment", "test", "speedup", "pvalue"))
merge_with_prefix("error_", dirpath, args.output_dir, ("name", "test", "error"))
merge_reformat(dirpath, args, table)
|
"""
Run PyTorch nightly benchmarking.
"""
import re
import argparse
import itertools
import json
import math
import os
import yaml
import numpy
from typing import List, Tuple, Dict, Optional, Any
from ..utils import REPO_PATH, add_path, get_output_json, get_default_output_json_path
from . import BM_NAME
with add_path(REPO_PATH):
from torchbenchmark.util.experiment.instantiator import list_models, load_model_isolated, TorchBenchModelConfig, \
list_devices, list_tests
from torchbenchmark.util.experiment.metrics import TorchBenchModelMetrics, get_model_test_metrics
CURRENT_DIR = os.path.dirname(os.path.realpath(__file__))
DEFAULT_DELTA_THRESHOLD = 0.07
DEFAULT_TARGET_SCORE = 1000.0
def generate_model_configs(devices: List[str], tests: List[str], model_names: List[str]) -> List[TorchBenchModelConfig]:
"""Use the default batch size and default mode."""
if not model_names:
model_names = list_models()
cfgs = itertools.product(*[devices, tests, model_names])
result = [TorchBenchModelConfig(
name=model_name,
device=device,
test=test,
batch_size=None,
extra_args=[],
extra_env=None,
) for device, test, model_name in cfgs]
return result
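# Example (hypothetical names, illustration only): generate_model_configs(["cuda"],
# ["eval", "train"], ["m"]) yields two configs, (cuda, eval, m) and
# (cuda, train, m), each with the default (None) batch size and no extra args.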
def get_metrics(_config: TorchBenchModelConfig) -> List[str]:
return ["latencies",]
def compute_score(results, reference_latencies: Dict[str, float]) -> float:
# sanity checks
latency_results = {k: v for k, v in results.items() if k.endswith("_latency")}
test_set = set(latency_results.keys())
reference_set = set(reference_latencies.keys())
test_only_set = test_set.difference(reference_set)
    assert not test_only_set, f"Tests {test_only_set} only appear in the result JSON, not in the reference YAML."
    reference_only_set = reference_set.difference(test_set)
    assert not reference_only_set, f"Tests {reference_only_set} only appear in the reference YAML, not in the result JSON."
# check that for every test in reference_latencies, we can find the corresponding tests in latency_results
total_score = 0.0
weight = 1.0 / len(reference_latencies)
for key, ref_latency in reference_latencies.items():
test_latency = latency_results[key]
ref_latency = float(ref_latency)
delta = (test_latency - ref_latency) / test_latency
# If less than threshold, treat it as noise
if abs(delta) <= DEFAULT_DELTA_THRESHOLD:
test_latency = ref_latency
total_score += weight * math.log(ref_latency / test_latency)
score = math.exp(total_score) * DEFAULT_TARGET_SCORE
return score
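# A minimal worked example of the scoring rule above (hypothetical numbers,
# illustration only): with a single reference entry
#     reference_latencies = {"test_eval[m-cuda-eager]_latency": 10.0}
# and a measured result
#     results = {"test_eval[m-cuda-eager]_latency": 9.0}
# the 10% speedup exceeds DEFAULT_DELTA_THRESHOLD (7%), so
#     compute_score(results, reference_latencies)
# returns math.exp(1.0 * math.log(10.0 / 9.0)) * DEFAULT_TARGET_SCORE ~= 1111.1.
# A delta within the threshold is treated as noise, contributes log(1) = 0, and
# leaves the score at DEFAULT_TARGET_SCORE.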
def result_to_output_metrics(results: List[Tuple[TorchBenchModelConfig, TorchBenchModelMetrics]]) -> Dict[str, float]:
# metrics name examples:
# test_eval[timm_regnet-cuda-eager]_latency
# test_eval[timm_regnet-cuda-eager]_cmem
# test_eval[timm_regnet-cuda-eager]_gmem
result_metrics = {}
for _config_id, (config, metrics) in enumerate(results):
metrics_base = f"test_{config.test}[{config.name}-{config.device}-eager]"
latency_metric = f"{metrics_base}_latency"
median_latency = numpy.median(metrics.latencies)
assert median_latency, f"Run failed for metric {latency_metric}"
result_metrics[latency_metric] = median_latency
if metrics.cpu_peak_mem:
cpu_peak_mem = f"{metrics_base}_cmem"
result_metrics[cpu_peak_mem] = metrics.cpu_peak_mem
if metrics.gpu_peak_mem:
gpu_peak_mem = f"{metrics_base}_gmem"
result_metrics[gpu_peak_mem] = metrics.gpu_peak_mem
return result_metrics
def validate(candidates: List[str], choices: List[str]) -> List[str]:
"""Validate the candidates provided by the user is valid"""
for candidate in candidates:
assert candidate in choices, f"Specified {candidate}, but not in available list: {choices}."
return candidates
def generate_model_configs_from_yaml(yaml_file: str) -> Tuple[List[TorchBenchModelConfig], Dict[str, float], Any]:
yaml_file_path = os.path.join(CURRENT_DIR, yaml_file)
with open(yaml_file_path, "r") as yf:
config_obj = yaml.safe_load(yf)
devices = config_obj["metadata"]["devices"]
configs = []
reference_latencies = {}
for device in devices:
for c in config_obj[device]:
if not c["stable"]:
continue
config = TorchBenchModelConfig(
name=c["model"],
device=device,
test=c["test"],
batch_size=c["batch_size"] if "batch_size" in c else None,
extra_args=[],
extra_env=None,
)
configs.append(config)
metrics_base = f"test_{config.test}[{config.name}-{config.device}-eager]"
latency_metric_key = f"{metrics_base}_latency"
reference_latencies[latency_metric_key] = c["median_latency"]
return configs, reference_latencies, config_obj
def parse_test_name(test_name: str) -> TorchBenchModelConfig:
regex = "test_(.*)\[(.*)-(.*)-eager\]"
test, model, device = re.match(regex, test_name).groups()
return TorchBenchModelConfig(
name=model,
device=device,
test=test,
batch_size=None,
extra_args=[],
extra_env=None,
)
def generate_model_configs_from_bisect_yaml(bisect_yaml_file: str) -> List[TorchBenchModelConfig]:
def _remove_suffix(test_name: str):
index_last_underscore = test_name.rfind("_")
return test_name[:index_last_underscore]
with open(bisect_yaml_file, "r") as yf:
bisect_obj = yaml.safe_load(yf)
# remove the suffix
bisect_tests = [ _remove_suffix(test_name) for test_name in bisect_obj["details"] ]
bisect_tests = set(bisect_tests)
configs = [ parse_test_name(test_name_str) for test_name_str in sorted(bisect_tests) ]
return configs
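# Example (hypothetical model name, illustration only): a bisect detail entry such
# as "test_eval[alexnet-cuda-eager]_latency" has its metric suffix stripped by
# _remove_suffix and is then parsed by parse_test_name into
# TorchBenchModelConfig(name="alexnet", device="cuda", test="eval",
# batch_size=None, extra_args=[], extra_env=None).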
def parse_str_to_list(candidates):
if isinstance(candidates, list):
return candidates
candidates = list(map(lambda x: x.strip(), candidates.split(",")))
return candidates
def run_config(config: TorchBenchModelConfig, dryrun: bool=False) -> Optional[TorchBenchModelMetrics]:
"""This function only handles NotImplementedError, all other errors will fail."""
metrics = get_metrics(config)
print(f"Running {config} ...", end='', flush=True)
if dryrun:
print(" [Skip: Dryrun]", flush=True)
return None
# We do not allow RuntimeError in this test
try:
# load the model instance in subprocess
model = load_model_isolated(config)
# get the model test metrics
result: TorchBenchModelMetrics = get_model_test_metrics(model, metrics=metrics)
    except NotImplementedError:
print(" [NotImplemented]", flush=True)
return None
print(" [Done]", flush=True)
return result
def parse_args(args):
parser = argparse.ArgumentParser()
parser.add_argument("--device", "-d", default="cuda", help="Devices to run, splited by comma.")
parser.add_argument("--test", "-t", default="eval", help="Tests to run, splited by comma.")
parser.add_argument("--model", "-m", default=None, type=str, help="Only run the specifice models, splited by comma.")
parser.add_argument("--config", "-c", default=None, help="YAML config to specify tests to run.")
parser.add_argument("--run-bisect", help="Run with the output of regression detector.")
parser.add_argument("--dryrun", action="store_true", help="Dryrun the command.")
parser.add_argument("--score", default=None, help="Generate score from the past run json only.")
parser.add_argument("--output", default=get_default_output_json_path(BM_NAME), help="Specify the path of the output file")
return parser.parse_args(args)
def run(args: List[str]):
args = parse_args(args)
if args.score:
        assert args.config, "To compute the score, you must specify the config YAML using --config."
configs, reference_latencies, config_obj = generate_model_configs_from_yaml(args.config)
with open(args.score, "r") as sp:
run_result = json.load(sp)
input_metrics = run_result["metrics"]
score = compute_score(input_metrics, reference_latencies)
score_version = config_obj["metadata"]["score_version"]
score_name = f"{score_version}_score"
print(f"TorchBench {score_name}: {score}.")
exit(0)
elif args.config:
configs, reference_latencies, config_obj = generate_model_configs_from_yaml(args.config)
elif args.run_bisect:
configs = generate_model_configs_from_bisect_yaml(args.run_bisect)
reference_latencies = None
else:
# If not specified, use the entire model set
if not args.model:
args.model = list_models()
devices = validate(parse_str_to_list(args.device), list_devices())
tests = validate(parse_str_to_list(args.test), list_tests())
models = validate(parse_str_to_list(args.model), list_models())
configs = generate_model_configs(devices, tests, model_names=models)
reference_latencies = None
results = []
try:
for config in configs:
metrics = run_config(config, dryrun=args.dryrun)
if metrics:
results.append([config, metrics])
except KeyboardInterrupt:
print("User keyboard interrupted!")
if not args.dryrun:
metrics = result_to_output_metrics(results)
if reference_latencies:
score = compute_score(metrics, reference_latencies)
score_version = config_obj["metadata"]["score_version"]
score_name = f"{score_version}_score"
metrics[score_name] = score
result = get_output_json(BM_NAME, metrics)
import torch
result["environ"]["device"] = torch.cuda.get_device_name()
with open(args.output, 'w') as f:
json.dump(result, f, indent=4)
|
BM_NAME = "rocm-test" |
"""
Test user-customized invoke function.
"""
import argparse
from typing import List
from ..utils import REPO_PATH, add_path, get_output_json, dump_output
with add_path(REPO_PATH):
from torchbenchmark.util.experiment.instantiator import list_models, load_model_isolated, TorchBenchModelConfig, \
list_devices, list_tests, inject_model_invoke
from torchbenchmark.util.experiment.metrics import TorchBenchModelMetrics, get_model_test_metrics
from typing import Optional
def user_defined_invoke(self):
print(f"Model {self.name} invoke has been replaced!")
self.output_metrics_list = [1.0, 2.0, 3.0, 4.0]
    self.output_metrics_dict = {
"m1": 1.0,
"m2": 2.0,
"m3": 3.0,
}
def parse_args(args):
parser = argparse.ArgumentParser()
parser.add_argument("--device", "-d", default="cuda", help="Devices to run, splited by comma.")
parser.add_argument("--test", "-t", default="eval", help="Tests to run, splited by comma.")
parser.add_argument("--bs", type=int, default=1, help="Test batch size")
parser.add_argument("--model", "-m", default=None, type=str, help="Only run the specifice models, splited by comma.")
parser.add_argument("--inject", action="store_true", help="Inject user defined invoke function to the model.")
return parser.parse_args(args)
def get_metrics(_config: TorchBenchModelConfig) -> List[str]:
return ["latencies"]
def run_config(config: TorchBenchModelConfig, dryrun: bool=False) -> Optional[TorchBenchModelMetrics]:
"""This function only handles NotImplementedError, all other errors will fail."""
metrics = get_metrics(config)
print(f"Running {config} ...", end='')
if dryrun:
return None
# We do not allow RuntimeError in this test
    result = {}
    try:
        # load the model instance in a subprocess
model = load_model_isolated(config)
inject_model_invoke(model, user_defined_invoke)
# get the model test metrics
model.invoke()
result["list_result"] = model.get_model_attribute("output_metrics_list")
result["dict_output"] = model.get_model_attribute("output_metrics_dict")
    except NotImplementedError:
print(" [NotImplemented]")
return None
print(" [Done]")
return result
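# With the injected user_defined_invoke above, the returned result is expected to
# look roughly like (illustration only):
#     {"list_result": [1.0, 2.0, 3.0, 4.0],
#      "dict_output": {"m1": 1.0, "m2": 2.0, "m3": 3.0}}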
def run(args: List[str]):
args = parse_args(args)
config = TorchBenchModelConfig(
name=args.model,
device=args.device,
test=args.test,
batch_size=args.bs,
extra_args=[],
extra_env=None,
)
result = run_config(config)
print(result)
|
import torch
from ..utils import dump_output
from .cases import benchmark_cases
from .util import benchmark
import pprint
from typing import List
BM_NAME = 'functorch'
def run_benchmarks():
metrics = {}
for case_ctor in benchmark_cases:
case = case_ctor()
runtime_ms = benchmark(case)
metrics[case.name()] = runtime_ms
return metrics
def run(args: List[str]):
metrics = run_benchmarks()
result = {
'name': BM_NAME,
'environ': {
'pytorch_git_version': torch.version.git_version,
},
'metrics': metrics,
}
pprint.pprint(result)
dump_output(BM_NAME, result)
|
import torch
import torch.nn as nn
from functorch import vmap, jacfwd, jacrev
from .util import BenchmarkCase
# Batched Hessians of fully connected layers are a popular quantity
# in physics-related models.
# This test case is from https://github.com/pytorch/functorch/issues/989
# We haven't been able to get the full model yet, so this test case
# is going into the functorch userbenchmark instead of torchbenchmark.
class VmapHessianFC(BenchmarkCase):
def __init__(self):
device = 'cuda'
D1 = 2 # x, y
D2 = 3 # u, v, p
B = 10000
x = torch.randn(B, D1).to(device)
model = nn.Sequential(
nn.Linear(D1, 512),
nn.ReLU(),
nn.Linear(512, 512),
nn.ReLU(),
nn.Linear(512, 512),
nn.ReLU(),
nn.Linear(512, 512),
nn.ReLU(),
nn.Linear(512, 512),
nn.ReLU(),
nn.Linear(512, 512),
nn.ReLU(),
nn.Linear(512, D2),
).to(device)
self.model = model
self.x = x
def name(self):
return 'vmap_hessian_fc_cuda'
def run(self):
def predict(x):
out = self.model(x)
return out, out
hessian, pred = vmap(
jacfwd(jacrev(predict, argnums=0, has_aux=True), argnums=0, has_aux=True),
in_dims=0,
)(
self.x
)
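# Shape sketch (illustration only): for input x of shape (B, D1) and model output
# of shape (B, D2), the nested jacfwd(jacrev(..., has_aux=True), has_aux=True)
# under vmap yields `hessian` of shape (B, D2, D1, D1) (per-sample Hessians of the
# output w.r.t. the input) and `pred` of shape (B, D2) as the auxiliary output.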
|
from abc import ABC, abstractmethod
from typing import Any, Callable
from torch.utils.benchmark import Timer
from torch.utils._pytree import tree_flatten
class BenchmarkCase(ABC):
@abstractmethod
def name(self) -> str:
pass
@abstractmethod
def run(self) -> Callable:
pass
def time(fn: Callable, test_runs: int) -> float:
    t = Timer(stmt="fn()", globals={"fn": fn})
    # blocked_autorange() picks the number of measurements adaptively,
    # so test_runs is currently unused here.
    times = t.blocked_autorange()
    return times.median * 1000  # time in ms
def benchmark(case: BenchmarkCase, warmup_runs: int = 10, test_runs: int = 20) -> float:
for _ in range(warmup_runs):
case.run()
return time(case.run, test_runs)
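# A minimal usage sketch (hypothetical case, illustration only): any concrete
# BenchmarkCase can be timed with benchmark(), which returns the median runtime
# of case.run() in milliseconds.
class _ExampleAddCase(BenchmarkCase):
    def name(self) -> str:
        return "example_add_cpu"
    def run(self):
        return sum(range(1000))
# e.g. benchmark(_ExampleAddCase()) returns the median runtime in ms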
|
from .util import BenchmarkCase
from torchbenchmark.models.lennard_jones import Model as LJModel
from torchbenchmark.models.functorch_maml_omniglot import Model as FTMamlOmniglot
from torchbenchmark.models.functorch_dp_cifar10 import Model as FTDPCifar10
from .vmap_hessian_fc import VmapHessianFC
from .simple_models import (
SimpleCNN,
SimpleMLP,
VmapWrapper,
EnsembleMultiWrapper,
EnsembleSingleWrapper,
PerSampleGradWrapper,
)
class TorchBenchModelWrapper(BenchmarkCase):
def __init__(self, name, model, device):
self.model = model('train', device)
self.name_ = f'{name}_{device}'
def name(self):
return self.name_
def run(self):
return self.model.train()
# functorch user benchmark
# ------------------------
# This userbenchmark is used for regression testing of:
# - microbenchmarks,
# - low-quality models that shouldn't go into torchbenchmark
# - pieces of models where we do not have access to the full model.
# - models in torchbenchmark that have not yet made it to a release branch
# (and therefore are not being tracked for regressions).
#
# When adding a functorch-related benchmark, please prefer finding a high-quality
# model that uses the benchmark and adding it to the torchbenchmark suite.
# There is better infra support there and other folks use those models
# for cross-cutting tests.
benchmark_cases = [
# [models from torchbench that haven't made it to stable yet]
lambda: TorchBenchModelWrapper('lennard_jones', LJModel, 'cpu'),
lambda: TorchBenchModelWrapper('lennard_jones', LJModel, 'cuda'),
lambda: TorchBenchModelWrapper('functorch_maml_omniglot', FTMamlOmniglot, 'cpu'),
lambda: TorchBenchModelWrapper('functorch_maml_omniglot', FTMamlOmniglot, 'cuda'),
lambda: TorchBenchModelWrapper('functorch_dp_cifar10', FTDPCifar10, 'cuda'),
# end [models from torchbench that haven't made it to stable yet]
VmapHessianFC,
# [combinations from functorch tutorials]
lambda: VmapWrapper(SimpleMLP, 'cpu'),
lambda: VmapWrapper(SimpleMLP, 'cuda'),
lambda: EnsembleMultiWrapper(SimpleMLP, 'cpu'),
lambda: EnsembleMultiWrapper(SimpleMLP, 'cuda'),
lambda: EnsembleMultiWrapper(SimpleCNN, 'cuda'),
lambda: EnsembleSingleWrapper(SimpleMLP, 'cpu'),
lambda: EnsembleSingleWrapper(SimpleMLP, 'cuda'),
lambda: EnsembleSingleWrapper(SimpleCNN, 'cuda'),
lambda: PerSampleGradWrapper(SimpleMLP, 'cpu'),
lambda: PerSampleGradWrapper(SimpleMLP, 'cuda'),
lambda: PerSampleGradWrapper(SimpleCNN, 'cuda'),
# end [combinations from functorch tutorials]
]
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from functorch import vmap, grad, combine_state_for_ensemble, make_functional_with_buffers
import functools
from .util import BenchmarkCase
class SimpleMLP(nn.Module):
def __init__(self):
super(SimpleMLP, self).__init__()
self.fc1 = nn.Linear(784, 128)
self.fc2 = nn.Linear(128, 128)
self.fc3 = nn.Linear(128, 10)
def forward(self, x):
x = x.flatten(1)
x = self.fc1(x)
x = F.relu(x)
x = self.fc2(x)
x = F.relu(x)
x = self.fc3(x)
return x
@classmethod
def make_input(cls, bs=None):
shape = [64, 1, 28, 28]
if bs is None:
return torch.randn(*shape)
return torch.randn(bs, *shape)
@classmethod
def make_target(cls, bs=None):
shape = [64]
if bs is None:
return torch.randint(10, shape)
        return torch.randint(10, [bs] + shape)
class SimpleCNN(nn.Module):
def __init__(self):
super(SimpleCNN, self).__init__()
self.conv1 = nn.Conv2d(1, 32, 3, 1)
self.conv2 = nn.Conv2d(32, 64, 3, 1)
self.fc1 = nn.Linear(9216, 128)
self.fc2 = nn.Linear(128, 10)
def forward(self, x):
x = self.conv1(x)
x = F.relu(x)
x = self.conv2(x)
x = F.relu(x)
x = F.max_pool2d(x, 2)
x = torch.flatten(x, 1)
x = self.fc1(x)
x = F.relu(x)
x = self.fc2(x)
        output = F.log_softmax(x, dim=1)
        # note: the log_softmax result is computed but discarded below;
        # the raw logits are returned
        output = x
return output
@classmethod
def make_input(cls, bs=None):
shape = [64, 1, 28, 28]
if bs is None:
return torch.randn(*shape)
return torch.randn(bs, *shape)
@classmethod
def make_target(cls, bs=None):
shape = [64]
if bs is None:
return torch.randint(10, shape)
        return torch.randint(10, [bs] + shape)
class VmapWrapper(BenchmarkCase):
def __init__(self, model_cls, device):
self.name_ = f'{model_cls.__name__}_vmap_{device}'
self.model = model_cls().to(device)
self.inputs = model_cls.make_input().to(device)
def name(self):
return self.name_
def run(self):
vmap(self.model)(self.inputs)
def ensemble_setup(self, model_cls, device):
num_models = 10
models = [model_cls().to(device) for _ in range(num_models)]
fmodel, params, buffers = combine_state_for_ensemble(models)
self.fmodel = fmodel
self.params = params
self.buffers = buffers
self.inputs = model_cls.make_input(num_models).to(device)
class EnsembleMultiWrapper(BenchmarkCase):
def __init__(self, model_cls, device):
self.name_ = f'{model_cls.__name__}_ensemble_multi_{device}'
ensemble_setup(self, model_cls, device)
def name(self):
return self.name_
def run(self):
vmap(self.fmodel)(self.params, self.buffers, self.inputs)
class EnsembleSingleWrapper(BenchmarkCase):
def __init__(self, model_cls, device):
self.name_ = f'{model_cls.__name__}_ensemble_single_{device}'
ensemble_setup(self, model_cls, device)
self.inputs = self.inputs[0]
def name(self):
return self.name_
def run(self):
vmap(self.fmodel, (0, 0, None))(self.params, self.buffers, self.inputs)
def loss_fn(predictions, targets):
return F.nll_loss(predictions, targets)
def compute_loss(fmodel, params, buffers, sample, target):
sample = sample.unsqueeze(0) # prepend batch dimension for processing
target = target.unsqueeze(0)
prediction = fmodel(params, buffers, sample)
return loss_fn(prediction, target)
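# In PerSampleGradWrapper below, vmap(grad(compute_loss), (None, None, 0, 0)) maps
# the gradient computation over the per-sample input/target dimension while keeping
# params and buffers shared, producing one gradient per sample (the standard
# functorch per-sample-gradient recipe).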
class PerSampleGradWrapper(BenchmarkCase):
def __init__(self, model_cls, device):
self.name_ = f'{model_cls.__name__}_persamplegrad_{device}'
model = model_cls().to(device)
self.model = make_functional_with_buffers(model)
self.inputs = model_cls.make_input().to(device)
self.targets = model_cls.make_target().to(device)
def name(self):
return self.name_
def run(self):
fmodel, params, buffers = self.model
loss = functools.partial(compute_loss, fmodel)
vmap(grad(loss), (None, None, 0, 0))(params, buffers, self.inputs, self.targets)
|
import argparse
import traceback
import torch
import numpy as np
import json
import os
import time
from datetime import datetime
from typing import List, Union
from torchbenchmark.util.experiment.instantiator import (
TorchBenchModelConfig,
load_model_isolated,
list_models,
)
from torchbenchmark import (
ModelTask,
load_canary_model_by_name,
load_model_by_name,
ModelNotFoundError,
)
from torchbenchmark.util.model import BenchmarkModel
def cli(args: List[str]):
"""Parse input arguments, extracting model specification and batch size"""
    arg_parser = argparse.ArgumentParser()
arg_parser.add_argument(
"--model",
help="Full or partial name of a model to run. If partial, picks the first match.",
default="",
type=str,
)
arg_parser.add_argument(
"--bs",
help="Input batch size to test.",
default=1,
type=int,
)
arg_parser.add_argument(
"--num_warmup",
help="Number of inference warmup iterations.",
default=10,
type=int,
)
arg_parser.add_argument(
"--num_iter",
help="Number of inference iterations for benchmarking.",
default=100,
type=int,
)
    parsed_args, unknown = arg_parser.parse_known_args(args)
return vars(parsed_args), unknown
def save_metrics(metrics):
"""Save metrics to a JSON file with formatted filename"""
metrics_json = {
"name": "torch_trt",
"environ": {
"metrics_version": "v0.1",
"pytorch_git_version": torch.version.git_version,
},
"metrics": metrics,
}
    # Determine the target directory for the JSON metrics relative to this file's location
current_dir = os.path.dirname(os.path.abspath(__file__))
target_dir = os.path.normpath(
os.path.join(current_dir, "../../.userbenchmark/torch_trt/")
)
os.makedirs(target_dir, exist_ok=True)
# Format filename and path to save metrics
metrics_file = "metrics-{}.json".format(
datetime.fromtimestamp(time.time()).strftime("%Y%m%d%H%M%S")
)
metrics_save_path = os.path.join(target_dir, metrics_file)
with open(metrics_save_path, "w") as f:
json.dump(metrics_json, f, indent=4)
def run_single_model(
model: Union[BenchmarkModel, ModelTask],
selected_ir: str,
num_warmup: int,
num_iter: int,
):
"""Run inference benchmarking on a single model"""
# Get basic metrics for the model
metrics = run_one_step(model.invoke, model, num_warmup, num_iter, selected_ir)
# Get PT2 compilation time for the model
try:
if isinstance(model, ModelTask):
pt2_compilation_time = model.get_model_attribute("pt2_compilation_time")
name = model.get_model_attribute("name")
batch_size = model.get_model_attribute("batch_size")
precision = model.get_model_attribute("dargs", "precision")
else:
pt2_compilation_time = getattr(model, "pt2_compilation_time", None)
name = getattr(model, "name", None)
batch_size = getattr(model, "batch_size", None)
precision = getattr(model, "precision", None)
if pt2_compilation_time is not None and pt2_compilation_time:
metrics[
f"{name}.bs_{batch_size}.precision_{precision}."
+ f"ir_{selected_ir}.pt2_compilation_time"
] = pt2_compilation_time
    except Exception:
        # best effort: PT2 compilation time may be unavailable for some models
        pass
return metrics
def run_one_step(
func,
model: Union[BenchmarkModel, ModelTask],
num_warmup: int,
num_iter: int,
selected_ir: str,
):
"""Run one step of inference benchmarking on a single model"""
# Warmup model inference
for _ in range(num_warmup):
func()
result_summary = []
# Run inference for the specified number of iterations
for _ in range(num_iter):
torch.cuda.synchronize()
start_event = torch.cuda.Event(enable_timing=True)
end_event = torch.cuda.Event(enable_timing=True)
        # Collect time_ns() instead of time(), which is not guaranteed to provide
        # better than 1-second precision; see https://docs.python.org/3/library/time.html#time.time.
t0 = time.time_ns()
start_event.record()
func()
end_event.record()
torch.cuda.synchronize()
t1 = time.time_ns()
result_summary.append(
(start_event.elapsed_time(end_event), (t1 - t0) / 1_000_000)
)
# Get median times for GPU and CPU Walltime
gpu_time = np.median(list(map(lambda x: x[0], result_summary)))
cpu_walltime = np.median(list(map(lambda x: x[1], result_summary)))
# Differentiate model attribute access based on input type
if isinstance(model, ModelTask):
num_batches = model.get_model_attribute("NUM_BATCHES")
name = model.get_model_attribute("name")
batch_size = model.get_model_attribute("batch_size")
precision = model.get_model_attribute("dargs", "precision")
else:
num_batches = getattr(model, "NUM_BATCHES", None)
name = getattr(model, "name", None)
batch_size = getattr(model, "batch_size", None)
precision = getattr(model, "precision", None)
if num_batches is not None:
median_gpu_time_per_batch = gpu_time / num_batches
median_cpu_walltime_per_batch = cpu_walltime / num_batches
else:
median_gpu_time_per_batch = gpu_time
median_cpu_walltime_per_batch = cpu_walltime
# Store metrics as dictionary
metrics = {
f"{name}.bs_{batch_size}.precision_{precision}."
+ f"ir_{selected_ir}.median_gpu_time_ms_per_batch": median_gpu_time_per_batch,
f"{name}.bs_{batch_size}.precision_{precision}."
+ f"ir_{selected_ir}.median_cpu_walltime_ms_per_batch": median_cpu_walltime_per_batch,
}
return metrics
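# Example of the metric keys produced above (hypothetical model and values,
# illustration only):
#     "resnet50.bs_1.precision_fp32.ir_default.median_gpu_time_ms_per_batch": 3.2
#     "resnet50.bs_1.precision_fp32.ir_default.median_cpu_walltime_ms_per_batch": 3.5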
def run(args: List[str]):
"""Run inference and extract requested metrics"""
parsed_args, unknown_args = cli(args)
# Attempt to extract specified IR for logging purposes
try:
ir_idx = unknown_args.index("--ir")
selected_ir = unknown_args[ir_idx + 1]
except (ValueError, IndexError):
selected_ir = "default"
# Parse model string if specified, otherwise run all models
# Adapted from benchmark/run.py
if parsed_args["model"]:
try:
Model = load_model_by_name(parsed_args["model"])
except ModuleNotFoundError:
traceback.print_exc()
exit(-1)
        except ModelNotFoundError:
            print(
                f"Warning: The model {parsed_args['model']} cannot be found in the core model set."
            )
            Model = None
if not Model:
try:
Model = load_canary_model_by_name(parsed_args["model"])
except ModuleNotFoundError:
traceback.print_exc()
exit(-1)
except ModelNotFoundError:
print(
f"Error: The model {parsed_args['model']} cannot be found at either core or canary model set."
)
exit(-1)
# For single models, use a BenchmarkModel instance
model = Model(
device="cuda",
test="eval",
batch_size=parsed_args["bs"],
extra_args=[
"--backend",
]
+ unknown_args,
)
all_metrics = run_single_model(
model,
selected_ir,
parsed_args["num_warmup"],
parsed_args["num_iter"],
)
else:
all_metrics = {}
# For all models, use ModelTask instances
for model_name in list_models():
config = TorchBenchModelConfig(
name=model_name,
test="eval",
device="cuda",
batch_size=parsed_args["bs"],
extra_args=[
"--backend",
]
+ unknown_args,
)
try:
Model = load_model_isolated(config=config)
except ValueError as e:
print(
f"Loading model {model_name} failed with:\n{e}\nSkipping the model."
)
continue
metrics = run_single_model(
Model,
selected_ir,
parsed_args["num_warmup"],
parsed_args["num_iter"],
)
all_metrics = {**all_metrics, **metrics}
# Delete model instance and clean up workspace
del Model
save_metrics(all_metrics)
|
import torch
import argparse
import json
import os
import time
import torch.utils.jit.log_extract as log_extract
from datetime import datetime
from typing import Any, List
def parse_fusers(extra_args: List[str]):
parser = argparse.ArgumentParser()
parser.add_argument(
"--fusers",
nargs="*",
default=[],
choices=["no_fuser", "fuser0", "fuser1", "fuser2"],
help="List of fusers to run tests on")
parser.add_argument("--filters", nargs="*", default=[], help='List of fuser microbenchmarks to test')
parser.add_argument("--output", help="specifiy the output file name")
args = parser.parse_args(extra_args)
return args
class NVFuserBenchmark():
def __init__(self, name, ir, warmup_runs=10, test_runs=20):
self.name = name
self.ir = ir
self.warmup_runs = warmup_runs
self.test_runs = test_runs
def run_test(self, inputs, fuser_name: str) -> float:
if fuser_name == "no_fuser":
return log_extract.run_baseline_no_fusion(self.ir, inputs)
elif fuser_name == "nnc-static":
return log_extract.run_nnc(self.ir, inputs, dynamic=False)
elif fuser_name == "nnc-dynamic" or fuser_name == "fuser1":
return log_extract.run_nnc(self.ir, inputs, dynamic=True)
elif fuser_name == "fuser2" or fuser_name == "nvfuser":
return log_extract.run_nvfuser(self.ir, inputs)
        assert False, f"Unknown fuser: {fuser_name}"
def get_inputs(self) -> List[Any]:
_, inputs = log_extract.load_graph_and_inputs(self.ir)
return inputs
def dump_metrics(metrics, output_name):
output = {
"name": "nvfuser",
"environ": {"pytorch_git_version": torch.version.git_version},
"metrics": metrics,
}
current_dir = os.path.dirname(os.path.abspath(__file__))
target_dir = os.path.normpath(os.path.join(current_dir, "../../.userbenchmark/nvfuser/"))
os.makedirs(target_dir, exist_ok=True)
fname = "metrics-{}.json".format(datetime.fromtimestamp(time.time()).strftime("%Y%m%d%H%M%S"))
full_fname = os.path.join(target_dir, fname)
if output_name is not None:
full_fname = output_name
with open(full_fname, 'w') as f:
json.dump(output, f, indent=4)
def run_nvfuser_microbenchmarks(extra_args: List[str]):
from userbenchmark.nvfuser.ir import ir_list
benchmarks = [NVFuserBenchmark(name, ir) for name, ir in ir_list]
args = parse_fusers(extra_args)
filters, fusers = args.filters, args.fusers
if len(filters) > 0:
benchmarks = [x for x in benchmarks if x.name in filters]
if len(fusers) == 0:
fusers = ["no_fuser", "nnc-static", "nnc-dynamic", "nvfuser"]
metrics = {}
for b in benchmarks:
outputs = []
for fuser in fusers:
inputs = b.get_inputs()
runtime = b.run_test(inputs, fuser)
outputs.append((fuser, runtime))
metrics[f"{fuser}:{b.name}"] = runtime
print(f"{b.name}:", "; ".join(f"{name} = {time:.3f} ms" for name, time in outputs))
dump_metrics(metrics, args.output)
def run(args: List[str]):
run_nvfuser_microbenchmarks(extra_args=args)
|
# contains the list of microbenchmark strings
# format: list of tuples (name, IR)
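# These entries are consumed by the nvfuser userbenchmark roughly as
#     benchmarks = [NVFuserBenchmark(name, ir) for name, ir in ir_list]
# where each IR string is loaded via torch.utils.jit.log_extract.load_graph_and_inputs
# and replayed under the selected fuser.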
ir_list = [("autogen-0", """graph(%0 : Float(512, strides=[1], requires_grad=0, device=cuda:0),
%1 : Float(4096, 512, strides=[512, 1], requires_grad=0, device=cuda:0),
%2 : int):
%3 : int[] = prim::Constant[value=[4096, 512]]()
%4 : int[] = prim::Constant[value=[1, 4096, 512]]()
%5 : Float(1, 4096, 512, strides=[2097152, 512, 1], requires_grad=0, device=cuda:0) = aten::reshape(%1, %4)
%6 : Float(1, 4096, 512, strides=[2097152, 512, 1], requires_grad=0, device=cuda:0) = aten::add(%5, %0, %2)
%7 : Float(4096, 512, strides=[512, 1], requires_grad=0, device=cuda:0) = aten::reshape(%6, %3)
%8 : Float(1, 4096, 512, strides=[2097152, 512, 1], requires_grad=0, device=cuda:0) = aten::reshape(%7, %4)
%9 : Float(1, 4096, 512, strides=[2097152, 512, 1], requires_grad=0, device=cuda:0) = aten::relu(%8)
%10 : Float(4096, 512, strides=[512, 1], requires_grad=0, device=cuda:0) = aten::reshape(%9, %3)
return (%10)
"""), ("autogen-1", """graph(%0 : Float(1, 12, 4096, 64, strides=[3145728, 64, 768, 1], requires_grad=0, device=cuda:0),
%1 : Float(requires_grad=0, device=cuda:0)):
%2 : int[] = prim::Constant[value=[1, 12, 64, 64, 64]]()
%3 : Float(1, 12, 4096, 64, strides=[3145728, 64, 768, 1], requires_grad=0, device=cuda:0) = aten::div(%0, %1)
%4 : Float(1, 12, 64, 64, 64, strides=[768, 64, 49152, 768, 1], requires_grad=0, device=cuda:0) = aten::reshape(%3, %2)
return (%4)
"""), ("autogen-2", """graph(%0 : Float(8, 256, 56, 56, strides=[802816, 3136, 56, 1], requires_grad=0, device=cuda:0),
%1 : Float(256, strides=[1], requires_grad=0, device=cuda:0),
%2 : Float(256, strides=[1], requires_grad=0, device=cuda:0),
%3 : Float(256, strides=[1], requires_grad=0, device=cuda:0),
%4 : Float(256, strides=[1], requires_grad=0, device=cuda:0),
%5 : Float(8, 256, 56, 56, strides=[802816, 3136, 56, 1], requires_grad=0, device=cuda:0),
%6 : Float(256, strides=[1], requires_grad=0, device=cuda:0),
%7 : Float(256, strides=[1], requires_grad=0, device=cuda:0),
%8 : Float(256, strides=[1], requires_grad=0, device=cuda:0),
%9 : Float(256, strides=[1], requires_grad=0, device=cuda:0),
%10 : int):
%11 : float = prim::Constant[value=1.0000000000000001e-05]()
%12 : float = prim::Constant[value=0.10000000000000001]()
%13 : bool = prim::Constant[value=0]()
%14 : Float(8, 256, 56, 56, strides=[802816, 3136, 56, 1], requires_grad=0, device=cuda:0), %15 : Tensor, %16 : Tensor = aten::native_batch_norm(%5, %6, %7, %8, %9, %13, %12, %11)
%17 : Float(8, 256, 56, 56, strides=[802816, 3136, 56, 1], requires_grad=0, device=cuda:0), %18 : Tensor, %19 : Tensor = aten::native_batch_norm(%0, %1, %2, %3, %4, %13, %12, %11)
%20 : Float(8, 256, 56, 56, strides=[802816, 3136, 56, 1], requires_grad=0, device=cuda:0) = aten::add(%17, %14, %10)
%21 : Float(8, 256, 56, 56, strides=[802816, 3136, 56, 1], requires_grad=0, device=cuda:0) = aten::relu(%20)
return (%21)
"""), ("autogen-3", """graph(%0 : Double(requires_grad=0, device=cuda:0),
%1 : Double(requires_grad=0, device=cuda:0),
%2 : Double(requires_grad=0, device=cuda:0),
%3 : Double(requires_grad=0, device=cuda:0),
%4 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0),
%5 : Double(requires_grad=0, device=cuda:0),
%6 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0),
%7 : Double(requires_grad=0, device=cuda:0),
%8 : Double(requires_grad=0, device=cuda:0),
%9 : Double(requires_grad=0, device=cuda:0),
%10 : Double(requires_grad=0, device=cuda:0),
%11 : Double(requires_grad=0, device=cuda:0),
%12 : Double(requires_grad=0, device=cuda:0),
%13 : Double(requires_grad=0, device=cuda:0),
%14 : Double(requires_grad=0, device=cuda:0),
%15 : Double(requires_grad=0, device=cuda:0),
%16 : Double(requires_grad=0, device=cuda:0),
%17 : Double(requires_grad=0, device=cuda:0),
%18 : Double(requires_grad=0, device=cuda:0),
%19 : Double(requires_grad=0, device=cuda:0),
%20 : Double(requires_grad=0, device=cuda:0),
%21 : Double(requires_grad=0, device=cuda:0),
%22 : Double(requires_grad=0, device=cuda:0),
%23 : Double(1, 1, 26, strides=[26, 26, 1], requires_grad=0, device=cuda:0),
%24 : Double(requires_grad=0, device=cuda:0),
%25 : Double(requires_grad=0, device=cuda:0),
%26 : Double(requires_grad=0, device=cuda:0),
%27 : int,
%28 : int,
%29 : int,
%30 : int,
%31 : int,
%32 : int,
%33 : int,
%34 : int,
%35 : int,
%36 : int,
%37 : int,
%38 : int,
%39 : int,
%40 : int,
%41 : int,
%42 : int,
%43 : int,
%44 : int,
%45 : int,
%46 : int,
%47 : int,
%48 : int,
%49 : int,
%50 : int,
%51 : int,
%52 : int,
%53 : int,
%54 : int,
%55 : int,
%56 : int,
%57 : int,
%58 : int,
%59 : int,
%60 : int,
%61 : int,
%62 : int,
%63 : int,
%64 : int,
%65 : int,
%66 : int):
%67 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%6, %16)
%68 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%67, %12)
%69 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%6, %26)
%70 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%4, %25)
%71 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%70, %10, %66)
%72 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%4, %71)
%73 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%72, %24, %65)
%74 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%73, %69, %64)
%75 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%74, %23)
%76 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%4, %22)
%77 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%76, %9, %63)
%78 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%4, %77)
%79 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%78, %8, %62)
%80 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%4, %79)
%81 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%80, %21, %61)
%82 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::sqrt(%6)
%83 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%82, %81)
%84 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%4, %20)
%85 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%84, %7, %60)
%86 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%4, %85)
%87 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%86, %19, %59)
%88 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%87, %83, %58)
%89 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%6, %88)
%90 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%4, %18)
%91 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%90, %5, %57)
%92 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%4, %91)
%93 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%92, %3, %56)
%94 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%4, %93)
%95 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%94, %17, %55)
%96 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%95, %89, %54)
%97 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%96, %74)
%98 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%4, %16)
%99 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%98, %15, %53)
%100 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%6, %99)
%101 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%100, %12)
%102 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%4, %14)
%103 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%102, %13, %52)
%104 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%4, %103)
%105 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%104, %12)
%106 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%105, %11, %51)
%107 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%106, %101, %50)
%108 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::pow(%107, %49)
%109 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::sub(%108, %97, %48)
%110 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::sqrt(%109)
%111 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%105, %11, %47)
%112 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%111, %101, %46)
%113 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%112, %110, %45)
%114 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%113, %75, %44)
%115 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::reciprocal(%114)
%116 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%115, %2)
%117 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%105, %11, %43)
%118 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%117, %101, %42)
%119 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::sub(%118, %110, %41)
%120 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::reciprocal(%119)
%121 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%120, %2)
%122 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%110, %121)
%123 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%122, %116)
%124 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%75, %0)
%125 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%124, %123)
%126 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%125, %2, %40)
%127 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%70, %0)
%128 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%127, %10, %39)
%129 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%76, %0)
%130 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%129, %9, %38)
%131 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%4, %130)
%132 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%78, %8, %37)
%133 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%132, %131, %36)
%134 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%82, %133)
%135 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%84, %0)
%136 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%135, %7, %35)
%137 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%136, %134, %34)
%138 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%6, %137)
%139 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%90, %0)
%140 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%139, %5, %33)
%141 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%4, %140)
%142 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%92, %3, %32)
%143 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%142, %141, %31)
%144 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%143, %138, %30)
%145 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%144, %74)
%146 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%102, %2)
%147 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%146, %1, %29)
%148 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%147, %68, %28)
%149 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%107, %0)
%150 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%149, %148)
%151 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::sub(%150, %145, %27)
return (%151, %148, %146, %144, %128, %126, %123, %121, %119, %116, %114, %110, %109, %108, %107, %104, %102, %100, %96, %82, %75, %74, %68, %67)
"""), ("batchnorm-silu", """graph(%0 : Float(32, 480, 14, 14, strides=[94080, 196, 14, 1], requires_grad=0, device=cuda:0),
%1 : Float(480, strides=[1], requires_grad=0, device=cuda:0),
%2 : Float(480, strides=[1], requires_grad=0, device=cuda:0),
%3 : Float(480, strides=[1], requires_grad=0, device=cuda:0),
%4 : Float(480, strides=[1], requires_grad=0, device=cuda:0)):
%5 : float = prim::Constant[value=1.0000000000000001e-05]()
%6 : float = prim::Constant[value=0.10000000000000001]()
%7 : bool = prim::Constant[value=0]()
%8 : Float(32, 480, 14, 14, strides=[94080, 196, 14, 1], requires_grad=0, device=cuda:0), %9 : Tensor, %10 : Tensor = aten::native_batch_norm(%0, %1, %2, %3, %4, %7, %6, %5)
%11 : Float(32, 480, 14, 14, strides=[94080, 196, 14, 1], requires_grad=0, device=cuda:0) = aten::silu(%8)
return (%11)
"""), ("autogen-4", """graph(%0 : Float(768, strides=[1], requires_grad=0, device=cuda:0),
%1 : Float(768, strides=[1], requires_grad=0, device=cuda:0),
%2 : Float(1, 4096, 768, strides=[3145728, 768, 1], requires_grad=0, device=cuda:0),
%3 : Float(768, strides=[1], requires_grad=0, device=cuda:0),
%4 : Float(4096, 768, strides=[768, 1], requires_grad=0, device=cuda:0),
%5 : int,
%6 : int):
%7 : float = prim::Constant[value=9.9999999999999998e-13]()
%8 : int[] = prim::Constant[value=[768]]()
%9 : int[] = prim::Constant[value=[4096, 768]]()
%10 : int[] = prim::Constant[value=[1, 4096, 768]]()
%11 : Float(1, 4096, 768, strides=[3145728, 768, 1], requires_grad=0, device=cuda:0) = aten::reshape(%4, %10)
%12 : Float(1, 4096, 768, strides=[3145728, 768, 1], requires_grad=0, device=cuda:0) = aten::add(%11, %3, %6)
%13 : Float(4096, 768, strides=[768, 1], requires_grad=0, device=cuda:0) = aten::reshape(%12, %9)
%14 : Float(1, 4096, 768, strides=[3145728, 768, 1], requires_grad=0, device=cuda:0) = aten::reshape(%13, %10)
%15 : Float(1, 4096, 768, strides=[3145728, 768, 1], requires_grad=0, device=cuda:0) = aten::add(%14, %2, %5)
%16 : Float(1, 4096, 768, strides=[3145728, 768, 1], requires_grad=0, device=cuda:0), %17 : Tensor, %18 : Tensor = aten::native_layer_norm(%15, %8, %0, %1, %7)
%19 : Float(4096, 768, strides=[768, 1], requires_grad=0, device=cuda:0) = aten::reshape(%16, %9)
return (%19)
"""), ("autogen-5", """graph(%0 : Float(96, 160, 7, 7, strides=[7840, 49, 7, 1], requires_grad=0, device=cuda:0),
%1 : Float(160, strides=[1], requires_grad=0, device=cuda:0),
%2 : Float(160, strides=[1], requires_grad=0, device=cuda:0),
%3 : Float(160, strides=[1], requires_grad=0, device=cuda:0),
%4 : Float(160, strides=[1], requires_grad=0, device=cuda:0),
%5 : Float(96, 160, 7, 7, strides=[7840, 49, 7, 1], requires_grad=0, device=cuda:0),
%6 : Float(160, strides=[1], requires_grad=0, device=cuda:0),
%7 : Float(160, strides=[1], requires_grad=0, device=cuda:0),
%8 : Float(160, strides=[1], requires_grad=0, device=cuda:0),
%9 : Float(160, strides=[1], requires_grad=0, device=cuda:0),
%10 : Float(96, 160, 7, 7, strides=[7840, 49, 7, 1], requires_grad=0, device=cuda:0),
%11 : Float(160, strides=[1], requires_grad=0, device=cuda:0),
%12 : Float(160, strides=[1], requires_grad=0, device=cuda:0),
%13 : Float(160, strides=[1], requires_grad=0, device=cuda:0),
%14 : Float(160, strides=[1], requires_grad=0, device=cuda:0),
%15 : int,
%16 : int):
%17 : float = prim::Constant[value=1.0000000000000001e-05]()
%18 : float = prim::Constant[value=0.10000000000000001]()
%19 : bool = prim::Constant[value=0]()
%20 : Float(96, 160, 7, 7, strides=[7840, 49, 7, 1], requires_grad=0, device=cuda:0), %21 : Tensor, %22 : Tensor = aten::native_batch_norm(%10, %11, %12, %13, %14, %19, %18, %17)
%23 : Float(96, 160, 7, 7, strides=[7840, 49, 7, 1], requires_grad=0, device=cuda:0), %24 : Tensor, %25 : Tensor = aten::native_batch_norm(%5, %6, %7, %8, %9, %19, %18, %17)
%26 : Float(96, 160, 7, 7, strides=[7840, 49, 7, 1], requires_grad=0, device=cuda:0), %27 : Tensor, %28 : Tensor = aten::native_batch_norm(%0, %1, %2, %3, %4, %19, %18, %17)
%29 : Float(96, 160, 7, 7, strides=[7840, 49, 7, 1], requires_grad=0, device=cuda:0) = aten::add(%26, %23, %16)
%30 : Float(96, 160, 7, 7, strides=[7840, 49, 7, 1], requires_grad=0, device=cuda:0) = aten::add(%29, %20, %15)
return (%30)
"""), ("autogen-6", """graph(%0 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0),
%1 : Double(requires_grad=0, device=cuda:0),
%2 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0),
%3 : Double(requires_grad=0, device=cuda:0),
%4 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0),
%5 : Double(requires_grad=0, device=cuda:0),
%6 : Double(requires_grad=0, device=cuda:0),
%7 : Double(requires_grad=0, device=cuda:0),
%8 : Double(requires_grad=0, device=cuda:0),
%9 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0),
%10 : Double(requires_grad=0, device=cuda:0),
%11 : Double(requires_grad=0, device=cuda:0),
%12 : Double(requires_grad=0, device=cuda:0),
%13 : Double(requires_grad=0, device=cuda:0),
%14 : Double(requires_grad=0, device=cuda:0),
%15 : Double(requires_grad=0, device=cuda:0),
%16 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0),
%17 : Double(requires_grad=0, device=cuda:0),
%18 : Double(requires_grad=0, device=cuda:0),
%19 : Double(requires_grad=0, device=cuda:0),
%20 : Double(requires_grad=0, device=cuda:0),
%21 : Double(requires_grad=0, device=cuda:0),
%22 : Double(requires_grad=0, device=cuda:0),
%23 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0),
%24 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0),
%25 : Double(requires_grad=0, device=cuda:0),
%26 : Double(requires_grad=0, device=cuda:0),
%27 : Double(requires_grad=0, device=cuda:0),
%28 : Double(requires_grad=0, device=cuda:0),
%29 : Double(requires_grad=0, device=cuda:0),
%30 : Double(requires_grad=0, device=cuda:0),
%31 : Double(requires_grad=0, device=cuda:0),
%32 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0),
%33 : Double(requires_grad=0, device=cuda:0),
%34 : Double(requires_grad=0, device=cuda:0),
%35 : Double(requires_grad=0, device=cuda:0),
%36 : Double(requires_grad=0, device=cuda:0),
%37 : Double(requires_grad=0, device=cuda:0),
%38 : Double(requires_grad=0, device=cuda:0),
%39 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0),
%40 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0),
%41 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0),
%42 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0),
%43 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0),
%44 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0),
%45 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0),
%46 : Double(requires_grad=0, device=cuda:0),
%47 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0),
%48 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0),
%49 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0),
%50 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0),
%51 : Double(1, 1, 26, strides=[26, 26, 1], requires_grad=0, device=cuda:0),
%52 : int,
%53 : int,
%54 : int,
%55 : int,
%56 : int,
%57 : int,
%58 : int,
%59 : int,
%60 : int,
%61 : int,
%62 : int,
%63 : int,
%64 : int,
%65 : int,
%66 : int,
%67 : int,
%68 : int,
%69 : int,
%70 : int,
%71 : int,
%72 : int,
%73 : int,
%74 : int,
%75 : int,
%76 : int,
%77 : int,
%78 : int,
%79 : int,
%80 : int,
%81 : int,
%82 : int,
%83 : int,
%84 : int,
%85 : int,
%86 : int,
%87 : int,
%88 : int,
%89 : int,
%90 : int,
%91 : int,
%92 : int,
%93 : int,
%94 : int):
%95 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%50, %51)
%96 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%24, %50)
%97 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::sub(%49, %96, %94)
%98 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::reciprocal(%47)
%99 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%98, %3)
%100 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%99, %97)
%101 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::div(%100, %22)
%102 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%45, %46, %93)
%103 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%102, %44, %92)
%104 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%103, %101, %91)
%105 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%104, %95, %90)
%106 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::pow(%48, %89)
%107 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%42, %47)
%108 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%107, %22)
%109 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%108, %41)
%110 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::div(%109, %106)
%111 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%110, %105)
%112 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%45, %46, %88)
%113 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%112, %44, %87)
%114 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::sub(%113, %101, %86)
%115 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::pow(%43, %85)
%116 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::div(%108, %115)
%117 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%116, %40)
%118 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%117, %114)
%119 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%42, %99)
%120 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%119, %41)
%121 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%120, %40)
%122 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%121, %97)
%123 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%95, %22)
%124 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%123, %39)
%125 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%124, %122, %84)
%126 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::sub(%125, %118, %83)
%127 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::sub(%126, %111, %82)
%128 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::reciprocal(%2)
%129 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%128, %3)
%130 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%9, %38)
%131 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%4, %37)
%132 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%131, %36, %81)
%133 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%132, %130, %80)
%134 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%4, %133)
%135 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%134, %35, %79)
%136 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%135, %22)
%137 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%136, %23)
%138 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%4, %34)
%139 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%138, %33, %78)
%140 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%139, %24)
%141 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%139, %32)
%142 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%141, %31)
%143 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%142, %129)
%144 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::sub(%143, %140, %77)
%145 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::sub(%144, %137, %76)
%146 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%145, %129)
%147 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%4, %30)
%148 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%147, %29, %75)
%149 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%9, %148)
%150 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%4, %28)
%151 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%150, %27, %74)
%152 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%4, %151)
%153 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%152, %26, %73)
%154 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%4, %153)
%155 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%154, %25, %72)
%156 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%155, %149, %71)
%157 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%156, %146, %70)
%158 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%157, %23)
%159 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%135, %24)
%160 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%23, %129)
%161 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%140, %22)
%162 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%161, %160)
%163 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::sub(%162, %159, %69)
%164 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%163, %129)
%165 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%4, %21)
%166 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%165, %20, %68)
%167 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%4, %166)
%168 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%167, %19, %67)
%169 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%4, %168)
%170 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%169, %18, %66)
%171 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%4, %170)
%172 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%171, %17, %65)
%173 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%16, %172)
%174 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%9, %15)
%175 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%4, %14)
%176 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%175, %13, %64)
%177 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%4, %176)
%178 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%177, %12, %63)
%179 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%4, %178)
%180 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%179, %11, %62)
%181 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%4, %180)
%182 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%181, %10, %61)
%183 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%182, %174, %60)
%184 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%183, %173, %59)
%185 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%9, %184)
%186 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%4, %8)
%187 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%186, %7, %58)
%188 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%4, %187)
%189 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%188, %6, %57)
%190 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%4, %189)
%191 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%190, %5, %56)
%192 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%4, %191)
%193 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%192, %3, %55)
%194 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%193, %185, %54)
%195 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%194, %164, %53)
%196 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%195, %2)
%197 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::sub(%196, %158, %52)
%198 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%197, %129)
%199 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%198, %1)
%200 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%199, %99)
%201 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%200, %127)
%202 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::div(%201, %0)
return (%202, %198, %197, %195, %190, %188, %186, %179, %177, %175, %169, %167, %165, %163, %161, %160, %157, %152, %150, %145, %142, %139, %136, %135, %134, %131, %130, %129, %99, %97, %95)
"""), ("autogen-7", """graph(%0 : Float(8, 197, 6, 64, strides=[75648, 64, 12608, 1], requires_grad=0, device=cuda:0)):
%1 : int[] = prim::Constant[value=[1576, 384]]()
%2 : int[] = prim::Constant[value=[8, 197, 384]]()
%3 : Float(8, 197, 384, strides=[75648, 384, 1], requires_grad=0, device=cuda:0) = aten::reshape(%0, %2)
%4 : Float(1576, 384, strides=[384, 1], requires_grad=0, device=cuda:0) = aten::reshape(%3, %1)
return (%4)
"""), ("autogen-8", """graph(%0 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0),
%1 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0),
%2 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0),
%3 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0),
%4 : Double(requires_grad=0, device=cuda:0),
%5 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0)):
%6 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::log(%5)
%7 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%3, %4)
%8 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::div(%7, %2)
%9 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::div(%8, %1)
%10 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%9, %6)
%11 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%10, %0)
return (%11, %6)
"""), ("autogen-9", """graph(%0 : Float(1, 12, 1, 64, 64, strides=[768, 64, 49152, 768, 1], requires_grad=0, device=cuda:0)):
%1 : int[] = prim::Constant[value=[1, 12, 64, 64, 1, 1]]()
%2 : int[] = prim::Constant[value=[1, 12, 64, 64, 1]]()
%3 : int[] = prim::Constant[value=[1, 12, 64, 64]]()
%4 : Float(1, 12, 64, 64, strides=[768, 64, 768, 1], requires_grad=0, device=cuda:0) = aten::reshape(%0, %3)
%5 : Float(1, 12, 64, 64, 1, strides=[768, 64, 768, 1, 1], requires_grad=0, device=cuda:0) = aten::reshape(%4, %2)
%6 : Float(1, 12, 64, 64, 1, 1, strides=[768, 64, 768, 1, 1, 1], requires_grad=0, device=cuda:0) = aten::reshape(%5, %1)
return (%6, %4)
"""), ("autogen-10", """graph(%0 : Long(1, 1, 26, strides=[26, 26, 1], requires_grad=0, device=cuda:0),
%1 : Long(200, 200, strides=[200, 1], requires_grad=0, device=cuda:0)):
%2 : int[] = prim::Constant[value=[200, 200, 1]]()
%3 : Long(200, 200, 1, strides=[200, 1, 1], requires_grad=0, device=cuda:0) = aten::reshape(%1, %2)
%4 : Bool(200, 200, 26, strides=[5200, 26, 1], requires_grad=0, device=cuda:0) = aten::ge(%0, %3)
return (%4)
"""), ("autogen-11", """graph(%0 : Float(768, strides=[1], requires_grad=0, device=cuda:0),
%1 : Float(512, 768, strides=[768, 1], requires_grad=0, device=cuda:0),
%2 : int):
%3 : int[] = prim::Constant[value=[1, 512, 12, 64]]()
%4 : int[] = prim::Constant[value=[512, 768]]()
%5 : int[] = prim::Constant[value=[1, 512, 768]]()
%6 : Float(1, 512, 768, strides=[393216, 768, 1], requires_grad=0, device=cuda:0) = aten::reshape(%1, %5)
%7 : Float(1, 512, 768, strides=[393216, 768, 1], requires_grad=0, device=cuda:0) = aten::add(%6, %0, %2)
%8 : Float(512, 768, strides=[768, 1], requires_grad=0, device=cuda:0) = aten::reshape(%7, %4)
%9 : Float(1, 512, 768, strides=[393216, 768, 1], requires_grad=0, device=cuda:0) = aten::reshape(%8, %5)
%10 : Float(1, 512, 12, 64, strides=[393216, 768, 64, 1], requires_grad=0, device=cuda:0) = aten::reshape(%9, %3)
return (%10)
"""), ("autogen-12", """graph(%0 : Float(32, 360, 14, 14, strides=[70560, 196, 14, 1], requires_grad=0, device=cuda:0),
%1 : Float(32, 360, 1, 1, strides=[360, 1, 1, 1], requires_grad=0, device=cuda:0)):
%2 : Float(32, 360, 1, 1, strides=[360, 1, 1, 1], requires_grad=0, device=cuda:0) = aten::sigmoid(%1)
%3 : Float(32, 360, 14, 14, strides=[70560, 196, 14, 1], requires_grad=0, device=cuda:0) = aten::mul(%0, %2)
return (%3)
"""), ("autogen-13", """graph(%0 : Float(256, strides=[1], requires_grad=0, device=cuda:0),
%1 : Float(256, strides=[1], requires_grad=0, device=cuda:0),
%2 : Float(256, strides=[1], requires_grad=0, device=cuda:0),
%3 : Float(256, strides=[1], requires_grad=0, device=cuda:0),
%4 : Float(32, 256, 56, 56, strides=[802816, 3136, 56, 1], requires_grad=0, device=cuda:0),
%5 : Float(256, strides=[1], requires_grad=0, device=cuda:0)):
%6 : float = prim::Constant[value=1.0000000000000001e-05]()
%7 : float = prim::Constant[value=0.10000000000000001]()
%8 : bool = prim::Constant[value=0]()
%9 : int[] = prim::Constant[value=[1, 256, 1, 1]]()
%10 : Float(1, 256, 1, 1, strides=[256, 1, 1, 1], requires_grad=0, device=cuda:0) = aten::reshape(%5, %9)
%11 : Float(32, 256, 56, 56, strides=[802816, 3136, 56, 1], requires_grad=0, device=cuda:0) = aten::div(%4, %10)
%12 : Float(32, 256, 56, 56, strides=[802816, 3136, 56, 1], requires_grad=0, device=cuda:0), %13 : Tensor, %14 : Tensor = aten::native_batch_norm(%11, %0, %1, %2, %3, %8, %7, %6)
return (%12, %13, %14)
"""), ("autogen-14", """graph(%0 : Float(8, 2048, 2048, strides=[4194304, 2048, 1], requires_grad=0, device=cuda:0),
%1 : Float(8, 2048, 2048, strides=[1, 16384, 8], requires_grad=0, device=cuda:0),
%2 : Double(requires_grad=0, device=cuda:0),
%3 : Double(requires_grad=0, device=cuda:0),
%4 : Float(1, 1, 2048, 2048, strides=[4194304, 4194304, 2048, 1], requires_grad=0, device=cuda:0),
%5 : Float(1, 1, 1, 2048, strides=[2048, 2048, 2048, 1], requires_grad=0, device=cuda:0),
%6 : int,
%7 : int,
%8 : int):
%9 : bool = prim::Constant[value=0]()
%10 : int = prim::Constant[value=-1]()
%11 : int[] = prim::Constant[value=[8, 2048, 2048]]()
%12 : int[] = prim::Constant[value=[1, 8, 2048, 2048]]()
%13 : Float(1, 1, 2048, 2048, strides=[4194304, 4194304, 2048, 1], requires_grad=0, device=cuda:0) = aten::mul(%4, %5)
%14 : Float(1, 1, 2048, 2048, strides=[4194304, 4194304, 2048, 1], requires_grad=0, device=cuda:0) = aten::sub(%3, %13, %8)
%15 : Float(1, 1, 2048, 2048, strides=[4194304, 4194304, 2048, 1], requires_grad=0, device=cuda:0) = aten::mul(%14, %2)
%16 : Float(1, 8, 2048, 2048, strides=[8, 1, 16384, 8], requires_grad=0, device=cuda:0) = aten::reshape(%1, %12)
%17 : Float(1, 8, 2048, 2048, strides=[8, 1, 16384, 8], requires_grad=0, device=cuda:0) = aten::add(%16, %15, %7)
%18 : Float(1, 8, 2048, 2048, strides=[33554432, 4194304, 2048, 1], requires_grad=0, device=cuda:0) = aten::reshape(%0, %12)
%19 : Float(1, 8, 2048, 2048, strides=[33554432, 4194304, 2048, 1], requires_grad=0, device=cuda:0) = aten::add(%18, %17, %6)
%20 : Float(8, 2048, 2048, strides=[4194304, 2048, 1], requires_grad=0, device=cuda:0) = aten::reshape(%19, %11)
%21 : Float(1, 8, 2048, 2048, strides=[33554432, 4194304, 2048, 1], requires_grad=0, device=cuda:0) = aten::reshape(%20, %12)
%22 : Float(1, 8, 2048, 2048, strides=[33554432, 4194304, 2048, 1], requires_grad=0, device=cuda:0) = aten::_softmax(%21, %10, %9)
return (%22, %17)
"""), ("batchnorm-silu-mean", """graph(%0 : Float(32, 240, 14, 14, strides=[47040, 196, 14, 1], requires_grad=0, device=cuda:0),
%1 : Float(240, strides=[1], requires_grad=0, device=cuda:0),
%2 : Float(240, strides=[1], requires_grad=0, device=cuda:0),
%3 : Float(240, strides=[1], requires_grad=0, device=cuda:0),
%4 : Float(240, strides=[1], requires_grad=0, device=cuda:0)):
%5 : NoneType = prim::Constant()
%6 : bool = prim::Constant[value=1]()
%7 : int[] = prim::Constant[value=[2, 3]]()
%8 : float = prim::Constant[value=1.0000000000000001e-05]()
%9 : float = prim::Constant[value=0.10000000000000001]()
%10 : bool = prim::Constant[value=0]()
%11 : Float(32, 240, 14, 14, strides=[47040, 196, 14, 1], requires_grad=0, device=cuda:0), %12 : Tensor, %13 : Tensor = aten::native_batch_norm(%0, %1, %2, %3, %4, %10, %9, %8)
%14 : Float(32, 240, 14, 14, strides=[47040, 196, 14, 1], requires_grad=0, device=cuda:0) = aten::silu(%11)
%15 : Float(32, 240, 1, 1, strides=[240, 1, 1, 1], requires_grad=0, device=cuda:0) = aten::mean(%14, %7, %6, %5)
return (%15, %14)
"""), ("autogen-15", """graph(%0 : Float(768, strides=[1], requires_grad=0, device=cuda:0),
%1 : Float(512, 768, strides=[768, 1], requires_grad=0, device=cuda:0),
%2 : int):
%3 : int[] = prim::Constant[value=[512, 768]]()
%4 : int[] = prim::Constant[value=[1, 512, 768]]()
%5 : Float(1, 512, 768, strides=[393216, 768, 1], requires_grad=0, device=cuda:0) = aten::reshape(%1, %4)
%6 : Float(1, 512, 768, strides=[393216, 768, 1], requires_grad=0, device=cuda:0) = aten::add(%5, %0, %2)
%7 : Float(512, 768, strides=[768, 1], requires_grad=0, device=cuda:0) = aten::reshape(%6, %3)
%8 : Float(1, 512, 768, strides=[393216, 768, 1], requires_grad=0, device=cuda:0) = aten::reshape(%7, %4)
%9 : Float(512, 768, strides=[768, 1], requires_grad=0, device=cuda:0) = aten::reshape(%8, %3)
return (%9, %8)
"""), ("autogen-16", """graph(%0 : Float(1, 1, 512, 512, strides=[262144, 262144, 512, 1], requires_grad=0, device=cuda:0),
%1 : Float(12, 512, 512, strides=[262144, 512, 1], requires_grad=0, device=cuda:0),
%2 : int):
%3 : bool = prim::Constant[value=0]()
%4 : int = prim::Constant[value=-1]()
%5 : int[] = prim::Constant[value=[12, 512, 512]]()
%6 : int[] = prim::Constant[value=[1, 12, 512, 512]]()
%7 : Float(1, 12, 512, 512, strides=[3145728, 262144, 512, 1], requires_grad=0, device=cuda:0) = aten::reshape(%1, %6)
%8 : Float(1, 12, 512, 512, strides=[3145728, 262144, 512, 1], requires_grad=0, device=cuda:0) = aten::add(%7, %0, %2)
%9 : Float(12, 512, 512, strides=[262144, 512, 1], requires_grad=0, device=cuda:0) = aten::reshape(%8, %5)
%10 : Float(12, 512, 512, strides=[262144, 512, 1], requires_grad=0, device=cuda:0) = aten::_softmax(%9, %4, %3)
return (%10)
"""), ("autogen-17", """graph(%0 : Float(1, 12, 64, 64, 1, strides=[49152, 4096, 64, 1, 1], requires_grad=0, device=cuda:0),
%1 : Float(768, 64, 128, strides=[8192, 128, 1], requires_grad=0, device=cuda:0),
%2 : int,
%3 : int,
%4 : int):
%5 : NoneType = prim::Constant()
%6 : bool = prim::Constant[value=1]()
%7 : int[] = prim::Constant[value=[-1]]()
%8 : int[] = prim::Constant[value=[1, 12, 64, 64, 128]]()
%9 : Float(1, 12, 64, 64, 128, strides=[6291456, 524288, 8192, 128, 1], requires_grad=0, device=cuda:0) = aten::reshape(%1, %8)
%10 : Float(1, 12, 64, 64, 128, strides=[6291456, 524288, 8192, 128, 1], requires_grad=0, device=cuda:0) = aten::sub(%9, %0, %4)
%11 : Float(1, 12, 64, 64, 128, strides=[6291456, 524288, 8192, 128, 1], requires_grad=0, device=cuda:0) = aten::exp(%10)
%12 : Float(1, 12, 64, 64, 1, strides=[49152, 4096, 64, 1, 1], requires_grad=0, device=cuda:0) = aten::sum(%11, %7, %6, %5)
%13 : Float(1, 12, 64, 64, 1, strides=[49152, 4096, 64, 1, 1], requires_grad=0, device=cuda:0) = aten::log(%12)
%14 : Float(1, 12, 64, 64, 1, strides=[49152, 4096, 64, 1, 1], requires_grad=0, device=cuda:0) = aten::add(%13, %0, %3)
%15 : Float(1, 12, 64, 64, 128, strides=[6291456, 524288, 8192, 128, 1], requires_grad=0, device=cuda:0) = aten::sub(%9, %14, %2)
%16 : Float(1, 12, 64, 64, 128, strides=[6291456, 524288, 8192, 128, 1], requires_grad=0, device=cuda:0) = aten::exp(%15)
return (%16)
"""), ("autogen-18", """graph(%0 : Float(384, strides=[1], requires_grad=0, device=cuda:0),
%1 : Float(384, strides=[1], requires_grad=0, device=cuda:0),
%2 : Float(8, 197, 384, strides=[75648, 384, 1], requires_grad=0, device=cuda:0),
%3 : Float(1, 197, 384, strides=[75648, 384, 1], requires_grad=0, device=cuda:0),
%4 : int):
%5 : int[] = prim::Constant[value=[1576, 384]]()
%6 : float = prim::Constant[value=9.9999999999999995e-07]()
%7 : int[] = prim::Constant[value=[384]]()
%8 : Float(8, 197, 384, strides=[75648, 384, 1], requires_grad=0, device=cuda:0) = aten::add(%2, %3, %4)
%9 : Float(8, 197, 384, strides=[75648, 384, 1], requires_grad=0, device=cuda:0), %10 : Tensor, %11 : Tensor = aten::native_layer_norm(%8, %7, %0, %1, %6)
%12 : Float(1576, 384, strides=[384, 1], requires_grad=0, device=cuda:0) = aten::reshape(%9, %5)
return (%12, %8)
"""), ("autogen-19", """graph(%0 : Float(256, strides=[1], requires_grad=0, device=cuda:0),
%1 : Float(256, strides=[1], requires_grad=0, device=cuda:0),
%2 : Float(1, 4096, 256, strides=[2097152, 512, 1], requires_grad=0, device=cuda:0),
%3 : Float(4096, 256, strides=[256, 1], requires_grad=0, device=cuda:0),
%4 : int):
%5 : int[] = prim::Constant[value=[4096, 256]]()
%6 : float = prim::Constant[value=9.9999999999999998e-13]()
%7 : int[] = prim::Constant[value=[256]]()
%8 : int[] = prim::Constant[value=[1, 4096, 256]]()
%9 : Float(1, 4096, 256, strides=[1048576, 256, 1], requires_grad=0, device=cuda:0) = aten::reshape(%3, %8)
%10 : Float(1, 4096, 256, strides=[1048576, 256, 1], requires_grad=0, device=cuda:0) = aten::add(%2, %9, %4)
%11 : Float(1, 4096, 256, strides=[1048576, 256, 1], requires_grad=0, device=cuda:0), %12 : Tensor, %13 : Tensor = aten::native_layer_norm(%10, %7, %0, %1, %6)
%14 : Float(4096, 256, strides=[256, 1], requires_grad=0, device=cuda:0) = aten::reshape(%11, %5)
return (%14, %10)
"""), ("autogen-20", """graph(%0 : Float(16, 512, 7, 7, strides=[25088, 49, 7, 1], requires_grad=0, device=cuda:0),
%1 : Float(16, 512, 7, 7, strides=[25088, 49, 7, 1], requires_grad=0, device=cuda:0),
%2 : Float(512, strides=[1], requires_grad=0, device=cuda:0),
%3 : Float(512, strides=[1], requires_grad=0, device=cuda:0),
%4 : Float(512, strides=[1], requires_grad=0, device=cuda:0),
%5 : Float(512, strides=[1], requires_grad=0, device=cuda:0),
%6 : int):
%7 : int[] = prim::Constant[value=[16, 512]]()
%8 : NoneType = prim::Constant()
%9 : bool = prim::Constant[value=1]()
%10 : int[] = prim::Constant[value=[-1, -2]]()
%11 : float = prim::Constant[value=1.0000000000000001e-05]()
%12 : float = prim::Constant[value=0.10000000000000001]()
%13 : bool = prim::Constant[value=0]()
%14 : Float(16, 512, 7, 7, strides=[25088, 49, 7, 1], requires_grad=0, device=cuda:0), %15 : Tensor, %16 : Tensor = aten::native_batch_norm(%1, %2, %3, %4, %5, %13, %12, %11)
%17 : Float(16, 512, 7, 7, strides=[25088, 49, 7, 1], requires_grad=0, device=cuda:0) = aten::add(%14, %0, %6)
%18 : Float(16, 512, 7, 7, strides=[25088, 49, 7, 1], requires_grad=0, device=cuda:0) = aten::relu(%17)
%19 : Float(16, 512, 1, 1, strides=[512, 1, 1, 1], requires_grad=0, device=cuda:0) = aten::mean(%18, %10, %9, %8)
%20 : Float(16, 512, strides=[512, 1], requires_grad=0, device=cuda:0) = aten::reshape(%19, %7)
return (%20)
"""), ("autogen-21", """graph(%0 : Float(768, strides=[1], requires_grad=0, device=cuda:0),
%1 : Float(768, strides=[1], requires_grad=0, device=cuda:0),
%2 : Float(1, 512, 768, strides=[393216, 768, 1], requires_grad=0, device=cuda:0),
%3 : Float(1, 512, 768, strides=[393216, 768, 1], requires_grad=0, device=cuda:0),
%4 : Float(1, 512, 768, strides=[393216, 768, 1], requires_grad=0, device=cuda:0),
%5 : int,
%6 : int):
%7 : int[] = prim::Constant[value=[512, 768]]()
%8 : float = prim::Constant[value=9.9999999999999998e-13]()
%9 : int[] = prim::Constant[value=[768]]()
%10 : Float(1, 512, 768, strides=[393216, 768, 1], requires_grad=0, device=cuda:0) = aten::add(%3, %4, %6)
%11 : Float(1, 512, 768, strides=[393216, 768, 1], requires_grad=0, device=cuda:0) = aten::add(%10, %2, %5)
%12 : Float(1, 512, 768, strides=[393216, 768, 1], requires_grad=0, device=cuda:0), %13 : Tensor, %14 : Tensor = aten::native_layer_norm(%11, %9, %0, %1, %8)
%15 : Float(512, 768, strides=[768, 1], requires_grad=0, device=cuda:0) = aten::reshape(%12, %7)
return (%15, %12)
"""), ("autogen-22", """graph(%0 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0),
%1 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0),
%2 : Double(requires_grad=0, device=cuda:0),
%3 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0),
%4 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0),
%5 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0),
%6 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0),
%7 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0),
%8 : Double(requires_grad=0, device=cuda:0),
%9 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0),
%10 : Double(requires_grad=0, device=cuda:0),
%11 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0),
%12 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0),
%13 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0),
%14 : Double(requires_grad=0, device=cuda:0),
%15 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0),
%16 : Double(requires_grad=0, device=cuda:0),
%17 : Double(requires_grad=0, device=cuda:0),
%18 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0),
%19 : int,
%20 : int,
%21 : int,
%22 : int,
%23 : int,
%24 : int,
%25 : int,
%26 : int,
%27 : int):
%28 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::pow(%18, %27)
%29 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::reciprocal(%28)
%30 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%29, %17)
%31 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%15, %16)
%32 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%13, %14)
%33 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%30, %12)
%34 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%0, %7)
%35 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%32, %0)
%36 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%11, %8)
%37 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%36, %10, %26)
%38 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%37, %9, %25)
%39 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%38, %8)
%40 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%39, %4)
%41 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%6, %7)
%42 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%3, %5)
%43 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%3, %4)
%44 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%43, %2)
%45 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%44, %34)
%46 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%35, %45, %24)
%47 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%1, %33)
%48 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::sub(%46, %47, %23)
%49 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::sub(%48, %31, %22)
%50 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::sub(%49, %42, %21)
%51 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::sub(%50, %40, %20)
%52 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::sub(%51, %41, %19)
%53 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%52, %0)
return (%53, %43, %42, %38, %36, %34, %33, %31, %30)
"""), ("autogen-23", """graph(%0 : Float(32, 2, 256, 28, 28, strides=[401408, 200704, 784, 28, 1], requires_grad=0, device=cuda:0),
%1 : Float(32, 2, 1, 256, strides=[512, 256, 512, 1], requires_grad=0, device=cuda:0)):
%2 : NoneType = prim::Constant()
%3 : int[] = prim::Constant[value=[1]]()
%4 : int[] = prim::Constant[value=[32, 2, 256, 1, 1]]()
%5 : int[] = prim::Constant[value=[32, 512, 1, 1]]()
%6 : int[] = prim::Constant[value=[32, 512]]()
%7 : bool = prim::Constant[value=0]()
%8 : int = prim::Constant[value=1]()
%9 : Float(32, 2, 1, 256, strides=[512, 256, 256, 1], requires_grad=0, device=cuda:0) = aten::_softmax(%1, %8, %7)
%10 : Float(32, 512, strides=[512, 1], requires_grad=0, device=cuda:0) = aten::reshape(%9, %6)
%11 : Float(32, 512, 1, 1, strides=[512, 1, 1, 1], requires_grad=0, device=cuda:0) = aten::reshape(%10, %5)
%12 : Float(32, 2, 256, 1, 1, strides=[512, 256, 1, 1, 1], requires_grad=0, device=cuda:0) = aten::reshape(%11, %4)
%13 : Float(32, 2, 256, 28, 28, strides=[401408, 200704, 784, 28, 1], requires_grad=0, device=cuda:0) = aten::mul(%0, %12)
%14 : Float(32, 256, 28, 28, strides=[200704, 784, 28, 1], requires_grad=0, device=cuda:0) = aten::sum(%13, %3, %7, %2)
return (%14)
"""), ("autogen-24", """graph(%0 : Double(requires_grad=0, device=cuda:0),
%1 : Double(requires_grad=0, device=cuda:0),
%2 : Double(requires_grad=0, device=cuda:0),
%3 : Double(requires_grad=0, device=cuda:0),
%4 : Float(1024, 3072, strides=[3072, 1], requires_grad=0, device=cuda:0),
%5 : int,
%6 : int,
%7 : float):
%8 : int[] = prim::Constant[value=[1024, 3072]]()
%9 : int[] = prim::Constant[value=[1, 1024, 3072]]()
%10 : Float(1, 1024, 3072, strides=[3145728, 3072, 1], requires_grad=0, device=cuda:0) = aten::reshape(%4, %9)
%11 : Float(1, 1024, 3072, strides=[3145728, 3072, 1], requires_grad=0, device=cuda:0) = aten::pow(%10, %7)
%12 : Float(1, 1024, 3072, strides=[3145728, 3072, 1], requires_grad=0, device=cuda:0) = aten::mul(%11, %3)
%13 : Float(1, 1024, 3072, strides=[3145728, 3072, 1], requires_grad=0, device=cuda:0) = aten::add(%10, %12, %6)
%14 : Float(1, 1024, 3072, strides=[3145728, 3072, 1], requires_grad=0, device=cuda:0) = aten::mul(%13, %2)
%15 : Float(1, 1024, 3072, strides=[3145728, 3072, 1], requires_grad=0, device=cuda:0) = aten::tanh(%14)
%16 : Float(1, 1024, 3072, strides=[3145728, 3072, 1], requires_grad=0, device=cuda:0) = aten::add(%15, %1, %5)
%17 : Float(1, 1024, 3072, strides=[3145728, 3072, 1], requires_grad=0, device=cuda:0) = aten::mul(%10, %0)
%18 : Float(1, 1024, 3072, strides=[3145728, 3072, 1], requires_grad=0, device=cuda:0) = aten::mul(%17, %16)
%19 : Float(1024, 3072, strides=[3072, 1], requires_grad=0, device=cuda:0) = aten::reshape(%18, %8)
return (%19)
"""), ("autogen-25", """graph(%0 : Float(768, strides=[1], requires_grad=0, device=cuda:0),
%1 : Float(768, strides=[1], requires_grad=0, device=cuda:0),
%2 : Float(16, 128, 768, strides=[98304, 768, 1], requires_grad=0, device=cuda:0),
%3 : Float(16, 128, 1, strides=[128, 1, 1], requires_grad=0, device=cuda:0),
%4 : Double(requires_grad=0, device=cuda:0),
%5 : int,
%6 : int,
%7 : int):
%8 : int[] = prim::Constant[value=[2048, 768]]()
%9 : NoneType = prim::Constant()
%10 : bool = prim::Constant[value=1]()
%11 : int[] = prim::Constant[value=[-1]]()
%12 : Float(16, 128, 1, strides=[128, 1, 1], requires_grad=0, device=cuda:0) = aten::add(%3, %4, %7)
%13 : Float(16, 128, 1, strides=[128, 1, 1], requires_grad=0, device=cuda:0) = aten::mean(%2, %11, %10, %9)
%14 : Float(16, 128, 768, strides=[98304, 768, 1], requires_grad=0, device=cuda:0) = aten::sub(%2, %13, %6)
%15 : Float(16, 128, 768, strides=[98304, 768, 1], requires_grad=0, device=cuda:0) = aten::mul(%1, %14)
%16 : Float(16, 128, 768, strides=[98304, 768, 1], requires_grad=0, device=cuda:0) = aten::div(%15, %12)
%17 : Float(16, 128, 768, strides=[98304, 768, 1], requires_grad=0, device=cuda:0) = aten::add(%16, %0, %5)
%18 : Float(2048, 768, strides=[768, 1], requires_grad=0, device=cuda:0) = aten::reshape(%17, %8)
return (%18)
"""), ("autogen-26", """graph(%0 : Float(1, 8, 2048, 2048, strides=[8, 1, 16384, 8], requires_grad=0, device=cuda:0),
%1 : Float(8, 2048, 2048, strides=[4194304, 2048, 1], requires_grad=0, device=cuda:0),
%2 : int):
%3 : bool = prim::Constant[value=0]()
%4 : int = prim::Constant[value=-1]()
%5 : int[] = prim::Constant[value=[8, 2048, 2048]]()
%6 : int[] = prim::Constant[value=[1, 8, 2048, 2048]]()
%7 : Float(1, 8, 2048, 2048, strides=[33554432, 4194304, 2048, 1], requires_grad=0, device=cuda:0) = aten::reshape(%1, %6)
%8 : Float(1, 8, 2048, 2048, strides=[33554432, 4194304, 2048, 1], requires_grad=0, device=cuda:0) = aten::add(%7, %0, %2)
%9 : Float(8, 2048, 2048, strides=[4194304, 2048, 1], requires_grad=0, device=cuda:0) = aten::reshape(%8, %5)
%10 : Float(1, 8, 2048, 2048, strides=[33554432, 4194304, 2048, 1], requires_grad=0, device=cuda:0) = aten::reshape(%9, %6)
%11 : Float(1, 8, 2048, 2048, strides=[33554432, 4194304, 2048, 1], requires_grad=0, device=cuda:0) = aten::_softmax(%10, %4, %3)
return (%11)
"""), ("autogen-27", """graph(%0 : Float(768, strides=[1], requires_grad=0, device=cuda:0),
%1 : Float(768, strides=[1], requires_grad=0, device=cuda:0),
%2 : Float(1, 512, 768, strides=[393216, 768, 1], requires_grad=0, device=cuda:0),
%3 : Float(768, strides=[1], requires_grad=0, device=cuda:0),
%4 : Float(512, 768, strides=[768, 1], requires_grad=0, device=cuda:0),
%5 : int,
%6 : int):
%7 : float = prim::Constant[value=9.9999999999999998e-13]()
%8 : int[] = prim::Constant[value=[768]]()
%9 : int[] = prim::Constant[value=[512, 768]]()
%10 : int[] = prim::Constant[value=[1, 512, 768]]()
%11 : Float(1, 512, 768, strides=[393216, 768, 1], requires_grad=0, device=cuda:0) = aten::reshape(%4, %10)
%12 : Float(1, 512, 768, strides=[393216, 768, 1], requires_grad=0, device=cuda:0) = aten::add(%11, %3, %6)
%13 : Float(512, 768, strides=[768, 1], requires_grad=0, device=cuda:0) = aten::reshape(%12, %9)
%14 : Float(1, 512, 768, strides=[393216, 768, 1], requires_grad=0, device=cuda:0) = aten::reshape(%13, %10)
%15 : Float(1, 512, 768, strides=[393216, 768, 1], requires_grad=0, device=cuda:0) = aten::add(%14, %2, %5)
%16 : Float(1, 512, 768, strides=[393216, 768, 1], requires_grad=0, device=cuda:0), %17 : Tensor, %18 : Tensor = aten::native_layer_norm(%15, %8, %0, %1, %7)
%19 : Float(512, 768, strides=[768, 1], requires_grad=0, device=cuda:0) = aten::reshape(%16, %9)
return (%19, %16)
"""), ("autogen-28", """graph(%0 : Float(128, strides=[1], requires_grad=0, device=cuda:0),
%1 : Float(128, strides=[1], requires_grad=0, device=cuda:0),
%2 : Float(1, 512, 128, strides=[65536, 128, 1], requires_grad=0, device=cuda:0),
%3 : Float(1, 512, 128, strides=[65536, 128, 1], requires_grad=0, device=cuda:0),
%4 : Float(1, 512, 128, strides=[65536, 128, 1], requires_grad=0, device=cuda:0),
%5 : int,
%6 : int):
%7 : int[] = prim::Constant[value=[512, 128]]()
%8 : float = prim::Constant[value=9.9999999999999998e-13]()
%9 : int[] = prim::Constant[value=[128]]()
%10 : Float(1, 512, 128, strides=[65536, 128, 1], requires_grad=0, device=cuda:0) = aten::add(%3, %4, %6)
%11 : Float(1, 512, 128, strides=[65536, 128, 1], requires_grad=0, device=cuda:0) = aten::add(%10, %2, %5)
%12 : Float(1, 512, 128, strides=[65536, 128, 1], requires_grad=0, device=cuda:0), %13 : Tensor, %14 : Tensor = aten::native_layer_norm(%11, %9, %0, %1, %8)
%15 : Float(512, 128, strides=[128, 1], requires_grad=0, device=cuda:0) = aten::reshape(%12, %7)
return (%15)
"""), ("autogen-29", """graph(%0 : Float(720, 64, 64, strides=[4096, 64, 1], requires_grad=0, device=cuda:0),
%1 : Float(720, 64, 64, strides=[4096, 64, 1], requires_grad=0, device=cuda:0),
%2 : Float(1, 12, 60, 64, 64, 1, strides=[64, 245760, 4096, 64, 1, 64], requires_grad=0, device=cuda:0),
%3 : Float(1, 12, 60, 64, 64, 1, strides=[64, 245760, 4096, 64, 1, 64], requires_grad=0, device=cuda:0),
%4 : int,
%5 : int,
%6 : int):
%7 : int[] = prim::Constant[value=[720, 64, 64]]()
%8 : int[] = prim::Constant[value=[1, 12, 60, 64, 64]]()
%9 : Float(1, 12, 60, 64, 64, strides=[2949120, 245760, 4096, 64, 1], requires_grad=0, device=cuda:0) = aten::reshape(%3, %8)
%10 : Float(1, 12, 60, 64, 64, strides=[2949120, 245760, 4096, 64, 1], requires_grad=0, device=cuda:0) = aten::reshape(%2, %8)
%11 : Float(1, 12, 60, 64, 64, strides=[2949120, 245760, 4096, 64, 1], requires_grad=0, device=cuda:0) = aten::reshape(%1, %8)
%12 : Float(1, 12, 60, 64, 64, strides=[2949120, 245760, 4096, 64, 1], requires_grad=0, device=cuda:0) = aten::reshape(%0, %8)
%13 : Float(1, 12, 60, 64, 64, strides=[2949120, 245760, 4096, 64, 1], requires_grad=0, device=cuda:0) = aten::add(%12, %11, %6)
%14 : Float(720, 64, 64, strides=[4096, 64, 1], requires_grad=0, device=cuda:0) = aten::reshape(%13, %7)
%15 : Float(1, 12, 60, 64, 64, strides=[2949120, 245760, 4096, 64, 1], requires_grad=0, device=cuda:0) = aten::reshape(%14, %8)
%16 : Float(1, 12, 60, 64, 64, strides=[2949120, 245760, 4096, 64, 1], requires_grad=0, device=cuda:0) = aten::add(%15, %10, %5)
%17 : Float(720, 64, 64, strides=[4096, 64, 1], requires_grad=0, device=cuda:0) = aten::reshape(%16, %7)
%18 : Float(1, 12, 60, 64, 64, strides=[2949120, 245760, 4096, 64, 1], requires_grad=0, device=cuda:0) = aten::reshape(%17, %8)
%19 : Float(1, 12, 60, 64, 64, strides=[2949120, 245760, 4096, 64, 1], requires_grad=0, device=cuda:0) = aten::add(%18, %9, %4)
%20 : Float(720, 64, 64, strides=[4096, 64, 1], requires_grad=0, device=cuda:0) = aten::reshape(%19, %7)
%21 : Float(1, 12, 60, 64, 64, strides=[2949120, 245760, 4096, 64, 1], requires_grad=0, device=cuda:0) = aten::reshape(%20, %8)
return (%21)
"""), ("autogen-30", """graph(%0 : Float(256, strides=[1], requires_grad=0, device=cuda:0),
%1 : Float(256, strides=[1], requires_grad=0, device=cuda:0),
%2 : Float(1, 4096, 256, strides=[1048576, 256, 1], requires_grad=0, device=cuda:0),
%3 : Float(4096, 256, strides=[256, 1], requires_grad=0, device=cuda:0),
%4 : int):
%5 : int[] = prim::Constant[value=[4096, 256]]()
%6 : float = prim::Constant[value=9.9999999999999998e-13]()
%7 : int[] = prim::Constant[value=[256]]()
%8 : int[] = prim::Constant[value=[1, 4096, 256]]()
%9 : Float(1, 4096, 256, strides=[1048576, 256, 1], requires_grad=0, device=cuda:0) = aten::reshape(%3, %8)
%10 : Float(1, 4096, 256, strides=[1048576, 256, 1], requires_grad=0, device=cuda:0) = aten::add(%2, %9, %4)
%11 : Float(1, 4096, 256, strides=[1048576, 256, 1], requires_grad=0, device=cuda:0), %12 : Tensor, %13 : Tensor = aten::native_layer_norm(%10, %7, %0, %1, %6)
%14 : Float(4096, 256, strides=[256, 1], requires_grad=0, device=cuda:0) = aten::reshape(%11, %5)
return (%14)
"""), ("autogen-31", """graph(%0 : Float(1, 64, 64, 256, strides=[1048576, 16384, 256, 1], requires_grad=0, device=cuda:0)):
%1 : int[] = prim::Constant[value=[4096, 256]]()
%2 : int[] = prim::Constant[value=[1, 4096, 256]]()
%3 : Float(1, 4096, 256, strides=[1048576, 256, 1], requires_grad=0, device=cuda:0) = aten::reshape(%0, %2)
%4 : Float(1, 4096, 256, strides=[1048576, 256, 1], requires_grad=0, device=cuda:0) = aten::reshape(%3, %2)
%5 : Float(4096, 256, strides=[256, 1], requires_grad=0, device=cuda:0) = aten::reshape(%4, %1)
return (%5)
"""), ("autogen-32", """graph(%0 : Float(1, 12, 64, 64, 64, strides=[3145728, 262144, 4096, 64, 1], requires_grad=0, device=cuda:0),
%1 : Float(1, 4096, strides=[4096, 1], requires_grad=0, device=cuda:0)):
%2 : int[] = prim::Constant[value=[1, 12, 4096, 64]]()
%3 : int[] = prim::Constant[value=[1, 1, 4096, 1]]()
%4 : Float(1, 1, 4096, 1, strides=[4096, 4096, 1, 1], requires_grad=0, device=cuda:0) = aten::reshape(%1, %3)
%5 : Float(1, 12, 4096, 64, strides=[3145728, 262144, 64, 1], requires_grad=0, device=cuda:0) = aten::reshape(%0, %2)
%6 : Float(1, 12, 4096, 64, strides=[3145728, 262144, 64, 1], requires_grad=0, device=cuda:0) = aten::mul(%5, %4)
return (%6, %4)
"""), ("autogen-33", """graph(%0 : Double(requires_grad=0, device=cuda:0),
%1 : Float(12, 64, 4096, strides=[262144, 4096, 1], requires_grad=0, device=cuda:0),
%2 : Double(requires_grad=0, device=cuda:0),
%3 : Double(requires_grad=0, device=cuda:0),
%4 : Float(1, 1, 1, 4096, strides=[4096, 4096, 4096, 1], requires_grad=0, device=cuda:0),
%5 : int,
%6 : int):
%7 : int[] = prim::Constant[value=[12, 64, 4096]]()
%8 : bool = prim::Constant[value=0]()
%9 : int = prim::Constant[value=-1]()
%10 : int[] = prim::Constant[value=[1, 12, 64, 4096]]()
%11 : Float(1, 1, 1, 4096, strides=[4096, 4096, 4096, 1], requires_grad=0, device=cuda:0) = aten::sub(%3, %4, %6)
%12 : Float(1, 1, 1, 4096, strides=[4096, 4096, 4096, 1], requires_grad=0, device=cuda:0) = aten::mul(%11, %2)
%13 : Float(1, 12, 64, 4096, strides=[3145728, 262144, 4096, 1], requires_grad=0, device=cuda:0) = aten::reshape(%1, %10)
%14 : Float(1, 12, 64, 4096, strides=[3145728, 262144, 4096, 1], requires_grad=0, device=cuda:0) = aten::mul(%13, %0)
%15 : Float(1, 12, 64, 4096, strides=[3145728, 262144, 4096, 1], requires_grad=0, device=cuda:0) = aten::add(%14, %12, %5)
%16 : Float(1, 12, 64, 4096, strides=[3145728, 262144, 4096, 1], requires_grad=0, device=cuda:0) = aten::_softmax(%15, %9, %8)
%17 : Float(12, 64, 4096, strides=[262144, 4096, 1], requires_grad=0, device=cuda:0) = aten::reshape(%16, %7)
return (%17)
"""), ("autogen-34", """graph(%0 : Float(384, strides=[1], requires_grad=0, device=cuda:0),
%1 : Float(384, strides=[1], requires_grad=0, device=cuda:0),
%2 : Float(8, 197, 384, strides=[75648, 384, 1], requires_grad=0, device=cuda:0),
%3 : Float(384, strides=[1], requires_grad=0, device=cuda:0),
%4 : Float(1576, 384, strides=[384, 1], requires_grad=0, device=cuda:0),
%5 : int,
%6 : int):
%7 : float = prim::Constant[value=9.9999999999999995e-07]()
%8 : int[] = prim::Constant[value=[384]]()
%9 : int[] = prim::Constant[value=[1576, 384]]()
%10 : int[] = prim::Constant[value=[8, 197, 384]]()
%11 : Float(8, 197, 384, strides=[75648, 384, 1], requires_grad=0, device=cuda:0) = aten::reshape(%4, %10)
%12 : Float(8, 197, 384, strides=[75648, 384, 1], requires_grad=0, device=cuda:0) = aten::add(%11, %3, %6)
%13 : Float(1576, 384, strides=[384, 1], requires_grad=0, device=cuda:0) = aten::reshape(%12, %9)
%14 : Float(8, 197, 384, strides=[75648, 384, 1], requires_grad=0, device=cuda:0) = aten::reshape(%13, %10)
%15 : Float(8, 197, 384, strides=[75648, 384, 1], requires_grad=0, device=cuda:0) = aten::add(%2, %14, %5)
%16 : Float(8, 197, 384, strides=[75648, 384, 1], requires_grad=0, device=cuda:0), %17 : Tensor, %18 : Tensor = aten::native_layer_norm(%15, %8, %0, %1, %7)
%19 : Float(1576, 384, strides=[384, 1], requires_grad=0, device=cuda:0) = aten::reshape(%16, %9)
return (%19, %15)
"""), ("autogen-35", """graph(%0 : Double(requires_grad=0, device=cuda:0),
%1 : Float(512, strides=[1], requires_grad=0, device=cuda:0),
%2 : Double(requires_grad=0, device=cuda:0),
%3 : Float(1, 2048, 512, strides=[1048576, 512, 1], requires_grad=0, device=cuda:0),
%4 : Float(2048, 512, strides=[512, 1], requires_grad=0, device=cuda:0),
%5 : int,
%6 : int,
%7 : int):
%8 : int[] = prim::Constant[value=[2048, 512]]()
%9 : NoneType = prim::Constant()
%10 : bool = prim::Constant[value=1]()
%11 : int[] = prim::Constant[value=[-1]]()
%12 : int[] = prim::Constant[value=[1, 2048, 512]]()
%13 : Float(1, 2048, 512, strides=[1048576, 512, 1], requires_grad=0, device=cuda:0) = aten::reshape(%4, %12)
%14 : Float(1, 2048, 512, strides=[1048576, 512, 1], requires_grad=0, device=cuda:0) = aten::add(%3, %13, %7)
%15 : Float(1, 2048, 512, strides=[1048576, 512, 1], requires_grad=0, device=cuda:0) = aten::pow(%14, %6)
%16 : Float(1, 2048, 1, strides=[2048, 1, 1], requires_grad=0, device=cuda:0) = aten::mean(%15, %11, %10, %9)
%17 : Float(1, 2048, 1, strides=[2048, 1, 1], requires_grad=0, device=cuda:0) = aten::add(%16, %2, %5)
%18 : Float(1, 2048, 1, strides=[2048, 1, 1], requires_grad=0, device=cuda:0) = aten::rsqrt(%17)
%19 : Float(1, 2048, 512, strides=[1048576, 512, 1], requires_grad=0, device=cuda:0) = aten::mul(%14, %18)
%20 : Float(1, 2048, 512, strides=[1048576, 512, 1], requires_grad=0, device=cuda:0) = aten::mul(%1, %19)
%21 : Float(1, 2048, 512, strides=[1048576, 512, 1], requires_grad=0, device=cuda:0) = aten::mul(%20, %0)
%22 : Float(2048, 512, strides=[512, 1], requires_grad=0, device=cuda:0) = aten::reshape(%21, %8)
return (%22)
"""), ("autogen-36", """graph(%0 : Float(32, 512, 28, 28, strides=[401408, 784, 28, 1], requires_grad=0, device=cuda:0),
%1 : Float(512, strides=[1], requires_grad=0, device=cuda:0),
%2 : Float(512, strides=[1], requires_grad=0, device=cuda:0),
%3 : Float(512, strides=[1], requires_grad=0, device=cuda:0),
%4 : Float(512, strides=[1], requires_grad=0, device=cuda:0)):
%5 : bool = prim::Constant[value=1]()
%6 : int[] = prim::Constant[value=[2, 3]]()
%7 : NoneType = prim::Constant()
%8 : int[] = prim::Constant[value=[1]]()
%9 : int[] = prim::Constant[value=[32, 2, 256, 28, 28]]()
%10 : float = prim::Constant[value=1.0000000000000001e-05]()
%11 : float = prim::Constant[value=0.10000000000000001]()
%12 : bool = prim::Constant[value=0]()
%13 : Float(32, 512, 28, 28, strides=[401408, 784, 28, 1], requires_grad=0, device=cuda:0), %14 : Tensor, %15 : Tensor = aten::native_batch_norm(%0, %1, %2, %3, %4, %12, %11, %10)
%16 : Float(32, 512, 28, 28, strides=[401408, 784, 28, 1], requires_grad=0, device=cuda:0) = aten::relu(%13)
%17 : Float(32, 2, 256, 28, 28, strides=[401408, 200704, 784, 28, 1], requires_grad=0, device=cuda:0) = aten::reshape(%16, %9)
%18 : Float(32, 256, 28, 28, strides=[200704, 784, 28, 1], requires_grad=0, device=cuda:0) = aten::sum(%17, %8, %12, %7)
%19 : Float(32, 256, 1, 1, strides=[256, 1, 1, 1], requires_grad=0, device=cuda:0) = aten::mean(%18, %6, %5, %7)
return (%19, %17)
"""), ("autogen-37", """graph(%0 : Double(requires_grad=0, device=cuda:0),
%1 : Float(720, 64, 192, strides=[12288, 192, 1], requires_grad=0, device=cuda:0),
%2 : Double(requires_grad=0, device=cuda:0),
%3 : Double(requires_grad=0, device=cuda:0),
%4 : Float(1, 1, 60, 64, 192, strides=[737280, 737280, 12288, 192, 1], requires_grad=0, device=cuda:0),
%5 : int,
%6 : int):
%7 : int[] = prim::Constant[value=[1, 12, 60, 64, 192]]()
%8 : Float(1, 1, 60, 64, 192, strides=[737280, 737280, 12288, 192, 1], requires_grad=0, device=cuda:0) = aten::sub(%3, %4, %6)
%9 : Float(1, 1, 60, 64, 192, strides=[737280, 737280, 12288, 192, 1], requires_grad=0, device=cuda:0) = aten::mul(%8, %2)
%10 : Float(1, 12, 60, 64, 192, strides=[8847360, 737280, 12288, 192, 1], requires_grad=0, device=cuda:0) = aten::reshape(%1, %7)
%11 : Float(1, 12, 60, 64, 192, strides=[8847360, 737280, 12288, 192, 1], requires_grad=0, device=cuda:0) = aten::mul(%10, %0)
%12 : Float(1, 12, 60, 64, 192, strides=[8847360, 737280, 12288, 192, 1], requires_grad=0, device=cuda:0) = aten::add(%11, %9, %5)
return (%12)
"""), ("autogen-38", """graph(%0 : Float(1, 4096, 256, strides=[2097152, 512, 1], requires_grad=0, device=cuda:0),
%1 : Float(256, strides=[1], requires_grad=0, device=cuda:0),
%2 : Float(256, strides=[1], requires_grad=0, device=cuda:0)):
%3 : int[] = prim::Constant[value=[4096, 256]]()
%4 : float = prim::Constant[value=9.9999999999999998e-13]()
%5 : int[] = prim::Constant[value=[256]]()
%6 : Float(1, 4096, 256, strides=[1048576, 256, 1], requires_grad=0, device=cuda:0), %7 : Tensor, %8 : Tensor = aten::native_layer_norm(%0, %5, %1, %2, %4)
%9 : Float(4096, 256, strides=[256, 1], requires_grad=0, device=cuda:0) = aten::reshape(%6, %3)
return (%9)
"""), ("autogen-39", """graph(%0 : Float(1, 12, 64, 64, 128, strides=[6291456, 524288, 8192, 128, 1], requires_grad=0, device=cuda:0),
%1 : Float(1, 12, 64, 64, 1, strides=[49152, 4096, 64, 1, 1], requires_grad=0, device=cuda:0),
%2 : int,
%3 : int,
%4 : int):
%5 : NoneType = prim::Constant()
%6 : bool = prim::Constant[value=1]()
%7 : int[] = prim::Constant[value=[-1]]()
%8 : Float(1, 12, 64, 64, 128, strides=[6291456, 524288, 8192, 128, 1], requires_grad=0, device=cuda:0) = aten::sub(%0, %1, %4)
%9 : Float(1, 12, 64, 64, 128, strides=[6291456, 524288, 8192, 128, 1], requires_grad=0, device=cuda:0) = aten::exp(%8)
%10 : Float(1, 12, 64, 64, 1, strides=[49152, 4096, 64, 1, 1], requires_grad=0, device=cuda:0) = aten::sum(%9, %7, %6, %5)
%11 : Float(1, 12, 64, 64, 1, strides=[49152, 4096, 64, 1, 1], requires_grad=0, device=cuda:0) = aten::log(%10)
%12 : Float(1, 12, 64, 64, 1, strides=[49152, 4096, 64, 1, 1], requires_grad=0, device=cuda:0) = aten::add(%11, %1, %3)
%13 : Float(1, 12, 64, 64, 128, strides=[6291456, 524288, 8192, 128, 1], requires_grad=0, device=cuda:0) = aten::sub(%0, %12, %2)
%14 : Float(1, 12, 64, 64, 128, strides=[6291456, 524288, 8192, 128, 1], requires_grad=0, device=cuda:0) = aten::exp(%13)
return (%14)
"""), ("autogen-40", """graph(%0 : Float(32, 80, 14, 14, strides=[15680, 196, 14, 1], requires_grad=0, device=cuda:0),
%1 : Float(80, strides=[1], requires_grad=0, device=cuda:0),
%2 : Float(80, strides=[1], requires_grad=0, device=cuda:0),
%3 : Float(80, strides=[1], requires_grad=0, device=cuda:0),
%4 : Float(80, strides=[1], requires_grad=0, device=cuda:0),
%5 : Float(32, 80, 14, 14, strides=[15680, 196, 14, 1], requires_grad=0, device=cuda:0),
%6 : Float(80, strides=[1], requires_grad=0, device=cuda:0),
%7 : Float(80, strides=[1], requires_grad=0, device=cuda:0),
%8 : Float(80, strides=[1], requires_grad=0, device=cuda:0),
%9 : Float(80, strides=[1], requires_grad=0, device=cuda:0),
%10 : Float(32, 80, 14, 14, strides=[15680, 196, 14, 1], requires_grad=0, device=cuda:0),
%11 : Float(80, strides=[1], requires_grad=0, device=cuda:0),
%12 : Float(80, strides=[1], requires_grad=0, device=cuda:0),
%13 : Float(80, strides=[1], requires_grad=0, device=cuda:0),
%14 : Float(80, strides=[1], requires_grad=0, device=cuda:0),
%15 : int,
%16 : int):
%17 : float = prim::Constant[value=0.001]()
%18 : float = prim::Constant[value=0.01]()
%19 : bool = prim::Constant[value=0]()
%20 : Float(32, 80, 14, 14, strides=[15680, 196, 14, 1], requires_grad=0, device=cuda:0), %21 : Tensor, %22 : Tensor = aten::native_batch_norm(%10, %11, %12, %13, %14, %19, %18, %17)
%23 : Float(32, 80, 14, 14, strides=[15680, 196, 14, 1], requires_grad=0, device=cuda:0), %24 : Tensor, %25 : Tensor = aten::native_batch_norm(%5, %6, %7, %8, %9, %19, %18, %17)
%26 : Float(32, 80, 14, 14, strides=[15680, 196, 14, 1], requires_grad=0, device=cuda:0) = aten::add(%23, %20, %16)
%27 : Float(32, 80, 14, 14, strides=[15680, 196, 14, 1], requires_grad=0, device=cuda:0), %28 : Tensor, %29 : Tensor = aten::native_batch_norm(%0, %1, %2, %3, %4, %19, %18, %17)
%30 : Float(32, 80, 14, 14, strides=[15680, 196, 14, 1], requires_grad=0, device=cuda:0) = aten::add(%27, %26, %15)
return (%30)
"""), ("autogen-41", """graph(%0 : Float(12, 64, 64, strides=[4096, 64, 1], requires_grad=0, device=cuda:0)):
%1 : int[] = prim::Constant[value=[1, 12, 1, 64, 64]]()
%2 : int[] = prim::Constant[value=[12, 64, 64]]()
%3 : int = prim::Constant[value=2]()
%4 : int[] = prim::Constant[value=[1, 12, 64, 64]]()
%5 : Float(1, 12, 64, 64, strides=[49152, 4096, 64, 1], requires_grad=0, device=cuda:0) = aten::reshape(%0, %4)
%6 : Float(1, 12, 1, 64, 64, strides=[49152, 4096, 4096, 64, 1], requires_grad=0, device=cuda:0) = aten::unsqueeze(%5, %3)
%7 : Float(1, 12, 64, 64, strides=[49152, 4096, 64, 1], requires_grad=0, device=cuda:0) = aten::reshape(%6, %4)
%8 : Float(12, 64, 64, strides=[4096, 64, 1], requires_grad=0, device=cuda:0) = aten::reshape(%7, %2)
%9 : Float(1, 12, 64, 64, strides=[49152, 4096, 64, 1], requires_grad=0, device=cuda:0) = aten::reshape(%8, %4)
%10 : Float(1, 12, 1, 64, 64, strides=[49152, 4096, 4096, 64, 1], requires_grad=0, device=cuda:0) = aten::reshape(%9, %1)
return (%10)
"""), ("autogen-42", """graph(%0 : Double(requires_grad=0, device=cuda:0),
%1 : Double(requires_grad=0, device=cuda:0),
%2 : Double(requires_grad=0, device=cuda:0),
%3 : Double(requires_grad=0, device=cuda:0),
%4 : Float(3072, strides=[1], requires_grad=0, device=cuda:0),
%5 : Float(512, 3072, strides=[3072, 1], requires_grad=0, device=cuda:0),
%6 : int,
%7 : int,
%8 : float,
%9 : int):
%10 : int[] = prim::Constant[value=[512, 3072]]()
%11 : int[] = prim::Constant[value=[1, 512, 3072]]()
%12 : Float(1, 512, 3072, strides=[1572864, 3072, 1], requires_grad=0, device=cuda:0) = aten::reshape(%5, %11)
%13 : Float(1, 512, 3072, strides=[1572864, 3072, 1], requires_grad=0, device=cuda:0) = aten::add(%12, %4, %9)
%14 : Float(512, 3072, strides=[3072, 1], requires_grad=0, device=cuda:0) = aten::reshape(%13, %10)
%15 : Float(1, 512, 3072, strides=[1572864, 3072, 1], requires_grad=0, device=cuda:0) = aten::reshape(%14, %11)
%16 : Float(1, 512, 3072, strides=[1572864, 3072, 1], requires_grad=0, device=cuda:0) = aten::pow(%15, %8)
%17 : Float(1, 512, 3072, strides=[1572864, 3072, 1], requires_grad=0, device=cuda:0) = aten::mul(%16, %3)
%18 : Float(1, 512, 3072, strides=[1572864, 3072, 1], requires_grad=0, device=cuda:0) = aten::add(%15, %17, %7)
%19 : Float(1, 512, 3072, strides=[1572864, 3072, 1], requires_grad=0, device=cuda:0) = aten::mul(%18, %2)
%20 : Float(1, 512, 3072, strides=[1572864, 3072, 1], requires_grad=0, device=cuda:0) = aten::tanh(%19)
%21 : Float(1, 512, 3072, strides=[1572864, 3072, 1], requires_grad=0, device=cuda:0) = aten::add(%20, %1, %6)
%22 : Float(1, 512, 3072, strides=[1572864, 3072, 1], requires_grad=0, device=cuda:0) = aten::mul(%15, %0)
%23 : Float(1, 512, 3072, strides=[1572864, 3072, 1], requires_grad=0, device=cuda:0) = aten::mul(%22, %21)
%24 : Float(512, 3072, strides=[3072, 1], requires_grad=0, device=cuda:0) = aten::reshape(%23, %10)
return (%24)
"""), ("autogen-43", """graph(%0 : Float(32, 80, 14, 14, strides=[15680, 196, 14, 1], requires_grad=0, device=cuda:0),
%1 : Float(80, strides=[1], requires_grad=0, device=cuda:0),
%2 : Float(80, strides=[1], requires_grad=0, device=cuda:0),
%3 : Float(80, strides=[1], requires_grad=0, device=cuda:0),
%4 : Float(80, strides=[1], requires_grad=0, device=cuda:0),
%5 : Float(32, 80, 14, 14, strides=[15680, 196, 14, 1], requires_grad=0, device=cuda:0),
%6 : Float(80, strides=[1], requires_grad=0, device=cuda:0),
%7 : Float(80, strides=[1], requires_grad=0, device=cuda:0),
%8 : Float(80, strides=[1], requires_grad=0, device=cuda:0),
%9 : Float(80, strides=[1], requires_grad=0, device=cuda:0),
%10 : Float(32, 80, 14, 14, strides=[15680, 196, 14, 1], requires_grad=0, device=cuda:0),
%11 : Float(80, strides=[1], requires_grad=0, device=cuda:0),
%12 : Float(80, strides=[1], requires_grad=0, device=cuda:0),
%13 : Float(80, strides=[1], requires_grad=0, device=cuda:0),
%14 : Float(80, strides=[1], requires_grad=0, device=cuda:0),
%15 : Float(32, 80, 14, 14, strides=[15680, 196, 14, 1], requires_grad=0, device=cuda:0),
%16 : Float(80, strides=[1], requires_grad=0, device=cuda:0),
%17 : Float(80, strides=[1], requires_grad=0, device=cuda:0),
%18 : Float(80, strides=[1], requires_grad=0, device=cuda:0),
%19 : Float(80, strides=[1], requires_grad=0, device=cuda:0),
%20 : int,
%21 : int,
%22 : int):
%23 : float = prim::Constant[value=0.001]()
%24 : float = prim::Constant[value=0.01]()
%25 : bool = prim::Constant[value=0]()
%26 : Float(32, 80, 14, 14, strides=[15680, 196, 14, 1], requires_grad=0, device=cuda:0), %27 : Tensor, %28 : Tensor = aten::native_batch_norm(%15, %16, %17, %18, %19, %25, %24, %23)
%29 : Float(32, 80, 14, 14, strides=[15680, 196, 14, 1], requires_grad=0, device=cuda:0), %30 : Tensor, %31 : Tensor = aten::native_batch_norm(%10, %11, %12, %13, %14, %25, %24, %23)
%32 : Float(32, 80, 14, 14, strides=[15680, 196, 14, 1], requires_grad=0, device=cuda:0) = aten::add(%29, %26, %22)
%33 : Float(32, 80, 14, 14, strides=[15680, 196, 14, 1], requires_grad=0, device=cuda:0), %34 : Tensor, %35 : Tensor = aten::native_batch_norm(%5, %6, %7, %8, %9, %25, %24, %23)
%36 : Float(32, 80, 14, 14, strides=[15680, 196, 14, 1], requires_grad=0, device=cuda:0) = aten::add(%33, %32, %21)
%37 : Float(32, 80, 14, 14, strides=[15680, 196, 14, 1], requires_grad=0, device=cuda:0), %38 : Tensor, %39 : Tensor = aten::native_batch_norm(%0, %1, %2, %3, %4, %25, %24, %23)
%40 : Float(32, 80, 14, 14, strides=[15680, 196, 14, 1], requires_grad=0, device=cuda:0) = aten::add(%37, %36, %20)
return (%40)
"""), ("autogen-44", """graph(%0 : Float(128, 1024, 7, 7, strides=[50176, 49, 7, 1], requires_grad=0, device=cuda:0),
%1 : Float(1024, strides=[1], requires_grad=0, device=cuda:0),
%2 : Float(1024, strides=[1], requires_grad=0, device=cuda:0),
%3 : Float(1024, strides=[1], requires_grad=0, device=cuda:0),
%4 : Float(1024, strides=[1], requires_grad=0, device=cuda:0)):
%5 : NoneType = prim::Constant()
%6 : int[] = prim::Constant[value=[2, 3]]()
%7 : float = prim::Constant[value=1.0000000000000001e-05]()
%8 : float = prim::Constant[value=0.10000000000000001]()
%9 : bool = prim::Constant[value=0]()
%10 : Float(128, 1024, 7, 7, strides=[50176, 49, 7, 1], requires_grad=0, device=cuda:0), %11 : Tensor, %12 : Tensor = aten::native_batch_norm(%0, %1, %2, %3, %4, %9, %8, %7)
%13 : Float(128, 1024, 7, 7, strides=[50176, 49, 7, 1], requires_grad=0, device=cuda:0) = aten::relu(%10)
%14 : Float(128, 1024, strides=[1024, 1], requires_grad=0, device=cuda:0) = aten::mean(%13, %6, %9, %5)
return (%14)
"""), ("autogen-45", """graph(%0 : Float(768, strides=[1], requires_grad=0, device=cuda:0),
%1 : Float(768, strides=[1], requires_grad=0, device=cuda:0),
%2 : Double(requires_grad=0, device=cuda:0),
%3 : Double(requires_grad=0, device=cuda:0),
%4 : Double(requires_grad=0, device=cuda:0),
%5 : Double(requires_grad=0, device=cuda:0),
%6 : Float(768, strides=[1], requires_grad=0, device=cuda:0),
%7 : Float(4096, 768, strides=[768, 1], requires_grad=0, device=cuda:0),
%8 : int,
%9 : int,
%10 : int):
%11 : float = prim::Constant[value=9.9999999999999998e-13]()
%12 : int[] = prim::Constant[value=[768]]()
%13 : int[] = prim::Constant[value=[4096, 768]]()
%14 : int[] = prim::Constant[value=[1, 4096, 768]]()
%15 : Float(1, 4096, 768, strides=[3145728, 768, 1], requires_grad=0, device=cuda:0) = aten::reshape(%7, %14)
%16 : Float(1, 4096, 768, strides=[3145728, 768, 1], requires_grad=0, device=cuda:0) = aten::add(%15, %6, %10)
%17 : Float(4096, 768, strides=[768, 1], requires_grad=0, device=cuda:0) = aten::reshape(%16, %13)
%18 : Float(1, 4096, 768, strides=[3145728, 768, 1], requires_grad=0, device=cuda:0) = aten::reshape(%17, %14)
%19 : Float(1, 4096, 768, strides=[3145728, 768, 1], requires_grad=0, device=cuda:0) = aten::mul(%18, %5)
%20 : Float(1, 4096, 768, strides=[3145728, 768, 1], requires_grad=0, device=cuda:0) = aten::mul(%19, %18)
%21 : Float(1, 4096, 768, strides=[3145728, 768, 1], requires_grad=0, device=cuda:0) = aten::add(%20, %3, %9)
%22 : Float(1, 4096, 768, strides=[3145728, 768, 1], requires_grad=0, device=cuda:0) = aten::mul(%18, %4)
%23 : Float(1, 4096, 768, strides=[3145728, 768, 1], requires_grad=0, device=cuda:0) = aten::mul(%22, %21)
%24 : Float(1, 4096, 768, strides=[3145728, 768, 1], requires_grad=0, device=cuda:0) = aten::tanh(%23)
%25 : Float(1, 4096, 768, strides=[3145728, 768, 1], requires_grad=0, device=cuda:0) = aten::add(%24, %3, %8)
%26 : Float(1, 4096, 768, strides=[3145728, 768, 1], requires_grad=0, device=cuda:0) = aten::mul(%18, %2)
%27 : Float(1, 4096, 768, strides=[3145728, 768, 1], requires_grad=0, device=cuda:0) = aten::mul(%26, %25)
%28 : Float(1, 4096, 768, strides=[3145728, 768, 1], requires_grad=0, device=cuda:0), %29 : Tensor, %30 : Tensor = aten::native_layer_norm(%27, %12, %0, %1, %11)
%31 : Float(4096, 768, strides=[768, 1], requires_grad=0, device=cuda:0) = aten::reshape(%28, %13)
return (%31)
"""), ("autogen-46", """graph(%0 : Double(requires_grad=0, device=cuda:0),
%1 : Double(requires_grad=0, device=cuda:0),
%2 : Double(requires_grad=0, device=cuda:0),
%3 : Double(requires_grad=0, device=cuda:0),
%4 : Float(3072, strides=[1], requires_grad=0, device=cuda:0),
%5 : Float(4096, 3072, strides=[3072, 1], requires_grad=0, device=cuda:0),
%6 : int,
%7 : int,
%8 : int):
%9 : int[] = prim::Constant[value=[4096, 3072]]()
%10 : int[] = prim::Constant[value=[1, 4096, 3072]]()
%11 : Float(1, 4096, 3072, strides=[12582912, 3072, 1], requires_grad=0, device=cuda:0) = aten::reshape(%5, %10)
%12 : Float(1, 4096, 3072, strides=[12582912, 3072, 1], requires_grad=0, device=cuda:0) = aten::add(%11, %4, %8)
%13 : Float(4096, 3072, strides=[3072, 1], requires_grad=0, device=cuda:0) = aten::reshape(%12, %9)
%14 : Float(1, 4096, 3072, strides=[12582912, 3072, 1], requires_grad=0, device=cuda:0) = aten::reshape(%13, %10)
%15 : Float(1, 4096, 3072, strides=[12582912, 3072, 1], requires_grad=0, device=cuda:0) = aten::mul(%14, %3)
%16 : Float(1, 4096, 3072, strides=[12582912, 3072, 1], requires_grad=0, device=cuda:0) = aten::mul(%15, %14)
%17 : Float(1, 4096, 3072, strides=[12582912, 3072, 1], requires_grad=0, device=cuda:0) = aten::add(%16, %1, %7)
%18 : Float(1, 4096, 3072, strides=[12582912, 3072, 1], requires_grad=0, device=cuda:0) = aten::mul(%14, %2)
%19 : Float(1, 4096, 3072, strides=[12582912, 3072, 1], requires_grad=0, device=cuda:0) = aten::mul(%18, %17)
%20 : Float(1, 4096, 3072, strides=[12582912, 3072, 1], requires_grad=0, device=cuda:0) = aten::tanh(%19)
%21 : Float(1, 4096, 3072, strides=[12582912, 3072, 1], requires_grad=0, device=cuda:0) = aten::add(%20, %1, %6)
%22 : Float(1, 4096, 3072, strides=[12582912, 3072, 1], requires_grad=0, device=cuda:0) = aten::mul(%14, %0)
%23 : Float(1, 4096, 3072, strides=[12582912, 3072, 1], requires_grad=0, device=cuda:0) = aten::mul(%22, %21)
%24 : Float(4096, 3072, strides=[3072, 1], requires_grad=0, device=cuda:0) = aten::reshape(%23, %9)
return (%24)
"""), ("autogen-47", """graph(%0 : Float(1, 12, 4096, 64, strides=[3145728, 64, 768, 1], requires_grad=0, device=cuda:0)):
%1 : int[] = prim::Constant[value=[768, 64, 64]]()
%2 : int[] = prim::Constant[value=[1, 12, 64, 64, 64]]()
%3 : Float(1, 12, 64, 64, 64, strides=[768, 64, 49152, 768, 1], requires_grad=0, device=cuda:0) = aten::reshape(%0, %2)
%4 : Float(768, 64, 64, strides=[4096, 64, 1], requires_grad=0, device=cuda:0) = aten::reshape(%3, %1)
return (%4, %3)
"""), ("autogen-48", """graph(%0 : Float(512, strides=[1], requires_grad=0, device=cuda:0),
%1 : Double(requires_grad=0, device=cuda:0),
%2 : Float(1, 2048, 512, strides=[1048576, 512, 1], requires_grad=0, device=cuda:0),
%3 : Float(2048, 512, strides=[512, 1], requires_grad=0, device=cuda:0),
%4 : int,
%5 : int,
%6 : int):
%7 : int[] = prim::Constant[value=[2048, 512]]()
%8 : NoneType = prim::Constant()
%9 : bool = prim::Constant[value=1]()
%10 : int[] = prim::Constant[value=[-1]]()
%11 : int[] = prim::Constant[value=[1, 2048, 512]]()
%12 : Float(1, 2048, 512, strides=[1048576, 512, 1], requires_grad=0, device=cuda:0) = aten::reshape(%3, %11)
%13 : Float(1, 2048, 512, strides=[1048576, 512, 1], requires_grad=0, device=cuda:0) = aten::add(%2, %12, %6)
%14 : Float(1, 2048, 512, strides=[1048576, 512, 1], requires_grad=0, device=cuda:0) = aten::pow(%13, %5)
%15 : Float(1, 2048, 1, strides=[2048, 1, 1], requires_grad=0, device=cuda:0) = aten::mean(%14, %10, %9, %8)
%16 : Float(1, 2048, 1, strides=[2048, 1, 1], requires_grad=0, device=cuda:0) = aten::add(%15, %1, %4)
%17 : Float(1, 2048, 1, strides=[2048, 1, 1], requires_grad=0, device=cuda:0) = aten::rsqrt(%16)
%18 : Float(1, 2048, 512, strides=[1048576, 512, 1], requires_grad=0, device=cuda:0) = aten::mul(%13, %17)
%19 : Float(1, 2048, 512, strides=[1048576, 512, 1], requires_grad=0, device=cuda:0) = aten::mul(%0, %18)
%20 : Float(2048, 512, strides=[512, 1], requires_grad=0, device=cuda:0) = aten::reshape(%19, %7)
return (%20, %13)
"""), ("autogen-49", """graph(%0 : Long(requires_grad=0, device=cuda:0),
%1 : Float(96, 3, 128, 128, strides=[49152, 16384, 128, 1], requires_grad=0, device=cuda:0),
%2 : Float(96, 3, 128, 128, strides=[49152, 16384, 128, 1], requires_grad=0, device=cuda:0),
%3 : int,
%4 : int):
%5 : NoneType = prim::Constant()
%6 : bool = prim::Constant[value=0]()
%7 : int[] = prim::Constant[value=[1]]()
%8 : Float(96, 3, 128, 128, strides=[49152, 16384, 128, 1], requires_grad=0, device=cuda:0) = aten::sub(%1, %2, %4)
%9 : Float(96, 3, 128, 128, strides=[49152, 16384, 128, 1], requires_grad=0, device=cuda:0) = aten::div(%8, %0)
%10 : Float(96, 3, 128, 128, strides=[49152, 16384, 128, 1], requires_grad=0, device=cuda:0) = aten::pow(%9, %3)
%11 : Float(96, 128, 128, strides=[16384, 128, 1], requires_grad=0, device=cuda:0) = aten::mean(%10, %7, %6, %5)
%12 : Float(96, 128, strides=[128, 1], requires_grad=0, device=cuda:0) = aten::mean(%11, %7, %6, %5)
%13 : Float(96, strides=[1], requires_grad=0, device=cuda:0) = aten::mean(%12, %7, %6, %5)
return (%13)
"""), ("autogen-50", """graph(%0 : Float(1, 12, 1, 4096, 64, 1, strides=[64, 262144, 64, 64, 1, 64], requires_grad=0, device=cuda:0)):
%1 : int[] = prim::Constant[value=[1, 12, 1, 4096, 64]]()
%2 : Float(1, 12, 1, 4096, 64, strides=[3145728, 262144, 262144, 64, 1], requires_grad=0, device=cuda:0) = aten::reshape(%0, %1)
%3 : Float(1, 12, 1, 4096, 64, strides=[3145728, 262144, 262144, 64, 1], requires_grad=0, device=cuda:0) = aten::neg(%2)
return (%3, %2)
"""), ("autogen-51", """graph(%0 : Double(requires_grad=0, device=cuda:0),
%1 : Float(12, 512, 512, strides=[262144, 512, 1], requires_grad=0, device=cuda:0),
%2 : Double(requires_grad=0, device=cuda:0),
%3 : Double(requires_grad=0, device=cuda:0),
%4 : Float(1, 512, strides=[512, 1], requires_grad=0, device=cuda:0),
%5 : int,
%6 : int):
%7 : bool = prim::Constant[value=0]()
%8 : int = prim::Constant[value=-1]()
%9 : int[] = prim::Constant[value=[1, 12, 512, 512]]()
%10 : int[] = prim::Constant[value=[1, 1, 1, 512]]()
%11 : int[] = prim::Constant[value=[1, 1, 512]]()
%12 : Float(1, 1, 512, strides=[512, 512, 1], requires_grad=0, device=cuda:0) = aten::reshape(%4, %11)
%13 : Float(1, 1, 1, 512, strides=[512, 512, 512, 1], requires_grad=0, device=cuda:0) = aten::reshape(%12, %10)
%14 : Float(1, 1, 1, 512, strides=[512, 512, 512, 1], requires_grad=0, device=cuda:0) = aten::sub(%3, %13, %6)
%15 : Float(1, 1, 1, 512, strides=[512, 512, 512, 1], requires_grad=0, device=cuda:0) = aten::mul(%14, %2)
%16 : Float(1, 12, 512, 512, strides=[3145728, 262144, 512, 1], requires_grad=0, device=cuda:0) = aten::reshape(%1, %9)
%17 : Float(1, 12, 512, 512, strides=[3145728, 262144, 512, 1], requires_grad=0, device=cuda:0) = aten::div(%16, %0)
%18 : Float(1, 12, 512, 512, strides=[3145728, 262144, 512, 1], requires_grad=0, device=cuda:0) = aten::add(%17, %15, %5)
%19 : Float(1, 12, 512, 512, strides=[3145728, 262144, 512, 1], requires_grad=0, device=cuda:0) = aten::_softmax(%18, %8, %7)
return (%19, %15)
"""), ("autogen-52", """graph(%0 : Float(96, 64, 14, 14, strides=[12544, 196, 14, 1], requires_grad=0, device=cuda:0),
%1 : Float(64, strides=[1], requires_grad=0, device=cuda:0),
%2 : Float(64, strides=[1], requires_grad=0, device=cuda:0),
%3 : Float(64, strides=[1], requires_grad=0, device=cuda:0),
%4 : Float(64, strides=[1], requires_grad=0, device=cuda:0),
%5 : Float(96, 64, 14, 14, strides=[12544, 196, 14, 1], requires_grad=0, device=cuda:0),
%6 : Float(64, strides=[1], requires_grad=0, device=cuda:0),
%7 : Float(64, strides=[1], requires_grad=0, device=cuda:0),
%8 : Float(64, strides=[1], requires_grad=0, device=cuda:0),
%9 : Float(64, strides=[1], requires_grad=0, device=cuda:0),
%10 : Float(96, 64, 14, 14, strides=[12544, 196, 14, 1], requires_grad=0, device=cuda:0),
%11 : Float(64, strides=[1], requires_grad=0, device=cuda:0),
%12 : Float(64, strides=[1], requires_grad=0, device=cuda:0),
%13 : Float(64, strides=[1], requires_grad=0, device=cuda:0),
%14 : Float(64, strides=[1], requires_grad=0, device=cuda:0),
%15 : Float(96, 64, 14, 14, strides=[12544, 196, 14, 1], requires_grad=0, device=cuda:0),
%16 : Float(64, strides=[1], requires_grad=0, device=cuda:0),
%17 : Float(64, strides=[1], requires_grad=0, device=cuda:0),
%18 : Float(64, strides=[1], requires_grad=0, device=cuda:0),
%19 : Float(64, strides=[1], requires_grad=0, device=cuda:0),
%20 : int,
%21 : int,
%22 : int):
%23 : float = prim::Constant[value=1.0000000000000001e-05]()
%24 : float = prim::Constant[value=0.10000000000000001]()
%25 : bool = prim::Constant[value=0]()
%26 : Float(96, 64, 14, 14, strides=[12544, 196, 14, 1], requires_grad=0, device=cuda:0), %27 : Tensor, %28 : Tensor = aten::native_batch_norm(%15, %16, %17, %18, %19, %25, %24, %23)
%29 : Float(96, 64, 14, 14, strides=[12544, 196, 14, 1], requires_grad=0, device=cuda:0), %30 : Tensor, %31 : Tensor = aten::native_batch_norm(%10, %11, %12, %13, %14, %25, %24, %23)
%32 : Float(96, 64, 14, 14, strides=[12544, 196, 14, 1], requires_grad=0, device=cuda:0), %33 : Tensor, %34 : Tensor = aten::native_batch_norm(%5, %6, %7, %8, %9, %25, %24, %23)
%35 : Float(96, 64, 14, 14, strides=[12544, 196, 14, 1], requires_grad=0, device=cuda:0), %36 : Tensor, %37 : Tensor = aten::native_batch_norm(%0, %1, %2, %3, %4, %25, %24, %23)
%38 : Float(96, 64, 14, 14, strides=[12544, 196, 14, 1], requires_grad=0, device=cuda:0) = aten::add(%35, %32, %22)
%39 : Float(96, 64, 14, 14, strides=[12544, 196, 14, 1], requires_grad=0, device=cuda:0) = aten::add(%38, %29, %21)
%40 : Float(96, 64, 14, 14, strides=[12544, 196, 14, 1], requires_grad=0, device=cuda:0) = aten::add(%39, %26, %20)
return (%40)
"""), ("autogen-53", """graph(%0 : Float(128, strides=[1], requires_grad=0, device=cuda:0),
%1 : Float(128, strides=[1], requires_grad=0, device=cuda:0),
%2 : Double(requires_grad=0, device=cuda:0),
%3 : Double(requires_grad=0, device=cuda:0),
%4 : Double(requires_grad=0, device=cuda:0),
%5 : Double(requires_grad=0, device=cuda:0),
%6 : Float(128, strides=[1], requires_grad=0, device=cuda:0),
%7 : Float(512, 128, strides=[128, 1], requires_grad=0, device=cuda:0),
%8 : int,
%9 : int,
%10 : float,
%11 : int):
%12 : float = prim::Constant[value=1.0000000000000001e-05]()
%13 : int[] = prim::Constant[value=[128]]()
%14 : int[] = prim::Constant[value=[512, 128]]()
%15 : int[] = prim::Constant[value=[1, 512, 128]]()
%16 : Float(1, 512, 128, strides=[65536, 128, 1], requires_grad=0, device=cuda:0) = aten::reshape(%7, %15)
%17 : Float(1, 512, 128, strides=[65536, 128, 1], requires_grad=0, device=cuda:0) = aten::add(%16, %6, %11)
%18 : Float(512, 128, strides=[128, 1], requires_grad=0, device=cuda:0) = aten::reshape(%17, %14)
%19 : Float(1, 512, 128, strides=[65536, 128, 1], requires_grad=0, device=cuda:0) = aten::reshape(%18, %15)
%20 : Float(1, 512, 128, strides=[65536, 128, 1], requires_grad=0, device=cuda:0) = aten::pow(%19, %10)
%21 : Float(1, 512, 128, strides=[65536, 128, 1], requires_grad=0, device=cuda:0) = aten::mul(%20, %5)
%22 : Float(1, 512, 128, strides=[65536, 128, 1], requires_grad=0, device=cuda:0) = aten::add(%19, %21, %9)
%23 : Float(1, 512, 128, strides=[65536, 128, 1], requires_grad=0, device=cuda:0) = aten::mul(%22, %4)
%24 : Float(1, 512, 128, strides=[65536, 128, 1], requires_grad=0, device=cuda:0) = aten::tanh(%23)
%25 : Float(1, 512, 128, strides=[65536, 128, 1], requires_grad=0, device=cuda:0) = aten::add(%24, %3, %8)
%26 : Float(1, 512, 128, strides=[65536, 128, 1], requires_grad=0, device=cuda:0) = aten::mul(%19, %2)
%27 : Float(1, 512, 128, strides=[65536, 128, 1], requires_grad=0, device=cuda:0) = aten::mul(%26, %25)
%28 : Float(1, 512, 128, strides=[65536, 128, 1], requires_grad=0, device=cuda:0), %29 : Tensor, %30 : Tensor = aten::native_layer_norm(%27, %13, %0, %1, %12)
%31 : Float(512, 128, strides=[128, 1], requires_grad=0, device=cuda:0) = aten::reshape(%28, %14)
return (%31)
"""), ("autogen-54", """graph(%0 : Float(32, 1000, 13, 13, strides=[169000, 169, 13, 1], requires_grad=0, device=cuda:0)):
%1 : NoneType = prim::Constant()
%2 : bool = prim::Constant[value=1]()
%3 : int[] = prim::Constant[value=[-1, -2]]()
%4 : Float(32, 1000, 13, 13, strides=[169000, 169, 13, 1], requires_grad=0, device=cuda:0) = aten::relu(%0)
%5 : Float(32, 1000, 1, 1, strides=[1000, 1, 1, 1], requires_grad=0, device=cuda:0) = aten::mean(%4, %3, %2, %1)
return (%5)
"""), ("autogen-55", """graph(%0 : Float(96, strides=[1], requires_grad=0, device=cuda:0),
%1 : Long(requires_grad=0, device=cuda:0),
%2 : Float(96, 3, 128, 128, strides=[49152, 16384, 128, 1], requires_grad=0, device=cuda:0),
%3 : Float(96, 3, 128, 128, strides=[49152, 16384, 128, 1], requires_grad=0, device=cuda:0),
%4 : Float(96, strides=[1], requires_grad=0, device=cuda:0),
%5 : Double(requires_grad=0, device=cuda:0),
%6 : int,
%7 : int,
%8 : int,
%9 : int):
%10 : NoneType = prim::Constant()
%11 : bool = prim::Constant[value=0]()
%12 : int[] = prim::Constant[value=[1]]()
%13 : Float(96, strides=[1], requires_grad=0, device=cuda:0) = aten::add(%4, %5, %9)
%14 : Float(96, 3, 128, 128, strides=[49152, 16384, 128, 1], requires_grad=0, device=cuda:0) = aten::sub(%2, %3, %8)
%15 : Float(96, 3, 128, 128, strides=[49152, 16384, 128, 1], requires_grad=0, device=cuda:0) = aten::div(%14, %1)
%16 : Float(96, 3, 128, 128, strides=[49152, 16384, 128, 1], requires_grad=0, device=cuda:0) = aten::pow(%15, %7)
%17 : Float(96, 128, 128, strides=[16384, 128, 1], requires_grad=0, device=cuda:0) = aten::mean(%16, %12, %11, %10)
%18 : Float(96, 128, strides=[128, 1], requires_grad=0, device=cuda:0) = aten::mean(%17, %12, %11, %10)
%19 : Float(96, strides=[1], requires_grad=0, device=cuda:0) = aten::mean(%18, %12, %11, %10)
%20 : Float(96, strides=[1], requires_grad=0, device=cuda:0) = aten::sub(%0, %19, %6)
%21 : Float(96, strides=[1], requires_grad=0, device=cuda:0) = aten::div(%20, %13)
return (%21)
"""), ("autogen-56", """graph(%0 : Float(384, strides=[1], requires_grad=0, device=cuda:0),
%1 : Float(384, strides=[1], requires_grad=0, device=cuda:0),
%2 : Float(8, 197, 384, strides=[75648, 384, 1], requires_grad=0, device=cuda:0),
%3 : Float(384, strides=[1], requires_grad=0, device=cuda:0),
%4 : Float(1576, 384, strides=[384, 1], requires_grad=0, device=cuda:0),
%5 : int,
%6 : int):
%7 : float = prim::Constant[value=9.9999999999999995e-07]()
%8 : int[] = prim::Constant[value=[384]]()
%9 : int[] = prim::Constant[value=[1576, 384]]()
%10 : int[] = prim::Constant[value=[8, 197, 384]]()
%11 : Float(8, 197, 384, strides=[75648, 384, 1], requires_grad=0, device=cuda:0) = aten::reshape(%4, %10)
%12 : Float(8, 197, 384, strides=[75648, 384, 1], requires_grad=0, device=cuda:0) = aten::add(%11, %3, %6)
%13 : Float(1576, 384, strides=[384, 1], requires_grad=0, device=cuda:0) = aten::reshape(%12, %9)
%14 : Float(8, 197, 384, strides=[75648, 384, 1], requires_grad=0, device=cuda:0) = aten::reshape(%13, %10)
%15 : Float(8, 197, 384, strides=[75648, 384, 1], requires_grad=0, device=cuda:0) = aten::add(%2, %14, %5)
%16 : Float(8, 197, 384, strides=[75648, 384, 1], requires_grad=0, device=cuda:0), %17 : Tensor, %18 : Tensor = aten::native_layer_norm(%15, %8, %0, %1, %7)
return (%16, %17, %18)
"""), ("autogen-57", """graph(%0 : Float(32, 960, 7, 7, strides=[47040, 49, 7, 1], requires_grad=0, device=cuda:0)):
%1 : int[] = prim::Constant[value=[32, 960]]()
%2 : NoneType = prim::Constant()
%3 : bool = prim::Constant[value=1]()
%4 : int[] = prim::Constant[value=[-1, -2]]()
%5 : Float(32, 960, 1, 1, strides=[960, 1, 1, 1], requires_grad=0, device=cuda:0) = aten::mean(%0, %4, %3, %2)
%6 : Float(32, 960, strides=[960, 1], requires_grad=0, device=cuda:0) = aten::reshape(%5, %1)
return (%6)
"""), ("autogen-59", """graph(%0 : Long(1, 12, 4096, strides=[49152, 4096, 1], requires_grad=0, device=cuda:0),
%1 : Long(requires_grad=0, device=cuda:0),
%2 : Long(1, 12, 1, 4096, strides=[49152, 4096, 4096, 1], requires_grad=0, device=cuda:0),
%3 : Long(1, 12, 1, 1, strides=[1, 0, 1, 1], requires_grad=0, device=cuda:0),
%4 : int,
%5 : int):
%6 : int[] = prim::Constant[value=[1, 12, 4096]]()
%7 : Long(1, 12, 1, 4096, strides=[49152, 4096, 4096, 1], requires_grad=0, device=cuda:0) = aten::add(%2, %3, %5)
%8 : Long(1, 12, 4096, strides=[49152, 4096, 1], requires_grad=0, device=cuda:0) = aten::reshape(%7, %6)
%9 : Long(1, 12, 4096, strides=[49152, 4096, 1], requires_grad=0, device=cuda:0) = aten::mul(%8, %1)
%10 : Long(1, 12, 4096, strides=[49152, 4096, 1], requires_grad=0, device=cuda:0) = aten::add(%9, %0, %4)
return (%10)
"""), ("autogen-60", """graph(%0 : Float(requires_grad=0, device=cuda:0),
%1 : Double(requires_grad=0, device=cuda:0),
%2 : Float(1, 12, 4096, 64, strides=[3145728, 262144, 64, 1], requires_grad=0, device=cuda:0),
%3 : int,
%4 : int):
%5 : NoneType = prim::Constant()
%6 : bool = prim::Constant[value=1]()
%7 : int[] = prim::Constant[value=[-1]]()
%8 : int[] = prim::Constant[value=[1, 12, 64, 64, 64]]()
%9 : Float(1, 12, 64, 64, 64, strides=[3145728, 262144, 4096, 64, 1], requires_grad=0, device=cuda:0) = aten::reshape(%2, %8)
%10 : Float(1, 12, 64, 64, 64, strides=[3145728, 262144, 4096, 64, 1], requires_grad=0, device=cuda:0) = aten::pow(%9, %4)
%11 : Float(1, 12, 64, 64, 1, strides=[49152, 4096, 64, 1, 1], requires_grad=0, device=cuda:0) = aten::mean(%10, %7, %6, %5)
%12 : Float(1, 12, 64, 64, 1, strides=[49152, 4096, 64, 1, 1], requires_grad=0, device=cuda:0) = aten::add(%11, %1, %3)
%13 : Float(1, 12, 64, 64, 1, strides=[49152, 4096, 64, 1, 1], requires_grad=0, device=cuda:0) = aten::rsqrt(%12)
%14 : Float(1, 12, 64, 64, 64, strides=[3145728, 262144, 4096, 64, 1], requires_grad=0, device=cuda:0) = aten::mul(%9, %13)
%15 : Float(1, 12, 64, 64, 64, strides=[3145728, 262144, 4096, 64, 1], requires_grad=0, device=cuda:0) = aten::mul(%14, %0)
return (%15, %9)
"""), ("autogen-61", """graph(%0 : Float(16, 128, 768, strides=[98304, 768, 1], requires_grad=0, device=cuda:0),
%1 : Float(768, strides=[1], requires_grad=0, device=cuda:0),
%2 : Float(2048, 768, strides=[768, 1], requires_grad=0, device=cuda:0),
%3 : int,
%4 : int):
%5 : int[] = prim::Constant[value=[2048, 768]]()
%6 : int[] = prim::Constant[value=[16, 128, 768]]()
%7 : Float(16, 128, 768, strides=[98304, 768, 1], requires_grad=0, device=cuda:0) = aten::reshape(%2, %6)
%8 : Float(16, 128, 768, strides=[98304, 768, 1], requires_grad=0, device=cuda:0) = aten::add(%7, %1, %4)
%9 : Float(2048, 768, strides=[768, 1], requires_grad=0, device=cuda:0) = aten::reshape(%8, %5)
%10 : Float(16, 128, 768, strides=[98304, 768, 1], requires_grad=0, device=cuda:0) = aten::reshape(%9, %6)
%11 : Float(16, 128, 768, strides=[98304, 768, 1], requires_grad=0, device=cuda:0) = aten::add(%0, %10, %3)
%12 : Float(2048, 768, strides=[768, 1], requires_grad=0, device=cuda:0) = aten::reshape(%11, %5)
return (%12, %11)
"""), ("autogen-62", """graph(%0 : Float(32, 2048, 7, 7, strides=[100352, 49, 7, 1], requires_grad=0, device=cuda:0),
%1 : Float(2048, strides=[1], requires_grad=0, device=cuda:0),
%2 : Float(2048, strides=[1], requires_grad=0, device=cuda:0),
%3 : Float(2048, strides=[1], requires_grad=0, device=cuda:0),
%4 : Float(2048, strides=[1], requires_grad=0, device=cuda:0),
%5 : Float(32, 2048, 7, 7, strides=[100352, 49, 7, 1], requires_grad=0, device=cuda:0),
%6 : Float(2048, strides=[1], requires_grad=0, device=cuda:0),
%7 : Float(2048, strides=[1], requires_grad=0, device=cuda:0),
%8 : Float(2048, strides=[1], requires_grad=0, device=cuda:0),
%9 : Float(2048, strides=[1], requires_grad=0, device=cuda:0),
%10 : int):
%11 : int[] = prim::Constant[value=[32, 2048]]()
%12 : NoneType = prim::Constant()
%13 : bool = prim::Constant[value=1]()
%14 : int[] = prim::Constant[value=[-1, -2]]()
%15 : float = prim::Constant[value=1.0000000000000001e-05]()
%16 : float = prim::Constant[value=0.10000000000000001]()
%17 : bool = prim::Constant[value=0]()
%18 : Float(32, 2048, 7, 7, strides=[100352, 49, 7, 1], requires_grad=0, device=cuda:0), %19 : Tensor, %20 : Tensor = aten::native_batch_norm(%5, %6, %7, %8, %9, %17, %16, %15)
%21 : Float(32, 2048, 7, 7, strides=[100352, 49, 7, 1], requires_grad=0, device=cuda:0), %22 : Tensor, %23 : Tensor = aten::native_batch_norm(%0, %1, %2, %3, %4, %17, %16, %15)
%24 : Float(32, 2048, 7, 7, strides=[100352, 49, 7, 1], requires_grad=0, device=cuda:0) = aten::add(%21, %18, %10)
%25 : Float(32, 2048, 7, 7, strides=[100352, 49, 7, 1], requires_grad=0, device=cuda:0) = aten::relu(%24)
%26 : Float(32, 2048, 1, 1, strides=[2048, 1, 1, 1], requires_grad=0, device=cuda:0) = aten::mean(%25, %14, %13, %12)
%27 : Float(32, 2048, strides=[2048, 1], requires_grad=0, device=cuda:0) = aten::reshape(%26, %11)
return (%27)
"""), ("autogen-63", """graph(%0 : Float(480, 1, 1, 3, strides=[13, 3, 3, 1], requires_grad=0, device=cuda:0),
%1 : Long(requires_grad=0, device=cuda:0),
%2 : Float(480, 1, 64, 2, 64, 2, strides=[16384, 16384, 64, 8192, 1, 4096], requires_grad=0, device=cuda:0),
%3 : int,
%4 : int):
%5 : int[] = prim::Constant[value=[480, 128, 128, 1]]()
%6 : int[] = prim::Constant[value=[480, 128, 128]]()
%7 : int[] = prim::Constant[value=[480, 1, 128, 128]]()
%8 : Float(480, 1, 128, 128, strides=[16384, 16384, 128, 1], requires_grad=0, device=cuda:0) = aten::reshape(%2, %7)
%9 : Float(480, 1, 128, 128, strides=[16384, 16384, 128, 1], requires_grad=0, device=cuda:0) = aten::sigmoid(%8)
%10 : Float(480, 128, 128, strides=[16384, 128, 1], requires_grad=0, device=cuda:0) = aten::reshape(%9, %6)
%11 : Float(480, 128, 128, strides=[16384, 128, 1], requires_grad=0, device=cuda:0) = aten::sub(%1, %10, %4)
%12 : Float(480, 128, 128, strides=[16384, 128, 1], requires_grad=0, device=cuda:0) = aten::sub(%1, %11, %3)
%13 : Float(480, 128, 128, 1, strides=[16384, 128, 1, 1], requires_grad=0, device=cuda:0) = aten::reshape(%12, %5)
%14 : Float(480, 128, 128, 3, strides=[49152, 384, 3, 1], requires_grad=0, device=cuda:0) = aten::mul(%13, %0)
return (%14, %13)
"""), ("autogen-64", """graph(%0 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0),
%1 : Double(requires_grad=0, device=cuda:0),
%2 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0),
%3 : Double(requires_grad=0, device=cuda:0),
%4 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0),
%5 : Double(requires_grad=0, device=cuda:0),
%6 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0),
%7 : Double(requires_grad=0, device=cuda:0),
%8 : int,
%9 : int,
%10 : int,
%11 : int):
%12 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%6, %7)
%13 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%4, %5)
%14 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%13, %3, %11)
%15 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::mul(%2, %14)
%16 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%0, %1, %10)
%17 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%16, %15, %9)
%18 : Double(204, 204, 26, strides=[5304, 26, 1], requires_grad=0, device=cuda:0) = aten::add(%17, %12, %8)
return (%18)
"""), ("autogen-65", """graph(%0 : Float(20005, strides=[1], requires_grad=0, device=cuda:0),
%1 : Float(2048, 20005, strides=[20005, 1], requires_grad=0, device=cuda:0),
%2 : int):
%3 : int[] = prim::Constant[value=[2048, 20005]]()
%4 : int[] = prim::Constant[value=[16, 128, 20005]]()
%5 : Float(16, 128, 20005, strides=[2560640, 20005, 1], requires_grad=0, device=cuda:0) = aten::reshape(%1, %4)
%6 : Float(16, 128, 20005, strides=[2560640, 20005, 1], requires_grad=0, device=cuda:0) = aten::add(%5, %0, %2)
%7 : Float(2048, 20005, strides=[20005, 1], requires_grad=0, device=cuda:0) = aten::reshape(%6, %3)
%8 : Float(16, 128, 20005, strides=[2560640, 20005, 1], requires_grad=0, device=cuda:0) = aten::reshape(%7, %4)
return (%8)
"""), ("autogen-66", """graph(%0 : Float(768, strides=[1], requires_grad=0, device=cuda:0),
%1 : Float(768, strides=[1], requires_grad=0, device=cuda:0),
%2 : Float(1, 1024, 768, strides=[786432, 768, 1], requires_grad=0, device=cuda:0),
%3 : Float(1024, 768, strides=[768, 1], requires_grad=0, device=cuda:0),
%4 : int):
%5 : int[] = prim::Constant[value=[1024, 768]]()
%6 : float = prim::Constant[value=1.0000000000000001e-05]()
%7 : int[] = prim::Constant[value=[768]]()
%8 : int[] = prim::Constant[value=[1, 1024, 768]]()
%9 : Float(1, 1024, 768, strides=[786432, 768, 1], requires_grad=0, device=cuda:0) = aten::reshape(%3, %8)
%10 : Float(1, 1024, 768, strides=[786432, 768, 1], requires_grad=0, device=cuda:0) = aten::add(%2, %9, %4)
%11 : Float(1, 1024, 768, strides=[786432, 768, 1], requires_grad=0, device=cuda:0), %12 : Tensor, %13 : Tensor = aten::native_layer_norm(%10, %7, %0, %1, %6)
%14 : Float(1, 1024, 768, strides=[786432, 768, 1], requires_grad=0, device=cuda:0) = aten::reshape(%11, %8)
%15 : Float(1024, 768, strides=[768, 1], requires_grad=0, device=cuda:0) = aten::reshape(%14, %5)
return (%15)
"""), ("autogen-67", """graph(%0 : Double(requires_grad=0, device=cuda:0),
%1 : Float(720, 64, 192, strides=[12288, 192, 1], requires_grad=0, device=cuda:0),
%2 : Double(requires_grad=0, device=cuda:0),
%3 : Double(requires_grad=0, device=cuda:0),
%4 : Float(1, 60, 64, 1, strides=[3840, 64, 1, 1], requires_grad=0, device=cuda:0),
%5 : Float(1, 60, 1, 192, strides=[11520, 192, 1, 1], requires_grad=0, device=cuda:0),
%6 : int,
%7 : int):
%8 : int[] = prim::Constant[value=[1, 12, 60, 64, 192]]()
%9 : int = prim::Constant[value=1]()
%10 : Float(1, 60, 64, 192, strides=[737280, 12288, 192, 1], requires_grad=0, device=cuda:0) = aten::mul(%4, %5)
%11 : Float(1, 1, 60, 64, 192, strides=[737280, 737280, 12288, 192, 1], requires_grad=0, device=cuda:0) = aten::unsqueeze(%10, %9)
%12 : Float(1, 1, 60, 64, 192, strides=[737280, 737280, 12288, 192, 1], requires_grad=0, device=cuda:0) = aten::sub(%3, %11, %7)
%13 : Float(1, 1, 60, 64, 192, strides=[737280, 737280, 12288, 192, 1], requires_grad=0, device=cuda:0) = aten::mul(%12, %2)
%14 : Float(1, 12, 60, 64, 192, strides=[8847360, 737280, 12288, 192, 1], requires_grad=0, device=cuda:0) = aten::reshape(%1, %8)
%15 : Float(1, 12, 60, 64, 192, strides=[8847360, 737280, 12288, 192, 1], requires_grad=0, device=cuda:0) = aten::mul(%14, %0)
%16 : Float(1, 12, 60, 64, 192, strides=[8847360, 737280, 12288, 192, 1], requires_grad=0, device=cuda:0) = aten::add(%15, %13, %6)
return (%16, %11)
"""), ("autogen-68", """graph(%0 : Float(768, strides=[1], requires_grad=0, device=cuda:0),
%1 : Float(768, strides=[1], requires_grad=0, device=cuda:0),
%2 : Float(1, 512, 768, strides=[393216, 768, 1], requires_grad=0, device=cuda:0),
%3 : Float(768, strides=[1], requires_grad=0, device=cuda:0),
%4 : Float(1, 512, 768, 1, 1, strides=[768, 768, 1, 768, 768], requires_grad=0, device=cuda:0),
%5 : int,
%6 : int):
%7 : int[] = prim::Constant[value=[512, 768]]()
%8 : float = prim::Constant[value=9.9999999999999998e-13]()
%9 : int[] = prim::Constant[value=[768]]()
%10 : int[] = prim::Constant[value=[1, 512, 768]]()
%11 : Float(1, 512, 768, strides=[393216, 768, 1], requires_grad=0, device=cuda:0) = aten::reshape(%4, %10)
%12 : Float(1, 512, 768, strides=[393216, 768, 1], requires_grad=0, device=cuda:0) = aten::add(%11, %3, %6)
%13 : Float(1, 512, 768, strides=[393216, 768, 1], requires_grad=0, device=cuda:0) = aten::add(%2, %12, %5)
%14 : Float(1, 512, 768, strides=[393216, 768, 1], requires_grad=0, device=cuda:0), %15 : Tensor, %16 : Tensor = aten::native_layer_norm(%13, %9, %0, %1, %8)
%17 : Float(512, 768, strides=[768, 1], requires_grad=0, device=cuda:0) = aten::reshape(%14, %7)
return (%17, %14)
"""), ("autogen-69", """graph(%0 : Double(requires_grad=0, device=cuda:0),
%1 : Float(12, 64, 4096, strides=[262144, 4096, 1], requires_grad=0, device=cuda:0),
%2 : Double(requires_grad=0, device=cuda:0),
%3 : Double(requires_grad=0, device=cuda:0),
%4 : Float(1, 4096, strides=[4096, 1], requires_grad=0, device=cuda:0),
%5 : int,
%6 : int):
%7 : int[] = prim::Constant[value=[12, 64, 4096]]()
%8 : bool = prim::Constant[value=0]()
%9 : int = prim::Constant[value=-1]()
%10 : int[] = prim::Constant[value=[1, 12, 64, 4096]]()
%11 : int[] = prim::Constant[value=[1, 1, 1, 4096]]()
%12 : Float(1, 1, 1, 4096, strides=[4096, 4096, 4096, 1], requires_grad=0, device=cuda:0) = aten::reshape(%4, %11)
%13 : Float(1, 1, 1, 4096, strides=[4096, 4096, 4096, 1], requires_grad=0, device=cuda:0) = aten::sub(%3, %12, %6)
%14 : Float(1, 1, 1, 4096, strides=[4096, 4096, 4096, 1], requires_grad=0, device=cuda:0) = aten::mul(%13, %2)
%15 : Float(1, 12, 64, 4096, strides=[3145728, 262144, 4096, 1], requires_grad=0, device=cuda:0) = aten::reshape(%1, %10)
%16 : Float(1, 12, 64, 4096, strides=[3145728, 262144, 4096, 1], requires_grad=0, device=cuda:0) = aten::mul(%15, %0)
%17 : Float(1, 12, 64, 4096, strides=[3145728, 262144, 4096, 1], requires_grad=0, device=cuda:0) = aten::add(%16, %14, %5)
%18 : Float(1, 12, 64, 4096, strides=[3145728, 262144, 4096, 1], requires_grad=0, device=cuda:0) = aten::_softmax(%17, %9, %8)
%19 : Float(12, 64, 4096, strides=[262144, 4096, 1], requires_grad=0, device=cuda:0) = aten::reshape(%18, %7)
return (%19, %12)
"""), ("autogen-70", """graph(%0 : Long(1, 12, 64, 64, strides=[49152, 4096, 64, 1], requires_grad=0, device=cuda:0),
%1 : Long(1, 12, 64, 128, strides=[98304, 8192, 128, 1], requires_grad=0, device=cuda:0)):
%2 : int[] = prim::Constant[value=[1, 12, 64, 64, 1]]()
%3 : int[] = prim::Constant[value=[1, 12, 64, 1, 128]]()
%4 : Long(1, 12, 64, 1, 128, strides=[98304, 8192, 128, 128, 1], requires_grad=0, device=cuda:0) = aten::reshape(%1, %3)
%5 : Long(1, 12, 64, 64, 1, strides=[49152, 4096, 64, 1, 1], requires_grad=0, device=cuda:0) = aten::reshape(%0, %2)
%6 : Bool(1, 12, 64, 64, 128, strides=[6291456, 524288, 8192, 128, 1], requires_grad=0, device=cuda:0) = aten::ne(%5, %4)
return (%6)
"""), ("autogen-71", """graph(%0 : Float(512, strides=[1], requires_grad=0, device=cuda:0),
%1 : Float(1, 2048, 512, strides=[1048576, 512, 1], requires_grad=0, device=cuda:0),
%2 : Double(requires_grad=0, device=cuda:0),
%3 : int,
%4 : int):
%5 : int[] = prim::Constant[value=[2048, 512]]()
%6 : NoneType = prim::Constant()
%7 : bool = prim::Constant[value=1]()
%8 : int[] = prim::Constant[value=[-1]]()
%9 : Float(1, 2048, 512, strides=[1048576, 512, 1], requires_grad=0, device=cuda:0) = aten::pow(%1, %4)
%10 : Float(1, 2048, 1, strides=[2048, 1, 1], requires_grad=0, device=cuda:0) = aten::mean(%9, %8, %7, %6)
%11 : Float(1, 2048, 1, strides=[2048, 1, 1], requires_grad=0, device=cuda:0) = aten::add(%10, %2, %3)
%12 : Float(1, 2048, 1, strides=[2048, 1, 1], requires_grad=0, device=cuda:0) = aten::rsqrt(%11)
%13 : Float(1, 2048, 512, strides=[1048576, 512, 1], requires_grad=0, device=cuda:0) = aten::mul(%1, %12)
%14 : Float(1, 2048, 512, strides=[1048576, 512, 1], requires_grad=0, device=cuda:0) = aten::mul(%0, %13)
%15 : Float(2048, 512, strides=[512, 1], requires_grad=0, device=cuda:0) = aten::reshape(%14, %5)
return (%15)
"""), ("autogen-72", """graph(%0 : Long(2232, strides=[1], requires_grad=0, device=cuda:0),
%1 : Long(2232, strides=[1], requires_grad=0, device=cuda:0),
%2 : Long(requires_grad=0, device=cuda:0),
%3 : Long(1, 12, 62, 3, strides=[2232, 186, 3, 1], requires_grad=0, device=cuda:0),
%4 : int,
%5 : int):
%6 : int[] = prim::Constant[value=[2232]]()
%7 : Long(2232, strides=[1], requires_grad=0, device=cuda:0) = aten::reshape(%3, %6)
%8 : Long(2232, strides=[1], requires_grad=0, device=cuda:0) = aten::mul(%1, %2)
%9 : Long(2232, strides=[1], requires_grad=0, device=cuda:0) = aten::add(%7, %8, %5)
%10 : Long(2232, strides=[1], requires_grad=0, device=cuda:0) = aten::add(%7, %0, %4)
return (%10, %9)
"""), ("autogen-73", """graph(%0 : Long(requires_grad=0, device=cuda:0),
%1 : Float(96, 3, 128, 128, strides=[49152, 16384, 128, 1], requires_grad=0, device=cuda:0),
%2 : Long(requires_grad=0, device=cuda:0),
%3 : Float(96, 1, 1, 128, 128, strides=[81920, 16384, 16384, 128, 1], requires_grad=0, device=cuda:0),
%4 : Float(96, 1, 3, 128, 128, strides=[245760, 49152, 1, 384, 3], requires_grad=0, device=cuda:0),
%5 : Float(96, 1, 1, 128, 128, strides=[81920, 16384, 16384, 128, 1], requires_grad=0, device=cuda:0),
%6 : Float(96, 1, 3, 128, 128, strides=[245760, 49152, 1, 384, 3], requires_grad=0, device=cuda:0),
%7 : Float(96, 1, 1, 128, 128, strides=[81920, 16384, 16384, 128, 1], requires_grad=0, device=cuda:0),
%8 : Float(96, 1, 3, 128, 128, strides=[245760, 49152, 1, 384, 3], requires_grad=0, device=cuda:0),
%9 : Float(96, 1, 1, 128, 128, strides=[81920, 16384, 16384, 128, 1], requires_grad=0, device=cuda:0),
%10 : Float(96, 1, 3, 128, 128, strides=[245760, 49152, 1, 384, 3], requires_grad=0, device=cuda:0),
%11 : Float(96, 1, 1, 128, 128, strides=[81920, 16384, 16384, 128, 1], requires_grad=0, device=cuda:0),
%12 : Float(96, 1, 3, 128, 128, strides=[245760, 49152, 1, 384, 3], requires_grad=0, device=cuda:0),
%13 : int,
%14 : int,
%15 : int,
%16 : int,
%17 : int,
%18 : int,
%19 : int,
%20 : int,
%21 : int,
%22 : int):
%23 : int[] = prim::Constant[value=[96, 1, 128, 128]]()
%24 : int[] = prim::Constant[value=[96, 3, 128, 128]]()
%25 : Float(96, 3, 128, 128, strides=[245760, 1, 384, 3], requires_grad=0, device=cuda:0) = aten::reshape(%12, %24)
%26 : Float(96, 1, 128, 128, strides=[81920, 16384, 128, 1], requires_grad=0, device=cuda:0) = aten::reshape(%11, %23)
%27 : Float(96, 1, 128, 128, strides=[16384, 16384, 128, 1], requires_grad=0, device=cuda:0) = aten::sub(%2, %26, %22)
%28 : Float(96, 3, 128, 128, strides=[245760, 1, 384, 3], requires_grad=0, device=cuda:0) = aten::reshape(%10, %24)
%29 : Float(96, 1, 128, 128, strides=[81920, 16384, 128, 1], requires_grad=0, device=cuda:0) = aten::reshape(%9, %23)
%30 : Float(96, 1, 128, 128, strides=[16384, 16384, 128, 1], requires_grad=0, device=cuda:0) = aten::sub(%2, %29, %21)
%31 : Float(96, 3, 128, 128, strides=[245760, 1, 384, 3], requires_grad=0, device=cuda:0) = aten::reshape(%8, %24)
%32 : Float(96, 1, 128, 128, strides=[81920, 16384, 128, 1], requires_grad=0, device=cuda:0) = aten::reshape(%7, %23)
%33 : Float(96, 1, 128, 128, strides=[16384, 16384, 128, 1], requires_grad=0, device=cuda:0) = aten::sub(%2, %32, %20)
%34 : Float(96, 3, 128, 128, strides=[245760, 1, 384, 3], requires_grad=0, device=cuda:0) = aten::reshape(%6, %24)
%35 : Float(96, 1, 128, 128, strides=[81920, 16384, 128, 1], requires_grad=0, device=cuda:0) = aten::reshape(%5, %23)
%36 : Float(96, 1, 128, 128, strides=[16384, 16384, 128, 1], requires_grad=0, device=cuda:0) = aten::sub(%2, %35, %19)
%37 : Float(96, 3, 128, 128, strides=[245760, 1, 384, 3], requires_grad=0, device=cuda:0) = aten::reshape(%4, %24)
%38 : Float(96, 1, 128, 128, strides=[81920, 16384, 128, 1], requires_grad=0, device=cuda:0) = aten::reshape(%3, %23)
%39 : Float(96, 1, 128, 128, strides=[16384, 16384, 128, 1], requires_grad=0, device=cuda:0) = aten::sub(%2, %38, %18)
%40 : Float(96, 3, 128, 128, strides=[49152, 16384, 128, 1], requires_grad=0, device=cuda:0) = aten::div(%1, %0)
%41 : Float(96, 3, 128, 128, strides=[49152, 16384, 128, 1], requires_grad=0, device=cuda:0) = aten::mul(%40, %39)
%42 : Float(96, 3, 128, 128, strides=[49152, 16384, 128, 1], requires_grad=0, device=cuda:0) = aten::add(%41, %37, %17)
%43 : Float(96, 3, 128, 128, strides=[49152, 16384, 128, 1], requires_grad=0, device=cuda:0) = aten::mul(%42, %36)
%44 : Float(96, 3, 128, 128, strides=[49152, 16384, 128, 1], requires_grad=0, device=cuda:0) = aten::add(%43, %34, %16)
%45 : Float(96, 3, 128, 128, strides=[49152, 16384, 128, 1], requires_grad=0, device=cuda:0) = aten::mul(%44, %33)
%46 : Float(96, 3, 128, 128, strides=[49152, 16384, 128, 1], requires_grad=0, device=cuda:0) = aten::add(%45, %31, %15)
%47 : Float(96, 3, 128, 128, strides=[49152, 16384, 128, 1], requires_grad=0, device=cuda:0) = aten::mul(%46, %30)
%48 : Float(96, 3, 128, 128, strides=[49152, 16384, 128, 1], requires_grad=0, device=cuda:0) = aten::add(%47, %28, %14)
%49 : Float(96, 3, 128, 128, strides=[49152, 16384, 128, 1], requires_grad=0, device=cuda:0) = aten::mul(%48, %27)
%50 : Float(96, 3, 128, 128, strides=[49152, 16384, 128, 1], requires_grad=0, device=cuda:0) = aten::add(%49, %25, %13)
%51 : Float(96, 3, 128, 128, strides=[49152, 16384, 128, 1], requires_grad=0, device=cuda:0) = aten::mul(%50, %0)
return (%51)
"""), ("autogen-74", """graph(%0 : Long(200, 200, strides=[204, 1], requires_grad=0, device=cuda:0),
%1 : Long(requires_grad=0, device=cuda:0),
%2 : int,
%3 : int):
%4 : Long(200, 200, strides=[200, 1], requires_grad=0, device=cuda:0) = aten::sub(%0, %1, %3)
%5 : Bool(200, 200, strides=[200, 1], requires_grad=0, device=cuda:0) = aten::ge(%4, %2)
return (%5, %4)
"""), ("autogen-75", """graph(%0 : Double(requires_grad=0, device=cuda:0),
%1 : Float(12, 512, 512, strides=[262144, 512, 1], requires_grad=0, device=cuda:0),
%2 : Double(requires_grad=0, device=cuda:0),
%3 : Double(requires_grad=0, device=cuda:0),
%4 : Float(1, 1, 1, 512, strides=[512, 512, 512, 1], requires_grad=0, device=cuda:0),
%5 : int,
%6 : int):
%7 : bool = prim::Constant[value=0]()
%8 : int = prim::Constant[value=-1]()
%9 : int[] = prim::Constant[value=[1, 12, 512, 512]]()
%10 : Float(1, 1, 1, 512, strides=[512, 512, 512, 1], requires_grad=0, device=cuda:0) = aten::sub(%3, %4, %6)
%11 : Float(1, 1, 1, 512, strides=[512, 512, 512, 1], requires_grad=0, device=cuda:0) = aten::mul(%10, %2)
%12 : Float(1, 12, 512, 512, strides=[3145728, 262144, 512, 1], requires_grad=0, device=cuda:0) = aten::reshape(%1, %9)
%13 : Float(1, 12, 512, 512, strides=[3145728, 262144, 512, 1], requires_grad=0, device=cuda:0) = aten::div(%12, %0)
%14 : Float(1, 12, 512, 512, strides=[3145728, 262144, 512, 1], requires_grad=0, device=cuda:0) = aten::add(%13, %11, %5)
%15 : Float(1, 12, 512, 512, strides=[3145728, 262144, 512, 1], requires_grad=0, device=cuda:0) = aten::_softmax(%14, %8, %7)
return (%15, %11)
"""), ("autogen-76", """graph(%0 : Float(2048, 2048, strides=[2048, 1], requires_grad=0, device=cuda:0)):
%1 : int[] = prim::Constant[value=[2048, 2048]]()
%2 : int[] = prim::Constant[value=[1, 2048, 2048]]()
%3 : Float(1, 2048, 2048, strides=[4194304, 2048, 1], requires_grad=0, device=cuda:0) = aten::reshape(%0, %2)
%4 : Float(1, 2048, 2048, strides=[4194304, 2048, 1], requires_grad=0, device=cuda:0) = aten::relu(%3)
%5 : Float(2048, 2048, strides=[2048, 1], requires_grad=0, device=cuda:0) = aten::reshape(%4, %1)
return (%5)
""")]
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
# @licenselint-loose-mode
import argparse
import os
import random
import re
import subprocess
import sys
import textwrap
from datetime import date
from typing import List, Optional
import setuptools_git_versioning as gitversion
import torch
from setuptools.command.install import install as PipInstall
from skbuild import setup
from tabulate import tabulate
def generate_package_version(package_name: str, version_variant: str):
print("[SETUP.PY] Generating the package version ...")
if "nightly" in package_name:
# Use date stamp for nightly versions
print("[SETUP.PY] Package is for NIGHTLY; using timestamp for the versioning")
today = date.today()
version = f"{today.year}.{today.month}.{today.day}"
elif "test" in package_name:
# Use a random patch number for test versions
print("[SETUP.PY] Package is for TEST: using random number for the versioning")
version = f"0.0.{random.randint(0, 1000)}"
else:
# Use git tag / branch / commit info to generate a PEP-440-compliant version string
print("[SETUP.PY] Package is for RELEASE: using git info for the versioning")
print(
f"[SETUP.PY] TAG: {gitversion.get_tag()}, BRANCH: {gitversion.get_branch()}, SHA: {gitversion.get_sha()}"
)
# Remove the local version identifier, if any (e.g. 0.4.0rc0.post0+git.6a63116c.dirty => 0.4.0rc0.post0)
# Then remove post0 (keep postN for N > 0) (e.g. 0.4.0rc0.post0 => 0.4.0rc0)
version = re.sub(".post0$", "", gitversion.version_from_git().split("+")[0])
version = str(version) + version_variant
print(f"[SETUP.PY] Setting the package version: {version}")
return version
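# Illustrative outcomes of generate_package_version (hypothetical inputs; the actual
# values depend on the current date and the local git state):
#   generate_package_version("fbgemm_gpu_nightly", "+cu121") -> e.g. "2024.5.1+cu121"
#   generate_package_version("fbgemm_gpu", "+cpu")           -> e.g. "0.5.0+cpu" (from the git tag)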
def parse_args(argv: List[str]) -> argparse.Namespace:
parser = argparse.ArgumentParser(description="fbgemm_gpu setup")
parser.add_argument(
"--cpu_only",
dest="cpu_only",
action="store_true",
help="build for cpu-only (no GPU support)",
)
parser.add_argument(
"--package_name",
type=str,
default="fbgemm_gpu",
help="the name of this output wheel",
)
parser.add_argument(
"--nvml_lib_path",
type=str,
default=None,
help="Certain operations require the nvml lib (libnvidia-ml.so). If you installed"
" this in a custom location (through cudatoolkit-dev), provide the path here.",
)
return parser.parse_known_args(argv)
def nvcc_ok(cuda_home: str, major: int, minor: int) -> bool:
if not cuda_home:
return False
nvcc_path = f"{cuda_home}/bin/nvcc"
if not os.path.exists(nvcc_path):
return False
try:
# Extract the CUDA version from the nvcc version string - inspired by NVIDIA/apex
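# For reference, a typical `nvcc -V` output ends with a line such as
# "Cuda compilation tools, release 11.8, V11.8.89" (version numbers illustrative),
# so the token following "release" is "11.8," and the trailing comma is stripped
# below when parsing the minor version.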
output = subprocess.check_output([nvcc_path, "-V"], text=True)
fragments = output.split()
version = fragments[fragments.index("release") + 1]
version_fragments = version.split(".")
major_nvcc = int(version_fragments[0])
minor_nvcc = int(version_fragments[1].split(",")[0])
result = major == major_nvcc and minor == minor_nvcc
except BaseException:
result = False
return result
def find_cuda(major: int, minor: int) -> Optional[str]:
cuda_home = os.environ.get("CUDA_BIN_PATH")
if nvcc_ok(cuda_home, major, minor):
return cuda_home
cuda_nvcc = os.environ.get("CUDACXX")
if cuda_nvcc and os.path.exists(cuda_nvcc):
cuda_home = os.path.dirname(os.path.dirname(cuda_nvcc))
if nvcc_ok(cuda_home, major, minor):
return cuda_home
# Search standard installation location with version first
cuda_home = f"/usr/local/cuda-{major}.{minor}"
if nvcc_ok(cuda_home, major, minor):
return cuda_home
cuda_home = "/usr/local/cuda"
if nvcc_ok(cuda_home, major, minor):
return cuda_home
try:
# Try to find nvcc with which
with open(os.devnull, "w") as devnull:
nvcc = (
subprocess.check_output(["which", "nvcc"], stderr=devnull)
.decode()
.rstrip("\r\n")
)
cuda_home = os.path.dirname(os.path.dirname(nvcc))
except Exception:
cuda_home = None
if nvcc_ok(cuda_home, major, minor):
return cuda_home
return None
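# A minimal usage sketch (hypothetical paths; the result depends entirely on the
# local CUDA installation):
#   find_cuda(11, 8) might return "/usr/local/cuda-11.8", or None when no matching
#   toolkit is found via CUDA_BIN_PATH, CUDACXX, /usr/local, or `which nvcc`.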
def set_cuda_environment_variables() -> None:
cub_include_path = os.getenv("CUB_DIR", None)
if cub_include_path is None:
print(
"CUDA CUB directory environment variable not set. Using default CUB location."
)
if torch.version.cuda is not None:
cuda_version = torch.version.cuda.split(".")
cuda_home = find_cuda(int(cuda_version[0]), int(cuda_version[1]))
else:
cuda_home = False
if cuda_home:
print(f"Using CUDA = {cuda_home}")
os.environ["CUDA_BIN_PATH"] = cuda_home
os.environ["CUDACXX"] = f"{cuda_home}/bin/nvcc"
def cmake_environment_variables(args) -> None:
def _get_cxx11_abi():
try:
import torch
value = int(torch._C._GLIBCXX_USE_CXX11_ABI)
except ImportError:
value = 0
return "-DGLIBCXX_USE_CXX11_ABI=" + str(value)
torch_root = os.path.dirname(torch.__file__)
os.environ["CMAKE_BUILD_PARALLEL_LEVEL"] = str(os.cpu_count() // 2)
cmake_args = [f"-DCMAKE_PREFIX_PATH={torch_root}", _get_cxx11_abi()]
if args.cpu_only:
cmake_args.append("-DFBGEMM_CPU_ONLY=ON")
if args.nvml_lib_path:
cmake_args.append(f"-DNVML_LIB_PATH={args.nvml_lib_path}")
return cmake_args
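# For illustration, the returned CMake argument list looks roughly like
# ["-DCMAKE_PREFIX_PATH=<site-packages>/torch", "-DGLIBCXX_USE_CXX11_ABI=1",
#  "-DFBGEMM_CPU_ONLY=ON", "-DNVML_LIB_PATH=..."], where the path, ABI value, and
# optional flags depend on the local PyTorch install and the parsed arguments.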
class FbgemmGpuInstaller(PipInstall):
"""FBGEMM_GPU PIP Installer"""
@classmethod
def generate_version_file(cls, package_version: str) -> None:
with open("fbgemm_gpu/_fbgemm_gpu_version.py", "w") as file:
print(
f"[SETUP.PY] Generating version file at: {os.path.realpath(file.name)}"
)
text = textwrap.dedent(
f"""
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
__version__: str = "{package_version}"
"""
)
file.write(text)
@classmethod
def description(cls) -> str:
# Get the long description from the relevant file
current_dir = os.path.dirname(os.path.realpath(__file__))
with open(os.path.join(current_dir, "README.md"), encoding="utf-8") as f:
return f.read()
def print_versions(self) -> None:
pytorch_version = (
subprocess.run(
["python", "-c", "import torch; print(torch.__version__)"],
stdout=subprocess.PIPE,
)
.stdout.decode("utf-8")
.strip()
)
cuda_version_declared = (
subprocess.run(
["python", "-c", "import torch; print(torch.version.cuda)"],
stdout=subprocess.PIPE,
)
.stdout.decode("utf-8")
.strip()
)
table = [
["", "Version"],
["PyTorch", pytorch_version],
]
if cuda_version_declared != "None":
cuda_version = cuda_version_declared.split(".")
cuda_home = find_cuda(int(cuda_version[0]), int(cuda_version[1]))
actual_cuda_version = (
subprocess.run(
[f"{cuda_home}/bin/nvcc", "--version"],
stdout=subprocess.PIPE,
)
.stdout.decode("utf-8")
.strip()
)
table.extend(
[
["CUDA (Declared by PyTorch)", cuda_version_declared],
["CUDA (Actual)", actual_cuda_version],
]
)
print(tabulate(table, headers="firstrow", tablefmt="fancy_grid"))
def run(self):
PipInstall.run(self)
self.print_versions()
def main(argv: List[str]) -> None:
# Handle command line args before passing to main setup() method.
args, unknown = parse_args(argv)
print("Parsed Arguments: ", args)
if len(unknown) != 0 and (len(unknown) != 1 or unknown[0] != "clean"):
print("Unknown Arguments: ", unknown)
if args.cpu_only:
version_variant = "+cpu"
else:
set_cuda_environment_variables()
if torch.version.cuda is not None:
cuda_version = torch.version.cuda.split(".")
version_variant = "+cu" + str(cuda_version[0]) + str(cuda_version[1])
else:
# ROCm or other GPUs - to be specified if we officially support them
version_variant = ""
# Skip Nova build steps since it will be done in pre-script
if "BUILD_FROM_NOVA" in os.environ:
build_from_nova = os.getenv("BUILD_FROM_NOVA")
print("build_from_nova", build_from_nova)
# Package name is the same for all variants in Nova
package_name = "fbgemm_gpu"
if str(build_from_nova) != "0":
# Skip build clean and build wheel steps in Nova workflow since they are done in pre-script
print("Build from Nova detected... exiting")
sys.exit(0)
else:
# If not building from Nova, use the fbgemm_gpu-<variant> package name.
# PyPI does not accept a "+xx" local version suffix in the name convention, so drop the variant.
version_variant = ""
package_name = args.package_name
# Repair command line args for setup.
sys.argv = [sys.argv[0]] + unknown
# Determine the package version
package_version = generate_package_version(args.package_name, version_variant)
# Generate the version file
FbgemmGpuInstaller.generate_version_file(package_version)
setup(
name=package_name,
version=package_version,
author="FBGEMM Team",
author_email="[email protected]",
long_description=FbgemmGpuInstaller.description(),
long_description_content_type="text/markdown",
url="https://github.com/pytorch/fbgemm",
license="BSD-3",
keywords=[
"PyTorch",
"Recommendation Models",
"High Performance Computing",
"GPU",
"CUDA",
],
packages=["fbgemm_gpu"],
cmake_args=cmake_environment_variables(args),
cmdclass={
"install": FbgemmGpuInstaller,
},
# PyPI package information.
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: BSD License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
],
)
if __name__ == "__main__":
print(sys.argv)
main(sys.argv[1:])
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import logging
import random
from typing import List, Tuple
import click
import numpy as np
import torch
from fbgemm_gpu.split_embedding_configs import SparseType
from fbgemm_gpu.split_table_batched_embeddings_ops_common import (
CacheAlgorithm,
EmbeddingLocation,
)
from fbgemm_gpu.split_table_batched_embeddings_ops_inference import (
IntNBitTableBatchedEmbeddingBagsCodegen,
)
from torch import nn, Tensor
logging.basicConfig(level=logging.DEBUG)
try:
# pyre-ignore[21]
from fbgemm_gpu import open_source # noqa: F401
except Exception:
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:cumem_utils")
torch.ops.load_library(
"//deeplearning/fbgemm/fbgemm_gpu:split_table_batched_embeddings"
)
# pyre-ignore
def benchmark_same_input(iters: int, f, *args) -> float:
"""
Returns the average execution time in milliseconds over "iters" calls to f with the same arguments.
"""
# Warm-up
f(*args)
torch.cuda.synchronize()
start_event = torch.cuda.Event(enable_timing=True)
end_event = torch.cuda.Event(enable_timing=True)
start_event.record()
for _ in range(iters):
f(*args)
end_event.record()
torch.cuda.synchronize()
return start_event.elapsed_time(end_event) / iters
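# Minimal usage sketch for benchmark_same_input (the matmul workload below is
# purely illustrative and not part of this benchmark suite):
#   a = torch.randn(1024, 1024, device="cuda")
#   avg_ms = benchmark_same_input(100, torch.matmul, a, a)
#   logging.info(f"matmul: {avg_ms:.3f} ms/iter")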
# pyre-ignore
def benchmark_different_inputs(f, args) -> float:
"""
Returns the average execution time in milliseconds over the provided inputs ("args").
"""
torch.cuda.synchronize()
start_event = torch.cuda.Event(enable_timing=True)
end_event = torch.cuda.Event(enable_timing=True)
start_event.record()
for arg in args:
f(arg)
end_event.record()
torch.cuda.synchronize()
return start_event.elapsed_time(end_event) / len(args)
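# Minimal usage sketch for benchmark_different_inputs (illustrative inputs only):
#   batches = [torch.randn(2048, device="cuda") for _ in range(100)]
#   avg_ms = benchmark_different_inputs(torch.sigmoid, batches)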
def get_num_cached_tables(num_tables: int, cached_tables_ratio: float) -> int:
"""
Controls how # of cached tables are determined based on parameters.
"""
return round(num_tables * cached_tables_ratio)
def create_table_offsets(
num_tables: int, cached_tables_ratio: float, num_embeddings: int
) -> Tensor:
"""
Returns "table size cumsum", which is information of UVM caching for tables.
"""
num_cached_tables = get_num_cached_tables(num_tables, cached_tables_ratio)
np_list = np.arange(0, num_embeddings * num_cached_tables, num_embeddings)
num_uncached_tables = num_tables - num_cached_tables
while num_uncached_tables > 0:
added = random.randint(1, num_uncached_tables)
pos = random.randint(0, len(np_list) - 1)
np_list = np.insert(np_list, pos, [np_list[pos]] * added)
num_uncached_tables -= added
cache_hash_size_cumsum: Tensor = torch.tensor(np_list).cuda()
return cache_hash_size_cumsum
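# Example (assuming, hypothetically, num_tables=4, cached_tables_ratio=0.5,
# num_embeddings=10): round(4 * 0.5) = 2 tables are cached, so the base cumsum is
# [0, 10]; the 2 uncached tables are then inserted as duplicated entries at random
# positions, e.g. tensor([0, 0, 10, 10]) - repeated values mark uncached tables.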
def create_embedding_specs(
num_tables: int,
cached_tables_ratio: float,
num_embeddings: int,
embedding_dims: int,
) -> List[Tuple[str, int, int, SparseType, EmbeddingLocation]]:
"""
Returns embedding specs to be used with IntNBitTableBatchedEmbeddingBagsCodegen.
"""
num_cached_tables = get_num_cached_tables(num_tables, cached_tables_ratio)
num_uncached_tables = num_tables - num_cached_tables
embedding_specs = []
for _ in range(min(num_cached_tables, num_uncached_tables)):
embedding_specs.append(
(
"",
num_embeddings,
embedding_dims,
SparseType.INT8,
EmbeddingLocation.DEVICE,
)
)
embedding_specs.append(
(
"",
num_embeddings,
embedding_dims,
SparseType.INT8,
EmbeddingLocation.MANAGED_CACHING,
)
)
if num_cached_tables > num_uncached_tables:
for _ in range(num_cached_tables - num_uncached_tables):
embedding_specs.append(
(
"",
num_embeddings,
embedding_dims,
SparseType.INT8,
EmbeddingLocation.MANAGED_CACHING,
)
)
else:
for _ in range(num_uncached_tables - num_cached_tables):
embedding_specs.append(
(
"",
num_embeddings,
embedding_dims,
SparseType.INT8,
EmbeddingLocation.DEVICE,
)
)
return embedding_specs
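# Example (hypothetical num_tables=3, cached_tables_ratio=1/3): round(3 / 3) = 1
# cached table and 2 uncached ones, so the specs alternate once and the remainder
# is uncached, yielding locations [DEVICE, MANAGED_CACHING, DEVICE] with INT8 weights.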
def create_request(
num_tables: int, num_embeddings: int, batch: int, avg_pooling_factor: int
) -> Tuple[Tensor, Tensor]:
"""
Returns (indices, offsets), the inputs consumed by the embedding bag operators.
"""
indices: Tensor = torch.randint(
0, num_embeddings, (num_tables * batch * avg_pooling_factor,), dtype=torch.int32
).cuda()
# Pooling factors are intentionally diversified among [1, pf / 2, pf, pf * 2, pf * 4, pf * 8],
# where pf == avg_pooling_factor.
pooling_factors = []
for _ in range(num_tables - 1):
half_avg_pooling_factor = avg_pooling_factor // 2
if half_avg_pooling_factor > 0:
pooling_factors.append(
random.choices(
[
1,
half_avg_pooling_factor,
avg_pooling_factor,
2 * avg_pooling_factor,
4 * avg_pooling_factor,
8 * avg_pooling_factor,
],
weights=[5, 10, 15, 1, 1, 3],
)[0]
)
else:
pooling_factors.append(
random.choices(
[1, avg_pooling_factor, 2 * avg_pooling_factor], weights=[2, 20, 1]
)[0]
)
# The last table takes whatever pooling factor remains so the total matches num_tables * avg_pooling_factor.
curr_total_pooling_factors = sum(pooling_factors)
pooling_factors.append(num_tables * avg_pooling_factor - curr_total_pooling_factors)
offsets_list = [0]
for pooling_factor in pooling_factors:
if pooling_factor == 1:
for _ in range(batch):
offsets_list.append(pooling_factor)
else:
finish_offset = offsets_list[-1] + pooling_factor * batch
for _ in range(batch - 1):
selected = max(
int(random.gauss(pooling_factor, 0.1 * pooling_factor)), 1
)
last_offset = offsets_list[-1]
offsets_list.append(last_offset + selected)
offsets_list.append(finish_offset)
offsets: Tensor = torch.tensor(offsets_list, dtype=torch.int32).cuda()
return (indices, offsets)
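# Illustrative example (assumed values): create_request(num_tables=4, num_embeddings=1000,
# batch=2, avg_pooling_factor=10) returns an `indices` tensor with 4 * 2 * 10 == 80
# entries and an `offsets` tensor with 4 * 2 + 1 == 9 monotonically non-decreasing
# entries whose last element, in the typical case, equals the total number of indices.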
@click.group()
def cli() -> None:
pass
@cli.command()
@click.option("--iters", default=100)
@click.option("--num-tables", default=50)
@click.option("--cached-tables-ratio", default=1.0)
@click.option("--batch", default=100)
@click.option("--avg-pooling-factor", default=100)
def linearize_cache_indices(
iters: int,
num_tables: int,
cached_tables_ratio: float,
batch: int,
avg_pooling_factor: int,
) -> None:
num_embeddings: int = 1000000
cache_hash_size_cumsum = create_table_offsets(
num_tables, cached_tables_ratio, num_embeddings
)
indices, offsets = create_request(
num_tables, num_embeddings, batch, avg_pooling_factor
)
t_ms = benchmark_same_input(
iters,
lambda indices, offsets: torch.ops.fbgemm.linearize_cache_indices(
cache_hash_size_cumsum, indices, offsets
),
indices,
offsets,
)
logging.info(
f"Across {iters} runs, T: {num_tables}, Cached T: {get_num_cached_tables(num_tables, cached_tables_ratio)}, BS: {batch}, {t_ms * 1.0e3:.0f}us"
)
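# Example invocation (file name assumed; recent Click versions expose the subcommand
# with dashes, i.e. `linearize-cache-indices`):
#   python split_embeddings_cache_benchmark.py linearize-cache-indices \
#       --iters 100 --num-tables 50 --cached-tables-ratio 1.0 --batch 100 \
#       --avg-pooling-factor 100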
@cli.command()
@click.option("--iters", default=100)
@click.option("--num-tables", default=50)
@click.option("--cached-tables-ratio", default=1.0)
@click.option("--batch", default=100)
@click.option("--avg-pooling-factor", default=100)
@click.option("--cache-load-factor", default=0.2)
def lxu_cache_lookup(
iters: int,
num_tables: int,
cached_tables_ratio: float,
batch: int,
avg_pooling_factor: int,
cache_load_factor: float,
) -> None:
num_embeddings: int = 1000000
embedding_dims: int = 128
embedding_specs = create_embedding_specs(
num_tables, cached_tables_ratio, num_embeddings, embedding_dims
)
tbe: nn.Module = IntNBitTableBatchedEmbeddingBagsCodegen(
embedding_specs, cache_load_factor=cache_load_factor
)
tbe.fill_random_weights()
# Imitate execution flow by performing prefetching once.
indices, offsets = create_request(
num_tables, num_embeddings, batch, avg_pooling_factor
)
tbe.prefetch(indices, offsets)
linearized_indices = torch.ops.fbgemm.linearize_cache_indices(
tbe.cache_hash_size_cumsum, indices, offsets
)
t_ms = benchmark_same_input(
iters,
lambda linearized_indices, lxu_cache_state: torch.ops.fbgemm.lxu_cache_lookup(
linearized_indices, lxu_cache_state, tbe.total_cache_hash_size
),
linearized_indices,
tbe.lxu_cache_state,
)
# Run once again to obtain cache miss ratio.
locations = torch.ops.fbgemm.lxu_cache_lookup(
linearized_indices, tbe.lxu_cache_state, tbe.total_cache_hash_size
)
num_invalid_accesses = torch.sum(linearized_indices == tbe.total_cache_hash_size)
num_valid_accesses = linearized_indices.numel() - num_invalid_accesses
num_misses = torch.sum(locations == -1) - num_invalid_accesses
logging.info(
f"Across {iters} runs, T: {num_tables}, Cached T: {get_num_cached_tables(num_tables, cached_tables_ratio)}, "
f"BS: {batch}, cache_load_factor: {cache_load_factor}, {t_ms * 1.0e3:.0f}us, "
f"cache miss: {num_misses.item() / num_valid_accesses * 100}%"
)
@cli.command()
@click.option("--iters", default=100)
@click.option("--num-tables", default=50)
@click.option("--cached-tables-ratio", default=1.0)
@click.option("--batch", default=100)
@click.option("--avg-pooling-factor", default=100)
@click.option("--cache-load-factor", default=0.2)
def lru_cache_populate_byte(
iters: int,
num_tables: int,
cached_tables_ratio: float,
batch: int,
avg_pooling_factor: int,
cache_load_factor: float,
) -> None:
num_warm_ups: int = 5
num_embeddings: int = 1000000
embedding_dims: int = 128
embedding_specs = create_embedding_specs(
num_tables, cached_tables_ratio, num_embeddings, embedding_dims
)
cc: nn.Module = IntNBitTableBatchedEmbeddingBagsCodegen(
embedding_specs, cache_load_factor=cache_load_factor
)
cc.fill_random_weights()
warm_up_requests = []
for _ in range(num_warm_ups):
indices, offsets = create_request(
num_tables, num_embeddings, batch, avg_pooling_factor
)
warm_up_requests.append(
torch.ops.fbgemm.linearize_cache_indices(
cc.cache_hash_size_cumsum, indices, offsets
)
)
requests = []
for _ in range(iters):
indices, offsets = create_request(
num_tables, num_embeddings, batch, avg_pooling_factor
)
requests.append(
torch.ops.fbgemm.linearize_cache_indices(
cc.cache_hash_size_cumsum, indices, offsets
)
)
timestep: int = 1
def populate(linear_indices: Tensor) -> None:
nonlocal timestep
torch.ops.fbgemm.lru_cache_populate_byte(
cc.weights_uvm,
cc.cache_hash_size_cumsum,
cc.total_cache_hash_size,
cc.cache_index_table_map,
cc.weights_offsets,
cc.weights_tys,
cc.D_offsets,
linear_indices,
cc.lxu_cache_state,
cc.lxu_cache_weights,
timestep,
cc.lxu_state,
)
timestep += 1
for warm_up_request in warm_up_requests:
populate(warm_up_request)
t_ms = benchmark_different_inputs(
populate,
requests,
)
# Replay to figure out UVM access BW, which would be PCIe bound.
replay_cc: nn.Module = IntNBitTableBatchedEmbeddingBagsCodegen(
embedding_specs, cache_load_factor=cache_load_factor
)
replay_cc.fill_random_weights()
replay_timestep: int = 1
def replay_populate(linear_indices: Tensor) -> None:
nonlocal replay_timestep
torch.ops.fbgemm.lru_cache_populate_byte(
replay_cc.weights_uvm,
replay_cc.cache_hash_size_cumsum,
replay_cc.total_cache_hash_size,
replay_cc.cache_index_table_map,
replay_cc.weights_offsets,
replay_cc.weights_tys,
replay_cc.D_offsets,
linear_indices,
replay_cc.lxu_cache_state,
replay_cc.lxu_cache_weights,
replay_timestep,
replay_cc.lxu_state,
)
replay_timestep += 1
for warm_up_request in warm_up_requests:
replay_populate(warm_up_request)
total_rows = 0
for request in requests:
prev = replay_cc.lxu_cache_state.clone().detach()
replay_populate(request)
after = replay_cc.lxu_cache_state.clone().detach()
diff = after - prev
total_rows += diff.count_nonzero().item()
logging.info(
f"Across {iters} runs, T: {num_tables}, Cached T: {get_num_cached_tables(num_tables, cached_tables_ratio)}, "
f"BS: {batch}, cache_load_factor: {cache_load_factor}, {t_ms * 1.0e3:.0f}us, "
f"BW (just UVM accesses): {total_rows * embedding_dims / iters / t_ms * 1000 / 1024 / 1024} MB/s"
)
@cli.command()
@click.option("--iters", default=100)
@click.option("--num-tables", default=50)
@click.option("--cached-tables-ratio", default=1.0)
@click.option("--batch", default=100)
@click.option("--avg-pooling-factor", default=100)
@click.option("--cache-load-factor", default=0.2)
def lfu_cache_populate_byte(
iters: int,
num_tables: int,
cached_tables_ratio: float,
batch: int,
avg_pooling_factor: int,
cache_load_factor: float,
) -> None:
num_warm_ups: int = 5
num_embeddings: int = 1000000
embedding_dims: int = 128
embedding_specs = create_embedding_specs(
num_tables, cached_tables_ratio, num_embeddings, embedding_dims
)
cc: nn.Module = IntNBitTableBatchedEmbeddingBagsCodegen(
embedding_specs,
cache_load_factor=cache_load_factor,
cache_algorithm=CacheAlgorithm.LFU,
)
cc.fill_random_weights()
warm_up_requests = []
for _ in range(num_warm_ups):
indices, offsets = create_request(
num_tables, num_embeddings, batch, avg_pooling_factor
)
warm_up_requests.append(
torch.ops.fbgemm.linearize_cache_indices(
cc.cache_hash_size_cumsum, indices, offsets
)
)
requests = []
for _ in range(iters):
indices, offsets = create_request(
num_tables, num_embeddings, batch, avg_pooling_factor
)
requests.append(
torch.ops.fbgemm.linearize_cache_indices(
cc.cache_hash_size_cumsum, indices, offsets
)
)
def populate(linear_indices: Tensor) -> None:
torch.ops.fbgemm.lfu_cache_populate_byte(
cc.weights_uvm,
cc.cache_hash_size_cumsum,
cc.total_cache_hash_size,
cc.cache_index_table_map,
cc.weights_offsets,
cc.weights_tys,
cc.D_offsets,
linear_indices,
cc.lxu_cache_state,
cc.lxu_cache_weights,
cc.lxu_state,
)
for warm_up_request in warm_up_requests:
populate(warm_up_request)
t_ms = benchmark_different_inputs(
populate,
requests,
)
# Replay to figure out UVM access BW, which would be PCIe bound.
replay_cc: nn.Module = IntNBitTableBatchedEmbeddingBagsCodegen(
embedding_specs,
cache_load_factor=cache_load_factor,
cache_algorithm=CacheAlgorithm.LFU,
)
replay_cc.fill_random_weights()
def replay_populate(linear_indices: Tensor) -> None:
torch.ops.fbgemm.lfu_cache_populate_byte(
replay_cc.weights_uvm,
replay_cc.cache_hash_size_cumsum,
replay_cc.total_cache_hash_size,
replay_cc.cache_index_table_map,
replay_cc.weights_offsets,
replay_cc.weights_tys,
replay_cc.D_offsets,
linear_indices,
replay_cc.lxu_cache_state,
replay_cc.lxu_cache_weights,
replay_cc.lxu_state,
)
for warm_up_request in warm_up_requests:
replay_populate(warm_up_request)
total_rows = 0
for request in requests:
prev = replay_cc.lxu_cache_state.clone().detach()
replay_populate(request)
after = replay_cc.lxu_cache_state.clone().detach()
diff = after - prev
total_rows += diff.count_nonzero().item()
logging.info(
f"Across {iters} runs, T: {num_tables}, Cached T: {get_num_cached_tables(num_tables, cached_tables_ratio)}, "
f"BS: {batch}, cache_load_factor: {cache_load_factor}, {t_ms * 1.0e3:.0f}us, "
f"BW (just UVM accesses): {total_rows * embedding_dims / iters / t_ms * 1000 / 1024 / 1024} MB/s"
)
if __name__ == "__main__":
cli()
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import logging
import click
import torch
from fbgemm_gpu.bench.bench_utils import benchmark_torch_function
logging.basicConfig(level=logging.DEBUG)
try:
# pyre-ignore[21]
from fbgemm_gpu import open_source # noqa: F401
except Exception:
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops")
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops_cpu")
@click.group()
def cli() -> None:
pass
@cli.command()
@click.option("--flush-gpu-cache-size-mb", default=40)
@click.option("--iters", default=100)
@click.option("--batch-size", default=25)
@click.option("--m", default=2048)
@click.option("--n", default=100)
@click.option("--k", default=256)
@click.option("--num_warmups", default=2)
def stride_gemm(
flush_gpu_cache_size_mb: int,
iters: int,
batch_size: int,
m: int,
n: int,
k: int,
num_warmups: int,
) -> None:
A = torch.rand(m, batch_size, k).half().cuda()
B = torch.rand(batch_size, k, n).half().cuda()
bias = torch.rand(batch_size, n).half().cuda()
bias_permute102 = bias.unsqueeze(1)
# A100 40MB L2 cache
elapse, _ = benchmark_torch_function(
torch.ops.fbgemm.permute102_baddbmm_permute102,
(bias, A, B),
flush_gpu_cache_size_mb,
iters=iters,
num_warmups=num_warmups,
)
logging.info(
f"stride gemm fused: time: {elapse}, TFLOPS/sec: {2.0 * batch_size * m * n * k / elapse / 1.0e12: .2f}"
)
def ref_stride_gemm(
bias_permute102: torch.Tensor, A: torch.Tensor, B: torch.Tensor
) -> torch.Tensor:
A_permute102 = A.permute(1, 0, 2)
C_permute102 = torch.baddbmm(bias_permute102, A_permute102, B)
C_ref = C_permute102.permute(1, 0, 2) # (m, batch_size, n)
return C_ref
# A100 40MB L2 cache
elapse_ref, _ = benchmark_torch_function(
ref_stride_gemm,
(bias_permute102, A, B),
flush_gpu_cache_size_mb,
iters=iters,
num_warmups=num_warmups,
)
logging.info(
f"stride gemm unfused: time: {elapse_ref}, TFLOPS/sec: {2.0 * batch_size * m * n * k / elapse_ref / 1.0e12: .2f}"
)
if __name__ == "__main__":
cli()
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import logging
import time
from typing import Tuple
import click
import numpy as np
import torch
from fbgemm_gpu.bench.bench_utils import benchmark_requests
from fbgemm_gpu.split_embedding_utils import generate_requests, round_up
from fbgemm_gpu.split_table_batched_embeddings_ops_inference import (
IntNBitTableBatchedEmbeddingBagsCodegen,
)
from fbgemm_gpu.ssd_split_table_batched_embeddings_ops import (
CacheAlgorithm,
EmbeddingLocation,
PoolingMode,
SparseType,
SSDIntNBitTableBatchedEmbeddingBags,
)
logging.basicConfig(level=logging.DEBUG)
torch.ops.load_library(
"//deeplearning/fbgemm/fbgemm_gpu:ssd_split_table_batched_embeddings"
)
@click.group()
def cli() -> None:
pass
def benchmark_ssd_function(
iters: int,
warmup_iters: int,
# pyre-fixme[2]: Parameter must be annotated.
s,
buf: torch.Tensor,
indices: torch.Tensor,
indices_per_itr: int,
) -> Tuple[float, float]:
actions_count_cpu = torch.tensor([indices_per_itr]).long().cpu()
# warmup
for i in range(warmup_iters):
start = i * indices_per_itr
end = start + indices_per_itr
indices_this_itr = indices[start:end]
# Benchmark code
s.get(indices_this_itr, buf, actions_count_cpu)
s.set(indices_this_itr, buf, actions_count_cpu)
logging.info("Finished warmup")
total_time_read_ns = 0
total_time_write_ns = 0
for i in range(iters):
start = (i + warmup_iters) * indices_per_itr
end = start + indices_per_itr
indices_this_itr = indices[start:end]
# Benchmark code
start = time.time_ns()
s.get(indices_this_itr, buf, actions_count_cpu)
read_end = time.time_ns()
s.set(indices_this_itr, buf, actions_count_cpu)
end = time.time_ns()
total_time_read_ns += read_end - start
total_time_write_ns += end - read_end
if i % 10 == 0:
logging.info(
f"{i}, {(read_end - start) / 10**6}, {(end - read_end) / 10**6}"
)
return (total_time_read_ns / iters, total_time_write_ns / iters)
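# Illustrative arithmetic (assumed numbers): if two timed iterations take 1.0 ms and
# 3.0 ms for the reads, benchmark_ssd_function reports an average read latency of
# 2.0e6 ns; both returned values are per-iteration averages expressed in nanoseconds.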
def benchmark_read_write(
ssd_prefix: str,
batch_size: int,
bag_size: int,
num_embeddings: int,
embedding_dim: int,
iters: int,
warmup_iters: int,
num_shards: int,
num_threads: int,
) -> None:
import tempfile
idx_dtype = torch.int64
data_dtype = torch.float32
np.random.seed(42)
torch.random.manual_seed(43)
elem_size = 4
with tempfile.TemporaryDirectory(prefix=ssd_prefix) as ssd_directory:
ssd_db = torch.classes.fbgemm.EmbeddingRocksDBWrapper(
ssd_directory,
num_shards,
num_threads,
0, # ssd_memtable_flush_period,
0, # ssd_memtable_flush_offset,
4, # ssd_l0_files_per_compact,
embedding_dim,
0, # ssd_rate_limit_mbps,
1, # ssd_size_ratio,
8, # ssd_compaction_trigger,
536870912, # 512MB ssd_write_buffer_size,
8, # ssd_max_write_buffer_num,
-0.01, # ssd_uniform_init_lower
0.01, # ssd_uniform_init_upper
32, # row_storage_bitwidth
)
total_indices = (warmup_iters + iters) * batch_size * bag_size
indices_per_itr = batch_size * bag_size
indices = torch.randint(
low=0, high=num_embeddings, size=(total_indices,), dtype=idx_dtype
)
buf = torch.empty((batch_size * bag_size, embedding_dim), dtype=data_dtype)
read_lat_ns, write_lat_ns = benchmark_ssd_function(
iters, warmup_iters, ssd_db, buf, indices, indices_per_itr
)
total_bytes = batch_size * embedding_dim * bag_size * elem_size
byte_seconds_per_ns = total_bytes * 1e9
gibps_rd = byte_seconds_per_ns / (read_lat_ns * 2**30)
gibps_wr = byte_seconds_per_ns / (write_lat_ns * 2**30)
gibps_tot = 2 * byte_seconds_per_ns / ((read_lat_ns + write_lat_ns) * 2**30)
logging.info(
f"Batch Size: {batch_size}, "
f"Bag_size: {bag_size:3d}, "
f"Read_us: {read_lat_ns / 1000:8.0f}, "
f"Write_us: {write_lat_ns / 1000:8.0f}, "
f"Total_us: {(read_lat_ns + write_lat_ns) / 1000:8.0f}, "
f"TMaxQPS: {1e9 * batch_size / (read_lat_ns + write_lat_ns):8.0f}, "
f"GiBps Rd: {gibps_rd:3.2f}, "
f"GiBps Wr: {gibps_wr:3.2f}, "
f"GiBps R+W: {gibps_tot:3.2f}, "
)
del ssd_db
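# Illustrative bandwidth arithmetic (assumed numbers): with batch_size=1024, bag_size=1,
# embedding_dim=128 and 4-byte float32 rows, total_bytes == 1024 * 1 * 128 * 4 == 524288
# bytes per iteration; an average read latency of 100_000 ns then yields
# 524288 * 1e9 / (100_000 * 2**30) ~= 4.9 GiB/s for the reported "GiBps Rd" column.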
@cli.command()
# @click.option("--num-tables", default=64)
@click.option("--num-embeddings", default=int(1.5e9))
@click.option("--embedding-dim", default=128)
@click.option("--batch-size", default=1024)
@click.option("--bag-size", default=1)
@click.option("--iters", default=1000)
@click.option("--warmup-iters", default=100)
@click.option(
"--ssd-prefix", default="/tmp/ssd_benchmark_embedding"
) # Check P556577690 and https://fburl.com/t9lf4d7v
@click.option("--num-shards", default=8)
@click.option("--num-threads", default=8)
def ssd_read_write(
ssd_prefix: str,
num_embeddings: int,
embedding_dim: int,
bag_size: int,
batch_size: int,
iters: int,
warmup_iters: int,
num_shards: int,
num_threads: int,
) -> None:
benchmark_read_write(
ssd_prefix,
batch_size,
bag_size,
num_embeddings,
embedding_dim,
iters,
warmup_iters,
num_shards,
num_threads,
)
@cli.command()
@click.option("--alpha", default=1.0)
@click.option("--bag-size", default=20)
@click.option("--batch-size", default=512)
@click.option("--embedding-dim", default=128)
@click.option("--weights-precision", type=SparseType, default=SparseType.INT4)
@click.option("--iters", default=100)
@click.option("--mixed", is_flag=True, default=False)
@click.option("--num-embeddings", default=int(1e5))
@click.option("--num-tables", default=32)
@click.option("--reuse", default=0.1)
@click.option("--weighted", is_flag=True, default=False)
@click.option("--flush-gpu-cache-size-mb", default=0)
@click.option("--output-dtype", type=SparseType, default=SparseType.FP16)
@click.option("--use-cache", is_flag=True, default=False)
@click.option("--cache-algorithm", default="lru")
@click.option("--cache-load-factor", default=0.2)
@click.option("--enforce-hbm", is_flag=True, default=False)
@click.option("--ssd-cache-loc", default="device")
def nbit_ssd(
    alpha: float,
bag_size: int, # L
batch_size: int, # B
embedding_dim: int, # D
weights_precision: SparseType,
iters: int,
mixed: bool,
num_embeddings: int, # E
num_tables: int, # T
reuse: float,
weighted: bool,
flush_gpu_cache_size_mb: int,
output_dtype: SparseType,
use_cache: bool,
cache_algorithm: str,
cache_load_factor: float,
enforce_hbm: bool,
ssd_cache_loc: str,
) -> None:
import tempfile
np.random.seed(42)
torch.manual_seed(42)
B = batch_size
D = embedding_dim
L = bag_size
E = num_embeddings
T = num_tables
cache_alg = CacheAlgorithm.LRU
managed_type = (
EmbeddingLocation.MANAGED_CACHING if use_cache else EmbeddingLocation.MANAGED
)
ssd_cache_location = (
EmbeddingLocation.MANAGED
if ssd_cache_loc == "managed"
else EmbeddingLocation.DEVICE
)
logging.info(f"T: {T}")
if mixed:
Ds = [
round_up(np.random.randint(low=int(0.5 * D), high=int(1.5 * D)), 4)
for _ in range(T)
]
D = np.average(Ds)
else:
Ds = [D] * T
emb_uvm = IntNBitTableBatchedEmbeddingBagsCodegen(
[
(
"",
E,
d,
weights_precision,
managed_type,
)
for d in Ds
],
output_dtype=output_dtype,
cache_load_factor=cache_load_factor,
cache_algorithm=cache_alg,
enforce_hbm=enforce_hbm,
).cuda()
emb_uvm.fill_random_weights()
feature_table_map = list(range(T))
C = max(T * B * L, 1)
emb_ssd = SSDIntNBitTableBatchedEmbeddingBags(
embedding_specs=[("", E, d, weights_precision) for d in Ds],
feature_table_map=feature_table_map,
ssd_storage_directory=tempfile.mkdtemp(),
cache_sets=C,
ssd_uniform_init_lower=-0.1,
ssd_uniform_init_upper=0.1,
ssd_shards=2,
pooling_mode=PoolingMode.SUM,
ssd_cache_location=ssd_cache_location, # adjust the cache locations
).cuda()
emb_cpu = IntNBitTableBatchedEmbeddingBagsCodegen(
[
(
"",
E,
d,
weights_precision,
EmbeddingLocation.HOST,
)
for d in Ds
],
output_dtype=output_dtype,
device="cpu",
)
emb_cpu.fill_random_weights()
requests = generate_requests(
iters,
B,
T,
L,
E,
reuse=reuse,
alpha=alpha,
weighted=weighted,
)
    # Avoid truth-testing `c`, which is a multi-element tensor when --weighted is set.
    requests_gpu = [(a.int(), b.int(), c if c is not None else None) for (a, b, c) in requests]
param_size_multiplier = weights_precision.bit_rate() / 8.0
output_size_multiplier = output_dtype.bit_rate() / 8.0
read_write_bytes = (
output_size_multiplier * B * sum(Ds) + param_size_multiplier * B * sum(Ds) * L
)
nparams_byte = sum(w.numel() for (w, _) in emb_cpu.split_embedding_weights())
logging.info(
f"{weights_precision} Embedding tables: {E * T} rows, {nparams_byte / param_size_multiplier / 1.0e9: .2f} GParam, "
f"{nparams_byte / 1.0e9: .2f} GB" # IntN TBE use byte for storage
)
logging.info(
f"Accessed weights per batch: {B * T * L} rows, "
f"{B * (L * sum(Ds)) * param_size_multiplier / 1.0e9: .2f} GB"
)
# UVM
torch.cuda.cudart().cudaProfilerStart()
torch.cuda.nvtx.range_push("uvm forward")
time_per_iter = benchmark_requests(
# pyre-ignore
requests_gpu,
lambda indices, offsets, per_sample_weights: emb_uvm.forward(
indices.int(),
offsets.int(),
per_sample_weights,
),
flush_gpu_cache_size_mb=flush_gpu_cache_size_mb,
)
logging.info(
f"UVM NBit Forward, {weights_precision}, B: {B}, "
f"E: {E}, T: {T}, D: {D}, L: {L}, W: {weighted}, "
f"BW: {read_write_bytes / time_per_iter / 1.0e9: .2f} GB/s, " # noqa: B950
f"Time: {time_per_iter * 1.0e6:.0f}us"
)
torch.cuda.nvtx.range_pop()
torch.cuda.cudart().cudaProfilerStop()
# SSD
torch.cuda.cudart().cudaProfilerStart()
torch.cuda.nvtx.range_push("ssd forward")
time_per_iter = benchmark_requests(
# pyre-ignore
requests_gpu,
lambda indices, offsets, per_sample_weights: emb_ssd.forward(
indices.int(),
offsets.int(),
per_sample_weights,
),
flush_gpu_cache_size_mb=flush_gpu_cache_size_mb,
)
logging.info(
f"SSD NBit Forward, {weights_precision}, B: {B}, "
f"E: {E}, T: {T}, D: {D}, L: {L}, W: {weighted}, "
f"BW: {read_write_bytes / time_per_iter / 1.0e9: .2f} GB/s, " # noqa: B950
f"Time: {time_per_iter * 1.0e6:.0f}us"
)
torch.cuda.nvtx.range_pop()
torch.cuda.cudart().cudaProfilerStop()
# CPU
    requests_cpu = [
        (a.int().cpu(), b.int().cpu(), c if c is not None else None) for (a, b, c) in requests
    ]
time_per_iter = benchmark_requests(
# pyre-ignore
requests_cpu,
lambda indices, offsets, per_sample_weights: emb_cpu.forward(
indices.int().cpu(),
offsets.int().cpu(),
per_sample_weights,
),
flush_gpu_cache_size_mb=flush_gpu_cache_size_mb,
)
logging.info(
f"CPU NBit Forward, {weights_precision}, B: {B}, "
f"E: {E}, T: {T}, D: {D}, L: {L}, W: {weighted}, "
f"BW: {read_write_bytes / time_per_iter / 1.0e9: .2f} GB/s, " # noqa: B950
f"Time: {time_per_iter * 1.0e6:.0f}us"
)
if __name__ == "__main__":
cli()
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import functools
import logging
import random
from typing import List, Tuple
import click
import fbgemm_gpu
import torch
from torch.profiler import profile
logging.basicConfig(level=logging.DEBUG)
# pyre-fixme[16]: Module `fbgemm_gpu` has no attribute `open_source`.
open_source: bool = getattr(fbgemm_gpu, "open_source", False)
if open_source:
# pyre-ignore[21]
from bench_utils import benchmark_torch_function
else:
from fbgemm_gpu.bench.bench_utils import benchmark_torch_function
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops")
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops_cpu")
@click.group()
def cli() -> None:
pass
@cli.command()
@click.option("--batch-size", type=int, default=128)
@click.option("--embedding-dim", type=int, default=128)
@click.option("--max-len", type=int, default=128)
@click.option("--elem-type", type=str, default="half")
def device(
batch_size: int,
embedding_dim: int,
max_len: int,
elem_type: str,
) -> None:
lengths = torch.randint(max_len, size=(batch_size,))
total_lengths = lengths.sum().item()
offsets = torch.ops.fbgemm.asynchronous_complete_cumsum(lengths)
dtype = (
torch.float16
if elem_type == "half" or elem_type == "float16"
else torch.float32
)
# pyre-fixme[6]: For 1st param expected `int` but got `Union[bool, float, int]`.
values_2d = torch.rand(total_lengths, embedding_dim, dtype=dtype)
if torch.cuda.is_available():
offsets = offsets.cuda()
values_2d = values_2d.cuda()
time, output = benchmark_torch_function(
torch.ops.fbgemm.jagged_2d_to_dense, (values_2d, offsets, max_len), iters=1000
)
offsets_nbytes = offsets.numel() * offsets.element_size()
values_nbytes = values_2d.numel() * values_2d.element_size()
dense_nbytes = output.numel() * output.element_size()
num_bytes = offsets_nbytes + values_nbytes + dense_nbytes
logging.info(f"jagged_2d_to_dense {time} sec {num_bytes / time / 1e9} GB/s")
total_L = values_2d.size(0)
time, jagged_output = benchmark_torch_function(
torch.ops.fbgemm.dense_to_jagged, (output, [offsets], total_L), iters=1000
)
num_bytes = offsets_nbytes + 2 * values_nbytes
logging.info(f"dense_to_jagged (2d) {time} sec {num_bytes / time / 1e9} GB/s")
time, jagged_output = benchmark_torch_function(
torch.ops.fbgemm.jagged_dense_elementwise_add_jagged_output,
(values_2d, [offsets], output),
iters=1000,
)
num_bytes = offsets_nbytes + 3 * values_nbytes
logging.info(
f"jagged_dense_elementwise_add_jagged_output {time} sec {num_bytes / time / 1e9} GB/s"
)
time, jagged_output = benchmark_torch_function(
torch.ops.fbgemm.jagged_dense_elementwise_mul,
(values_2d, [offsets], output),
iters=1000,
)
num_bytes = offsets_nbytes + 3 * values_nbytes
logging.info(
f"jagged_dense_elementwise_mul {time} sec {num_bytes / time / 1e9} GB/s"
)
output_sq = output * output
time, jagged_output = benchmark_torch_function(
torch.ops.fbgemm.jagged_dense_dense_elementwise_add_jagged_output,
(values_2d, [offsets], output, output_sq),
iters=1000,
)
num_bytes = offsets_nbytes + 4 * values_nbytes
logging.info(
f"jagged_dense_dense_elementwise_add_jagged_output {time} sec {num_bytes / time / 1e9} GB/s"
)
# pyre-fixme[6]: For 1st param expected `Union[List[int], Size,
# typing.Tuple[int, ...]]` but got `Union[bool, float, int]`.
values_1d = torch.rand(total_lengths)
if torch.cuda.is_available():
values_1d = values_1d.cuda()
values_nbytes = values_1d.numel() * values_1d.element_size()
time, output = benchmark_torch_function(
lambda: torch.ops.fbgemm.jagged_1d_to_dense(
values_1d, offsets, max_len, padding_value=0
),
(),
iters=1000,
)
dense_nbytes = output.numel() * output.element_size()
num_bytes = offsets_nbytes + values_nbytes + dense_nbytes
logging.info(f"jagged_1d_to_dense {time} sec {num_bytes / time / 1e9} GB/s")
total_L = values_1d.size(0)
output_1d = torch.unsqueeze(output, -1)
time, jagged_output = benchmark_torch_function(
torch.ops.fbgemm.dense_to_jagged, (output_1d, [offsets], total_L), iters=1000
)
num_bytes = offsets_nbytes + 2 * values_nbytes
logging.info(f"dense_to_jagged (1d) {time} sec {num_bytes / time / 1e9} GB/s")
@cli.command()
@click.option("--batch-size", type=int, default=1)
@click.option("--h-dim", type=int, default=3)
@click.option("--embedding-dim", type=int, default=16)
@click.option("--max-len", type=int, default=10)
@click.option("--elem-type", type=str, default="half")
def batched_dense_vec_jagged_2d_mul(
batch_size: int,
h_dim: int,
embedding_dim: int,
max_len: int,
elem_type: str,
) -> None:
lengths = torch.randint(2 * max_len, size=(batch_size,)) # Allow for truncation
total_lengths = lengths.sum().item()
offsets = torch.ops.fbgemm.asynchronous_complete_cumsum(lengths)
dtype = (
torch.float16
if elem_type == "half" or elem_type == "float16"
else torch.float32
)
# pyre-fixme[6]: For 1st param expected `int` but got `Union[bool, float, int]`.
values_2d = torch.rand(total_lengths, h_dim * embedding_dim, dtype=dtype)
dense = torch.rand(batch_size * h_dim, max_len, dtype=dtype)
if torch.cuda.is_available():
offsets = offsets.cuda()
values_2d = values_2d.cuda()
dense = dense.cuda()
time, output = benchmark_torch_function(
torch.ops.fbgemm.batched_dense_vec_jagged_2d_mul,
(dense, values_2d, offsets),
iters=1000,
)
# Account for the fact that each matmul inner dim was limited to max_len
computed_lengths = torch.minimum(lengths, torch.ones(batch_size) * max_len)
total_computed_lengths = computed_lengths.sum().item()
num_flops = total_computed_lengths * h_dim * embedding_dim * 2.0
logging.info(
f"batched_dense_vec_jagged_2d_mul {time} sec {num_flops / time / 1e9} GFLOP/s"
)
@cli.command()
@click.option("--batch-size", type=int, default=1024)
@click.option("--max-len", type=int, default=10)
@click.option("--dtype", type=str, default="float")
def jagged_1d_to_truncated_values(
batch_size: int,
max_len: int,
dtype: str,
) -> None:
lengths = torch.randint(2 * max_len, size=(batch_size,)) # Allow for truncation
total_lengths = lengths.sum().item()
torch_dtype = torch.float16 if dtype in ["half", "float16"] else torch.float32
# pyre-fixme[6]: For 1st param expected `int` but got `Union[bool, float, int]`.
values = torch.rand(total_lengths, dtype=torch_dtype)
def ref(values: torch.Tensor, lengths: torch.Tensor, max_len: int) -> torch.Tensor:
dense_values = torch.ops.fbgemm.jagged_to_padded_dense(
values,
[torch.ops.fbgemm.asynchronous_complete_cumsum(lengths)],
[max_len],
padding_value=0,
)
truncated_lengths = torch.clamp(lengths, max=max_len)
mask2d = torch.arange(max_len).expand(
batch_size, -1
) < truncated_lengths.unsqueeze(-1)
return dense_values[mask2d].view(-1)
time_ref, output_ref = benchmark_torch_function(
ref,
(values, lengths, max_len),
)
time, output = benchmark_torch_function(
torch.ops.fbgemm.jagged_1d_to_truncated_values,
(values, lengths, max_len),
)
torch.testing.assert_close(output, output_ref)
bytes = (values.numel() + output.numel()) * (
4 if torch_dtype == torch.float else 2
) + lengths.numel() * 4
logging.info(f"reference {time_ref} sec {bytes / time_ref / 1e9} GB/s")
logging.info(f"truncate_jagged_1d {time} sec {bytes / time / 1e9} GB/s")
@cli.command()
@click.option("--batch-size", type=int, default=1024)
@click.option("--max-len", type=int, default=256)
def masked_select_jagged_1d(
batch_size: int,
max_len: int,
) -> None:
lengths = torch.randint(2 * max_len, size=(batch_size,)) # Allow for truncation
total_lengths = int(lengths.sum().item())
dtype = torch.long
values = torch.randint(2**16, (total_lengths,), dtype=dtype)
mask = torch.randint(2, (total_lengths,)) > 0
def ref(
values: torch.Tensor, lengths: torch.Tensor, mask: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor]:
masked_values_ref = values[mask]
cum_count = torch.cumsum(mask, 0)
cum_count = torch.cat((cum_count, torch.tensor([0])))
cum_length = cum_count[torch.cumsum(lengths, 0) - 1]
cum_length_shift_right = torch.roll(cum_length, 1)
cum_length_shift_right[0] = 0
masked_lengths_ref = cum_length - cum_length_shift_right
return masked_values_ref, masked_lengths_ref
time_ref, (masked_values_ref, masked_lengths_ref) = benchmark_torch_function(
ref,
(values, lengths, mask),
)
time, (masked_values, masked_lengths) = benchmark_torch_function(
torch.ops.fbgemm.masked_select_jagged_1d,
(values, lengths, mask),
)
torch.testing.assert_close(masked_values, masked_values_ref)
torch.testing.assert_close(masked_lengths, masked_lengths_ref)
bytes = (2 * values.numel() + 2 * lengths.numel() + 2 * masked_values.numel()) * 4
logging.info(f"reference {time_ref} sec {bytes / time_ref / 1e9} GB/s")
logging.info(f"masked_select_jagged_1d {time} sec {bytes / time / 1e9} GB/s")
@cli.command()
@click.option("--num-batches", type=int, default=40)
@click.option("--max-seq-length", type=int, default=400)
@click.option("--input-batch-size", type=int, default=1024)
@click.option("--output-batch-size", type=int, default=512)
@click.option("--jagged-tensor-type", type=str, default="float")
@click.option("--has-weights", is_flag=True, default=False)
@click.option("--weight-type", type=str, default="float")
def keyed_jagged_index_select_dim1(
num_batches: int,
max_seq_length: int,
input_batch_size: int,
output_batch_size: int,
jagged_tensor_type: str,
has_weights: bool,
weight_type: str,
) -> None:
jagged_tensor_types = {
"float": torch.float,
"half": torch.half,
"int": torch.int,
"long": torch.long,
}
weight_types = {"float": torch.float, "half": torch.half}
if jagged_tensor_type not in jagged_tensor_types.keys():
raise AssertionError(
f"--jagged-tensor-type ({jagged_tensor_type}) is not supported"
)
if weight_type not in weight_types.keys():
raise AssertionError(f"--weight-type ({weight_type}) is not supported")
jagged_tensor_dtype = jagged_tensor_types[jagged_tensor_type]
is_float = jagged_tensor_dtype in [torch.float, torch.half]
weight_dtype = weight_types[weight_type]
lengths = torch.randint(
low=0,
high=max_seq_length,
size=(input_batch_size * num_batches,),
dtype=torch.long,
device="cuda",
)
# Imitate KeyedJaggedTensor offsets
offsets = torch.concat(
[torch.zeros(1, dtype=torch.long, device="cuda"), lengths.cumsum(0)]
)
indices = torch.randint(
low=0,
        high=input_batch_size,  # pick random batch elements to gather from each key
size=(output_batch_size,),
dtype=torch.long,
device="cuda",
)
if is_float:
values = torch.rand(
int(offsets[-1].item()),
dtype=jagged_tensor_dtype,
device="cuda",
)
else:
values = torch.randint(
2**16,
(int(offsets[-1].item()),),
dtype=jagged_tensor_dtype,
device="cuda",
)
weights = (
torch.rand(int(offsets[-1].item()), dtype=weight_dtype, device="cuda")
if has_weights
else None
)
# Only float tensors can require grad
if is_float:
values.requires_grad = True
time, output = benchmark_torch_function(
torch.ops.fbgemm.keyed_jagged_index_select_dim1,
(values, lengths, offsets, indices, input_batch_size, weights),
iters=1000,
)
output = output[0]
# Prepare inputs for the reference run
ref_inputs = []
for k in range(num_batches):
key_lengths = lengths[k * input_batch_size : (k + 1) * input_batch_size]
start_offset = offsets[k * input_batch_size]
end_offset = offsets[(k + 1) * input_batch_size]
key_values = values[start_offset:end_offset].view(-1, 1)
if has_weights:
# pyre-ignore[16]
key_weights = weights[start_offset:end_offset].view(-1, 1)
else:
key_weights = torch.empty(0)
ref_inputs.append((key_values, key_lengths, indices, key_weights))
def keyed_jagged_index_select_dim1_ref(
inputs: List[torch.Tensor],
has_weights: bool,
) -> Tuple[torch.Tensor, torch.Tensor]:
outputs = []
output_weights = []
for key_values, key_lengths, indices, _ in inputs:
outputs.append(
torch.ops.fbgemm.jagged_index_select(key_values, key_lengths, indices)[
0
].view(-1)
)
if has_weights:
for _, key_lengths, indices, key_weights in inputs:
output_weights.append(
torch.ops.fbgemm.jagged_index_select(
key_weights, key_lengths, indices
)[0].view(-1)
)
return torch.concat(outputs), torch.concat(
output_weights
) if has_weights else torch.empty(0)
time_ref, output_ref = benchmark_torch_function(
keyed_jagged_index_select_dim1_ref, (ref_inputs, has_weights)
)
output_ref = output_ref[0]
logging.info(
f"keyed_jagged_index_select_dim1 forward time: {time * 1e3} ms, ref {time_ref * 1e3}"
)
if not is_float:
return
grad = torch.rand_like(output)
time, _ = benchmark_torch_function(
functools.partial(output.backward, retain_graph=True), (grad,), iters=1000
)
time_ref, _ = benchmark_torch_function(
functools.partial(output_ref.backward, retain_graph=True), (grad,), iters=1000
)
logging.info(
f"keyed_jagged_index_select_dim1 backward time: {time * 1e3} ms, ref {time_ref * 1e3}"
)
@cli.command()
@click.option("--max-seq-length", type=int, default=400)
@click.option("--input-batch-size", type=int, default=1024)
@click.option("--slice-length", type=int, default=10)
@click.option("--jagged-tensor-type", type=str, default="float")
def jagged_slice_cpu(
max_seq_length: int,
input_batch_size: int,
slice_length: int,
jagged_tensor_type: str,
) -> None:
jagged_tensor_types = {
"float": torch.float,
"half": torch.half,
"int": torch.int,
"long": torch.long,
}
if jagged_tensor_type not in jagged_tensor_types.keys():
raise AssertionError(
f"--jagged-tensor-type ({jagged_tensor_type}) is not supported"
)
jagged_tensor_dtype = jagged_tensor_types[jagged_tensor_type]
is_float = jagged_tensor_dtype in [torch.float, torch.half]
lengths = torch.randint(
low=0,
high=max_seq_length,
size=(input_batch_size,),
dtype=torch.long,
)
start_list = [random.randint(0, max(len_ - 1, 0)) for len_ in lengths.tolist()]
start = torch.tensor(start_list)
offsets = torch.ops.fbgemm.asynchronous_complete_cumsum(lengths)
if is_float:
values = torch.rand(
int(offsets[-1].item()),
dtype=jagged_tensor_dtype,
)
else:
values = torch.randint(
2**16,
(int(offsets[-1].item()),),
dtype=jagged_tensor_dtype,
)
time, output = benchmark_torch_function(
torch.ops.fbgemm.jagged_slice,
(values, lengths, start, slice_length),
iters=1000,
)
def jagged_slice_ref(
x_values: torch.Tensor,
offsets: torch.Tensor,
start: torch.Tensor,
max_L: int,
) -> Tuple[torch.Tensor, torch.Tensor]:
end_offsets_ = max_L + start + offsets[:-1]
end_offsets = torch.where(end_offsets_ > offsets[1:], offsets[1:], end_offsets_)
start_offsets = start + offsets[:-1]
indices_to_select: List[torch.Tensor] = []
for i in range(end_offsets.size(0)):
indices_to_select.append(
torch.arange(start_offsets[i].item(), end_offsets[i].item())
)
output_ref = torch.index_select(x_values, 0, torch.cat(indices_to_select))
new_lengths = end_offsets - start_offsets
new_offsets = torch.ops.fbgemm.asynchronous_complete_cumsum(new_lengths)
return output_ref, new_offsets
time_ref, output = benchmark_torch_function(
jagged_slice_ref, (values, offsets, start, slice_length)
)
logging.info(f"jagged_slice forward time: {time * 1e3} ms, ref {time_ref * 1e3} ms")
profiler = profile(
activities=[
torch.profiler.ProfilerActivity.CPU,
torch.profiler.ProfilerActivity.CUDA,
],
schedule=torch.profiler.schedule(
wait=200,
warmup=100,
active=100,
),
record_shapes=True,
profile_memory=True,
with_stack=True,
with_flops=True,
)
profiler.start()
for _ in range(500):
torch.ops.fbgemm.jagged_slice(values, lengths, start, slice_length)
profiler.step()
profiler.stop()
logging.info(
"\n"
+ profiler.key_averages().table(sort_by="self_cuda_time_total", row_limit=10)
)
flops = sum(e.flops for e in profiler.events())
logging.info(f"Total Compute: {flops / 1e9} gflops")
if __name__ == "__main__":
cli()
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
# pyre-unsafe
import logging
import signal
from typing import List, Tuple
import click
import fbgemm_gpu
import numpy as np
import tabulate
import torch
from fbgemm_gpu.split_embedding_configs import SparseType
from fbgemm_gpu.split_table_batched_embeddings_ops_common import (
BoundsCheckMode,
EmbeddingLocation,
)
from fbgemm_gpu.split_table_batched_embeddings_ops_inference import (
IntNBitTableBatchedEmbeddingBagsCodegen,
)
from torch import Tensor
from torch.profiler import profile, ProfilerActivity
# pyre-fixme[16]: Module `fbgemm_gpu` has no attribute `open_source`.
open_source: bool = getattr(fbgemm_gpu, "open_source", False)
if open_source:
# pyre-ignore[21]
from bench_utils import benchmark_torch_function
else:
from fbgemm_gpu.bench.bench_utils import benchmark_torch_function
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:merge_pooled_embeddings")
torch.ops.load_library(
"//deeplearning/fbgemm/fbgemm_gpu:merge_pooled_embeddings_cpu"
)
def get_gpu_device(gpu_num) -> torch.device:
return torch.device(f"cuda:{gpu_num}")
# Merged indices with shape (T, B, L) -> (flattened indices with shape
# (T * B * L), offsets with shape (T * B + 1)).
# Reference: https://fburl.com/code/5ueyfv5j
def get_table_batched_offsets_from_dense(
merged_indices: torch.Tensor,
gpu_num,
) -> Tuple[torch.Tensor, torch.Tensor]:
(T, B, L) = merged_indices.size()
lengths = np.ones((T, B)) * L
flat_lengths = lengths.flatten()
return (
merged_indices.int().contiguous().view(-1).to(device=get_gpu_device(gpu_num)),
torch.tensor(
([0] + np.cumsum(flat_lengths).tolist()), device=get_gpu_device(gpu_num)
).int(),
)
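# Illustrative example (assumed values): for T=2 tables, B=2 batches and L=3 indices per
# bag, every bag has length 3, so the returned offsets are [0, 3, 6, 9, 12]
# (T * B + 1 entries) and the flattened indices tensor has T * B * L == 12 entries.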
# Reference: https://fburl.com/code/o5600si0
def generate_requests(
num_gpus: int,
B: int,
T: int,
L: int,
E: int,
# inter-batch indices reuse rate
reuse: float = 0.0,
) -> List[Tuple[torch.IntTensor, torch.IntTensor, None]]:
rs = []
for gpu_num in range(num_gpus):
all_indices = torch.randint(
low=0,
high=E,
size=(T, B, L),
device=get_gpu_device(gpu_num),
dtype=torch.int32,
)
# each bag is usually sorted
(all_indices, _) = torch.sort(all_indices)
all_indices = all_indices.reshape(T, B * L)
rs.append(
get_table_batched_offsets_from_dense(all_indices.view(T, B, L), gpu_num)
)
return rs
def _get_random_tensor(
num_ads: int,
embedding_dimension: int,
ads_tables: int,
data_type: str,
gpu_idx: int,
include_quantization: bool,
):
if data_type == "FP16" or include_quantization:
result_tensor = torch.randn(
num_ads,
embedding_dimension * ads_tables,
dtype=torch.float16,
device=torch.device(f"cuda:{gpu_idx}"),
)
elif data_type == "INT8":
assert (
embedding_dimension % 2
) == 0, "needs to align to 2 bytes (half type size) for INT8"
result_tensor = torch.randint(
0,
255,
# 2 FP16 numbers for scale and bias, total of 4 bytes overhead
size=(num_ads, (embedding_dimension + 4) * ads_tables),
dtype=torch.uint8,
device=torch.device(f"cuda:{gpu_idx}"),
)
elif data_type == "INT4":
assert (
embedding_dimension % 4
) == 0, "needs to align to 2 bytes (half type size) for INT4"
result_tensor = torch.randint(
0,
255,
# Using torch.uint8 for int4 storage
size=(num_ads, (embedding_dimension // 2 + 4) * ads_tables),
dtype=torch.uint8,
device=torch.device(f"cuda:{gpu_idx}"),
)
else:
raise ValueError
return result_tensor
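# Illustrative row-size arithmetic (assumed values): with embedding_dimension=128 and
# ads_tables=10, an INT8 row carries 128 quantized bytes plus 4 bytes of FP16 scale/bias
# per table, i.e. (128 + 4) * 10 == 1320 bytes per ad, while INT4 packs two values per
# byte for (128 // 2 + 4) * 10 == 680 bytes per ad.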
def generate_tbe(
batch_indices,
num_ads: int,
embedding_dimension: int,
num_of_embeddings: int,
pooling_factor: int,
ads_tables: int,
fused_tbe: bool,
data_type: str,
num_gpus: int,
):
B = num_ads
D = embedding_dimension
E = num_of_embeddings
L = pooling_factor
T = ads_tables
Ds = [D] * T
managed_option = EmbeddingLocation.DEVICE
output_dtype = SparseType.FP16
if fused_tbe:
assert data_type == "INT8" # INT4 not implemented yet
output_dtype = SparseType.INT8
emb = [
IntNBitTableBatchedEmbeddingBagsCodegen(
[
(
str(idx),
E,
d,
SparseType.INT4,
managed_option,
)
for d in Ds
],
output_dtype=output_dtype,
device=get_gpu_device(idx),
bounds_check_mode=BoundsCheckMode.NONE,
)
for idx in range(num_gpus)
]
for e in emb:
e.fill_random_weights()
requests = generate_requests(num_gpus, B, T, L, E)
# https://fburl.com/code/doxxjc8c
SIZE_OF_FLOAT = 4
num_elem_per_byte = 1 if data_type == "INT8" else 2
assert embedding_dimension % (2 * num_elem_per_byte) == 0
col_sizes = (
[
(embedding_dimension + num_elem_per_byte - 1) // num_elem_per_byte
+ 2 * SIZE_OF_FLOAT
]
* ads_tables
* num_gpus
)
offset = torch.tensor([0] + col_sizes, device=batch_indices.device)
tbe_offset = torch.cumsum(offset, dim=0).to(torch.int).cuda()
return emb, requests, tbe_offset
def print_p2p_bandwidth(
num_gpus, iters, pooled_ad_embeddings, bytes_per_element
) -> None:
print("Pairwise GPU Copy Bandwidth (GB/s)")
p2p_copy_bw = np.zeros((num_gpus, num_gpus))
for i in range(num_gpus):
for j in range(num_gpus):
with torch.cuda.device(i):
t, _ = benchmark_torch_function(
lambda: pooled_ad_embeddings[i].copy_(pooled_ad_embeddings[j])
if i != j
else pooled_ad_embeddings[i].clone(),
(),
flush_gpu_cache_size_mb=0,
iters=iters,
)
p2p_copy_bw[i, j] = (
pooled_ad_embeddings[i].numel() * bytes_per_element / t / 1.0e9
)
table = tabulate.tabulate(
p2p_copy_bw,
headers=[f"GPU {i}" for i in range(num_gpus)],
tablefmt="fancy_grid",
floatfmt=".0f",
)
print(table)
def benchmark( # noqa C901
all_to_one_only: bool,
sum_reduce_to_one_only: bool,
num_ads: int,
embedding_dimension: int,
ads_tables: int,
iters: int = 10,
p2p_bw: bool = False,
dst_device: int = 0,
data_type: str = "FP16",
mode: str = "P2P",
skip_dequantization: bool = False,
num_of_embeddings: int = 10000,
pooling_factor: int = 25,
) -> str:
assert torch.cuda.is_available()
torch.cuda.set_device(dst_device)
num_gpus = torch.cuda.device_count()
batch_indices = torch.zeros(num_ads).long().cuda()
include_quantization = not mode == "P2P"
    # Quantized P2P inputs are uint8 rows (INT4 is packed two-per-byte), so they count as 1 byte per element; FP16 inputs count as 2.
bytes_per_element = 2 if (data_type == "FP16" or include_quantization) else 1
total_elements = num_ads * embedding_dimension * ads_tables * num_gpus
logging.debug(
f"B: {num_ads}, D: {embedding_dimension}, T: {ads_tables}, Data Type: {data_type}, Num GPUs: {num_gpus}, Destination GPU: {dst_device}"
)
fused_tbe = mode == "P2P_FUSED_TBE"
include_tbe = fused_tbe or mode == "P2P_TBE"
if include_tbe:
emb, requests, tbe_offset = generate_tbe(
batch_indices,
num_ads,
embedding_dimension,
num_of_embeddings,
pooling_factor,
ads_tables,
fused_tbe,
data_type,
num_gpus,
)
pooled_ad_embeddings = [
_get_random_tensor(
num_ads,
embedding_dimension,
ads_tables,
data_type,
gpu_idx,
include_quantization,
)
for gpu_idx in range(num_gpus)
]
if p2p_bw:
print_p2p_bandwidth(num_gpus, iters, pooled_ad_embeddings, bytes_per_element)
def pool_func_with_quantization(
batch_indices,
include_quantization,
include_tbe,
fused_tbe,
skip_dequantization,
data_type,
):
if include_tbe:
embedding_results = []
for idx, (indices, offsets) in enumerate(requests):
with torch.cuda.device(idx):
embedding_results.append(emb[idx].forward(indices, offsets))
else:
embedding_results = pooled_ad_embeddings
if data_type == "FP16" or (not fused_tbe and not include_quantization):
if all_to_one_only:
return torch.ops.fbgemm.all_to_one_device(
pooled_ad_embeddings, batch_indices.device
)
elif sum_reduce_to_one_only:
return torch.ops.fbgemm.sum_reduce_to_one(
pooled_ad_embeddings, batch_indices.device
)
else:
return torch.ops.fbgemm.merge_pooled_embeddings(
embedding_results, batch_indices.size(0), batch_indices.device
)
assert data_type == "INT8" or data_type == "INT4"
assert not all_to_one_only # not supported
if fused_tbe:
pooled_quantized_result = torch.ops.fbgemm.merge_pooled_embeddings(
embedding_results, batch_indices.size(0), batch_indices.device
)
else:
quantized = []
for t in embedding_results:
t_split_by_table = torch.split(t, embedding_dimension, dim=1)
quantized_split_by_table = [
torch.ops.fbgemm.FloatToFused8BitRowwiseQuantized(t.float())
if data_type == "INT8"
else torch.ops.fbgemm.FloatToFusedNBitRowwiseQuantizedSBHalf(
t.float(), 4
)
for t in t_split_by_table
]
result = torch.cat(quantized_split_by_table, dim=1)
quantized.append(result)
pooled_quantized_result = torch.ops.fbgemm.merge_pooled_embeddings(
quantized, batch_indices.size(0), batch_indices.device
)
if skip_dequantization:
return pooled_quantized_result
PooledEmbeddingDequantizeDataTypeFP16 = 1
if data_type == "INT8":
return torch.ops.fbgemm.Fused8BitRowwiseQuantizedToFloatMixedDim(
pooled_quantized_result,
tbe_offset,
PooledEmbeddingDequantizeDataTypeFP16,
)
else:
# TODO: the result here is wrong. Once MixedDim version for FusedNBit quantization is done, switch to that.
# Since their performance is similar, keep using Fused8BitRowwiseQuantizedToHalf for now.
return torch.ops.fbgemm.Fused8BitRowwiseQuantizedToHalf(
pooled_quantized_result
).half()
streams = [torch.cuda.Stream(device=i) for i in range(num_gpus)]
import contextlib
with contextlib.ExitStack() as stack:
for stream in streams:
stack.enter_context(torch.cuda.stream(stream))
# warm up
merged = pool_func_with_quantization(
batch_indices,
include_quantization,
include_tbe,
fused_tbe,
skip_dequantization,
data_type,
)
if all_to_one_only:
merged = torch.stack(merged)
t, _ = benchmark_torch_function(
pool_func_with_quantization,
(
batch_indices,
include_quantization,
include_tbe,
fused_tbe,
skip_dequantization,
data_type,
),
flush_gpu_cache_size_mb=0,
iters=iters,
)
with profile(activities=[ProfilerActivity.CUDA]) as prof:
pool_func_with_quantization(
batch_indices,
include_quantization,
include_tbe,
fused_tbe,
skip_dequantization,
data_type,
)
print(prof.key_averages().table(sort_by="cuda_time_total", row_limit=10))
if isinstance(merged, Tensor):
# all_to_one_only returns a list of tensors,
# otherwise, it's a Tensor.
merged = [merged]
output_num_el = sum([a.numel() for a in merged])
# Assume tensors gathered are all the same size.
num_el_transferred = output_num_el * (num_gpus - 1) / num_gpus
logging.debug(
f"Mode: {mode}, Data Type: {data_type}, B: {num_ads}, D: {embedding_dimension}, T: {ads_tables}, Num GPUs: {num_gpus}, Destination GPU: {dst_device}, all_to_one_only: {all_to_one_only}, "
f"Number of elements: {total_elements / 1.0e6:.2f}, Million, Number of elements per GPU: {total_elements / 1.0e6 / num_gpus:.2f}, Billion elements per sec: {total_elements / t / 1.0e9:.1f}, "
f"Output Size: {output_num_el * bytes_per_element / 1.0e6:.0f}MB, Num elements transferred: {num_el_transferred / 1.0e6}, All-to-one BW: {output_num_el * bytes_per_element / t / 1.0e9:.1f}GB/s, link BW: {num_el_transferred * bytes_per_element / t / 1.0e9:.1f}GB/s, "
f"t: {t * 1.0e3:.2f}ms"
)
# return result in CSV format
return (
f"{mode}, {data_type}, {num_ads}, {embedding_dimension}, {ads_tables}, {num_gpus}, {dst_device}, {all_to_one_only}, "
f"{total_elements / 1.0e6:.2f}, {total_elements / 1.0e6 / num_gpus:.2f}, {total_elements / 1.0e9 / t:.1f}, "
f"{output_num_el * bytes_per_element / 1.0e6:.0f}, {output_num_el * bytes_per_element / t / 1.0e9:.1f}, "
f"{num_el_transferred * bytes_per_element / 1.0e9 / t:.1f}, "
f"{t * 1.0e3:.2f}"
)
@click.command()
@click.option("--all-to-one-only", is_flag=True, default=False)
@click.option("--sum-reduce-to-one-only", is_flag=True, default=False)
@click.option("--num_ads", default=1024, type=int)
@click.option("--embedding_dimension", default=300, type=int)
@click.option("--ads_tables", default=100, type=int)
@click.option("--iters", default=10, type=int)
@click.option("--p2p_bw", is_flag=True, default=False)
@click.option("--dst_device", default=0, type=int)
@click.option(
"--data_type",
type=click.Choice(["FP16", "INT8", "INT4"]),
default="FP16",
)
# P2P: merge_pooled_embeddings() or all_to_one_device() for tensor with "--data_type"
# P2P_QUANT: for INT8/INT4 data type, start with FP16, then quantize -> P2P -> dequantize to FP16
# P2P_TBE: add TBE in front of P2P_QUANT. When "--data_type" is FP16, the flow is TBE -> P2P; for INT8/INT4, the flow is TBE -> quantize -> P2P -> dequantize
# P2P_FUSED_TBE: similar to P2P_TBE except fuse the quantization into TBE
@click.option(
"--mode",
type=click.Choice(["P2P", "P2P_QUANT", "P2P_TBE", "P2P_FUSED_TBE"]),
default="P2P",
)
# For quantized communication, whether to dequantize back to FP16 at the end.
@click.option("--skip_dequantization", is_flag=True, default=False)
@click.option("--num_of_embeddings", default=100000, type=int)
@click.option("--pooling_factor", default=25, type=int)
@click.option("--sweep", is_flag=True, default=False)
def main(
all_to_one_only: bool,
sum_reduce_to_one_only: bool,
num_ads: int,
embedding_dimension: int,
ads_tables: int,
iters: int,
p2p_bw: bool,
dst_device: int,
data_type: str,
mode: str,
skip_dequantization: bool,
num_of_embeddings: int,
pooling_factor: int,
sweep: bool,
) -> None:
csv_header = (
"mode, data_type, num_ads, embedding_dimension, ads_tables, num_gpus, dst_device, all_to_one_only, "
"number of elements (Million), number of elements per GPU (Million), throughput (billion elements per sec), "
"output size (MB), all-to-one BW (GB/s), link BW (GB/s), t (ms)"
)
if sweep:
def handler(signum, frame):
logging.error("timeout")
raise TimeoutError()
results = []
num_gpu = torch.cuda.device_count()
for num_ads in [128, 256, 512, 1024, 2048]:
            # Scale num_ads so that each GPU count sweeps through the same total number of elements
num_ads *= 8 // num_gpu
for embedding_dimension in [16, 64, 112, 304]:
for ads_tables in [25, 50, 100, 400, 800]:
if num_ads * embedding_dimension * ads_tables > 983040000:
continue # Skip tests that are too large
                    signal.signal(signal.SIGALRM, handler)  # alarm() delivers SIGALRM, so register the handler for it
signal.alarm(600)
logging.info(
f"config: num_ads: {num_ads}, embedding_dimension: {embedding_dimension}, ads_tables: {ads_tables}"
)
try:
result = benchmark(
all_to_one_only,
sum_reduce_to_one_only,
num_ads,
embedding_dimension,
ads_tables,
iters,
p2p_bw,
dst_device,
data_type,
mode,
skip_dequantization,
num_of_embeddings,
pooling_factor,
)
results.append(result)
except (TimeoutError, RuntimeError) as err:
logging.error(
f"B: {num_ads}, D: {embedding_dimension}, T: {ads_tables}, Data Type: {data_type}, Num GPU: {num_gpu}, time out or failed: {err}"
)
print(csv_header)
print(*results, sep="\n")
return
result = benchmark(
all_to_one_only,
sum_reduce_to_one_only,
num_ads,
embedding_dimension,
ads_tables,
iters,
p2p_bw,
dst_device,
data_type,
mode,
skip_dequantization,
num_of_embeddings,
pooling_factor,
)
print(csv_header)
print(result)
if __name__ == "__main__":
main()
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import json
import subprocess
def run(args):
with open(args.shapes_file, "r") as f:
shapes = json.load(f)
num_embeddings_list = ",".join([str(shape[0]) for shape in shapes])
embedding_dims_list = ",".join([str(shape[1]) for shape in shapes])
cmds = [
args.python,
args.benchmark_path,
args.benchmark_cmd,
"--batch-size",
str(args.batch_size),
"--bag-size-list",
str(args.bag_size),
"--embedding-dim-list",
embedding_dims_list,
"--num-embeddings-list",
num_embeddings_list,
"--weights-precision",
args.weights_precision,
"--output-dtype",
args.output_dtype,
"--warmup-runs",
str(args.warmup_runs),
"--runs-of-iters",
str(args.warmup_runs + args.test_runs),
]
if not args.use_gpu:
cmds.append("--use-cpu")
if args.dry_run:
print("Command to be executed:")
print(" ".join(cmds))
return 0
p = subprocess.Popen(
cmds, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True
)
output = ""
for line in iter(p.stdout.readline, ""):
print(line, end="")
if args.output:
output += line
p.stdout.close()
p.wait()
if args.output:
with open(args.output, "w") as outf:
outf.write(output)
return p.returncode
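# Example usage (file names assumed): a shapes file containing
# [[1000000, 128], [500000, 64]] is translated into
# "--num-embeddings-list 1000000,500000 --embedding-dim-list 128,64", so the wrapper
# could be invoked as
#   python3 run_with_shapes.py --shapes-file shapes.json --use-gpu --output out.log
# (the wrapper script name here is illustrative; use whatever this file is saved as).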
if __name__ == "__main__":
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument(
"--python",
type=str,
default="python3.10",
help="The python interpreter used to run the benchmark",
)
parser.add_argument(
"--benchmark-path",
type=str,
default="split_table_batched_embeddings_benchmark.py",
help="Path to the benchmark script",
)
parser.add_argument(
"--benchmark-cmd",
type=str,
default="nbit-device-with-spec",
help="The subcommand of the benchmark",
)
parser.add_argument("--batch-size", type=int, default=32, help="Batch size")
parser.add_argument(
"--bag-size", type=int, default=13, help="Bag size or pooling factor"
)
parser.add_argument(
"--shapes-file",
type=str,
required=True,
help="Path to the JSON file that describes a list of shapes [rows, embedding-dims]. "
+ "Its content should look like '[[123, 2], [456, 16], [789, 16], ...]'",
)
parser.add_argument(
"--weights-precision",
type=str,
default="fp16",
help="Weight data type",
)
parser.add_argument(
"--output-dtype", type=str, default="fp16", help="Output data type"
)
parser.add_argument(
"--warmup-runs", type=int, default=5, help="Number of warmup runs"
)
parser.add_argument("--test-runs", type=int, default=5, help="Number of test runs")
parser.add_argument(
"--output", type=str, default="", help="Also log the benchmark output to a file"
)
parser.add_argument("--use-gpu", action="store_true", help="Use GPU instead of CPU")
parser.add_argument(
"--dry-run",
action="store_true",
help="Only print out the command that will execute",
)
args = parser.parse_args()
returncode = run(args)
exit(returncode)
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
# pyre-unsafe
import functools
from math import sqrt
from typing import List, Tuple
import click
import fbgemm_gpu
import fbgemm_gpu.batched_unary_embeddings_ops as batched_unary_embeddings_ops
import numpy as np
import torch
# pyre-fixme[16]: Module `fbgemm_gpu` has no attribute `open_source`.
open_source: bool = getattr(fbgemm_gpu, "open_source", False)
if open_source:
# pyre-ignore[21]
from bench_utils import benchmark_torch_function
else:
from fbgemm_gpu.bench.bench_utils import benchmark_torch_function
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops")
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops_cpu")
def generate_unary_feature(
batch_size: int, num_embeddings: int
) -> Tuple[List, List, List]:
lengths = []
offsets = []
indices = []
offset = 0
for _ in range(batch_size):
n_indices = 1
indices += (
np.round(np.random.random(n_indices) * (num_embeddings - 1))
.astype(int)
.tolist()
)
offsets.append(offset)
offset += 1
lengths.append(n_indices)
offsets.append(offset)
return (lengths, offsets, indices)
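# Illustrative example (assumed values): generate_unary_feature(batch_size=3,
# num_embeddings=10) returns lengths == [1, 1, 1], offsets == [0, 1, 2, 3] and three
# random indices in [0, 9], i.e. exactly one categorical value per sample.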
class MyModule(torch.nn.Module):
def __init__(self, num_tasks: int, hash_sizes: List[int]) -> None:
super().__init__()
self.num_tasks = num_tasks
self.hash_sizes = hash_sizes
self.emb_modules = torch.nn.ModuleList()
for _ in range(num_tasks):
for h in self.hash_sizes:
emb = torch.nn.EmbeddingBag(
num_embeddings=h,
embedding_dim=1,
mode="sum",
sparse=False,
include_last_offset=True,
)
emb.weight = torch.nn.Parameter(
torch.empty([h, 1]).uniform_(-sqrt(1 / h), sqrt(1 / h))
)
self.emb_modules.append(emb)
def forward(
self, offsets: List[torch.Tensor], indices: List[torch.Tensor]
) -> torch.Tensor:
tt_list = []
for n in range(self.num_tasks):
t_list = []
for i in range(len(self.hash_sizes)):
t = self.emb_modules[n * len(self.hash_sizes) + i](
offsets=offsets[i].long(), input=indices[i].long()
)
t_list.append(t)
tt = torch.cat(t_list, dim=1)
tt_list.append(tt)
return torch.cat(tt_list).view(self.num_tasks, -1, len(self.hash_sizes))
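# Note (added for clarity): this reference module mirrors BatchedUnaryEmbeddingBag and
# returns a (num_tasks, batch_size, num_tables) tensor, e.g. (3, 512, 2) with the
# default CLI options below, which is what the assert_close calls in main() compare.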
@click.command()
@click.option("--batch-size", default=512)
@click.option("--num-tables", default=2)
@click.option("--num-tasks", default=3)
@click.option("--repeats", default=100)
def main(batch_size, num_tables, num_tasks, repeats) -> None:
device = torch.device("cuda", 0)
torch.cuda.set_device(device)
    hash_sizes = list(np.random.choice(range(50, 250), size=num_tables))
lengths = []
offsets = []
indices = []
for h in hash_sizes:
l, o, i = generate_unary_feature(batch_size, h)
lengths.append(torch.IntTensor(l).to(device))
offsets.append(torch.IntTensor(o).to(device))
indices.append(torch.IntTensor(i).to(device))
lengths_tensor = torch.cat(lengths)
indices_tensor = torch.cat(indices)
offsets_tensor = torch.zeros(
lengths_tensor.numel() + 1,
dtype=lengths_tensor.dtype,
device=lengths_tensor.device,
)
offsets_tensor[1:] = torch.ops.fbgemm.asynchronous_inclusive_cumsum(
lengths_tensor.view(-1)
)
# forward
ref_emb = MyModule(num_tasks, hash_sizes).to(device)
unary_emb = batched_unary_embeddings_ops.BatchedUnaryEmbeddingBag(
num_tasks, hash_sizes
).to(device)
for i, param in enumerate(unary_emb.split_embedding_weights()):
param.detach().copy_(ref_emb.emb_modules[i].weight)
output_ref = ref_emb(offsets, indices)
output = unary_emb(offsets_tensor, indices_tensor)
torch.testing.assert_close(output_ref, output)
# backward
d_output = torch.randn([num_tasks, batch_size, len(hash_sizes)]).to(device) * 0.1
output_ref.backward(d_output)
output.backward(d_output)
d_weight_ref = []
for emb in ref_emb.emb_modules:
d_weight_ref.append(emb.weight.grad)
d_weight_ref = torch.cat(d_weight_ref).view(num_tasks, -1)
d_weight = unary_emb.weight.grad
# pyre-fixme[16]: Optional type has no attribute `squeeze`.
torch.testing.assert_close(d_weight_ref, d_weight.squeeze())
    # benchmark_torch_function's default flushes a 40 MB buffer between iterations,
    # matching the A100's 40 MB L2 cache
elapse, _ = benchmark_torch_function(ref_emb, (offsets, indices), iters=repeats)
print("PyTorch EmbeddingBag forward", elapse)
elapse, _ = benchmark_torch_function(
unary_emb,
(offsets_tensor, indices_tensor),
iters=repeats,
)
print("Batched Unary Emb forward", elapse)
output = ref_emb(offsets, indices)
output.backward(d_output, retain_graph=True)
elapse, _ = benchmark_torch_function(
functools.partial(output.backward, retain_graph=True),
(d_output,),
iters=repeats,
)
print("PyTorch EmbeddingBag backward", elapse)
output = unary_emb(offsets_tensor, indices_tensor)
elapse, _ = benchmark_torch_function(
functools.partial(output.backward, retain_graph=True),
(d_output,),
iters=repeats,
)
print("Batched Unary Emb backward", elapse)
if __name__ == "__main__":
main()
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import copy
import logging
import statistics
import threading
import time
from dataclasses import dataclass
from typing import Callable, List, Optional, Tuple
import torch
from fbgemm_gpu.split_embedding_utils import ( # noqa: F401
b_indices,
generate_requests, # noqa: F401
get_device, # noqa: F401
round_up, # noqa: F401
)
logging.basicConfig(level=logging.DEBUG)
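# Time f(*args) and return (seconds per iteration, last output). On GPU with a single thread
# the timing uses CUDA events and optionally flushes the L2 cache by zeroing a scratch buffer
# between iterations; with num_threads > 1 each thread replays the function on its own CUDA
# stream; otherwise it falls back to CPU wall-clock time.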
def benchmark_torch_function( # noqa: C901
# pyre-fixme[2]: Parameter must be annotated.
f,
# pyre-fixme[2]: Parameter must be annotated.
args,
flush_gpu_cache_size_mb: int = 40,
iters: int = 10,
num_warmups: int = 2,
device: str = "cuda",
name: str = "",
num_threads: int = 1,
copy_f_for_multi_thread_test: bool = False,
) -> Tuple[float, torch.Tensor]:
logging.info(f"Start to benchmark {name}...")
if device != "" and device != "cuda":
torch.cuda.set_device(device)
for _ in range(num_warmups):
output = f(*args)
assert num_threads > 0
if torch.cuda.is_available() and (num_threads == 1):
cache = torch.empty(
int(flush_gpu_cache_size_mb * 1024 * 1024 // 4),
dtype=torch.float,
device=device,
)
start_event = [torch.cuda.Event(enable_timing=True) for i in range(iters)]
end_event = [torch.cuda.Event(enable_timing=True) for i in range(iters)]
torch.cuda.synchronize(device)
for i in range(iters):
# flush the cache
if flush_gpu_cache_size_mb:
cache.zero_()
start_event[i].record()
with torch.cuda.nvtx.range(f"RunCudaModule_{name}"):
output = f(*args)
end_event[i].record()
torch.cuda.synchronize(device)
times = torch.tensor(
[s.elapsed_time(e) for s, e in zip(start_event, end_event)]
)
elapsed_time = torch.mean(times).item() * 1.0e-3
elif torch.cuda.is_available() and (num_threads > 1):
cache = torch.empty(
int(flush_gpu_cache_size_mb * 1024 * 1024 // 4),
dtype=torch.float,
device=device,
)
duration_ms_list: List[float] = []
f_list = [f]
# make deepcopy of f if necessary
for _ in range(num_threads - 1):
f_list.append(copy.deepcopy(f) if copy_f_for_multi_thread_test else f)
@torch.inference_mode()
# pyre-ignore[53]
def forward(idx: int) -> None:
stream = torch.cuda.Stream()
f_temp = f_list[idx]
start_event = [
torch.cuda.Event(enable_timing=True)
for i in range(iters // num_threads)
]
end_event = [
torch.cuda.Event(enable_timing=True)
for i in range(iters // num_threads)
]
torch.cuda.synchronize(device)
with torch.cuda.stream(stream):
for i in range(iters // num_threads):
# flush the cache
if flush_gpu_cache_size_mb:
cache.zero_()
start_event[i].record()
with torch.cuda.nvtx.range(f"RunCudaModule_{name}"):
_ = f_temp(*args)
end_event[i].record()
torch.cuda.synchronize(device)
times = torch.tensor(
[s.elapsed_time(e) for s, e in zip(start_event, end_event)]
)
duration_ms = torch.sum(times).item()
duration_ms_list.append(duration_ms)
threads = [
threading.Thread(target=forward, args=(idx,)) for idx in range(num_threads)
]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
elapsed_time = sum(duration_ms_list) * 1.0e-3 / num_threads / iters
torch.cuda.synchronize(device)
if copy_f_for_multi_thread_test:
# clean the copies of f and clean the HBM cache
for idx in reversed(range(num_threads - 1)):
del f_list[idx + 1]
torch.cuda.empty_cache()
else:
start_time = time.time()
for _ in range(iters):
with torch.cuda.nvtx.range(f"RunCPUModule_{name}"):
output = f(*args)
elapsed_time = (time.time() - start_time) / iters
# pyre-fixme[61]: `output` is undefined, or not always defined.
return float(elapsed_time), output
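# Time func over a list of (indices, offsets, per_sample_weights) requests. With bwd_only=True
# the forward pass runs outside the timed region and only out.backward(grad) is measured.
# Returns the median per-request time if check_median is set, otherwise the mean.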
def benchmark_requests(
requests: List[Tuple[torch.IntTensor, torch.IntTensor, Optional[torch.Tensor]]],
func: Callable[[torch.Tensor, torch.Tensor, Optional[torch.Tensor]], torch.Tensor],
flush_gpu_cache_size_mb: int = 0,
check_median: bool = False,
num_warmups: int = 0,
bwd_only: bool = False,
grad: Optional[torch.Tensor] = None,
    # Used to label benchmark iterations in the nsys profile results so that, for example,
    # the performance of two different models can be compared.
    # If an empty string is provided, it has no effect.
nvtx_range: str = "",
# Can be used to clear model's stats after warmup for example.
callback_after_warmup: Optional[Callable[[], None]] = None,
) -> float:
times = []
if num_warmups > 0:
indices, offsets, weights = requests[0]
for _ in range(num_warmups):
out = func(indices, offsets, weights)
if bwd_only:
out.backward(grad)
if callback_after_warmup is not None:
callback_after_warmup()
if torch.cuda.is_available():
torch.cuda.synchronize()
start_event = torch.cuda.Event(enable_timing=True)
end_event = torch.cuda.Event(enable_timing=True)
for it, (indices, offsets, weights) in enumerate(requests):
if bwd_only:
            # Run the forward pass outside the timed region when only the backward pass is benchmarked
out = func(indices, offsets, weights)
start_time = time.time()
if torch.cuda.is_available():
if flush_gpu_cache_size_mb:
_ = torch.rand(
flush_gpu_cache_size_mb * 1024 * 1024 // 4,
dtype=torch.float,
device="cuda",
)
torch.cuda.synchronize()
start_event.record()
if nvtx_range:
torch.cuda.nvtx.range_push(f"{nvtx_range}-{it}")
if bwd_only:
out.backward(grad)
else:
func(indices, offsets, weights)
if nvtx_range:
torch.cuda.nvtx.range_pop()
if torch.cuda.is_available():
end_event.record()
torch.cuda.synchronize()
it_time = start_event.elapsed_time(end_event) * 1.0e-3
times.append(it_time)
else:
it_time = time.time() - start_time
times.append(it_time)
avg_time = sum(times) / len(requests)
median_time = statistics.median(times)
return median_time if check_median else avg_time
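# Reference benchmark that uses plain torch.nn.Embedding(Bag) modules (a single module
# replicated T times) instead of the fused TBE operator, for bandwidth comparison.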
def benchmark_requests_refer(
requests: List[Tuple[torch.IntTensor, torch.IntTensor, Optional[torch.Tensor]]],
T: int,
B: int,
L: int,
E: int,
D: int,
pooling_mode: str,
weighted: bool,
flush_gpu_cache_size_mb: int = 0,
check_median: bool = False,
) -> float:
do_pooling = pooling_mode in ["sum", "mean"]
if do_pooling:
nn_embedding_list = [
torch.nn.EmbeddingBag(E, D, mode=pooling_mode, sparse=True).cuda()
] * T
else:
nn_embedding_list = [torch.nn.Embedding(E, D, sparse=True).cuda()] * T
times = []
if torch.cuda.is_available():
torch.cuda.synchronize()
start_event = torch.cuda.Event(enable_timing=True)
end_event = torch.cuda.Event(enable_timing=True)
for indices, _, weights in requests:
indices_list = indices.view(T, B, L).split(1)
if weighted:
assert weights is not None
weights_list = weights.view(T, B, L).split(1)
start_time = time.time()
if torch.cuda.is_available():
if flush_gpu_cache_size_mb:
_ = torch.rand(
flush_gpu_cache_size_mb * 1024 * 1024 // 4,
dtype=torch.float,
device="cuda",
)
torch.cuda.synchronize()
start_event.record()
nn_embedding_output = (
[
b_indices(nn_embedding, x, use_cpu=False, do_pooling=do_pooling)
for (nn_embedding, x) in zip(nn_embedding_list, indices_list)
]
if not weighted
else [
b_indices(
nn_embedding,
x,
per_sample_weights=xw.view(-1),
use_cpu=False,
do_pooling=do_pooling,
)
for (nn_embedding, x, xw) in zip(
nn_embedding_list,
indices_list,
# pyre-fixme[61]: `weights_list` is undefined, or not always
# defined.
weights_list,
)
]
)
if do_pooling:
final_output = torch.cat(
[f.view(B, -1) for f in nn_embedding_output], dim=1
)
else:
final_output = torch.cat(nn_embedding_output, dim=0).view( # noqa: F841
-1, D
)
if torch.cuda.is_available():
end_event.record()
torch.cuda.synchronize()
it_time = start_event.elapsed_time(end_event) * 1.0e-3
times.append(it_time)
else:
it_time = time.time() - start_time
times.append(it_time)
avg_time = sum(times) / len(requests)
median_time = statistics.median(times)
return median_time if check_median else avg_time
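# Time two functions back to back per request (e.g. prefetch followed by forward/backward)
# with separate CUDA event pairs, returning a (func1_time, func2_time) tuple of means or
# medians depending on check_median.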
def benchmark_pipelined_requests(
requests: List[Tuple[torch.IntTensor, torch.IntTensor, Optional[torch.Tensor]]],
func1: Callable[[torch.Tensor, torch.Tensor, Optional[torch.Tensor]], None],
func2: Callable[[torch.Tensor, torch.Tensor, Optional[torch.Tensor]], None],
flush_gpu_cache_size_mb: int = 0,
check_median: bool = False,
) -> Tuple[float, float]:
torch.cuda.synchronize()
start_events = [
(torch.cuda.Event(enable_timing=True), torch.cuda.Event(enable_timing=True))
for _ in requests
]
end_events = [
(torch.cuda.Event(enable_timing=True), torch.cuda.Event(enable_timing=True))
for _ in requests
]
for (indices, offsets, indices_weights), start_event, end_event in zip(
requests, start_events, end_events
):
if flush_gpu_cache_size_mb:
_ = torch.rand(
flush_gpu_cache_size_mb * 1024 * 1024 // 4,
dtype=torch.float,
device="cuda",
)
torch.cuda.synchronize()
start_event[0].record()
func1(indices, offsets, indices_weights)
end_event[0].record()
start_event[1].record()
func2(indices, offsets, indices_weights)
end_event[1].record()
torch.cuda.synchronize()
avg_time = (
sum(
start_event[0].elapsed_time(end_event[0]) * 1.0e-3
for start_event, end_event in zip(start_events, end_events)
)
/ len(requests),
sum(
start_event[1].elapsed_time(end_event[1]) * 1.0e-3
for start_event, end_event in zip(start_events, end_events)
)
/ len(requests),
)
median_time = (
statistics.median(
start_event[0].elapsed_time(end_event[0]) * 1.0e-3
for start_event, end_event in zip(start_events, end_events)
),
statistics.median(
start_event[1].elapsed_time(end_event[1]) * 1.0e-3
for start_event, end_event in zip(start_events, end_events)
),
)
return median_time if check_median else avg_time
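# Median timings (in seconds) collected by benchmark_vbe below. Despite the name, "avg" and
# "compressed_avg" hold the median of the total per-request time, not the mean.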
@dataclass
class VBEBenchmarkOutput:
avg: float
fwd: float
bwd: float
compressed_avg: float
compressed_fwd: float
reindex: float
compressed_bwd: float
def benchmark_vbe(
baseline_requests: List[Tuple[torch.Tensor, torch.Tensor]],
compressed_requests: List[Tuple[torch.Tensor, torch.Tensor]],
baseline_func: Callable[[torch.Tensor, torch.Tensor], torch.Tensor],
compressed_func: Callable[[torch.Tensor, torch.Tensor], torch.Tensor],
reindex: torch.Tensor,
embedding_dim: int,
) -> VBEBenchmarkOutput:
times = []
fwd_times = []
bwd_times = []
torch.cuda.synchronize()
start_event = torch.cuda.Event(enable_timing=True)
end_event = torch.cuda.Event(enable_timing=True)
for indices, offsets in baseline_requests:
time = 0.0
start_event.record()
# forward
out = baseline_func(indices, offsets)
end_event.record()
torch.cuda.synchronize()
it_time = start_event.elapsed_time(end_event) * 1.0e-3
fwd_times.append(it_time)
time += it_time
grad = torch.rand_like(out)
start_event.record()
# backward
out.backward(grad)
end_event.record()
torch.cuda.synchronize()
it_time = start_event.elapsed_time(end_event) * 1.0e-3
bwd_times.append(it_time)
time += it_time
times.append(time)
avg = statistics.median(times)
fwd = statistics.median(fwd_times)
bwd = statistics.median(bwd_times)
times.clear()
fwd_times.clear()
bwd_times.clear()
reindex_times = []
torch.cuda.synchronize()
start_event = torch.cuda.Event(enable_timing=True)
end_event = torch.cuda.Event(enable_timing=True)
for indices, offsets in compressed_requests:
time = 0.0
start_event.record()
# forward
out = compressed_func(indices, offsets)
end_event.record()
torch.cuda.synchronize()
it_time = start_event.elapsed_time(end_event) * 1.0e-3
fwd_times.append(it_time)
time += it_time
start_event.record()
# reindex
out = out.reshape(-1, embedding_dim)
out = torch.ops.fbgemm.index_select_dim0(out, reindex)
end_event.record()
torch.cuda.synchronize()
it_time = start_event.elapsed_time(end_event) * 1.0e-3
reindex_times.append(it_time)
time += it_time
grad = torch.rand_like(out)
start_event.record()
# backward
out.backward(grad)
end_event.record()
torch.cuda.synchronize()
it_time = start_event.elapsed_time(end_event) * 1.0e-3
bwd_times.append(it_time)
time += it_time
times.append(time)
compressed_avg = statistics.median(times)
compressed_fwd = statistics.median(fwd_times)
reindex = statistics.median(reindex_times)
compressed_bwd = statistics.median(bwd_times)
return VBEBenchmarkOutput(
avg, fwd, bwd, compressed_avg, compressed_fwd, reindex, compressed_bwd
)
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import json
import logging
import math
import os
import random
import statistics
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, Tuple
import click
import fbgemm_gpu
import numpy as np
import torch
from fbgemm_gpu.split_embedding_configs import EmbOptimType as OptimType, SparseType
from fbgemm_gpu.split_embedding_utils import generate_requests, get_device, round_up
from fbgemm_gpu.split_table_batched_embeddings_ops_common import (
BoundsCheckMode,
CacheAlgorithm,
EmbeddingLocation,
PoolingMode,
RecordCacheMetrics,
)
from fbgemm_gpu.split_table_batched_embeddings_ops_inference import (
IntNBitTableBatchedEmbeddingBagsCodegen,
rounded_row_size_in_bytes,
)
from fbgemm_gpu.split_table_batched_embeddings_ops_training import (
ComputeDevice,
DenseTableBatchedEmbeddingBagsCodegen,
SplitTableBatchedEmbeddingBagsCodegen,
)
from torch import Tensor
haveAIBench = False
try:
from aibench_observer.utils.observer import emitMetric
haveAIBench = True
except Exception:
haveAIBench = False
# pyre-fixme[16]: Module `fbgemm_gpu` has no attribute `open_source`.
open_source: bool = getattr(fbgemm_gpu, "open_source", False)
if open_source:
# pyre-ignore[21]
from bench_utils import (
benchmark_pipelined_requests,
benchmark_requests,
benchmark_requests_refer,
benchmark_torch_function,
benchmark_vbe,
)
else:
from fbgemm_gpu.bench.bench_utils import (
benchmark_pipelined_requests,
benchmark_requests,
benchmark_requests_refer,
benchmark_torch_function,
benchmark_vbe,
)
logging.basicConfig(level=logging.DEBUG)
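# Each subcommand below benchmarks a different table-batched embedding (TBE) configuration:
# training ops on device/UVM/cached-UVM memory and int-N inference ops on CPU/GPU/UVM.
# Example invocation (script name assumed here; the flags are the click options defined below):
#   python split_table_batched_embeddings_benchmark.py device --batch-size 512 --num-tables 32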
@click.group()
def cli() -> None:
pass
@cli.command()
# recommended value: alpha=1.15 for training and alpha=1.09 for inference
@click.option("--alpha", default=1.0)
@click.option("--bag-size", default=20)
@click.option("--batch-size", default=512)
@click.option("--embedding-dim", default=128)
@click.option("--weights-precision", type=SparseType, default=SparseType.FP32)
@click.option("--stoc", is_flag=True, default=False)
@click.option("--iters", default=100)
@click.option("--warmup-runs", default=0)
@click.option("--managed", default="device")
@click.option("--mixed", is_flag=True, default=False)
@click.option("--num-embeddings", default=int(1e5))
@click.option("--num-tables", default=32)
@click.option("--reuse", default=0.0)
@click.option("--row-wise/--no-row-wise", default=True)
@click.option("--weighted", is_flag=True, default=False)
@click.option("--pooling", type=str, default="sum")
@click.option("--weighted-num-requires-grad", type=int, default=None)
@click.option("--bounds-check-mode", type=int, default=BoundsCheckMode.NONE.value)
@click.option("--flush-gpu-cache-size-mb", default=0)
@click.option("--dense", is_flag=True, default=False)
@click.option("--output-dtype", type=SparseType, default=SparseType.FP32)
@click.option("--requests_data_file", type=str, default=None)
@click.option("--tables", type=str, default=None)
def device( # noqa C901
alpha: float,
bag_size: int,
batch_size: int,
embedding_dim: int,
weights_precision: SparseType,
stoc: bool,
iters: int,
warmup_runs: int,
managed: str,
mixed: bool,
num_embeddings: int,
num_tables: int,
reuse: float,
row_wise: bool,
weighted: bool,
pooling: str,
weighted_num_requires_grad: Optional[int],
bounds_check_mode: int,
flush_gpu_cache_size_mb: int,
dense: bool,
output_dtype: SparseType,
requests_data_file: Optional[str],
tables: Optional[str],
) -> None:
np.random.seed(42)
torch.manual_seed(42)
B = batch_size
D = embedding_dim
L = bag_size
E = num_embeddings
T = num_tables
if weighted_num_requires_grad:
assert weighted_num_requires_grad <= T
weighted_requires_grad_tables = np.random.choice(
T, replace=False, size=(weighted_num_requires_grad,)
).tolist()
feature_requires_grad = (
torch.tensor(
[1 if t in weighted_requires_grad_tables else 0 for t in range(T)]
)
.to(get_device())
.int()
)
else:
feature_requires_grad = None
if mixed:
Ds = [
round_up(np.random.randint(low=int(0.5 * D), high=int(1.5 * D)), 4)
for _ in range(T)
]
D = np.average(Ds)
else:
Ds = [D] * T
optimizer = OptimType.EXACT_ROWWISE_ADAGRAD if row_wise else OptimType.EXACT_ADAGRAD
if managed == "device":
managed_option = (
EmbeddingLocation.DEVICE
if torch.cuda.is_available()
else EmbeddingLocation.HOST
)
else:
managed_option = EmbeddingLocation.MANAGED
if pooling is None or pooling == "sum":
pooling = "sum"
pooling_mode = PoolingMode.SUM
do_pooling = True
elif pooling == "mean":
pooling_mode = PoolingMode.MEAN
do_pooling = True
else: # "none"
pooling_mode = PoolingMode.NONE
do_pooling = False
if dense:
emb = DenseTableBatchedEmbeddingBagsCodegen(
[
(
E,
d,
)
for d in Ds
],
pooling_mode=pooling_mode,
use_cpu=not torch.cuda.is_available(),
)
else:
emb = SplitTableBatchedEmbeddingBagsCodegen(
[
(
E,
d,
managed_option,
ComputeDevice.CUDA
if torch.cuda.is_available()
else ComputeDevice.CPU,
)
for d in Ds
],
optimizer=optimizer,
learning_rate=0.1,
eps=0.1,
weights_precision=weights_precision,
stochastic_rounding=stoc,
output_dtype=output_dtype,
pooling_mode=pooling_mode,
bounds_check_mode=BoundsCheckMode(bounds_check_mode),
)
emb = emb.to(get_device())
if weights_precision == SparseType.INT8:
emb.init_embedding_weights_uniform(-0.0003, 0.0003)
nparams = sum(w.numel() for w in emb.split_embedding_weights())
param_size_multiplier = weights_precision.bit_rate() / 8.0
output_size_multiplier = output_dtype.bit_rate() / 8.0
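    # Bandwidth model: bytes written for the (pooled or sequence) output plus bytes read for
    # the B * L embedding rows gathered per table at the weight precision.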
if do_pooling:
read_write_bytes = (
output_size_multiplier * B * sum(Ds)
+ param_size_multiplier * B * sum(Ds) * L
)
else:
read_write_bytes = (
output_size_multiplier * B * sum(Ds) * L
+ param_size_multiplier * B * sum(Ds) * L
)
logging.info(
f"Embedding parameters: {nparams / 1.0e9: .2f} GParam, "
f"{nparams * param_size_multiplier / 1.0e9: .2f} GB"
)
logging.info(
f"Accessed weights per batch: {B * sum(Ds) * L * param_size_multiplier / 1.0e9: .2f} GB"
)
requests = generate_requests(
iters,
B,
T,
L,
E,
reuse=reuse,
alpha=alpha,
weighted=weighted,
requests_data_file=requests_data_file,
tables=tables,
)
# forward
time_per_iter = benchmark_requests(
requests,
lambda indices, offsets, per_sample_weights: emb.forward(
indices.long(),
offsets.long(),
per_sample_weights,
feature_requires_grad=feature_requires_grad,
),
flush_gpu_cache_size_mb=flush_gpu_cache_size_mb,
num_warmups=warmup_runs,
)
logging.info(
f"Forward, B: {B}, "
f"E: {E}, T: {T}, D: {D}, L: {L}, W: {weighted}, "
f"BW: {read_write_bytes / time_per_iter / 1.0e9: .2f} GB/s, " # noqa: B950
f"T: {time_per_iter * 1.0e6:.0f}us"
)
if output_dtype == SparseType.INT8:
# backward bench not representative
return
if do_pooling:
grad_output = torch.randn(B, sum(Ds)).to(get_device())
else:
grad_output = torch.randn(B * T * L, D).to(get_device())
# backward
time_per_iter = benchmark_requests(
requests,
lambda indices, offsets, per_sample_weights: emb(
indices.long(),
offsets.long(),
per_sample_weights,
feature_requires_grad=feature_requires_grad,
),
flush_gpu_cache_size_mb=flush_gpu_cache_size_mb,
bwd_only=True,
grad=grad_output,
)
logging.info(
f"Backward, B: {B}, E: {E}, T: {T}, D: {D}, L: {L}, "
f"BW: {2 * read_write_bytes / time_per_iter / 1.0e9: .2f} GB/s, "
f"T: {time_per_iter * 1.0e6:.0f}us"
)
@cli.command()
@click.option("--alpha", default=1.0)
@click.option("--bag-size", default=20)
@click.option("--batch-size", default=512)
@click.option("--embedding-dim", default=128)
@click.option("--weights-precision", type=SparseType, default=SparseType.FP32)
@click.option("--stoc", is_flag=True, default=False)
@click.option("--iters", default=100)
@click.option("--mixed", is_flag=True, default=False)
@click.option("--num-embeddings", default=int(1e5))
@click.option("--num-tables", default=32)
@click.option("--reuse", default=0.1)
@click.option("--uvm-tables", default=1)
@click.option("--uvm-bag-size", default=1)
@click.option("--weighted", is_flag=True, default=False)
@click.option("--flush-gpu-cache-size-mb", default=0)
@click.option("--requests_data_file", type=str, default=None)
@click.option("--tables", type=str, default=None)
@click.option("--output-dtype", type=SparseType, default=SparseType.FP32)
@click.option("--use-cache", is_flag=True, default=False)
@click.option("--cache-algorithm", default="lru")
@click.option("--cache-load-factor", default=0.2)
@click.option("--enforce-hbm", is_flag=True, default=False)
def uvm(
alpha: bool,
bag_size: int,
batch_size: int,
embedding_dim: int,
weights_precision: SparseType,
stoc: bool,
iters: int,
mixed: bool,
num_embeddings: int,
num_tables: int,
reuse: float,
uvm_tables: int,
uvm_bag_size: int,
weighted: bool,
flush_gpu_cache_size_mb: int,
requests_data_file: Optional[str],
tables: Optional[str],
output_dtype: SparseType,
use_cache: bool,
cache_algorithm: str,
cache_load_factor: float,
enforce_hbm: bool,
) -> None:
np.random.seed(42)
torch.manual_seed(42)
B = batch_size
D = embedding_dim
L = bag_size
E = num_embeddings
T = num_tables
T_uvm = uvm_tables
assert T_uvm <= T
assert (
T_uvm > 0
), f"T_uvm specified {T_uvm} <= 0. If not testing UVM, please use device benchmark."
T_gpu = T - T_uvm
L_uvm = uvm_bag_size
cache_alg = CacheAlgorithm.LRU if cache_algorithm == "lru" else CacheAlgorithm.LFU
managed_type = (
EmbeddingLocation.MANAGED_CACHING if use_cache else EmbeddingLocation.MANAGED
)
if mixed:
Ds = [
round_up(np.random.randint(low=int(0.5 * D), high=int(1.5 * D)), 4)
for _ in range(T)
]
D = np.average(Ds)
else:
Ds = [D] * T
emb_uvm = SplitTableBatchedEmbeddingBagsCodegen(
[
(
E,
d,
managed_type,
ComputeDevice.CUDA,
)
for d in Ds[:T_uvm]
],
weights_precision=weights_precision,
stochastic_rounding=stoc,
output_dtype=output_dtype,
cache_load_factor=cache_load_factor,
cache_algorithm=cache_alg,
enforce_hbm=enforce_hbm,
).cuda()
if weights_precision == SparseType.INT8:
emb_uvm.init_embedding_weights_uniform(-0.0003, 0.0003)
if T_gpu > 0:
emb_gpu = SplitTableBatchedEmbeddingBagsCodegen(
[
(
E,
d,
EmbeddingLocation.DEVICE,
ComputeDevice.CUDA,
)
for d in Ds[T_uvm:]
],
weights_precision=weights_precision,
stochastic_rounding=stoc,
).cuda()
if weights_precision == SparseType.INT8:
emb_gpu.init_embedding_weights_uniform(-0.0003, 0.0003)
emb_mixed = SplitTableBatchedEmbeddingBagsCodegen(
[
(
E,
d,
managed_option,
ComputeDevice.CUDA,
)
for (d, managed_option) in zip(
Ds,
[managed_type] * T_uvm + [EmbeddingLocation.DEVICE] * T_gpu,
)
],
weights_precision=weights_precision,
stochastic_rounding=stoc,
output_dtype=output_dtype,
cache_load_factor=cache_load_factor,
cache_algorithm=cache_alg,
enforce_hbm=enforce_hbm,
).cuda()
if weights_precision == SparseType.INT8:
emb_mixed.init_embedding_weights_uniform(-0.0003, 0.0003)
requests_uvm = generate_requests(
iters,
B,
T_uvm,
L_uvm,
E,
reuse=reuse,
alpha=alpha,
weighted=weighted,
requests_data_file=requests_data_file,
tables=tables,
)
requests_gpu = None
if T_gpu > 0:
requests_gpu = generate_requests(
iters,
B,
T_gpu,
L,
E,
reuse=reuse,
alpha=alpha,
weighted=False,
requests_data_file=requests_data_file,
tables=tables,
)
param_size_multiplier = weights_precision.bit_rate() / 8.0
output_size_multiplier = output_dtype.bit_rate() / 8.0
read_write_bytes_uvm = (
output_size_multiplier * B * sum(Ds[:T_uvm])
+ param_size_multiplier * B * sum(Ds[:T_uvm]) * L_uvm
)
time_per_iter = benchmark_requests(
requests_uvm,
lambda indices, offsets, per_sample_weights: emb_uvm.forward(
indices.long(),
offsets.long(),
per_sample_weights,
),
flush_gpu_cache_size_mb=flush_gpu_cache_size_mb,
)
logging.info(
f"UVM Forward, B: {B}, "
f"E: {E}, T: {T_uvm}, D: {D}, L: {L_uvm}, W: {weighted}, "
f"BW: {read_write_bytes_uvm / time_per_iter / 1.0e9: .2f} GB/s, " # noqa: B950
f"T: {time_per_iter * 1.0e6:.0f}us"
)
if T_gpu > 0:
requests = []
assert requests_gpu is not None
for rs_uvm, rs_gpu in zip(requests_uvm, requests_gpu):
indices = torch.cat([rs_uvm[0], rs_gpu[0]])
lengths = [L_uvm] * (T_uvm * B) + [L] * (T_gpu * B)
offsets = torch.tensor(([0] + np.cumsum(lengths).tolist())).int().cuda()
per_sample_weights = None
if weighted:
this_rs_uvm_weights = rs_uvm[2]
assert this_rs_uvm_weights is not None
this_rs_gpu_weights = rs_gpu[2]
assert this_rs_gpu_weights is not None
per_sample_weights = torch.cat(
[this_rs_uvm_weights, this_rs_gpu_weights]
)
requests.append((indices, offsets, per_sample_weights))
# forward
time_per_iter = benchmark_requests(
requests_gpu,
lambda indices, offsets, per_sample_weights: emb_gpu.forward(
indices.long(),
offsets.long(),
per_sample_weights,
),
flush_gpu_cache_size_mb=flush_gpu_cache_size_mb,
)
read_write_bytes_hbm = (
output_size_multiplier * B * sum(Ds[T_uvm:])
+ param_size_multiplier * B * sum(Ds[T_uvm:]) * L
)
logging.info(
f"GPU Forward, B: {B}, "
f"E: {E}, T: {T_gpu}, D: {D}, L: {L}, W: {weighted}, "
f"BW: {read_write_bytes_hbm / time_per_iter / 1.0e9: .2f} GB/s, " # noqa: B950
f"T: {time_per_iter * 1.0e6:.0f}us"
)
time_per_iter = benchmark_requests(
requests,
lambda indices, offsets, per_sample_weights: emb_mixed.forward(
indices.long(),
offsets.long(),
per_sample_weights,
),
flush_gpu_cache_size_mb=flush_gpu_cache_size_mb,
)
read_write_bytes_total = read_write_bytes_uvm + read_write_bytes_hbm
logging.info(
f"Mixed Forward, B: {B}, "
f"E: {E}, T_GPU: {T_gpu}, T_UVM: {T_uvm}, D: {D}, L_GPU: {L}, L_UVM: {L_uvm}, W: {weighted}, "
f"BW: {read_write_bytes_total / time_per_iter / 1.0e9: .2f} GB/s, " # noqa: B950
f"T: {time_per_iter * 1.0e6:.0f}us"
)
@cli.command()
@click.option("--alpha", default=1.0)
@click.option("--bag-size", default=20)
@click.option("--batch-size", default=512)
@click.option("--cache-algorithm", default="lru")
@click.option("--cache-load-factor", default=0.2)
@click.option("--embedding-dim", default=128)
@click.option("--weights-precision", type=SparseType, default=SparseType.FP32)
@click.option("--stoc", is_flag=True, default=False)
@click.option("--long-index", is_flag=True, default=False)
@click.option("--iters", default=100)
@click.option("--mixed", is_flag=True, default=False)
@click.option("--num-embeddings", default=int(1e5))
@click.option("--num-tables", default=32)
@click.option("--reuse", default=0.1)
@click.option("--weighted", is_flag=True, default=False)
@click.option("--flush-gpu-cache-size-mb", default=0)
@click.option("--requests_data_file", type=str, default=None)
@click.option("--tables", type=str, default=None)
def cache( # noqa C901
alpha: float,
bag_size: int,
batch_size: int,
cache_algorithm: str,
cache_load_factor: float,
embedding_dim: int,
weights_precision: SparseType,
stoc: bool,
iters: int,
long_index: bool,
mixed: bool,
num_embeddings: int,
num_tables: int,
reuse: float,
weighted: bool,
flush_gpu_cache_size_mb: int,
requests_data_file: Optional[str],
tables: Optional[str],
) -> None:
np.random.seed(42)
torch.manual_seed(42)
optimizer = OptimType.EXACT_ROWWISE_ADAGRAD
B = batch_size
D = embedding_dim
L = bag_size
E = num_embeddings
T = num_tables
cache_alg = CacheAlgorithm.LRU if cache_algorithm == "lru" else CacheAlgorithm.LFU
if mixed:
Ds = [
round_up(np.random.randint(low=int(0.5 * D), high=int(1.5 * D)), 4)
for _ in range(T)
]
D = np.average(Ds)
else:
Ds = [D] * T
emb_nc = SplitTableBatchedEmbeddingBagsCodegen(
[
(
E,
d,
EmbeddingLocation.MANAGED,
ComputeDevice.CUDA,
)
for d in Ds
],
optimizer=optimizer,
weights_precision=weights_precision,
stochastic_rounding=stoc,
).cuda()
if weights_precision == SparseType.INT8:
emb_nc.init_embedding_weights_uniform(-0.0003, 0.0003)
emb = SplitTableBatchedEmbeddingBagsCodegen(
[
(
E,
d,
EmbeddingLocation.MANAGED_CACHING,
ComputeDevice.CUDA,
)
for d in Ds
],
optimizer=optimizer,
weights_precision=weights_precision,
stochastic_rounding=stoc,
cache_load_factor=cache_load_factor,
cache_algorithm=cache_alg,
).cuda()
if weights_precision == SparseType.INT8:
emb.init_embedding_weights_uniform(-0.0003, 0.0003)
nparams = sum(w.numel() for w in emb.split_embedding_weights())
param_size_multiplier = weights_precision.bit_rate() / 8.0
logging.info(
f"Embedding tables: {E * T} rows, {nparams / 1.0e9: .2f} GParam, "
f"{nparams * param_size_multiplier / 1.0e9: .2f} GB"
)
logging.info(
f"Accessed weights per batch: {B * T * L} rows, "
f"{B * T * L * D * param_size_multiplier / 1.0e9: .2f} GB"
)
requests = generate_requests(
2 * iters,
B,
T,
L,
E,
reuse=reuse,
alpha=alpha,
weighted=weighted,
requests_data_file=requests_data_file,
tables=tables,
)
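    # 2 * iters requests were generated: the first half warms the cache, the second half is timed.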
warmup_requests, requests = requests[:iters], requests[iters:]
grad_output = torch.randn(B, sum(Ds)).cuda()
time_per_iter = benchmark_requests(
requests,
lambda indices, offsets, per_sample_weights: emb_nc(
indices.long(), offsets.long(), per_sample_weights
).backward(grad_output),
flush_gpu_cache_size_mb=flush_gpu_cache_size_mb,
)
logging.info(
f"ForwardBackward (UVM), B: {B}, E: {E}, T: {T}, D: {D}, L: {L}, "
f"BW: {3 * param_size_multiplier * B * sum(Ds) * L / time_per_iter / 1.0e9: .2f} GB/s, "
f"T: {time_per_iter * 1.0e6:.0f}us"
)
# warm up
for indices, offsets, _ in warmup_requests:
emb.forward(indices.long(), offsets.long())
# get cache miss rate (forward and backward) and exchanged cache lines (prefetch)
cache_misses = []
exchanged_cache_lines = []
NOT_FOUND = -1
for indices, offsets, _ in requests:
old_lxu_cache_state = emb.lxu_cache_state.clone()
emb.prefetch(indices.long(), offsets.long())
exchanged_cache_lines.append(
(emb.lxu_cache_state != old_lxu_cache_state).sum().item()
)
cache_misses.append((emb.lxu_cache_locations_list[0] == NOT_FOUND).sum().item())
emb.forward(indices.long(), offsets.long())
logging.info(
f"Exchanged cache lines -- mean: {sum(exchanged_cache_lines)/len(requests): .2f}, "
f"max: {max(exchanged_cache_lines)}, min: {min(exchanged_cache_lines)}"
)
logging.info(
f"Cache miss -- mean: {sum(cache_misses)/len(requests)}, "
f"max: {max(cache_misses)}, min: {min(cache_misses)}"
)
# benchmark prefetch
emb.reset_cache_states()
for indices, offsets, _ in warmup_requests:
emb.forward(indices, offsets)
prefetch_time, forward_backward_time = benchmark_pipelined_requests(
requests,
lambda indices, offsets, indices_weights: emb.prefetch(indices, offsets),
lambda indices, offsets, indices_weights: emb.forward(
indices, offsets, indices_weights
).backward(grad_output),
flush_gpu_cache_size_mb=flush_gpu_cache_size_mb,
)
e2e_time = prefetch_time + forward_backward_time
logging.info(
f"ForwardBackward (LXU), reuse: {reuse}, alpha: {alpha}, B: {B}, "
f"E: {E}, T: {T}, D: {D}, L: {L}, "
f"BW: {3 * param_size_multiplier * B * sum(Ds) * L / e2e_time / 1.0e9: .2f} GB/s, "
f"Tprefetch: {prefetch_time * 1.0e6:.0f}us, "
f"{2 * sum(exchanged_cache_lines) * param_size_multiplier * D / prefetch_time / len(requests) / 1.0e9: .2f} GB/s, "
f"Tfwdbwd: {forward_backward_time * 1.0e6:.0f}us, "
f"{3 * param_size_multiplier * B * sum(Ds) * L / forward_backward_time / 1.0e9: .2f} GB/s, "
f"Te2e: {e2e_time * 1.0e6:.0f}us, "
)
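# CPU-only variant: plain perf_counter wall-clock time per request, with no warmup and no
# cache flushing.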
def benchmark_cpu_requests(
requests: List[Tuple[torch.IntTensor, torch.IntTensor, Optional[torch.Tensor]]],
func: Callable[[Tensor, Tensor, Optional[Tensor]], Tensor],
) -> float:
import time
start_time = time.perf_counter()
for indices, offsets, weights in requests:
func(indices, offsets, weights)
end_time = time.perf_counter()
return (end_time - start_time) / len(requests)
@cli.command()
@click.option("--alpha", default=1.0)
@click.option("--bag-size", default=20)
@click.option("--batch-size", default=512)
@click.option("--embedding-dim", default=128)
@click.option("--weights-precision", type=SparseType, default=SparseType.INT4)
@click.option("--stoc", is_flag=True, default=False)
@click.option("--iters", default=100)
@click.option("--managed", default="device")
@click.option("--mixed", is_flag=True, default=False)
@click.option("--num-embeddings", default=int(1e5))
@click.option("--num-tables", default=32)
@click.option("--reuse", default=0.0)
@click.option("--row-wise/--no-row-wise", default=True)
@click.option("--weighted", is_flag=True, default=False)
@click.option("--index-remapping", is_flag=True, default=False)
@click.option("--requests_data_file", type=str, default=None)
@click.option("--tables", type=str, default=None)
@click.option("--output-dtype", type=SparseType, default=SparseType.FP16)
@click.option("--fp8-exponent-bits", type=int, default=None)
@click.option("--fp8-exponent-bias", type=int, default=None)
def nbit_cpu( # noqa C901
alpha: float,
bag_size: int,
batch_size: int,
embedding_dim: int,
weights_precision: SparseType,
stoc: bool,
iters: int,
managed: str,
mixed: bool,
num_embeddings: int,
num_tables: int,
reuse: float,
row_wise: bool,
weighted: bool,
index_remapping: bool,
requests_data_file: Optional[str],
tables: Optional[str],
output_dtype: SparseType,
fp8_exponent_bits: Optional[int],
fp8_exponent_bias: Optional[int],
) -> None:
np.random.seed(42)
torch.manual_seed(42)
B = batch_size
D = embedding_dim
L = bag_size
E = num_embeddings
T = num_tables
if mixed:
Ds = [
            # the int4 table-batched embedding op only supports mixed D when every D is a multiple of 8
round_up(np.random.randint(low=int(0.5 * D), high=int(1.5 * D)), 8)
for _ in range(T)
]
D = np.average(Ds)
else:
Ds = [D] * T
emb = IntNBitTableBatchedEmbeddingBagsCodegen(
[("", E, d, weights_precision, EmbeddingLocation.HOST) for d in Ds],
device="cpu",
index_remapping=[torch.arange(E) for _ in Ds] if index_remapping else None,
output_dtype=output_dtype,
fp8_exponent_bits=fp8_exponent_bits,
fp8_exponent_bias=fp8_exponent_bias,
).cpu()
emb.fill_random_weights()
nparams_byte = sum(w.numel() for (w, _) in emb.split_embedding_weights())
param_size_multiplier = weights_precision.bit_rate() / 8.0
output_size_multiplier = output_dtype.bit_rate() / 8.0
read_write_bytes = (
output_size_multiplier * B * T * D + param_size_multiplier * B * T * L * D
)
logging.info(
f"{weights_precision} Embedding tables: {E * T} rows, {nparams_byte / param_size_multiplier / 1.0e9: .2f} GParam, "
f"{nparams_byte / 1.0e9: .2f} GB" # IntN TBE use byte for storage
)
logging.info(
f"Accessed weights per batch: {B * T * L} rows, "
f"{B * T * L * D * param_size_multiplier / 1.0e9: .2f} GB"
)
requests = generate_requests(
iters,
B,
T,
L,
E,
reuse=reuse,
alpha=alpha,
weighted=weighted,
requests_data_file=requests_data_file,
tables=tables,
use_cpu=True,
)
requests = [
(a.cpu().int(), b.cpu().int(), c.cpu() if c else None) for (a, b, c) in requests
]
time_per_iter = benchmark_cpu_requests(
# pyre-fixme[6]: For 1st param expected `List[Tuple[IntTensor, IntTensor,
# Optional[Tensor]]]` but got `List[Tuple[Tensor, Tensor, Optional[Tensor]]]`.
requests,
lambda indices, offsets, per_sample_weights: emb.forward(
indices,
offsets,
per_sample_weights,
),
)
logging.info(
f"{weights_precision} Forward, B: {B}, "
f"E: {E}, T: {T}, D: {D}, L: {L}, W: {weighted}, "
f"BW: {read_write_bytes / time_per_iter / 1.0e9: .2f} GB/s, " # noqa: B950
f"T: {time_per_iter * 1.0e6:.0f}us"
)
@cli.command()
@click.option("--alpha", default=1.0)
@click.option("--bag-size", default=20)
@click.option("--batch-size", default=512)
@click.option("--embedding-dim", default=128)
@click.option("--weights-precision", type=SparseType, default=SparseType.INT4)
@click.option("--managed", default="device")
@click.option("--mixed", is_flag=True, default=False)
@click.option("--num-embeddings", default=int(1e5))
@click.option("--num-tables", default=32)
@click.option("--reuse", default=0.0)
@click.option("--weighted", is_flag=True, default=False)
@click.option("--pooling", type=str, default="sum")
@click.option("--bounds-check-mode", type=int, default=BoundsCheckMode.NONE.value)
@click.option("--pruning-ratio", type=float, default=None)
@click.option("--pruning-hash-load-factor", default=0.75)
@click.option("--use-array-for-index-remapping", is_flag=True, default=True)
@click.option("--check-median", is_flag=True, default=True)
@click.option("--iters", default=100)
@click.option("--runs-of-iters", default=5)
@click.option("--warmup-runs", default=2)
@click.option("--output-dtype", type=SparseType, default=SparseType.FP16)
@click.option("--report-aibench", is_flag=True)
@click.option("--run-reference", is_flag=True, default=False)
@click.option("--requests_data_file", type=str, default=None)
@click.option("--tables", type=str, default=None)
@click.option("--fp8-exponent-bits", type=int, default=None)
@click.option("--fp8-exponent-bias", type=int, default=None)
def nbit_device( # noqa C901
alpha: float,
bag_size: int,
batch_size: int,
embedding_dim: int,
weights_precision: SparseType,
managed: str,
mixed: bool,
num_embeddings: int,
num_tables: int,
reuse: float,
weighted: bool,
pooling: str,
bounds_check_mode: int,
pruning_ratio: Optional[float],
pruning_hash_load_factor: float,
use_array_for_index_remapping: bool,
check_median: bool,
iters: int,
runs_of_iters: int,
warmup_runs: int,
output_dtype: SparseType,
report_aibench: bool,
run_reference: bool,
requests_data_file: Optional[str],
tables: Optional[str],
fp8_exponent_bits: Optional[int],
fp8_exponent_bias: Optional[int],
) -> None:
np.random.seed(42)
torch.manual_seed(42)
B = batch_size
D = embedding_dim
L = bag_size
E = num_embeddings
original_E = E
T = num_tables
index_remapping = None
if mixed:
        # the int4 table-batched embedding op only supports mixed D when every D is a multiple of 8
Ds = [
round_up(np.random.randint(low=int(0.5 * D), high=int(1.5 * D)), 8)
for _ in range(T)
]
D = np.average(Ds)
else:
Ds = [D] * T
mem_for_pruning = 0
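    # Optionally simulate pruning: keep ceil(E * (1 - pruning_ratio)) rows per table and build
    # an index remapping (dense array or hash table) from original row ids to surviving rows.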
if pruning_ratio:
assert pruning_ratio < 1 and pruning_ratio >= 0
E = math.ceil(E * (1.0 - pruning_ratio))
index_remapping = []
for _ in range(T):
mapping = torch.tensor([-1] * original_E, dtype=torch.int32)
selected_indices = random.sample(range(original_E), E)
for i, idx in enumerate(selected_indices):
mapping[idx] = i
index_remapping.append(mapping)
if use_array_for_index_remapping:
mem_for_pruning += mapping.numel() * 4
else:
mem_for_pruning += E / pruning_hash_load_factor * 2 * 4
if managed == "device":
managed_option = EmbeddingLocation.DEVICE
else:
managed_option = EmbeddingLocation.MANAGED
if pooling is None or pooling == "sum":
pooling = "sum"
pooling_mode = PoolingMode.SUM
do_pooling = True
elif pooling == "mean":
pooling_mode = PoolingMode.MEAN
do_pooling = True
else: # "none"
pooling_mode = PoolingMode.NONE
do_pooling = False
emb = IntNBitTableBatchedEmbeddingBagsCodegen(
[("", E, d, weights_precision, managed_option) for d in Ds],
bounds_check_mode=BoundsCheckMode(bounds_check_mode),
index_remapping=index_remapping,
pruning_hash_load_factor=pruning_hash_load_factor,
use_array_for_index_remapping=use_array_for_index_remapping,
output_dtype=output_dtype,
pooling_mode=pooling_mode,
fp8_exponent_bits=fp8_exponent_bits,
fp8_exponent_bias=fp8_exponent_bias,
).cuda()
emb.fill_random_weights()
nparams_byte = sum(w.numel() for (w, _) in emb.split_embedding_weights())
param_size_multiplier = weights_precision.bit_rate() / 8.0
output_size_multiplier = output_dtype.bit_rate() / 8.0
if do_pooling:
read_write_bytes = (
output_size_multiplier * B * T * D + param_size_multiplier * B * T * L * D
)
else:
read_write_bytes = (
output_size_multiplier * B * T * L * D
+ param_size_multiplier * B * T * L * D
)
logging.info(
f"{weights_precision} Embedding tables: {E * T} rows, {nparams_byte / param_size_multiplier / 1.0e9: .2f} GParam, "
f"{nparams_byte / 1.0e9: .2f} GB" # IntN TBE use byte for storage
)
logging.info(
f"Accessed weights per batch: {B * T * L} rows, "
f"{B * T * L * D * param_size_multiplier / 1.0e9: .2f} GB"
)
times = []
for i in range(runs_of_iters):
requests = generate_requests(
iters,
B,
T,
L,
E,
reuse=reuse,
alpha=alpha,
weighted=weighted,
requests_data_file=requests_data_file,
tables=tables,
)
requests = [(a.int(), b.int(), c if c else None) for (a, b, c) in requests]
# forward
time_per_iter = benchmark_requests(
requests,
lambda indices, offsets, per_sample_weights: emb.forward(
indices.int(),
offsets.int(),
per_sample_weights,
),
check_median=check_median,
)
# free up GPU memory
del requests
logging.info(
f"Iteration {i}: "
f"{weights_precision} Forward, B: {B}, "
f"E: {E}, T: {T}, D: {D}, L: {L}, W: {weighted}, "
f"BW: {read_write_bytes / time_per_iter / 1.0e9: .2f} GB/s, " # noqa: B950
f"Time: {time_per_iter * 1.0e6:.0f}us, "
f"Memory Usage For Pruning: {mem_for_pruning / 1.0e9:.0f} GB"
)
if i >= warmup_runs:
times.append(time_per_iter)
time_per_iter = statistics.mean(times)
bandwidth = read_write_bytes / time_per_iter / 1.0e9
logging.info(
f"Average of all iterations: "
f"{weights_precision} Forward, B: {B}, "
f"E: {E}, T: {T}, D: {D}, L: {L}, W: {weighted}, "
f"BW: {bandwidth: .2f} GB/s, " # noqa: B950
f"Time: {time_per_iter * 1.0e6:.0f}us, "
f"Memory Usage For Pruning: {mem_for_pruning / 1.0e9:.0f} GB"
)
if report_aibench and haveAIBench:
print(
emitMetric(
type="NET",
metric=f"bandwidth_{weights_precision}",
unit="scalar",
value=str(bandwidth),
)
)
print(
emitMetric(
type="NET",
metric=f"time_per_iter_{weights_precision}",
unit="scalar",
value=str(time_per_iter * 1.0e6),
)
)
if run_reference:
times = []
for i in range(runs_of_iters):
requests = generate_requests(
iters,
B,
T,
L,
E,
reuse=reuse,
alpha=alpha,
weighted=weighted,
requests_data_file=requests_data_file,
tables=tables,
)
requests = [(a.int(), b.int(), c if c else None) for (a, b, c) in requests]
# forward
time_per_iter_refer = benchmark_requests_refer(
requests,
T,
B,
L,
E,
D,
pooling,
weighted,
check_median=check_median,
)
# free up GPU memory
del requests
logging.info(
f"Reference (nn.Embedding(Bag)) Iteration {i}: "
f"Forward, B: {B}, "
f"E: {E}, T: {T}, D: {D}, L: {L}, W: {weighted}, "
f"BW: {read_write_bytes / time_per_iter_refer / 1.0e9: .2f} GB/s, " # noqa: B950
f"Time: {time_per_iter_refer * 1.0e6:.0f}us "
)
if i >= warmup_runs:
times.append(time_per_iter_refer)
time_per_iter_refer = statistics.mean(times)
bandwidth = read_write_bytes / time_per_iter_refer / 1.0e9
logging.info(
f"Average of all iterations: "
f"Forward, B: {B}, "
f"E: {E}, T: {T}, D: {D}, L: {L}, W: {weighted}, "
f"Effective BW: {bandwidth: .2f} GB/s, " # noqa: B950
f"Time: {time_per_iter_refer * 1.0e6:.0f}us "
)
@cli.command()
@click.option("--alpha", default=1.0)
@click.option("--bag-size-list", type=str, default="20")
@click.option("--batch-size", default=512)
@click.option("--embedding-dim-list", type=str, default="128")
@click.option("--weights-precision", type=SparseType, default=SparseType.INT4)
@click.option("--managed", default="device")
@click.option("--mixed", is_flag=True, default=False)
@click.option("--num-embeddings-list", type=str, default="100000")
@click.option("--reuse", default=0.0)
@click.option("--weighted", is_flag=True, default=False)
@click.option("--pooling", type=str, default="sum")
@click.option("--bounds-check-mode", type=int, default=BoundsCheckMode.NONE.value)
@click.option("--pruning-ratio", type=float, default=None)
@click.option("--pruning-hash-load-factor", default=0.75)
@click.option("--use-array-for-index-remapping", is_flag=True, default=True)
@click.option("--check-median", is_flag=True, default=True)
@click.option("--iters", default=100)
@click.option("--runs-of-iters", default=5)
@click.option("--warmup-runs", default=2)
@click.option("--output-dtype", type=SparseType, default=SparseType.FP16)
@click.option("--report-aibench", is_flag=True)
@click.option("--fp8-exponent-bits", type=int, default=None)
@click.option("--fp8-exponent-bias", type=int, default=None)
@click.option("--use-cpu", is_flag=True, default=False)
def nbit_device_with_spec( # noqa C901
alpha: float,
bag_size_list: str,
batch_size: int,
embedding_dim_list: str,
weights_precision: SparseType,
managed: str,
mixed: bool,
num_embeddings_list: str,
reuse: float,
weighted: bool,
pooling: str,
bounds_check_mode: int,
pruning_ratio: Optional[float],
pruning_hash_load_factor: float,
use_array_for_index_remapping: bool,
check_median: bool,
iters: int,
runs_of_iters: int,
warmup_runs: int,
output_dtype: SparseType,
report_aibench: bool,
fp8_exponent_bits: Optional[int],
fp8_exponent_bias: Optional[int],
use_cpu: bool,
) -> None:
np.random.seed(42)
torch.manual_seed(42)
B = batch_size
Ds = [int(D) for D in embedding_dim_list.split(",")]
Ls = [int(L) for L in bag_size_list.split(",")]
Es = [int(E) for E in num_embeddings_list.split(",")]
E = np.mean(Es)
D = np.mean(Ds)
L = np.mean(Ls)
T = len(Ds)
logging.info("TBE Spec:")
logging.info("#, E, D, L")
for i, (e, d, bag_size) in enumerate(zip(Es, Ds, Ls)):
logging.info(f"{i}, {e}, {d}, {bag_size}")
logging.info(f"Mean(Es) = {E}, Mean(Ds) = {D}, Mean(Ls) = {L}")
index_remapping = None
mem_for_pruning = 0
if pruning_ratio:
original_Es = Es
assert pruning_ratio < 1 and pruning_ratio >= 0
index_remapping = []
new_Es = []
for original_E in original_Es:
E = math.ceil(original_E * (1.0 - pruning_ratio))
mapping = torch.tensor([-1] * original_E, dtype=torch.int32)
selected_indices = random.sample(range(original_E), E)
for i, idx in enumerate(selected_indices):
mapping[idx] = i
index_remapping.append(mapping)
if use_array_for_index_remapping:
mem_for_pruning += mapping.numel() * 4
else:
mem_for_pruning += E / pruning_hash_load_factor * 2 * 4
new_Es.append(E)
Es = new_Es
E = np.mean(Es)
logging.info(f"After prunnig (pruning_ratio={pruning_ratio}")
logging.info("#, E, D, L")
for i, (e, d, bag_size) in enumerate(zip(Es, Ds, Ls)):
logging.info(f"{i}, {e}, {d}, {bag_size}")
logging.info(f"Mean(Es) = {E}, Mean(Ds) = {D}, Mean(Ls) = {L}")
if managed == "device":
managed_option = EmbeddingLocation.DEVICE
else:
managed_option = EmbeddingLocation.MANAGED
# Override managed_option to HOST if using CPU
if use_cpu:
managed_option = EmbeddingLocation.HOST
if pooling is None or pooling == "sum":
pooling = "sum"
pooling_mode = PoolingMode.SUM
do_pooling = True
elif pooling == "mean":
pooling_mode = PoolingMode.MEAN
do_pooling = True
else: # "none"
pooling_mode = PoolingMode.NONE
do_pooling = False
emb = IntNBitTableBatchedEmbeddingBagsCodegen(
[("", e, d, weights_precision, managed_option) for d, e in zip(Ds, Es)],
device="cpu" if use_cpu else None,
bounds_check_mode=BoundsCheckMode(bounds_check_mode),
index_remapping=index_remapping,
pruning_hash_load_factor=pruning_hash_load_factor,
use_array_for_index_remapping=use_array_for_index_remapping,
output_dtype=output_dtype,
pooling_mode=pooling_mode,
fp8_exponent_bits=fp8_exponent_bits,
fp8_exponent_bias=fp8_exponent_bias,
)
if use_cpu:
emb = emb.cpu()
else:
emb = emb.cuda()
emb.fill_random_weights()
nparams_byte = sum(w.numel() for (w, _) in emb.split_embedding_weights())
param_size_multiplier = weights_precision.bit_rate() / 8.0
output_size_multiplier = output_dtype.bit_rate() / 8.0
if do_pooling:
read_write_bytes = sum(
[
output_size_multiplier * B * d
+ param_size_multiplier * B * bag_size * d
for bag_size, d in zip(Ls, Ds)
]
)
else:
read_write_bytes = sum(
[
output_size_multiplier * B * bag_size * d
+ param_size_multiplier * B * bag_size * d
for bag_size, d in zip(Ls, Ds)
]
)
logging.info(
f"{weights_precision} Embedding tables: {sum(Es)} rows, {nparams_byte / param_size_multiplier / 1.0e9: .2f} GParam, "
f"{nparams_byte / 1.0e9: .2f} GB" # IntN TBE use byte for storage
)
logging.info(
f"Accessed weights per batch: {B * sum(Ls)} rows, "
f"{B * sum([bag_size * d for bag_size, d in zip(Ls, Ds)]) * param_size_multiplier / 1.0e9: .2f} GB"
)
times = []
for i in range(runs_of_iters):
# Generate a request for each table then combine
all_requests = {
"indices": [[] for _ in range(iters)],
"offsets": [[] for _ in range(iters)],
"weights": [[] for _ in range(iters)],
}
# row = iter, column = tensor
for t, (bag_size, e) in enumerate(zip(Ls, Es)):
requests = generate_requests(
iters,
B,
1,
bag_size,
e,
reuse=reuse,
# don't use zipf if e isn't large enough compared to bag_size.
alpha=alpha if (e / bag_size) > 2.0 else 1.0,
# need many more samples for zipf if bag_size is very small.
zipf_oversample_ratio=3 if bag_size > 5 else 10,
weighted=weighted,
use_cpu=use_cpu,
)
for it, (indices, offsets, weights) in enumerate(requests):
all_requests["indices"][it].append(indices)
if t > 0:
offsets = offsets[1:] # remove the first element
offsets += all_requests["offsets"][it][t - 1][-1]
all_requests["offsets"][it].append(offsets)
all_requests["weights"][it].append(weights)
requests = []
for it in range(iters):
indices = torch.concat(all_requests["indices"][it])
offsets = torch.concat(all_requests["offsets"][it])
if weighted:
weights = torch.concat(all_requests["weights"][it])
else:
weights = None
requests.append((indices, offsets, weights))
if use_cpu:
requests = [
(a.cpu().int(), b.cpu().int(), c.cpu() if c else None)
for (a, b, c) in requests
]
else:
requests = [(a.int(), b.int(), c if c else None) for (a, b, c) in requests]
del all_requests
assert len(requests) == iters
# forward
if use_cpu:
time_per_iter = benchmark_cpu_requests(
requests,
lambda indices, offsets, per_sample_weights: emb.forward(
indices.int(),
offsets.int(),
per_sample_weights,
),
)
else:
time_per_iter = benchmark_requests(
requests,
lambda indices, offsets, per_sample_weights: emb.forward(
indices.int(),
offsets.int(),
per_sample_weights,
),
check_median=check_median,
)
# free up memory
del requests
logging.info(
f"Iteration {i}: "
f"{weights_precision} Forward, B: {B}, "
f"E: {E}, T: {T}, D: {D}, L: {L}, W: {weighted}, "
f"BW: {read_write_bytes / time_per_iter / 1.0e9: .2f} GB/s, " # noqa: B950
f"Time: {time_per_iter * 1.0e6:.0f}us, "
f"Memory Usage For Pruning: {mem_for_pruning / 1.0e9:.0f} GB"
)
if i >= warmup_runs:
times.append(time_per_iter)
time_per_iter = statistics.mean(times)
bandwidth = read_write_bytes / time_per_iter / 1.0e9
logging.info(
f"Average of all iterations: "
f"{weights_precision} Forward, B: {B}, "
f"E: {E}, T: {T}, D: {D}, L: {L}, W: {weighted}, "
f"BW: {bandwidth: .2f} GB/s, " # noqa: B950
f"Time: {time_per_iter * 1.0e6:.0f}us, "
f"Memory Usage For Pruning: {mem_for_pruning / 1.0e9:.0f} GB"
)
if report_aibench and haveAIBench:
print(
emitMetric(
type="NET",
metric=f"bandwidth_{weights_precision}",
unit="scalar",
value=str(bandwidth),
)
)
print(
emitMetric(
type="NET",
metric=f"time_per_iter_{weights_precision}",
unit="scalar",
value=str(time_per_iter * 1.0e6),
)
)
@cli.command()
@click.option("--alpha", default=1.0)
@click.option("--bag-size", default=20)
@click.option("--batch-size", default=512)
@click.option("--embedding-dim", default=128)
@click.option("--weights-precision", type=SparseType, default=SparseType.INT4)
@click.option("--iters", default=100)
@click.option("--mixed", is_flag=True, default=False)
@click.option("--num-embeddings", default=int(1e5))
@click.option("--num-tables", default=32)
@click.option("--reuse", default=0.1)
@click.option("--uvm-num-embeddings", default=int(1e5))
@click.option("--uvm-tables", default=1)
@click.option("--uvm-bag-size", default=1)
@click.option("--weighted", is_flag=True, default=False)
@click.option("--flush-gpu-cache-size-mb", default=0)
@click.option("--output-dtype", type=SparseType, default=SparseType.FP16)
@click.option("--use-cache", is_flag=True, default=False)
@click.option("--cache-algorithm", default="lru")
@click.option("--cache-load-factor", default=0.2)
@click.option("--enforce-hbm", is_flag=True, default=False)
@click.option("--fp8-exponent-bits", type=int, default=None)
@click.option("--fp8-exponent-bias", type=int, default=None)
def nbit_uvm(
alpha: bool,
bag_size: int,
batch_size: int,
embedding_dim: int,
weights_precision: SparseType,
iters: int,
mixed: bool,
num_embeddings: int,
num_tables: int,
reuse: float,
uvm_num_embeddings: int,
uvm_tables: int,
uvm_bag_size: int,
weighted: bool,
flush_gpu_cache_size_mb: int,
output_dtype: SparseType,
use_cache: bool,
cache_algorithm: str,
cache_load_factor: float,
enforce_hbm: bool,
fp8_exponent_bits: Optional[int],
fp8_exponent_bias: Optional[int],
) -> None:
np.random.seed(42)
torch.manual_seed(42)
B = batch_size
D = embedding_dim
L = bag_size
E = num_embeddings
E_uvm = uvm_num_embeddings
T = num_tables
T_uvm = uvm_tables
assert T_uvm <= T
assert (
T_uvm > 0
), f"T_uvm specified {T_uvm} <= 0. If not testing UVM, please use device benchmark."
T_gpu = T - T_uvm
L_uvm = uvm_bag_size
cache_alg = CacheAlgorithm.LRU if cache_algorithm == "lru" else CacheAlgorithm.LFU
managed_type = (
EmbeddingLocation.MANAGED_CACHING if use_cache else EmbeddingLocation.MANAGED
)
logging.info(f"T: {T}, T_uvm: {T_uvm}, T_gpu: {T_gpu}")
if mixed:
Ds = [
round_up(np.random.randint(low=int(0.5 * D), high=int(1.5 * D)), 4)
for _ in range(T)
]
D = np.average(Ds)
else:
Ds = [D] * T
emb_uvm = IntNBitTableBatchedEmbeddingBagsCodegen(
[
(
"",
E_uvm,
d,
weights_precision,
managed_type,
)
for d in Ds[:T_uvm]
],
output_dtype=output_dtype,
cache_load_factor=cache_load_factor,
cache_algorithm=cache_alg,
enforce_hbm=enforce_hbm,
fp8_exponent_bits=fp8_exponent_bits,
fp8_exponent_bias=fp8_exponent_bias,
).cuda()
emb_uvm.fill_random_weights()
if T_gpu > 0:
emb_gpu = IntNBitTableBatchedEmbeddingBagsCodegen(
[
(
"",
E,
d,
weights_precision,
EmbeddingLocation.DEVICE,
)
for d in Ds[T_uvm:]
],
output_dtype=output_dtype,
fp8_exponent_bits=fp8_exponent_bits,
fp8_exponent_bias=fp8_exponent_bias,
).cuda()
emb_gpu.fill_random_weights()
emb_mixed = IntNBitTableBatchedEmbeddingBagsCodegen(
[
(
"",
e,
d,
weights_precision,
managed_option,
)
for (e, d, managed_option) in zip(
[E_uvm] * T_uvm + [E] * T_gpu,
Ds,
[managed_type] * T_uvm + [EmbeddingLocation.DEVICE] * T_gpu,
)
],
output_dtype=output_dtype,
cache_load_factor=cache_load_factor,
cache_algorithm=cache_alg,
enforce_hbm=enforce_hbm,
fp8_exponent_bits=fp8_exponent_bits,
fp8_exponent_bias=fp8_exponent_bias,
).cuda()
emb_mixed.fill_random_weights()
requests_uvm = generate_requests(
iters,
B,
T_uvm,
L_uvm,
E_uvm,
reuse=reuse,
alpha=alpha,
weighted=weighted,
)
requests_uvm = [(a.int(), b.int(), c if c else None) for (a, b, c) in requests_uvm]
requests_gpu = None
if T_gpu > 0:
requests_gpu = generate_requests(
iters,
B,
T_gpu,
L,
E,
reuse=reuse,
alpha=alpha,
weighted=False,
)
requests_gpu = [
(a.int(), b.int(), c if c else None) for (a, b, c) in requests_gpu
]
param_size_multiplier = weights_precision.bit_rate() / 8.0
output_size_multiplier = output_dtype.bit_rate() / 8.0
read_write_bytes_uvm = (
output_size_multiplier * B * sum(Ds[:T_uvm])
+ param_size_multiplier * B * sum(Ds[:T_uvm]) * L_uvm
)
if T_gpu > 0:
nparams_byte = sum(w.numel() for (w, _) in emb_mixed.split_embedding_weights())
logging.info(
f"{weights_precision} Embedding tables: {E * T_gpu + E_uvm * T_uvm} rows, {nparams_byte / param_size_multiplier / 1.0e9: .2f} GParam, "
f"{nparams_byte / 1.0e9: .2f} GB" # IntN TBE use byte for storage
)
logging.info(
f"Accessed weights per batch: {B * (T_gpu * L + T_uvm * L_uvm)} rows, "
f"{B * (L * sum(Ds[T_uvm:]) + L_uvm * sum(Ds[:T_uvm])) * param_size_multiplier / 1.0e9: .2f} GB"
)
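    # Bracket the UVM-only forward benchmark with cudaProfilerStart/Stop and an NVTX range so
    # it is easy to isolate in an nsys/nvprof capture.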
torch.cuda.cudart().cudaProfilerStart()
torch.cuda.nvtx.range_push("uvm forward")
time_per_iter = benchmark_requests(
requests_uvm,
lambda indices, offsets, per_sample_weights: emb_uvm.forward(
indices.int(),
offsets.int(),
per_sample_weights,
),
flush_gpu_cache_size_mb=flush_gpu_cache_size_mb,
)
logging.info(
f"UVM NBit Forward, {weights_precision}, B: {B}, "
f"E_uvm: {E_uvm}, T: {T_uvm}, D: {D}, L: {L_uvm}, W: {weighted}, "
f"BW: {read_write_bytes_uvm / time_per_iter / 1.0e9: .2f} GB/s, " # noqa: B950
f"Time: {time_per_iter * 1.0e6:.0f}us"
)
torch.cuda.nvtx.range_pop()
torch.cuda.cudart().cudaProfilerStop()
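    # With GPU tables present, build combined requests (UVM indices followed by HBM
    # indices for each iteration) and benchmark the GPU-only and mixed ops.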
if T_gpu > 0:
requests = []
assert requests_gpu is not None
for rs_uvm, rs_gpu in zip(requests_uvm, requests_gpu):
indices = torch.cat([rs_uvm[0], rs_gpu[0]])
lengths = [L_uvm] * (T_uvm * B) + [L] * (T_gpu * B)
offsets = torch.tensor(([0] + np.cumsum(lengths).tolist())).int().cuda()
per_sample_weights = None
if weighted:
this_rs_uvm_weights = rs_uvm[2]
assert this_rs_uvm_weights is not None
this_rs_gpu_weights = rs_gpu[2]
assert this_rs_gpu_weights is not None
per_sample_weights = torch.cat(
[this_rs_uvm_weights, this_rs_gpu_weights]
)
requests.append((indices, offsets, per_sample_weights))
# forward
time_per_iter = benchmark_requests(
requests_gpu,
lambda indices, offsets, per_sample_weights: emb_gpu.forward(
indices.int(),
offsets.int(),
per_sample_weights,
),
flush_gpu_cache_size_mb=flush_gpu_cache_size_mb,
)
read_write_bytes_hbm = (
output_size_multiplier * B * sum(Ds[T_uvm:])
+ param_size_multiplier * B * sum(Ds[T_uvm:]) * L
)
logging.info(
f"GPU NBit Forward, {weights_precision}, B: {B}, "
f"E: {E}, T: {T_gpu}, D: {D}, L: {L}, W: {weighted}, "
f"BW: {read_write_bytes_hbm / time_per_iter / 1.0e9: .2f} GB/s, " # noqa: B950
f"Time: {time_per_iter * 1.0e6:.0f}us"
)
time_per_iter = benchmark_requests(
requests,
lambda indices, offsets, per_sample_weights: emb_mixed.forward(
indices.int(),
offsets.int(),
per_sample_weights,
),
flush_gpu_cache_size_mb=flush_gpu_cache_size_mb,
)
read_write_bytes_total = read_write_bytes_uvm + read_write_bytes_hbm
logging.info(
f"Mixed NBit Forward, {weights_precision}, B: {B}, "
f"E_GPU: {E}, E_UVM: {E_uvm}, T_GPU: {T_gpu}, T_UVM: {T_uvm}, D: {D}, L_GPU: {L}, L_UVM: {L_uvm}, W: {weighted}, "
f"BW: {read_write_bytes_total / time_per_iter / 1.0e9: .2f} GB/s, " # noqa: B950
f"Time: {time_per_iter * 1.0e6:.0f}us"
)
# benchmark prefetch
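        # Reset and warm the cache with one pass of forwards, then time prefetch and
        # forward separately for each request via benchmark_pipelined_requests.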
emb_mixed.reset_cache_states()
for indices, offsets, _ in requests:
emb_mixed.forward(indices, offsets)
prefetch_time, forward_time = benchmark_pipelined_requests(
requests,
lambda indices, offsets, indices_weights: emb_mixed.prefetch(
indices,
offsets,
),
lambda indices, offsets, indices_weights: emb_mixed.forward(
indices,
offsets,
indices_weights,
),
flush_gpu_cache_size_mb=flush_gpu_cache_size_mb,
)
e2e_time = prefetch_time + forward_time
logging.info(
f"Forward(LXU) {weights_precision}, reuse: {reuse}, alpha: {alpha}, B: {B}, "
f"E: {E}, T: {T}, D: {D}, L: {L}, "
f"Te2e: {e2e_time * 1.0e6:.0f}us, "
f"e2e BW: {read_write_bytes_total / e2e_time / 1.0e9: .2f} GB/s, "
f"Tprefetch: {prefetch_time * 1.0e6:.0f}us, "
f"TfwdTime: {forward_time * 1.0e6:.0f}us, "
f"{read_write_bytes_total / forward_time / 1.0e9: .2f} GB/s"
)
@cli.command()
@click.option("--test-name", type=str, default="")
@click.option("--alpha", default=1.0)
@click.option("--bag-size", default=20)
@click.option("--batch-size", default=512)
@click.option("--embedding-dim", default=128)
@click.option("--weights-precision", type=SparseType, default=SparseType.INT4)
@click.option("--iters", default=100)
@click.option("--warmup", default=10)
@click.option("--mixed", is_flag=True, default=False)
@click.option("--num-embeddings", default=int(1e5))
@click.option("--num-tables", default=32)
@click.option("--reuse", default=0.1)
@click.option("--weighted", is_flag=True, default=False)
@click.option("--flush-gpu-cache-size-mb", default=0)
@click.option("--output-dtype", type=SparseType, default=SparseType.FP16)
@click.option("--use-cache", is_flag=True, default=False)
@click.option("--cache-algorithm", default="lru")
@click.option("--cache-load-factor", default=0.2)
@click.option("--enforce-hbm", is_flag=True, default=False)
@click.option("--fp8-exponent-bits", type=int, default=None)
@click.option("--fp8-exponent-bias", type=int, default=None)
@click.option("--record-cache", is_flag=True, default=False)
@click.option("--uvm-host-mapped", is_flag=True, default=False)
@click.option(
"--dump-requests", type=int, default=0, help="number of reqs to dump (0=no dump)"
)
def nbit_uvm_compare_direct_mapped(
test_name: str,
alpha: bool,
bag_size: int,
batch_size: int,
embedding_dim: int,
weights_precision: SparseType,
iters: int,
warmup: int,
mixed: bool,
num_embeddings: int,
num_tables: int,
reuse: float,
weighted: bool,
flush_gpu_cache_size_mb: int,
output_dtype: SparseType,
use_cache: bool,
cache_algorithm: str,
cache_load_factor: float,
enforce_hbm: bool,
fp8_exponent_bits: Optional[int],
fp8_exponent_bias: Optional[int],
record_cache: bool,
uvm_host_mapped: bool,
dump_requests: int,
) -> None:
logging.info(json.dumps({k: str(v) for k, v in locals().items()}, indent=2))
np.random.seed(42)
torch.manual_seed(42)
B: int = batch_size
D: int = embedding_dim
L: int = bag_size
E: int = num_embeddings
T: int = num_tables
cache_alg: CacheAlgorithm = (
CacheAlgorithm.LRU if cache_algorithm == "lru" else CacheAlgorithm.LFU
)
managed_type: EmbeddingLocation = (
EmbeddingLocation.MANAGED_CACHING if use_cache else EmbeddingLocation.MANAGED
)
if mixed:
Ds: List[int] = [
round_up(np.random.randint(low=int(0.5 * D), high=int(1.5 * D)), 4)
for _ in range(T)
]
D = np.average(Ds)
else:
Ds: List[int] = [D] * T
_requests_uvm = generate_requests(
iters,
B,
T,
L,
E,
reuse=reuse,
alpha=alpha,
weighted=weighted,
)
# pyre-fixme[9]: requests_uvm has type `List[Tuple[IntTensor, IntTensor,
# Optional[Tensor]]]`; used as `List[Tuple[Tensor, Tensor, Optional[Tensor]]]`.
requests_uvm: List[Tuple[torch.IntTensor, torch.IntTensor, Optional[Tensor]]] = [
(a.int(), b.int(), c if c else None) for (a, b, c) in _requests_uvm
]
param_size_multiplier = weights_precision.bit_rate() / 8.0
output_size_multiplier = output_dtype.bit_rate() / 8.0
read_write_bytes_uvm: float = (
output_size_multiplier * B * sum(Ds[:T])
+ param_size_multiplier * B * sum(Ds[:T]) * L
)
stats: Dict[str, Any] = {
"B": B,
"T": T,
"E": E,
"L": L,
"D": D,
"reuse": reuse,
}
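    # Helper: build an IntNBit TBE op with the requested cache associativity (or pure
    # HBM), benchmark its forward pass, and record bandwidth/time and, optionally,
    # UVM cache statistics into `stats`.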
def bench_uvm_cls(
name: str = "32way",
cache_assoc: int = 32,
record_cache: bool = False,
hbm: bool = False,
) -> None:
loc = managed_type if not hbm else EmbeddingLocation.DEVICE
emb = IntNBitTableBatchedEmbeddingBagsCodegen(
[
(
"",
E,
d,
weights_precision,
loc,
)
for d in Ds[:T]
],
output_dtype=output_dtype,
cache_load_factor=cache_load_factor,
cache_algorithm=cache_alg,
cache_assoc=cache_assoc,
enforce_hbm=enforce_hbm,
fp8_exponent_bits=fp8_exponent_bits,
fp8_exponent_bias=fp8_exponent_bias,
gather_uvm_cache_stats=record_cache,
uvm_host_mapped=uvm_host_mapped,
).cuda()
emb.fill_random_weights()
nvtx_range = (
f"UVM-RECORD-CACHE-{name.upper()}"
if record_cache
else f"UVM-{name.upper()}"
)
callback_after_warmup = emb.reset_uvm_cache_stats if record_cache else None
torch.cuda.cudart().cudaProfilerStart()
time_per_iter = benchmark_requests(
requests_uvm,
lambda indices, offsets, per_sample_weights: emb.forward(
indices.int(),
offsets.int(),
per_sample_weights,
),
flush_gpu_cache_size_mb=flush_gpu_cache_size_mb,
num_warmups=warmup,
nvtx_range=nvtx_range,
callback_after_warmup=callback_after_warmup,
)
torch.cuda.cudart().cudaProfilerStop()
nonlocal stats
if name not in stats:
stats[name] = {}
        if not record_cache:
            # Only measure time when the cache counter is off (it adds serious overhead)
stats[name]["bytes"] = read_write_bytes_uvm
stats[name]["time_per_iter"] = time_per_iter * 1e6
logging.info(
f"[{name.center(8)}] "
f"UVM NBit Forward, {weights_precision}, B: {B}, "
f"E_uvm: {E}, T: {T}, D: {D}, L: {L}, W: {weighted}, "
f"BW: {read_write_bytes_uvm / time_per_iter / 1.0e9: .2f} GB/s, " # noqa: B950
f"Time: {time_per_iter * 1.0e6:.0f}us"
)
if record_cache:
ucs = emb.uvm_cache_stats.detach().cpu().numpy().tolist()
cache_stats = {
"num_calls": ucs[0],
"num_requested_indices": ucs[1],
"num_unique_indices": ucs[2],
"num_unique_misses": ucs[3],
"num_conflict_unique_misses": ucs[4],
"num_conflict_misses": ucs[5],
}
stats[name]["cache_stats"] = cache_stats
logging.info(f"[{name:>8s}] cache stats {cache_stats}")
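    # Compare pure HBM, a 32-way set-associative cache, and a direct-mapped (1-way)
    # cache; optionally repeat the cached runs with UVM cache-stat recording.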
bench_uvm_cls(name="HBM", hbm=True)
bench_uvm_cls(name="32way", cache_assoc=32)
bench_uvm_cls(name="1way", cache_assoc=1)
if record_cache:
bench_uvm_cls(
name="32way",
cache_assoc=32,
record_cache=True,
)
bench_uvm_cls(
name="1way",
cache_assoc=1,
record_cache=True,
)
if test_name:
folder = Path(os.getenv("HOME", ".")) / test_name
if not folder.is_dir():
logging.info(f"MAKING FOLDER {folder}")
folder.mkdir(parents=True, mode=0o755)
with (folder / "uvm_stats.txt").open("w") as f:
logging.info(f"Dumping stats at {folder}")
print(stats, file=f)
if dump_requests:
with (folder / "requests.txt").open("w") as f:
for req in requests_uvm[:dump_requests]:
ind, off, _ = req
print(ind.cpu().numpy().tolist(), file=f)
print(off.cpu().numpy().tolist(), file=f)
@cli.command()
@click.option("--alpha", default=1.0)
@click.option("--bag-size", default=20)
@click.option("--batch-size", default=512)
@click.option("--cache-algorithm", default="lru")
@click.option("--cache-load-factor", default=0.2)
@click.option("--cache-assoc", default=32)
@click.option("--embedding-dim", default=128)
@click.option("--weights-precision", type=SparseType, default=SparseType.INT4)
@click.option("--iters", default=100)
@click.option("--mixed", is_flag=True, default=False)
@click.option("--num-embeddings", default=int(1e5))
@click.option("--num-tables", default=32)
@click.option("--reuse", default=0.1)
@click.option("--weighted", is_flag=True, default=False)
@click.option("--flush-gpu-cache-size-mb", default=0)
@click.option("--output-dtype", type=SparseType, default=SparseType.FP16)
@click.option("--enforce-hbm", is_flag=True, default=False)
@click.option("--record-cache-miss-counter", is_flag=True, default=False)
@click.option("--record-tablewise-cache-miss", is_flag=True, default=False)
@click.option("--gather-uvm-cache-stats", is_flag=True, default=False)
@click.option("--fp8-exponent-bits", type=int, default=None)
@click.option("--fp8-exponent-bias", type=int, default=None)
def nbit_cache( # noqa C901
alpha: float,
bag_size: int,
batch_size: int,
cache_algorithm: str,
cache_load_factor: float,
cache_assoc: int,
embedding_dim: int,
weights_precision: SparseType,
iters: int,
mixed: bool,
num_embeddings: int,
num_tables: int,
reuse: float,
weighted: bool,
flush_gpu_cache_size_mb: int,
output_dtype: SparseType,
enforce_hbm: bool,
record_cache_miss_counter: bool,
record_tablewise_cache_miss: bool,
gather_uvm_cache_stats: bool,
fp8_exponent_bits: Optional[int],
fp8_exponent_bias: Optional[int],
) -> None:
np.random.seed(42)
torch.manual_seed(42)
B = batch_size
D = embedding_dim
L = bag_size
E = num_embeddings
T = num_tables
cache_alg = CacheAlgorithm.LRU if cache_algorithm == "lru" else CacheAlgorithm.LFU
if mixed:
Ds = [
round_up(np.random.randint(low=int(0.5 * D), high=int(1.5 * D)), 4)
for _ in range(T)
]
D = np.average(Ds)
else:
Ds = [D] * T
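    # Two ops over identical tables: emb_nc keeps weights in plain MANAGED (UVM) memory
    # with no cache, while emb uses MANAGED_CACHING with an LXU cache in HBM.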
emb_nc = IntNBitTableBatchedEmbeddingBagsCodegen(
[
(
"",
E,
d,
weights_precision,
EmbeddingLocation.MANAGED,
)
for d in Ds
],
output_dtype=output_dtype,
enforce_hbm=enforce_hbm,
fp8_exponent_bits=fp8_exponent_bits,
fp8_exponent_bias=fp8_exponent_bias,
cache_assoc=cache_assoc,
).cuda()
emb_nc.fill_random_weights()
emb = IntNBitTableBatchedEmbeddingBagsCodegen(
[
(
"",
E,
d,
weights_precision,
EmbeddingLocation.MANAGED_CACHING,
)
for d in Ds
],
record_cache_metrics=RecordCacheMetrics(
record_cache_miss_counter, record_tablewise_cache_miss
),
gather_uvm_cache_stats=gather_uvm_cache_stats,
cache_load_factor=cache_load_factor,
cache_algorithm=cache_alg,
output_dtype=output_dtype,
enforce_hbm=enforce_hbm,
fp8_exponent_bits=fp8_exponent_bits,
fp8_exponent_bias=fp8_exponent_bias,
cache_assoc=cache_assoc,
).cuda()
emb.fill_random_weights()
nparams_byte = sum(w.numel() for (w, _) in emb.split_embedding_weights())
param_size_multiplier = weights_precision.bit_rate() / 8.0
output_size_multiplier = output_dtype.bit_rate() / 8.0
read_write_bytes = (
param_size_multiplier * B * sum(Ds) * L
+ output_size_multiplier * B * sum(Ds)
+ param_size_multiplier * B * sum(Ds) * L
)
logging.info(
f"{weights_precision} Embedding tables: {E * T} rows, {nparams_byte / param_size_multiplier / 1.0e9: .2f} GParam, "
f"{nparams_byte / 1.0e9: .2f} GB" # IntN TBE use byte for storage
)
logging.info(
f"Accessed weights per batch: {B * T * L} rows, "
f"{B * T * L * D * param_size_multiplier / 1.0e9: .2f} GB"
)
requests = generate_requests(
2 * iters, B, T, L, E, reuse=reuse, alpha=alpha, weighted=weighted
)
requests = [(a.int(), b.int(), c if c else None) for (a, b, c) in requests]
warmup_requests, requests = requests[:iters], requests[iters:]
time_per_iter = benchmark_requests(
requests,
lambda indices, offsets, per_sample_weights: emb_nc(
indices.int(), offsets.int(), per_sample_weights
),
flush_gpu_cache_size_mb=flush_gpu_cache_size_mb,
)
logging.info(
f"Forward (UVM) {weights_precision}, B: {B}, E: {E}, T: {T}, D: {D}, L: {L}, "
f"BW: {read_write_bytes / time_per_iter / 1.0e9: .2f} GB/s, " # noqa: B950
f"T: {time_per_iter * 1.0e6:.0f}us"
)
# warm up
for indices, offsets, _ in warmup_requests:
emb.forward(indices.int(), offsets.int())
# get cache miss rate (forward only) and exchanged cache lines (prefetch)
cache_misses = []
exchanged_cache_lines = []
unique_indices = []
input_indices = []
NOT_FOUND = -1
# reset the cache miss counters after warmup
if record_cache_miss_counter or record_tablewise_cache_miss:
emb.reset_cache_miss_counter()
if gather_uvm_cache_stats:
emb.reset_uvm_cache_stats()
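    # For each request: snapshot the cache state, prefetch, then count exchanged cache
    # lines, forward-pass misses, and unique vs. total indices.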
for indices, offsets, _ in requests:
old_lxu_cache_state = emb.lxu_cache_state.clone()
emb.prefetch(indices, offsets)
exchanged_cache_lines.append(
(emb.lxu_cache_state != old_lxu_cache_state).sum().item()
)
cache_misses.append(
(emb.lxu_cache_locations_list.top() == NOT_FOUND).sum().item()
)
emb.forward(indices, offsets)
linear_cache_indices = torch.ops.fbgemm.linearize_cache_indices(
emb.cache_hash_size_cumsum,
indices,
offsets,
)
unique_indices.append(len(torch.unique(linear_cache_indices, sorted=False)))
input_indices.append(len(indices))
logging.info(
f"Exchanged cache lines -- mean: {sum(exchanged_cache_lines)/len(requests): .2f}, "
f"max: {max(exchanged_cache_lines)}, min: {min(exchanged_cache_lines)}"
)
logging.info(
f"Cache miss -- mean: {sum(cache_misses)/len(requests)}, "
f"max: {max(cache_misses)}, min: {min(cache_misses)}"
)
logging.info(
f"input_indices -- mean: {sum(input_indices)/len(requests)}, "
f"max: {max(input_indices)}, min: {min(input_indices)}"
)
logging.info(
f"unique_indices -- mean: {sum(unique_indices)/len(requests)}, "
f"max: {max(unique_indices)}, min: {min(unique_indices)}"
)
unique_miss_rate = [a / b for (a, b) in zip(exchanged_cache_lines, unique_indices)]
logging.info(
f"unique_miss_rate -- mean: {sum(unique_miss_rate)/len(requests)}, "
f"max: {max(unique_miss_rate)}, min: {min(unique_miss_rate)}"
)
if record_cache_miss_counter or record_tablewise_cache_miss:
emb.print_cache_miss_counter()
if gather_uvm_cache_stats:
emb.print_uvm_cache_stats()
# benchmark prefetch
if record_cache_miss_counter or record_tablewise_cache_miss:
emb.reset_cache_states()
if gather_uvm_cache_stats:
emb.reset_uvm_cache_stats()
for indices, offsets, _ in warmup_requests:
emb.forward(indices, offsets)
torch.cuda.cudart().cudaProfilerStart()
torch.cuda.nvtx.range_push("pipeline")
prefetch_time, forward_time = benchmark_pipelined_requests(
requests,
lambda indices, offsets, indices_weights: emb.prefetch(
indices,
offsets,
),
lambda indices, offsets, indices_weights: emb.forward(
indices,
offsets,
indices_weights,
),
flush_gpu_cache_size_mb=flush_gpu_cache_size_mb,
)
e2e_time = prefetch_time + forward_time
torch.cuda.nvtx.range_pop()
logging.info(
f"Forward(LXU) {weights_precision}, reuse: {reuse}, alpha: {alpha}, B: {B}, "
f"E: {E}, T: {T}, D: {D}, L: {L}, "
f"Te2e: {e2e_time * 1.0e6:.0f}us, "
f"e2e BW: {read_write_bytes / e2e_time / 1.0e9: .2f} GB/s, "
f"Tprefetch: {prefetch_time * 1.0e6:.0f}us, "
f"{2 * sum(exchanged_cache_lines) * param_size_multiplier * D / prefetch_time / len(requests) / 1.0e9: .2f} GB/s, "
f"TfwdTime: {forward_time * 1.0e6:.0f}us, "
f"{read_write_bytes / forward_time / 1.0e9: .2f} GB/s"
)
torch.cuda.cudart().cudaProfilerStop()
@cli.command()
@click.option("--bag-size", default=20)
@click.option("--batch-size", default=2048)
@click.option("--iters", default=10)
@click.option("--num-embeddings", default=int(1e5))
@click.option("--num-tables", default=100)
@click.option("--pruning-hash-load-factor", default=0.75)
@click.option("--hit-rate", default=0.9)
@click.option("--use-cpu", is_flag=True, default=False)
@click.option("--requests_data_file", type=str, default=None)
@click.option("--tables", type=str, default=None)
def hashtable( # noqa C901
bag_size: int,
batch_size: int,
iters: int,
num_embeddings: int,
num_tables: int,
pruning_hash_load_factor: float,
hit_rate: float,
use_cpu: bool,
requests_data_file: Optional[str],
tables: Optional[str],
) -> None:
B = batch_size
T = num_tables
L = bag_size
E = num_embeddings
np.random.seed(42)
torch.manual_seed(42)
if hit_rate == 1.0:
chosen_indices = torch.cat([torch.arange(E) for _ in range(T)], dim=0).int()
else:
chosen_indices = (
torch.randint(low=0, high=int(E * 1.0 / hit_rate), size=(E * T,))
.view(-1)
.int()
)
dense_indices = torch.cat([torch.arange(E) for _ in range(T)], dim=0).int()
offsets = torch.tensor([E * t for t in range(T + 1)]).int()
assert offsets[-1] == chosen_indices.numel()
assert offsets.numel() == T + 1
assert (offsets.numel() - 1) // T == 1
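    # Size each per-table hash map for the requested load factor, rounded up to a multiple of 32 slots.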
capacities = [round_up(int(E / pruning_hash_load_factor), 32) for _ in range(T)]
hash_table = torch.zeros(
(sum(capacities), 2),
dtype=torch.int32,
)
hash_table_offsets = torch.tensor([0] + np.cumsum(capacities).tolist()).long()
assert hash_table.numel() * 4 < 2**32
# initialize
hash_table[:, :] = -1
torch.ops.fbgemm.pruned_hashmap_insert(
chosen_indices, dense_indices, offsets, hash_table, hash_table_offsets
)
requests = generate_requests(
iters,
B,
T,
L,
E,
requests_data_file=requests_data_file,
tables=tables,
)
if not use_cpu:
hash_table = hash_table.cuda()
hash_table_offsets = hash_table_offsets.cuda()
requests = [(a.cuda().int(), b.cuda().int(), c) for (a, b, c) in requests]
else:
requests = [(a.int().cpu(), b.int().cpu(), c) for (a, b, c) in requests]
empirical_hit_rate = np.mean(
[
torch.ops.fbgemm.pruned_hashmap_lookup(
indices, offsets, hash_table, hash_table_offsets
)
.ne(-1)
.sum()
.item()
/ indices.numel()
for indices, offsets, _ in requests
]
)
time_per_iter = benchmark_requests(
requests,
lambda indices, offsets, _: torch.ops.fbgemm.pruned_hashmap_lookup(
indices, offsets, hash_table, hash_table_offsets
),
)
logging.info(
f"LinearTable: B: {B}, T: {T}, L: {L}, E: {E}, QPS: {B * T * L / time_per_iter / 1.0e9:.2f}B QPS/s, "
f"T: {time_per_iter * 1.0e6:.0f}us, pruning load factor: {E * T / hash_table.shape[0] * 100:.1f}%, hit rate: {empirical_hit_rate * 100:.2f}%, Table size: {hash_table.numel() * 4 / 1.0e9:.0f} GB"
)
if use_cpu:
ht = torch.classes.fbgemm.PrunedMapCPU()
ht.insert(chosen_indices, dense_indices, offsets, T)
time_per_iter = benchmark_requests(
requests,
lambda indices, offsets, _: ht.lookup(indices, offsets),
)
logging.info(
f"HashTable: B: {B}, T: {T}, L: {L}, E: {E}, QPS: {B * T * L / time_per_iter / 1.0e9:.2f}B QPS/s, "
f"T: {time_per_iter * 1.0e6:.0f}us, pruning load factor: {E * T / hash_table.shape[0] * 100:.1f}%, hit rate: {empirical_hit_rate * 100:.2f}%, Table size: {hash_table.numel() * 4 / 1.0e9:.0f} GB"
)
@cli.command()
@click.option("--bag-size", default=20)
@click.option("--batch-size", default=2048)
@click.option("--iters", default=100)
@click.option("--num-embeddings", default=int(1e5))
@click.option("--num-tables", default=100)
@click.option("--pruning-ratio", default=0.9)
@click.option("--device", default="cuda")
@click.option("--requests_data_file", type=str, default=None)
@click.option("--tables", type=str, default=None)
def pruned_array( # noqa C901
bag_size: int,
batch_size: int,
iters: int,
num_embeddings: int,
num_tables: int,
pruning_ratio: float,
device: str,
requests_data_file: Optional[str],
tables: Optional[str],
) -> None:
B = batch_size
T = num_tables
L = bag_size
E = num_embeddings
np.random.seed(42)
torch.manual_seed(42)
    assert 0 < pruning_ratio < 1, "pruning_ratio must be in (0, 1)"
original_E = int(E / (1.0 - pruning_ratio))
index_remappings = torch.tensor(
[-1] * original_E * T, dtype=torch.int32, device=device
)
index_remappings_offsets = torch.empty(T + 1, dtype=torch.int64, device=device)
index_remappings_offsets[0] = 0
dense_indices = torch.tensor(range(E), dtype=torch.int32, device=device)
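    # For each table, map E randomly chosen original rows to dense ids 0..E-1; the
    # remaining (pruned) rows keep the sentinel value -1.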
for t in range(T):
selected_indices = torch.add(
torch.randperm(original_E, device=device), t * original_E
)[:E]
index_remappings[selected_indices] = dense_indices
index_remappings_offsets[t + 1] = index_remappings_offsets[t] + original_E
requests = generate_requests(
iters,
B,
T,
L,
E,
requests_data_file=requests_data_file,
tables=tables,
use_cpu=True if device == "cpu" else False,
)
requests = [(a.int().to(device), b.int().to(device), c) for (a, b, c) in requests]
time_per_iter = benchmark_requests(
requests,
lambda indices, offsets, _: torch.ops.fbgemm.pruned_array_lookup(
indices,
offsets,
index_remappings,
index_remappings_offsets,
),
)
logging.info(
f"LinearTable: B: {B}, T: {T}, L: {L}, E: {E}, QPS: {B * T * L / time_per_iter / 1.0e9:.2f}B QPS/s, "
f"T: {time_per_iter * 1.0e6:.0f}us, Pruning Ratio: {pruning_ratio * 100:.2f}%, Table size: {original_E * T * 4 / 1.0e9:.0f} GB"
)
@cli.command()
@click.option("--bag-size", default=20)
@click.option("--batch-size", default=512)
@click.option("--iters", default=100)
@click.option("--num-embeddings", default=int(1e5))
@click.option("--num-tables", default=32)
@click.option("--bounds-check-mode", type=int, default=BoundsCheckMode.WARNING.value)
@click.option("--requests_data_file", type=str, default=None)
@click.option("--tables", type=str, default=None)
def bounds_check_indices( # noqa C901
bag_size: int,
batch_size: int,
iters: int,
num_embeddings: int,
num_tables: int,
bounds_check_mode: int,
requests_data_file: Optional[str],
tables: Optional[str],
) -> None:
np.random.seed(42)
torch.manual_seed(42)
B = batch_size
L = bag_size
E = num_embeddings
T = num_tables
requests = generate_requests(
iters,
B,
T,
L,
E,
requests_data_file=requests_data_file,
tables=tables,
)
# requests = [(a.int(), b.int(), c if c else None) for (a, b, c) in requests]
warning = torch.tensor([0]).long().to(get_device())
rows_per_table = torch.tensor([E for _ in range(T)]).long().to(get_device())
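    # The bandwidth estimate below assumes 8-byte indices and offsets: B*T*L indices plus B*T + 1 offsets per iteration.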
# forward
time_per_iter = benchmark_requests(
requests,
lambda indices, offsets, _: torch.ops.fbgemm.bounds_check_indices(
rows_per_table,
indices,
offsets,
BoundsCheckMode(bounds_check_mode),
warning,
),
)
logging.info(
f"Bounds Check Indices: B: {B}, "
f"E: {E}, T: {T}, L: {L}, "
f"BW: {(8 * B * T * L + 8 * (B * T + 1)) / time_per_iter / 1.0e9: .2f} GB/s, " # noqa: B950
f"T: {time_per_iter * 1.0e6:.0f}us"
)
@cli.command()
@click.option("--num-tables", type=int, default=32)
@click.option("--embedding-dim", type=int, default=248)
@click.option("--num-embeddings", type=int, default=int(1e5))
@click.option("--update-row-num", type=int, default=int(1e4))
@click.option("--weights-precision", type=SparseType, default=SparseType.INT4)
@click.option("--output-dtype", type=SparseType, default=SparseType.FP16)
@click.option("--iters", type=int, default=100)
@click.option("--fp8-exponent-bits", type=int, default=None)
@click.option("--fp8-exponent-bias", type=int, default=None)
def emb_inplace_update( # noqa C901
num_tables: int,
embedding_dim: int,
num_embeddings: int,
update_row_num: int,
weights_precision: SparseType,
output_dtype: SparseType,
iters: int,
fp8_exponent_bits: Optional[int],
fp8_exponent_bias: Optional[int],
) -> None:
if open_source:
        logging.warning(
            "The emb_inplace_update op benchmark is not yet supported in the open-source build"
        )
return
np.random.seed(42)
torch.manual_seed(42)
T = num_tables
D = embedding_dim
E = num_embeddings
N = update_row_num
D_alignment = max(weights_precision.align_size() for t in range(T))
D_alignment = max(D_alignment, output_dtype.align_size())
D = round_up(D, D_alignment)
Ds = [
round_up(
np.random.randint(low=int(max(0.25 * D, 1)), high=int(1.0 * D)),
D_alignment,
)
for _ in range(T)
]
Es = [E] * T
row_alignment = 16 # use_cpu = False -> only test CUDA function now
weights_ty_list = [weights_precision] * T
managed = [EmbeddingLocation.DEVICE] * T
embedding_specs = [
(
"",
E,
D,
W_TY,
EmbeddingLocation(M),
)
for (E, D, M, W_TY) in zip(Es, Ds, managed, weights_ty_list)
]
op = IntNBitTableBatchedEmbeddingBagsCodegen(
embedding_specs=embedding_specs,
output_dtype=output_dtype,
device=torch.cuda.current_device(),
fp8_exponent_bits=fp8_exponent_bits,
fp8_exponent_bias=fp8_exponent_bias,
)
    # Initialize random weights for the int-nbit table-batched split embedding bag
op.fill_random_weights()
update_table_idx = [np.random.randint(low=0, high=T) for _ in range(N)]
# Generate non-dup indices
table_map = {}
update_row_idx = []
for t in update_table_idx:
while True:
row_idx = np.random.randint(low=0, high=Es[t])
if t not in table_map or row_idx not in table_map[t]:
break
if t in table_map:
table_map[t].append(row_idx)
else:
table_map[t] = []
table_map[t].append(row_idx)
update_row_idx.append(row_idx)
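    # Total update payload size: one row-aligned row (in bytes) per (table, row) pair being updated.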
update_weight_size = sum(
[
rounded_row_size_in_bytes(
Ds[t],
weights_ty_list[t],
row_alignment,
)
for t in update_table_idx
]
)
update_weights = torch.randint(
low=0,
high=255,
size=(update_weight_size,),
dtype=torch.uint8,
device=torch.cuda.current_device(),
)
param_size_multiplier = weights_precision.bit_rate() / 8.0
output_size_multiplier = output_dtype.bit_rate() / 8.0
read_write_bytes = output_size_multiplier * N * D + param_size_multiplier * N * D
# Update op weights with the customized ops
op.embedding_inplace_update_internal(
update_table_idx,
update_row_idx,
update_weights,
)
time_per_iter, _ = benchmark_torch_function(
op.embedding_inplace_update_internal,
(update_table_idx, update_row_idx, update_weights),
iters=iters,
)
logging.info(
f"Emb inplace update (including H2D for metadata): "
f"T: {T}, D: {D}, E: {E}, N: {N}, "
f"BW: {read_write_bytes / time_per_iter / 1.0e9:.2f} GB/s, " # noqa: B950
f"T: {time_per_iter * 1.0e6:.0f}us"
)
update_offsets = []
update_offset = 0
for table_idx in update_table_idx:
D_bytes = rounded_row_size_in_bytes(
Ds[table_idx],
weights_ty_list[table_idx],
row_alignment,
)
update_offsets.append(update_offset)
update_offset += D_bytes
update_offsets.append(update_offset)
update_table_idx = torch.tensor(
update_table_idx,
device=torch.cuda.current_device(),
dtype=torch.int32,
)
update_row_idx = torch.tensor(
update_row_idx,
device=torch.cuda.current_device(),
dtype=torch.int32,
)
update_offsets = torch.tensor(
update_offsets,
device=torch.cuda.current_device(),
dtype=torch.int64,
)
time_per_iter, _ = benchmark_torch_function(
torch.ops.fbgemm.emb_inplace_update,
(
op.weights_dev,
op.weights_uvm,
op.weights_placements,
op.weights_offsets,
op.weights_tys,
op.D_offsets,
update_weights,
update_table_idx,
update_row_idx,
update_offsets,
16, # row_alignment
),
iters=iters,
)
logging.info(
f"Emb inplace update (pure device update op): "
f"T: {T}, D: {D}, E: {E}, N: {N}, "
f"BW: {read_write_bytes / time_per_iter / 1.0e9:.2f} GB/s, " # noqa: B950
f"T: {time_per_iter * 1.0e6:.0f}us"
)
@cli.command()
@click.option("--alpha", default=1.0)
@click.option(
"--bag-size-list",
type=str,
default="20",
)
@click.option(
"--bag-size-sigma-list",
type=str,
default="None",
help="A list of bag size standard deviations for generating bag sizes "
"(one std per table). If set, the benchmark will treat --bag-size-list as a "
"list of bag size means.",
)
@click.option("--batch-size", default=512)
@click.option("--embedding-dim-list", type=str, default="128")
@click.option("--weights-precision", type=SparseType, default=SparseType.FP32)
@click.option("--stoc", is_flag=True, default=False)
@click.option("--iters", default=100)
@click.option("--warmup-runs", default=0)
@click.option("--managed", default="device")
@click.option("--num-embeddings-list", type=str, default="100000")
@click.option("--reuse", default=0.0)
@click.option("--row-wise/--no-row-wise", default=True)
@click.option("--weighted", is_flag=True, default=False)
@click.option("--pooling", type=str, default="sum")
@click.option("--bounds-check-mode", type=int, default=BoundsCheckMode.NONE.value)
@click.option("--flush-gpu-cache-size-mb", default=0)
@click.option("--output-dtype", type=SparseType, default=SparseType.FP32)
def device_with_spec( # noqa C901
alpha: float,
bag_size_list: str,
bag_size_sigma_list: str,
batch_size: int,
embedding_dim_list: str,
weights_precision: SparseType,
stoc: bool,
iters: int,
warmup_runs: int,
managed: str,
num_embeddings_list: str,
reuse: float,
row_wise: bool,
weighted: bool,
pooling: str,
bounds_check_mode: int,
flush_gpu_cache_size_mb: int,
output_dtype: SparseType,
) -> None:
np.random.seed(42)
torch.manual_seed(42)
B = batch_size
Ds = [int(D) for D in embedding_dim_list.split(",")]
Es = [int(E) for E in num_embeddings_list.split(",")]
T = len(Ds)
use_variable_bag_sizes = bag_size_sigma_list != "None"
if use_variable_bag_sizes:
Ls = [int(mu) for mu in bag_size_list.split(",")]
sigma_Ls = [int(sigma) for sigma in bag_size_sigma_list.split(",")]
assert T == len(Ls) and T == len(sigma_Ls), (
f"bag-size-list (length: {len(Ls)}) and bag-size-sigma-list "
f"(length: {len(sigma_Ls)}) must have the same length as "
f"embedding-dim-list (length: {T})"
)
else:
Ls = [int(L) for L in bag_size_list.split(",")]
assert T == len(Ls), (
f"bag-size-list (length: {len(Ls)}) must have the same length as "
f"embedding-dim-list (length: {T})"
)
assert T == len(Es), (
f"num-embeddings-list (length: {len(Es)}) must have the same length as "
f"embedding-dim-list (length: {T})"
)
assert T >= 1, "There must be at least one table"
feature_requires_grad = None
optimizer = OptimType.EXACT_ROWWISE_ADAGRAD if row_wise else OptimType.EXACT_ADAGRAD
if managed == "device":
managed_option = (
EmbeddingLocation.DEVICE
if torch.cuda.is_available()
else EmbeddingLocation.HOST
)
else:
managed_option = EmbeddingLocation.MANAGED
if pooling is None or pooling == "sum":
pooling = "sum"
pooling_mode = PoolingMode.SUM
do_pooling = True
elif pooling == "mean":
pooling_mode = PoolingMode.MEAN
do_pooling = True
else: # "none"
pooling_mode = PoolingMode.NONE
do_pooling = False
if not do_pooling:
ref_D = Ds[0]
for D in Ds:
assert (
D == ref_D
), "All embedding dimensions must be the same for sequence TBE"
emb = SplitTableBatchedEmbeddingBagsCodegen(
[
(
e,
d,
managed_option,
ComputeDevice.CUDA if torch.cuda.is_available() else ComputeDevice.CPU,
)
for d, e in zip(Ds, Es)
],
optimizer=optimizer,
learning_rate=0.1,
eps=0.1,
weights_precision=weights_precision,
stochastic_rounding=stoc,
output_dtype=output_dtype,
pooling_mode=pooling_mode,
bounds_check_mode=BoundsCheckMode(bounds_check_mode),
)
emb = emb.to(get_device())
if weights_precision == SparseType.INT8:
emb.init_embedding_weights_uniform(-0.0003, 0.0003)
nparams = sum(w.numel() for w in emb.split_embedding_weights())
param_size_multiplier = weights_precision.bit_rate() / 8.0
output_size_multiplier = output_dtype.bit_rate() / 8.0
# Generate a request for each table then combine
all_requests = {
"indices": [[] for _ in range(iters)],
"offsets": [[] for _ in range(iters)],
"weights": [[] for _ in range(iters)],
}
# row = iter, column = tensor
for t, e in enumerate(Es):
# (indices, offsets, weights)
requests = generate_requests(
iters,
B,
1,
Ls[t],
e,
reuse=reuse,
alpha=alpha,
weighted=weighted,
sigma_L=sigma_Ls[t] if use_variable_bag_sizes else None,
zipf_oversample_ratio=3 if Ls[t] > 5 else 5,
)
for i, (indices, offsets, weights) in enumerate(requests):
all_requests["indices"][i].append(indices)
if t > 0:
offsets = offsets[1:] # remove the first element
offsets += all_requests["offsets"][i][t - 1][-1]
all_requests["offsets"][i].append(offsets)
all_requests["weights"][i].append(weights)
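    # Merge the per-table pieces into one (indices, offsets, weights) tuple per iteration.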
prev_indices_len = -1
requests = []
for i in range(iters):
indices = torch.concat(all_requests["indices"][i])
if prev_indices_len == -1:
prev_indices_len = indices.numel()
assert (
prev_indices_len == indices.numel()
), "Number of indices for every iteration must be the same"
offsets = torch.concat(all_requests["offsets"][i])
if weighted:
weights = torch.concat(all_requests["weights"][i])
else:
weights = None
requests.append((indices, offsets, weights))
del all_requests
assert len(requests) == iters
sum_DLs = sum([d * l for d, l in zip(Ds, Ls)])
    if do_pooling:
        read_write_bytes = (
            output_size_multiplier * B * sum(Ds) + param_size_multiplier * B * sum_DLs
        )
    else:
        # Sequence (non-pooled) output has B * sum(L_t * D_t) elements rather than B * sum(D_t)
        read_write_bytes = (
            output_size_multiplier * B * sum_DLs + param_size_multiplier * B * sum_DLs
        )
if use_variable_bag_sizes:
# pyre-ignore [61]
Ls_str = f"mu {Ls} sigma {sigma_Ls}"
else:
Ls_str = f"{Ls}"
logging.info(
f"Embedding parameters: {nparams / 1.0e9: .2f} GParam, "
f"{nparams * param_size_multiplier / 1.0e9: .2f} GB"
)
logging.info(
f"Accessed weights per batch: {B * sum_DLs * param_size_multiplier / 1.0e9: .2f} GB"
)
# forward
time_per_iter = benchmark_requests(
requests,
lambda indices, offsets, per_sample_weights: emb.forward(
indices.long(),
offsets.long(),
per_sample_weights,
feature_requires_grad=feature_requires_grad,
),
flush_gpu_cache_size_mb=flush_gpu_cache_size_mb,
num_warmups=warmup_runs,
)
logging.info(
f"Forward, B: {B}, "
f"Es: {Es}, T: {T}, Ds: {Ds}, Ls: {Ls_str}, W: {weighted}, "
f"BW: {read_write_bytes / time_per_iter / 1.0e9: .2f} GB/s, " # noqa: B950
f"T: {time_per_iter * 1.0e6:.0f}us"
)
if output_dtype == SparseType.INT8:
# backward bench not representative
return
if do_pooling:
grad_output = torch.randn(B, sum(Ds)).to(get_device())
else:
# Obtain B * L from indices len
# pyre-ignore[19]
grad_output = torch.randn(requests[0][0].numel(), D).to(get_device())
# backward
time_per_iter = benchmark_requests(
requests,
lambda indices, offsets, per_sample_weights: emb(
indices.long(),
offsets.long(),
per_sample_weights,
feature_requires_grad=feature_requires_grad,
),
flush_gpu_cache_size_mb=flush_gpu_cache_size_mb,
bwd_only=True,
grad=grad_output,
)
logging.info(
f"Backward, B: {B}, Es: {Es}, T: {T}, Ds: {Ds}, Ls: {Ls_str}, "
f"BW: {2 * read_write_bytes / time_per_iter / 1.0e9: .2f} GB/s, "
f"T: {time_per_iter * 1.0e6:.0f}us"
)
def _to_offsets(lengths: torch.Tensor) -> torch.Tensor:
return torch.ops.fbgemm.asynchronous_complete_cumsum(lengths)
@cli.command()
@click.option("--batch-size", default=128000)
@click.option("--compressed-batch-size", default=12800)
@click.option("--embedding-dim", default=128)
@click.option("--bag-size", default=5)
@click.option("--num-embeddings", default=int(1e5))
@click.option("--num-tables", default=20)
@click.option("--compressed-tables", default=10)
@click.option("--iters", default=100)
def vbe(
batch_size: int,
compressed_batch_size: int,
embedding_dim: int,
bag_size: int,
num_embeddings: int,
num_tables: int,
compressed_tables: int,
iters: int,
) -> None:
torch.manual_seed(42)
B = batch_size
cB = compressed_batch_size
D = embedding_dim
L = bag_size
E = num_embeddings
T = num_tables
cT = compressed_tables
Ds = [D] * T
optimizer = OptimType.EXACT_ROWWISE_ADAGRAD
managed_option = (
EmbeddingLocation.DEVICE
if torch.cuda.is_available()
else EmbeddingLocation.HOST
)
pooling_mode = PoolingMode.SUM
emb = SplitTableBatchedEmbeddingBagsCodegen(
[
(
E,
d,
managed_option,
ComputeDevice.CUDA,
)
for d in Ds
],
optimizer=optimizer,
learning_rate=0.1,
eps=0.1,
weights_precision=SparseType.FP32,
stochastic_rounding=False,
output_dtype=SparseType.FP32,
pooling_mode=pooling_mode,
bounds_check_mode=BoundsCheckMode(BoundsCheckMode.NONE.value),
).to(get_device())
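    # The first cT tables use a compressed batch of cB samples; the remaining T - cT tables use the full batch B.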
compressed_batch_sizes = ([cB] * cT) + ([B] * (T - cT))
compressed_lengths = [L] * sum(compressed_batch_sizes)
compressed_offsets = torch.ops.fbgemm.asynchronous_complete_cumsum(
torch.tensor(compressed_lengths, device=get_device())
)
compressed_values = torch.randint(
low=0,
high=E,
size=(sum(compressed_lengths),),
device=get_device(),
dtype=torch.int32,
)
batch_sizes = [B] * T
lengths = [L] * sum(batch_sizes)
offsets = torch.ops.fbgemm.asynchronous_complete_cumsum(
torch.tensor(lengths, device=get_device())
)
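    # Build a reindex map that expands each compressed table back to B samples by
    # duplicating randomly chosen compressed rows; uncompressed tables map one-to-one.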
reindex = []
for t in range(cT):
start = t * cB
end = cB * (t + 1)
reindex.extend(range(start, end))
for _ in range(B - cB):
            # random.randint is inclusive on both ends; keep the pick inside table t's compressed rows
            i = random.randint(t * cB, cB * (t + 1) - 1)
reindex.append(i)
    # Uncompressed tables map one-to-one onto their own B rows
    reindex.extend(range(cB * cT, (cB * cT) + (B * (T - cT))))
reindex = torch.tensor(reindex, device=get_device())
values = torch.index_select(compressed_values.reshape(-1, L), 0, reindex).flatten()
requests = [
(
values,
offsets,
)
for _ in range(iters)
]
compressed_requests = [
(
compressed_values,
compressed_offsets,
)
for _ in range(iters)
]
out = benchmark_vbe(
requests,
compressed_requests,
baseline_func=lambda indices, offsets: emb.forward(
indices.long(),
offsets.long(),
),
compressed_func=lambda indices, offsets: emb.forward(
indices.long(),
offsets.long(),
batch_size_per_feature_per_rank=[[bs] for bs in compressed_batch_sizes],
),
reindex=reindex,
embedding_dim=D,
)
logging.info(
f"Uncompressed, B: {B}, T: {T}, D: {D}, L: {L}, "
f"T: {out.avg * 1.0e6:.0f}us, fwd: {out.fwd * 1.0e6:.0f}us, bwd: {out.bwd * 1.0e6:.0f}us\n"
f"Compressed, B: {B}, cB: {cB}, T: {T - cT}, cT: {cT}, D: {D}, L: {L}, "
f"T: {out.compressed_avg * 1.0e6:.0f}us, fwd: {out.compressed_fwd * 1.0e6:.0f}us, reindex: {out.reindex * 1.0e6:.0f}us, bwd: {out.compressed_bwd * 1.0e6:.0f}us"
)
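# Example invocations (file name is illustrative; click exposes each command with
# dashes in place of underscores, e.g. nbit_uvm_compare_direct_mapped ->
# nbit-uvm-compare-direct-mapped):
#   python split_table_batched_embeddings_benchmark.py nbit-cache --batch-size 512 --iters 100
#   python split_table_batched_embeddings_benchmark.py vbe --batch-size 128000 --compressed-batch-size 12800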
if __name__ == "__main__":
cli()
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import contextlib
import functools
import logging
import random
from typing import List
import click
import fbgemm_gpu
import numpy as np
import torch
from torch.profiler import profile
logging.basicConfig(level=logging.DEBUG)
# pyre-fixme[16]: Module `fbgemm_gpu` has no attribute `open_source`.
open_source: bool = getattr(fbgemm_gpu, "open_source", False)
if open_source:
# pyre-ignore[21]
from bench_utils import benchmark_torch_function
else:
from fbgemm_gpu.bench.bench_utils import benchmark_torch_function
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops")
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops_cpu")
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu/codegen:index_select_ops")
@click.group()
def cli() -> None:
pass
@cli.command()
@click.option("--world-size", default=128)
@click.option("--num-tables", default=10)
@click.option("--min-len", default=10000)
@click.option("--max-len", default=20000)
def device(
world_size: int,
num_tables: int,
min_len: int,
max_len: int,
) -> None:
lengths = torch.randint(min_len, max_len, size=(num_tables * world_size,))
offsets = torch.ops.fbgemm.asynchronous_complete_cumsum(lengths)
permute = list(range(num_tables * world_size))
random.shuffle(permute)
permute_tensor = torch.tensor(permute)
permuted_length = torch.index_select(lengths, 0, permute_tensor)
permuted_offsets = torch.ops.fbgemm.asynchronous_complete_cumsum(permuted_length)
jagged_size = offsets[-1]
if torch.cuda.is_available():
permute_tensor = permute_tensor.cuda()
offsets = offsets.cuda()
permuted_offsets = permuted_offsets.cuda()
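    # Time expand_into_jagged_permute and report effective bandwidth over all tensors read and written.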
time, output = benchmark_torch_function(
torch.ops.fbgemm.expand_into_jagged_permute,
(permute_tensor, offsets, permuted_offsets, jagged_size),
)
num_bytes = (
permute_tensor.numel() * permute_tensor.element_size()
+ offsets.numel() * offsets.element_size()
+ permuted_offsets.numel() * permuted_offsets.element_size()
+ output.numel() * output.element_size()
)
logging.info(f"expand_into_jagged_permute {time} sec {num_bytes / time / 1e9} GB/s")
@cli.command()
@click.option("--row-size", default=25600)
@click.option("--batch-size", default=4096)
@click.option("--unique-batch-size", default=1024)
@click.option("--input-precision", type=str, default="fp32")
def batch_reuse_index_select_device(
row_size: int, batch_size: int, unique_batch_size: int, input_precision: str
) -> None:
    # Generate an index vector of length final_size that reuses rows [0, curr_size):
    # unique indices first, then random duplicates, shuffled together.
    def gen_inverse_index(curr_size: int, final_size: int) -> np.ndarray:
        inverse_index = list(range(curr_size))
        for _ in range(final_size - curr_size):
            inverse_index.append(np.random.randint(0, curr_size))
        np_arr = np.array(inverse_index)
        np.random.shuffle(np_arr)
        return np_arr
dtype = torch.float
if input_precision == "fp32":
dtype = torch.float
elif input_precision == "fp16":
dtype = torch.half
else:
        raise RuntimeError(f"Unsupported data type: {input_precision}")
indices = torch.cuda.IntTensor(gen_inverse_index(unique_batch_size, batch_size))
input = torch.rand(unique_batch_size, row_size, dtype=dtype, device="cuda")
input.requires_grad = True
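    # Forward traffic: batch_size rows are gathered from the input and written to the output.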
num_bytes = 2 * batch_size * row_size * input.element_size()
time, output = benchmark_torch_function(
torch.ops.fbgemm.index_select_dim0, (input, indices, 0, unique_batch_size)
)
logging.info(
f"index_select_dim0 forward: {dtype}, {num_bytes} bytes read/write, {time * 1e3} ms, {num_bytes / time / 1e9} GB/s"
)
grad = torch.rand_like(output, dtype=dtype, device="cuda")
num_bytes = (input.numel() + output.numel()) * input.element_size()
time, _ = benchmark_torch_function(
functools.partial(output.backward, retain_graph=True), (grad,)
)
logging.info(
f"index_select_dim0 backward: {dtype}, {num_bytes} bytes read/write, {time * 1e3} ms, {num_bytes / time / 1e9} GB/s"
)
@cli.command()
@click.option("--max-seq-length", default=500)
@click.option("--batch-size", default=4096)
@click.option("--num-cols", default=256)
@click.option("--num-jagged-tensor-rows", default=4096)
@click.option("--num-zero-padding", default=1024)
@click.option("--index-dtype", type=click.Choice(["int", "long"]), default="int")
@click.option(
"--jagged-tensor-dtype", type=click.Choice(["float", "half"]), default="float"
)
def jagged_index_select_2d_bench(
max_seq_length: int,
batch_size: int,
num_cols: int,
num_jagged_tensor_rows: int,
num_zero_padding: int,
index_dtype: str,
jagged_tensor_dtype: str,
) -> None:
def jagged_index_select_2d_ref(
values: torch.Tensor, lengths: torch.Tensor, inverse_lookup: torch.Tensor
) -> torch.Tensor:
offsets = torch.ops.fbgemm.asynchronous_exclusive_cumsum(lengths)
end_offsets = offsets + lengths
full_start_offset = torch.index_select(offsets, 0, inverse_lookup)
full_end_offset = torch.index_select(end_offsets, 0, inverse_lookup)
index_ranges = torch.stack(
(full_start_offset, full_end_offset), dim=0
).transpose(0, 1)
to_be_merged_tensors = []
for row in index_ranges:
to_be_merged_tensors.append(torch.arange(row[0], row[1], device="cuda"))
all_indices = torch.cat(to_be_merged_tensors, dim=0)
new_embeddings = torch.index_select(values, 0, all_indices)
return new_embeddings
index_t = {"int": torch.int, "long": torch.long}[index_dtype]
scalar_t = {"float": torch.float, "half": torch.half}[jagged_tensor_dtype]
lengths = torch.randint(
low=0,
high=max_seq_length,
size=(num_jagged_tensor_rows,),
dtype=index_t,
device="cuda",
)
indices, _ = torch.sort(
torch.randint(
low=0,
high=num_jagged_tensor_rows,
size=(batch_size,),
dtype=index_t,
device="cuda",
)
)
values = torch.rand(
int(lengths.sum().item()), num_cols, dtype=scalar_t, device="cuda"
)
values.requires_grad = True
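    # Point the last num_zero_padding lookups at row 0 to emulate padded batches.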
indices[batch_size - num_zero_padding :] = 0
time, (output, _) = benchmark_torch_function(
torch.ops.fbgemm.jagged_index_select,
(values, lengths, indices),
num_warmups=10,
iters=100,
)
time_ref, output_ref = benchmark_torch_function(
jagged_index_select_2d_ref,
(values, lengths, indices),
num_warmups=10,
iters=100,
)
logging.info(
f"jagged_index_select_2d_bench "
f"(max_seq_length={max_seq_length}, "
f"batch_size={batch_size}, "
f"num_cols={num_cols}, "
f"num_jagged_tensor_rows={num_jagged_tensor_rows}, "
f"num_zero_padding={num_zero_padding}, "
f"index_dtype={index_dtype}, "
f"jagged_tensor_dtype={jagged_tensor_dtype})"
)
logging.info(f"forward: fbgemm {time * 1e3:.3f} ms, ref {time_ref * 1e3:.3f} ms")
grad = torch.rand_like(output)
time, _ = benchmark_torch_function(
functools.partial(output.backward, retain_graph=True),
(grad,),
num_warmups=10,
iters=100,
)
time_ref, _ = benchmark_torch_function(
functools.partial(output_ref.backward, retain_graph=True),
(grad,),
num_warmups=10,
iters=100,
)
logging.info(f"backward: fbgemm {time * 1e3:.3f} ms, ref {time_ref * 1e3:.3f} ms")
@cli.command()
@click.option("--row-size", default=512)
@click.option("--batch-size", default=4096)
@click.option("--unique-batch-size", default=1024)
@click.option("--input-precision", type=str, default="fp32")
@click.option("--sort-indices", type=bool, default=True)
@click.option("--num-groups", default=32)
def group_index_select_2d_bench(
row_size: int,
batch_size: int,
unique_batch_size: int,
input_precision: str,
sort_indices: bool,
num_groups: int,
) -> None:
    def gen_inverse_index(curr_size: int, final_size: int) -> np.ndarray:
        inverse_index = list(range(curr_size))
        for _ in range(final_size - curr_size):
            inverse_index.append(np.random.randint(0, curr_size))
        np_arr = np.array(inverse_index)
        np.random.shuffle(np_arr)
        return np_arr
dtype = torch.float
if input_precision == "fp32":
dtype = torch.float
elif input_precision == "fp16":
dtype = torch.half
else:
        raise RuntimeError(f"Unsupported data type: {input_precision}")
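    # Build per-group index tensors plus a flat, offset-adjusted copy for the torch.index_select baseline.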
offset_indices_group = []
indices_group = []
for i in range(num_groups):
indices = torch.cuda.IntTensor(gen_inverse_index(unique_batch_size, batch_size))
if sort_indices:
indices, _ = indices.sort()
indices_group.append(indices)
indices = torch.add(indices, batch_size * i)
offset_indices_group.append(indices)
offset_indices = torch.concat(offset_indices_group)
input = torch.rand(num_groups * batch_size, row_size, dtype=dtype, device="cuda")
input.requires_grad = True
num_bytes = 2 * batch_size * row_size * input.element_size() * num_groups
bench_kwargs = {"num_warmups": 10, "iters": 100}
# Benchmark forward
time_ref, output_ref = benchmark_torch_function(
torch.index_select, (input, 0, offset_indices), **bench_kwargs
)
input_group = input.split(batch_size, 0)
time, output_group = benchmark_torch_function(
torch.ops.fbgemm.group_index_select_dim0,
(input_group, indices_group),
**bench_kwargs,
)
logging.info(
f"forward: PyTorch batch {time_ref:.5f} sec ({num_bytes / time_ref / 1e9:.5f} GB/s), "
f"fbgemm group {time:5f} sec ({num_bytes / time / 1e9:.5f} GB/s)"
)
# Benchmark backward
grad = torch.rand_like(output_ref)
time_ref, _ = benchmark_torch_function(
functools.partial(output_ref.backward, retain_graph=True),
(grad,),
**bench_kwargs,
)
cat_output = torch.cat(output_group)
time, _ = benchmark_torch_function(
functools.partial(cat_output.backward, retain_graph=True),
(grad,),
**bench_kwargs,
)
logging.info(
f"backward: PyTorch batch {time_ref:.5f} sec ({num_bytes / time_ref / 1e9:.5f} GB/s), "
f"fbgemm group {time:.5f} sec ({num_bytes / time / 1e9:.5f} GB/s)"
)
@cli.command()
@click.option("--num-vecs", default=2048)
@click.option("--num-entries-per-vec", default=1024)
@click.option("--dtype", type=str, default="long")
def asynchronous_complete_cumsum_2d_bench(
num_vecs: int,
num_entries_per_vec: int,
dtype: str,
) -> None:
# Reference code from TorchRec https://github.com/pytorch/torchrec/pull/332
@torch.jit.script
def asynchronous_complete_cumsum_2d_ref(lengths: torch.Tensor) -> torch.Tensor:
(f, b) = lengths.shape
offsets_0 = lengths.new_zeros((f, 1))
offsets_1 = torch.cumsum(lengths, dim=-1).to(lengths.dtype)
offsets = torch.cat([offsets_0, offsets_1], dim=-1)
return offsets
assert dtype == "int" or dtype == "long", "Only int and long are supported"
index_dtype = torch.int64 if dtype == "long" else torch.int32
x = torch.randint(low=0, high=100, size=(num_vecs, num_entries_per_vec)).type(
index_dtype
)
x = x.cuda()
time_ref, _ = benchmark_torch_function(
asynchronous_complete_cumsum_2d_ref, (x,), num_warmups=100, iters=1000
)
time, _ = benchmark_torch_function(
torch.ops.fbgemm.asynchronous_complete_cumsum, (x,), num_warmups=100, iters=1000
)
logging.info(
f"asynchronous_complete_cumsum_2d_bench: input shape {x.shape}, dtype {dtype}"
)
logging.info(f"ref time: {time_ref:.5f} sec")
logging.info(f"fbgemm_gpu time: {time:.5f} sec")
@cli.command()
@click.option("--batch-size", default=8192)
@click.option("--table-size", default=20)
@click.option("--length", default=50)
@click.option("--num-ads", default=100)
@click.option("--dtype", type=click.Choice(["float", "long"]), default="long")
@click.option("--itype", type=click.Choice(["int", "long"]), default="int")
@click.option("--broadcast-indices", type=bool, default=True)
@click.option("--device", type=str, default="cpu")
def reorder_batched_ad_indices_bench(
batch_size: int,
table_size: int,
length: int,
num_ads: int,
dtype: str,
itype: str,
broadcast_indices: bool,
device: str,
) -> None:
    assert dtype == "float" or dtype == "long", "Only float and long are supported"
data_type = torch.int64 if dtype == "long" else torch.float
data_size = 8 if dtype == "long" else 4
assert itype == "int" or itype == "long", "Only int and long are supported"
index_type = torch.int64 if itype == "long" else torch.int32
if broadcast_indices:
cat_ad_indices = (
torch.randint(
low=0,
high=100,
size=(batch_size * table_size * length,),
)
.int()
.to(device)
.to(data_type)
)
cat_ad_lengths = (
torch.cat(
[
torch.tensor([length for _ in range(table_size)])
for _ in range(batch_size)
],
0,
)
.int()
.to(device)
)
else:
cat_ad_indices = (
torch.randint(
low=0,
high=100,
size=(batch_size * table_size * num_ads * length,),
)
.int()
.to(device)
.to(data_type)
)
cat_ad_lengths = (
torch.cat(
[
torch.tensor([length for _ in range(table_size * num_ads)])
for _ in range(batch_size)
],
0,
)
.int()
.to(device)
)
batch_offsets = (
torch.tensor([num_ads * b for b in range(batch_size + 1)]).int().cuda()
).to(device)
num_ads_in_batch = batch_size * num_ads
reordered_cat_ad_lengths = torch.ops.fbgemm.reorder_batched_ad_lengths(
cat_ad_lengths, batch_offsets, num_ads_in_batch, broadcast_indices
).to(device)
cat_ad_offsets = (
torch.ops.fbgemm.asynchronous_complete_cumsum(cat_ad_lengths)
.to(index_type)
.to(device)
)
reordered_cat_ad_offsets = (
torch.ops.fbgemm.asynchronous_complete_cumsum(reordered_cat_ad_lengths)
.to(index_type)
.to(device)
)
time, _ = benchmark_torch_function(
torch.ops.fbgemm.reorder_batched_ad_indices,
(
cat_ad_offsets,
cat_ad_indices,
reordered_cat_ad_offsets,
batch_offsets,
num_ads_in_batch,
broadcast_indices,
batch_size * table_size * num_ads * length,
),
num_warmups=100,
iters=1000,
)
num_bytes = batch_size * table_size * (num_ads + 1) * length * data_size
logging.info(
f"fbgemm_gpu time: {time * 1000:.5f} ms ({num_bytes / time / 1e9:.5f} GB/s)"
)
@cli.command()
@click.option("--batch-size", default=8192)
@click.option("--table-size", default=20)
@click.option("--length", default=50)
@click.option("--num-ads", default=100)
@click.option("--broadcast-indices", type=bool, default=True)
@click.option("--device", type=str, default="cpu")
def reorder_batched_ad_lengths_bench(
batch_size: int,
table_size: int,
length: int,
num_ads: int,
broadcast_indices: bool,
device: str,
) -> None:
if broadcast_indices:
cat_ad_lengths = (
torch.cat(
[
torch.tensor([length for _ in range(table_size)])
for _ in range(batch_size)
],
0,
)
.int()
.to(device)
)
else:
cat_ad_lengths = (
torch.cat(
[
torch.tensor([length for _ in range(table_size * num_ads)])
for _ in range(batch_size)
],
0,
)
.int()
.to(device)
)
batch_offsets = (
torch.tensor([num_ads * b for b in range(batch_size + 1)]).int().cuda()
).to(device)
num_ads_in_batch = batch_size * num_ads
time, _ = benchmark_torch_function(
torch.ops.fbgemm.reorder_batched_ad_lengths,
(
cat_ad_lengths,
batch_offsets,
num_ads_in_batch,
broadcast_indices,
),
num_warmups=100,
iters=1000,
)
num_bytes = batch_size * table_size * (num_ads + 1) * length * 4
logging.info(
f"fbgemm_gpu time: {time * 1000:.5f} ms ({num_bytes / time / 1e9:.5f} GB/s)"
)
@cli.command()
@click.option("--num-inputs", default=1024)
@click.option("--rows", default=100)
@click.option("--columns", default=128)
@click.option("--num-indices", default=2048)
@click.option("--timeline", is_flag=True, default=False)
def index_select_bench(
num_inputs: int, rows: int, columns: int, num_indices: int, timeline: bool
) -> None:
input_rows = [rows] * num_inputs
input_columns = [columns] * num_inputs
input_num_indices = [num_indices] * num_inputs
inputs = [
torch.rand(rows, cols, dtype=torch.float, device="cuda")
for rows, cols in zip(input_rows, input_columns)
]
for i in range(len(inputs)):
inputs[i].requires_grad = True
indices = [
torch.randint(low=0, high=rows, size=(num,), dtype=torch.long, device="cuda")
for num, rows in zip(input_num_indices, input_rows)
]
concat_inputs = torch.concat([input.flatten().clone().detach() for input in inputs])
concat_inputs.requires_grad = True
concat_indices = torch.concat(indices)
gis_inputs = [input.clone().detach() for input in inputs]
for i in range(len(gis_inputs)):
gis_inputs[i].requires_grad = True
# Add optimizer to perform zero grad in order to reset gradients
# before the accumulation phase
optim_index: torch.optim.Optimizer = torch.optim.SGD(inputs, lr=0.1)
optim_batch: torch.optim.Optimizer = torch.optim.SGD([concat_inputs], lr=0.1)
optim_group: torch.optim.Optimizer = torch.optim.SGD(gis_inputs, lr=0.1)
def index_select_fwd_ref(
inputs: List[torch.Tensor], indices: List[torch.Tensor]
) -> List[torch.Tensor]:
outputs = []
for input, index in zip(inputs, indices):
optim_index.zero_grad()
outputs.append(torch.index_select(input, 0, index))
return outputs
def index_select_bwd_ref(
outputs: List[torch.Tensor], grads: List[torch.Tensor]
) -> None:
for output, grad in zip(outputs, grads):
optim_index.zero_grad()
output.backward(grad, retain_graph=True)
def batch_index_select_fwd(
concat_inputs: List[torch.Tensor],
concat_indices: List[int],
input_num_indices: List[int],
input_rows: List[int],
input_columns: List[int],
) -> torch.autograd.Variable:
optim_batch.zero_grad()
return torch.ops.fbgemm.batch_index_select_dim0(
concat_inputs, concat_indices, input_num_indices, input_rows, input_columns
)
def group_index_select_fwd(
gis_inputs: List[torch.Tensor], indices: List[int]
) -> torch.autograd.Variable:
optim_group.zero_grad()
return torch.ops.fbgemm.group_index_select_dim0(gis_inputs, indices)
def batch_group_index_select_bwd(
output: torch.autograd.Variable,
grads: List[torch.Tensor],
optim: torch.optim.Optimizer,
) -> torch.autograd.Variable:
optim.zero_grad()
return output.backward(grads, retain_graph=True)
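    # Use fewer iterations when exporting a Chrome trace; otherwise run the full benchmark.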
bench_kwargs = {"num_warmups": 10, "iters": 10 if timeline else 100}
profile_ctx = profile if timeline else contextlib.nullcontext
with profile_ctx() as prof:
time_pyt, out_pyt = benchmark_torch_function(
index_select_fwd_ref,
(inputs, indices),
**bench_kwargs,
)
time_bis, out_bis = benchmark_torch_function(
batch_index_select_fwd,
(
concat_inputs,
concat_indices,
input_num_indices,
input_rows,
input_columns,
),
**bench_kwargs,
)
time_gis, out_gis = benchmark_torch_function(
group_index_select_fwd,
(gis_inputs, indices),
**bench_kwargs,
)
if timeline:
prof.export_chrome_trace("index_select_fwd_trace.json")
grads = [torch.rand_like(out) for out in out_pyt]
concat_grads = torch.concat([grad.flatten() for grad in grads])
concat_out_gis = torch.concat([out.flatten() for out in out_gis])
with profile_ctx() as prof:
time_bwd_pyt, _ = benchmark_torch_function(
index_select_bwd_ref,
(out_pyt, grads),
**bench_kwargs,
)
time_bwd_bis, _ = benchmark_torch_function(
batch_group_index_select_bwd,
(
out_bis,
concat_grads,
optim_batch,
),
**bench_kwargs,
)
time_bwd_gis, _ = benchmark_torch_function(
batch_group_index_select_bwd,
(
concat_out_gis,
concat_grads,
optim_group,
),
**bench_kwargs,
)
if timeline:
prof.export_chrome_trace("index_select_bwd_trace.json")
logging.info(
f"torch.index_select forward {time_pyt * 1e6:.2f} us, backward {time_bwd_pyt * 1e6:.2f} us\n"
f"torch.ops.fbgemm.batch_index_select forward {time_bis * 1e6:.2f} us, backward {time_bwd_bis * 1e6:.2f} us\n"
f"torch.ops.fbgemm.group_index_select_dim0 forward {time_gis * 1e6:.2f} us, backward {time_bwd_gis * 1e6:.2f} us"
)
@cli.command()
@click.option("--batch-size", default=8192)
@click.option("--table-size", default=20)
@click.option("--length", default=50)
@click.option("--num-ads", default=100)
@click.option("--dtype", type=click.Choice(["float", "long"]), default="long")
@click.option("--itype", type=click.Choice(["int", "long"]), default="int")
@click.option("--broadcast-indices", type=bool, default=True)
def cat_reorder_batched_ad_indices_bench(
batch_size: int,
table_size: int,
length: int,
num_ads: int,
dtype: str,
itype: str,
broadcast_indices: bool,
) -> None:
assert dtype == "float" or dtype == "long", "Only int and long are supported"
data_type = torch.int64 if dtype == "long" else torch.float
data_size = 8 if dtype == "long" else 4
assert itype == "int" or itype == "long", "Only int and long are supported"
if broadcast_indices:
ad_indices = [
(
torch.randint(
low=0,
high=100,
size=(table_size * length,),
)
.int()
.to(data_type)
)
for _ in range(batch_size)
]
ad_lengths = [
torch.tensor([length for _ in range(table_size)]).int()
for _ in range(batch_size)
]
else:
ad_indices = [
(
torch.randint(
low=0,
high=100,
size=(table_size * num_ads * length,),
)
.int()
.to(data_type)
)
for _ in range(batch_size)
]
ad_lengths = [
torch.tensor([length for _ in range(table_size * num_ads)]).int()
for _ in range(batch_size)
]
batch_offsets = torch.tensor([num_ads * b for b in range(batch_size + 1)]).int()
num_ads_in_batch = batch_size * num_ads
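    # pass_1: copy lengths and indices to the GPU up front, then run both the
    # length reorder and the index reorder on the device (baseline).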
# pyre-ignore
def pass_1(ad_indices, ad_lengths, batch_offsets, num_ads_in_batch):
cat_ad_lengths = torch.cat(ad_lengths, 0).to("cuda", non_blocking=True)
cat_ad_indices = torch.cat(ad_indices, 0).to("cuda", non_blocking=True)
batch_offsets = batch_offsets.to("cuda", non_blocking=True)
reordered_cat_ad_lengths = torch.ops.fbgemm.reorder_batched_ad_lengths(
cat_ad_lengths, batch_offsets, num_ads_in_batch, broadcast_indices
)
cat_ad_offsets = torch.ops.fbgemm.asynchronous_complete_cumsum(cat_ad_lengths)
reordered_cat_ad_offsets = torch.ops.fbgemm.asynchronous_complete_cumsum(
reordered_cat_ad_lengths
)
reordered_cat_ad_indices = torch.ops.fbgemm.reorder_batched_ad_indices(
cat_ad_offsets,
cat_ad_indices,
reordered_cat_ad_offsets,
batch_offsets,
num_ads_in_batch,
broadcast_indices,
batch_size * table_size * num_ads * length,
)
return reordered_cat_ad_indices, reordered_cat_ad_lengths
    # process lengths on host and process indices on device
# pyre-ignore
def pass_2(ad_indices, ad_lengths, batch_offsets, num_ads_in_batch):
cat_ad_lengths = torch.cat(ad_lengths, 0)
reordered_cat_ad_lengths = torch.ops.fbgemm.reorder_batched_ad_lengths(
cat_ad_lengths, batch_offsets, num_ads_in_batch, broadcast_indices
)
cat_ad_offsets = torch.ops.fbgemm.asynchronous_complete_cumsum(cat_ad_lengths)
reordered_cat_ad_offsets = torch.ops.fbgemm.asynchronous_complete_cumsum(
reordered_cat_ad_lengths
)
cat_ad_indices = torch.cat(ad_indices, 0)
reordered_cat_ad_indices = torch.ops.fbgemm.reorder_batched_ad_indices(
cat_ad_offsets.to("cuda", non_blocking=True),
cat_ad_indices.to("cuda", non_blocking=True),
reordered_cat_ad_offsets.to("cuda", non_blocking=True),
batch_offsets.to("cuda", non_blocking=True),
num_ads_in_batch,
broadcast_indices,
batch_size * table_size * num_ads * length,
)
return reordered_cat_ad_indices, reordered_cat_ad_lengths.to(
"cuda", non_blocking=True
)
# minimize GPU workload + unfused cat + reorder
# pyre-ignore
def pass_3(ad_indices, ad_lengths, batch_offsets, num_ads_in_batch):
cat_ad_lengths = torch.cat(ad_lengths, 0)
reordered_cat_ad_lengths = torch.ops.fbgemm.reorder_batched_ad_lengths(
cat_ad_lengths, batch_offsets, num_ads_in_batch, broadcast_indices
)
cat_ad_offsets = torch.ops.fbgemm.asynchronous_complete_cumsum(cat_ad_lengths)
reordered_cat_ad_offsets = torch.ops.fbgemm.asynchronous_complete_cumsum(
reordered_cat_ad_lengths
)
cat_ad_indices = torch.cat(ad_indices, 0)
reordered_cat_ad_indices = torch.ops.fbgemm.reorder_batched_ad_indices(
cat_ad_offsets,
cat_ad_indices,
reordered_cat_ad_offsets,
batch_offsets,
num_ads_in_batch,
broadcast_indices,
batch_size * table_size * num_ads * length,
)
return reordered_cat_ad_indices.to(
"cuda", non_blocking=True
), reordered_cat_ad_lengths.to("cuda", non_blocking=True)
# minimize GPU workload + fuse cat + reorder
# pyre-ignore
def pass_4(ad_indices, ad_lengths, batch_offsets, num_ads_in_batch):
cat_ad_lengths = torch.cat(ad_lengths, 0)
reordered_cat_ad_lengths = torch.ops.fbgemm.reorder_batched_ad_lengths(
cat_ad_lengths, batch_offsets, num_ads_in_batch, broadcast_indices
)
cat_ad_offsets = torch.ops.fbgemm.asynchronous_complete_cumsum(cat_ad_lengths)
reordered_cat_ad_offsets = torch.ops.fbgemm.asynchronous_complete_cumsum(
reordered_cat_ad_lengths
)
reordered_cat_ad_indices = torch.ops.fbgemm.cat_reorder_batched_ad_indices(
cat_ad_offsets,
ad_indices,
reordered_cat_ad_offsets,
batch_offsets,
num_ads_in_batch,
broadcast_indices,
batch_size * table_size * num_ads * length,
)
return reordered_cat_ad_indices.to(
"cuda", non_blocking=True
), reordered_cat_ad_lengths.to("cuda", non_blocking=True)
num_bytes = batch_size * table_size * (num_ads + 1) * length * data_size
# pyre-ignore
def ben(fn, name, ad_indices, ad_lengths, batch_offsets, num_ads_in_batch):
time, _ = benchmark_torch_function(
fn,
(ad_indices, ad_lengths, batch_offsets, num_ads_in_batch),
num_warmups=50,
iters=500,
)
logging.info(
f"{name} fbgemm_gpu time: {time * 1000:.5f} ms ({num_bytes / time / 1e9:.5f} GB/s)"
)
ben(pass_1, "pass_1", ad_indices, ad_lengths, batch_offsets, num_ads_in_batch)
ben(pass_2, "pass_2", ad_indices, ad_lengths, batch_offsets, num_ads_in_batch)
ben(pass_3, "pass_3", ad_indices, ad_lengths, batch_offsets, num_ads_in_batch)
ben(pass_4, "pass_4", ad_indices, ad_lengths, batch_offsets, num_ads_in_batch)
if __name__ == "__main__":
cli()
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import functools
import logging
import random
import click
import fbgemm_gpu
import hypothesis.strategies as st
import torch
from hypothesis import given, settings
logging.basicConfig(level=logging.DEBUG)
# pyre-fixme[16]: Module `fbgemm_gpu` has no attribute `open_source`.
open_source: bool = getattr(fbgemm_gpu, "open_source", False)
if open_source:
# pyre-ignore[21]
from bench_utils import benchmark_torch_function
else:
from fbgemm_gpu.bench.bench_utils import benchmark_torch_function
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops")
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops_cpu")
@click.group()
def cli() -> None:
pass
def bench_impl(
flush_gpu_cache_size_mb: int,
iters: int,
num_columns: int,
num_rows: int,
warmup_runs: int,
) -> None:
average_time = {
"int8_quant": 0.0,
"int4_quant": 0.0,
"int2_quant": 0.0,
"fp8_143_quant": 0.0,
"fp8_152_quant": 0.0,
"fp16_quant": 0.0,
"bf16_quant_fbgemm": 0.0,
"bf16_quant_pytorch": 0.0,
"int8_dequant": 0.0,
"int4_dequant": 0.0,
"int2_dequant": 0.0,
"fp8_143_dequant": 0.0,
"fp8_152_dequant": 0.0,
"fp16_dequant": 0.0,
"bf16_dequant_fbgemm": 0.0,
"bf16_dequant_pytorch": 0.0,
}
benchmark = functools.partial(
benchmark_torch_function,
flush_gpu_cache_size_mb=flush_gpu_cache_size_mb,
iters=iters,
num_warmups=warmup_runs,
)
input_data = torch.rand(num_rows, num_columns).float()
if torch.cuda.is_available():
input_data = input_data.cuda()
quant_data_8bit = torch.ops.fbgemm.FloatToFused8BitRowwiseQuantized(input_data)
quant_data_4bit = torch.ops.fbgemm.FloatToFusedNBitRowwiseQuantizedSBHalf(
input_data, 4
)
quant_data_2bit = torch.ops.fbgemm.FloatToFusedNBitRowwiseQuantizedSBHalf(
input_data, 2
)
quant_data_fp8_143 = torch.ops.fbgemm.FloatToHFP8Quantized(
input_data.contiguous(), 4, 14, (2 - 2 ** (-3))
)
quant_data_fp8_152 = torch.ops.fbgemm.FloatToHFP8Quantized(
input_data, 5, 30, (2 - 2 ** (-2))
)
quant_data_fp16 = input_data.half()
quant_data_bf16_fbgemm = torch.ops.fbgemm.FloatToBfloat16Quantized(
input_data.contiguous()
)
quant_data_bf16_pytorch = input_data.bfloat16().view(torch.half)
average_time["int8_quant"], _ = benchmark(
torch.ops.fbgemm.FloatToFused8BitRowwiseQuantized,
(input_data,),
)
average_time["int4_quant"], _ = benchmark(
torch.ops.fbgemm.FloatToFusedNBitRowwiseQuantizedSBHalf,
(input_data, 4),
)
average_time["int2_quant"], _ = benchmark(
torch.ops.fbgemm.FloatToFusedNBitRowwiseQuantizedSBHalf,
(input_data, 2),
)
average_time["fp8_143_quant"], _ = benchmark(
torch.ops.fbgemm.FloatToHFP8Quantized,
(input_data, 4, 14, (2 - 2 ** (-3))),
)
average_time["fp8_152_quant"], _ = benchmark(
torch.ops.fbgemm.FloatToHFP8Quantized,
(input_data, 5, 30, (2 - 2 ** (-2))),
)
average_time["fp16_quant"], _ = benchmark(
lambda tensor: tensor.half(),
(input_data,),
)
average_time["bf16_quant_fbgemm"], _ = benchmark(
torch.ops.fbgemm.FloatToBfloat16Quantized,
(input_data,),
)
average_time["bf16_quant_pytorch"], _ = benchmark(
lambda tensor: tensor.bfloat16().view(torch.half),
(input_data,),
)
average_time["int8_dequant"], _ = benchmark(
torch.ops.fbgemm.Fused8BitRowwiseQuantizedToFloat,
(quant_data_8bit,),
)
average_time["int4_dequant"], _ = benchmark(
torch.ops.fbgemm.FusedNBitRowwiseQuantizedSBHalfToFloat,
(quant_data_4bit, 4),
)
average_time["int2_dequant"], _ = benchmark(
torch.ops.fbgemm.FusedNBitRowwiseQuantizedSBHalfToFloat,
(quant_data_2bit, 2),
)
average_time["fp8_143_dequant"], _ = benchmark(
torch.ops.fbgemm.HFP8QuantizedToFloat,
(quant_data_fp8_143, 4, 14),
)
average_time["fp8_152_dequant"], _ = benchmark(
torch.ops.fbgemm.HFP8QuantizedToFloat,
(quant_data_fp8_152, 5, 30),
)
average_time["fp16_dequant"], _ = benchmark(
lambda tensor: tensor.float(),
(quant_data_fp16,),
)
average_time["bf16_dequant_fbgemm"], _ = benchmark(
torch.ops.fbgemm.Bfloat16QuantizedToFloat,
(quant_data_bf16_fbgemm,),
)
average_time["bf16_dequant_pytorch"], _ = benchmark(
lambda tensor: tensor.view(torch.bfloat16).float(),
(quant_data_bf16_pytorch,),
)
logging.info(f"-------------- ncols={num_columns}, nrows={num_rows}-------------")
for k, t_time in average_time.items():
logging.info(f"{k} time per iter: {t_time * 1.0e6:.0f}us")
@settings(max_examples=10, deadline=None)
# pyre-ignore
@given(
num_columns=st.sampled_from([2**n for n in range(4, 10)]),
num_rows=st.sampled_from([2**n for n in range(4, 10)]),
)
def bench_spectrum(
flush_gpu_cache_size_mb: int,
iters: int,
num_columns: int,
num_rows: int,
warmup_runs: int,
) -> None:
bench_impl(
flush_gpu_cache_size_mb=flush_gpu_cache_size_mb,
iters=iters,
num_columns=num_columns,
num_rows=num_rows,
warmup_runs=warmup_runs,
)
@cli.command()
@click.option("--flush-gpu-cache-size-mb", default=0)
@click.option("--iters", default=100)
@click.option("--num-columns", default=-1)
@click.option("--num-rows", default=-1)
@click.option("--warmup-runs", default=2)
def bench(
flush_gpu_cache_size_mb: int,
iters: int,
num_columns: int,
num_rows: int,
warmup_runs: int,
) -> None:
if num_columns == -1 or num_rows == -1:
bench_spectrum(
flush_gpu_cache_size_mb=flush_gpu_cache_size_mb,
iters=iters,
warmup_runs=warmup_runs,
)
else:
bench_impl(
flush_gpu_cache_size_mb=flush_gpu_cache_size_mb,
iters=iters,
num_columns=num_columns,
num_rows=num_rows,
warmup_runs=warmup_runs,
)
@cli.command()
@click.option("--flush-gpu-cache-size-mb", default=0)
@click.option("--iters", default=100)
@click.option("--batch_size", default=512)
@click.option("--num_tables", default=256)
@click.option("--min_dim", default=1)
@click.option("--max_dim", default=128)
@click.option("--warmup-runs", default=2)
def mixdim(
flush_gpu_cache_size_mb: int,
iters: int,
batch_size: int,
num_tables: int,
min_dim: int,
max_dim: int,
warmup_runs: int,
) -> None:
if not torch.cuda.is_available():
raise RuntimeError("CUDA is not available.")
random.seed(0)
table_dims = [
random.randint(min_dim, max_dim) * 8 for _ in range(num_tables)
] # assume table dimensions are multiples of 8
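    # FloatToFused8BitRowwiseQuantized appends 8 trailing bytes per row for the
    # quantization parameters (scale and bias), hence the +8 below.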
table_dims_with_qparams = [d + 8 for d in table_dims]
D_offsets = (
torch.cumsum(torch.tensor([0] + table_dims_with_qparams), dim=0)
.to(torch.int)
.cuda()
)
input_refs = [torch.randn((batch_size, d)).cuda() for d in table_dims]
input_refs_int8 = [
torch.ops.fbgemm.FloatToFused8BitRowwiseQuantized(t) for t in input_refs
]
input_data = torch.concat(input_refs_int8, dim=1).contiguous()
benchmark = functools.partial(
benchmark_torch_function,
flush_gpu_cache_size_mb=flush_gpu_cache_size_mb,
iters=iters,
num_warmups=warmup_runs,
)
average_time_mixed_dim_fp32, _ = benchmark(
torch.ops.fbgemm.Fused8BitRowwiseQuantizedToFloatMixedDim,
(
input_data,
D_offsets,
0,
),
) # output is FP32
    average_time_mixed_dim_fp16, _ = benchmark(
torch.ops.fbgemm.Fused8BitRowwiseQuantizedToFloatMixedDim,
(
input_data,
D_offsets,
1,
),
) # output is FP16
average_time_single_dim, _ = benchmark(
torch.ops.fbgemm.Fused8BitRowwiseQuantizedToFloat,
(input_data,),
) # output is FP32
print(
f"Input tensor batch_size: {batch_size}, num_tables: {num_tables}, tensor_size: {input_data.numel() / (1 << 30)} GB, average table dimension: {sum(table_dims) * 1.0/num_tables}."
)
print(
f"Mixed dim dequantize average time per iter FP32: {average_time_mixed_dim_fp32} s, bandwidth : {input_data.numel() / (1 << 30) / average_time_mixed_dim_fp32} GB/s."
)
print(
f"Mixed dim dequantize average time per iter FP16: {average_time_mixed_dim_fp16} s, bandwidth : {input_data.numel() / (1 << 30) / average_time_mixed_dim_fp16} GB/s."
)
print(
f"Single dim dequantize average time per iter FP32: {average_time_single_dim} s, bandwidth: {input_data.numel() / (1 << 30) / average_time_single_dim} GB/s."
)
if __name__ == "__main__":
cli()
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import logging
import time
from typing import Callable, Tuple
import click
import torch
from torch import Tensor
logging.basicConfig(level=logging.DEBUG)
try:
# pyre-ignore[21]
from fbgemm_gpu import open_source # noqa: F401
except Exception:
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops")
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops_cpu")
def benchmark_hbc_function(
func: Callable[[Tensor], Tuple[Tensor, Tensor]],
input: Tensor,
) -> Tuple[float, Tensor]:
if input.is_cuda:
torch.cuda.synchronize()
start_event = torch.cuda.Event(enable_timing=True)
end_event = torch.cuda.Event(enable_timing=True)
start_event.record()
        # Time a single invocation of the function between CUDA events
        output, _ = func(input)
end_event.record()
torch.cuda.synchronize()
elapsed_time = start_event.elapsed_time(end_event) * 1.0e-3
else:
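        # CPU path: fall back to wall-clock timing with time.time()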
start_time = time.time()
output, _ = func(input)
elapsed_time = time.time() - start_time
return float(elapsed_time), output
@click.command()
@click.option("--iters", default=100)
@click.option("--warmup-runs", default=2)
def main(
iters: int,
warmup_runs: int,
) -> None:
data_types = [torch.half, torch.float, torch.double]
total_time = {
"hbc": {
"cpu": {
torch.half: 0.0,
torch.float: 0.0,
torch.double: 0.0,
},
"gpu": {
torch.half: 0.0,
torch.float: 0.0,
torch.double: 0.0,
},
},
"hbc_by_feature": {
"cpu": {
torch.half: 0.0,
torch.float: 0.0,
torch.double: 0.0,
},
"gpu": {
torch.half: 0.0,
torch.float: 0.0,
torch.double: 0.0,
},
},
"generic_hbc_by_feature": {
"cpu": {
torch.half: 0.0,
torch.float: 0.0,
torch.double: 0.0,
},
"gpu": {
torch.half: 0.0,
torch.float: 0.0,
torch.double: 0.0,
},
},
}
num_bins: int = 5000
num_segments: int = 42
num_logits = 5000
input_data_cpu = torch.rand(num_logits, dtype=torch.float)
segment_lengths: Tensor = torch.randint(0, 2, (num_logits,))
num_values: int = int(torch.sum(segment_lengths).item())
segment_values: Tensor = torch.randint(
0,
num_segments,
(num_values,),
)
lower_bound: float = 0.0
upper_bound: float = 1.0
w: float = (upper_bound - lower_bound) / num_bins
bin_num_examples: Tensor = torch.empty([num_bins], dtype=torch.float64).fill_(0.0)
bin_num_positives: Tensor = torch.empty([num_bins], dtype=torch.float64).fill_(0.0)
bin_boundaries: Tensor = torch.arange(
lower_bound + w, upper_bound - w / 2, w, dtype=torch.float64
)
by_feature_bin_num_examples: Tensor = torch.empty(
[num_bins * (num_segments + 1)], dtype=torch.float64
).fill_(0.0)
by_feature_bin_num_positives: Tensor = torch.empty(
[num_bins * (num_segments + 1)], dtype=torch.float64
).fill_(0.0)
def fbgemm_hbc_cpu(input: Tensor) -> Tuple[Tensor, Tensor]:
return torch.ops.fbgemm.histogram_binning_calibration(
input,
bin_num_examples,
bin_num_positives,
0.4,
lower_bound,
upper_bound,
0,
0.9995,
)
def fbgemm_hbc_by_feature_cpu(input: Tensor) -> Tuple[Tensor, Tensor]:
return torch.ops.fbgemm.histogram_binning_calibration_by_feature(
input,
segment_values,
segment_lengths,
num_segments,
by_feature_bin_num_examples,
by_feature_bin_num_positives,
num_bins,
0.4,
lower_bound,
upper_bound,
0,
0.9995,
)
def fbgemm_generic_hbc_by_feature_cpu(input: Tensor) -> Tuple[Tensor, Tensor]:
return torch.ops.fbgemm.generic_histogram_binning_calibration_by_feature(
input,
segment_values,
segment_lengths,
num_segments,
by_feature_bin_num_examples,
by_feature_bin_num_positives,
bin_boundaries,
0.4,
0,
0.9995,
)
for step in range(iters + warmup_runs):
for data_type in data_types:
curr_input = input_data_cpu.to(data_type)
hbc_time, _ = benchmark_hbc_function(
fbgemm_hbc_cpu,
curr_input,
)
hbc_by_feature_time, _ = benchmark_hbc_function(
fbgemm_hbc_by_feature_cpu, curr_input
)
generic_hbc_by_feature_time, _ = benchmark_hbc_function(
fbgemm_generic_hbc_by_feature_cpu, curr_input
)
if step >= warmup_runs:
total_time["hbc"]["cpu"][data_type] += hbc_time
total_time["hbc_by_feature"]["cpu"][data_type] += hbc_by_feature_time
total_time["generic_hbc_by_feature"]["cpu"][
data_type
] += generic_hbc_by_feature_time
if torch.cuda.is_available():
bin_num_examples_gpu: Tensor = bin_num_examples.cuda()
bin_num_positives_gpu: Tensor = bin_num_positives.cuda()
def fbgemm_hbc_gpu(input: Tensor) -> Tuple[Tensor, Tensor]:
return torch.ops.fbgemm.histogram_binning_calibration(
input,
bin_num_examples_gpu,
bin_num_positives_gpu,
0.4,
lower_bound,
upper_bound,
0,
0.9995,
)
segment_values_gpu: Tensor = segment_values.cuda()
segment_lengths_gpu: Tensor = segment_lengths.cuda()
by_feature_bin_num_examples_gpu: Tensor = by_feature_bin_num_examples.cuda()
by_feature_bin_num_positives_gpu: Tensor = (
by_feature_bin_num_positives.cuda()
)
def fbgemm_hbc_by_feature_gpu(input: Tensor) -> Tuple[Tensor, Tensor]:
return torch.ops.fbgemm.histogram_binning_calibration_by_feature(
input,
segment_values_gpu,
segment_lengths_gpu,
num_segments,
by_feature_bin_num_examples_gpu,
by_feature_bin_num_positives_gpu,
num_bins,
0.4,
lower_bound,
upper_bound,
0,
0.9995,
)
bin_boundaries_gpu: Tensor = bin_boundaries.cuda()
def fbgemm_generic_hbc_by_feature_gpu(
input: Tensor,
) -> Tuple[Tensor, Tensor]:
return (
torch.ops.fbgemm.generic_histogram_binning_calibration_by_feature(
input,
segment_values_gpu,
segment_lengths_gpu,
num_segments,
by_feature_bin_num_examples_gpu,
by_feature_bin_num_positives_gpu,
bin_boundaries_gpu,
0.4,
0,
0.9995,
)
)
for data_type in data_types:
curr_input_gpu = input_data_cpu.cuda().to(data_type)
hbc_time, _ = benchmark_hbc_function(
fbgemm_hbc_gpu,
curr_input_gpu,
)
hbc_by_feature_time, _ = benchmark_hbc_function(
fbgemm_hbc_by_feature_gpu,
curr_input_gpu,
)
generic_hbc_by_feature_time, _ = benchmark_hbc_function(
fbgemm_generic_hbc_by_feature_gpu,
curr_input_gpu,
)
if step >= warmup_runs:
total_time["hbc"]["gpu"][data_type] += hbc_time
total_time["hbc_by_feature"]["gpu"][
data_type
] += hbc_by_feature_time
total_time["generic_hbc_by_feature"]["gpu"][
data_type
] += generic_hbc_by_feature_time
for op, curr_items in total_time.items():
for platform, data_items in curr_items.items():
for dtype, t_time in data_items.items():
logging.info(
f"{op}_{platform}_{dtype} time per iter: {t_time / iters * 1.0e6:.0f}us"
)
if __name__ == "__main__":
main()
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import logging
import re
import subprocess
import click
logging.basicConfig(level=logging.DEBUG)
@click.command()
@click.option(
"--benchmark-command",
default="python split_table_batched_embeddings_benchmark.py",
help="Benchmark command to run",
)
@click.option(
"--command-file",
default="batch_input.txt",
help="File containing input commands to evaluate",
)
def batch_benchmark(
benchmark_command: str,
command_file: str,
) -> None:
assert (
"split_table_batched_embeddings_benchmark" in benchmark_command
), "split_table_batched_embeddings benchmark required for execution"
benchmark_cmd = benchmark_command.strip().split()
cmds_run = 0
failed_runs = []
total_fwd_bytes_read_gb = 0
total_fwdbwd_bytes_read_gb = 0
total_fwd_time_us = 0
total_fwdbwd_time_us = 0
with open(command_file) as cmd_file:
for line in cmd_file:
options = line.replace('"', "").strip().split()
cmd = benchmark_cmd + options
logging.info(f"Running command {cmds_run}: {cmd}")
result = subprocess.run(
cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT
)
logging.info(result.stdout.decode("utf-8"))
# Parse results
found_fwd_results = False
found_fwdbwd_results = False
for line in result.stdout.decode("utf-8").splitlines():
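                # Expect benchmark output lines of the form
                # "... Forward, ... BW: <number> GB/s, T: <number>us"
                # (or "ForwardBackward, ..." for the combined pass).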
re_match = re.search(r"BW: ([\.\d]+) GB/s, T: ([\.\d]+)us", line)
if re_match:
bw_gb = float(re_match.groups()[0])
time_us = int(re_match.groups()[1])
total_bytes_read_gb = bw_gb * time_us / 1e6
if "Forward, " in line:
total_fwd_bytes_read_gb += total_bytes_read_gb
total_fwd_time_us += time_us
found_fwd_results = True
elif "ForwardBackward, " in line:
total_fwdbwd_bytes_read_gb += total_bytes_read_gb
total_fwdbwd_time_us += time_us
found_fwdbwd_results = True
else:
raise Exception(
f"Unexpected reported metric for line: '{line}'"
)
if not (found_fwd_results and found_fwdbwd_results):
failed_runs.append(cmds_run)
cmds_run += 1
logging.info(f"Number of commands run: {cmds_run}")
if failed_runs:
logging.info(f"Failed runs: {failed_runs}")
logging.info(
f"Average FWD BW: {total_fwd_bytes_read_gb / total_fwd_time_us * 1e6} GB/s"
)
    logging.info(
        f"Average FWDBWD BW: {total_fwdbwd_bytes_read_gb / total_fwdbwd_time_us * 1e6} GB/s"
    )
if __name__ == "__main__":
batch_benchmark()
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import enum
from typing import Any, Dict # noqa: F401
import torch
@enum.unique
class EmbOptimType(enum.Enum):
SGD = "sgd" # uses non-deterministic updates (atomicAdd(..)) with duplicate ids
EXACT_SGD = (
"exact_sgd" # uses deterministic updates (via sorting + segment reduction)
)
LAMB = "lamb"
ADAM = "adam"
    # exact/dedup: gradients to the same row are coalesced and then applied
    # together, instead of being applied one after another (approximate).
EXACT_ADAGRAD = "exact_adagrad"
EXACT_ROWWISE_ADAGRAD = "exact_row_wise_adagrad"
LARS_SGD = "lars_sgd"
PARTIAL_ROWWISE_ADAM = "partial_row_wise_adam"
PARTIAL_ROWWISE_LAMB = "partial_row_wise_lamb"
ROWWISE_ADAGRAD = "row_wise_adagrad"
SHAMPOO = "shampoo" # not currently supported for sparse embedding tables
MADGRAD = "madgrad"
EXACT_ROWWISE_WEIGHTED_ADAGRAD = "exact_row_wise_weighted_adagrad"
NONE = "none"
def __str__(self) -> str:
return self.value
# Base class for quantization configuration (in case other numeric types have
# configs)
class QuantizationConfig:
def __init__(self) -> None:
self.config = {} # type: Dict[str, Any]
def get(self, name: str) -> int:
return -1
# FP8 quantization configuration
# Compute necessary parameters in the constructor
class FP8QuantizationConfig(QuantizationConfig):
def __init__(self, exponent_bits: int, exponent_bias: int) -> None:
super(FP8QuantizationConfig, self).__init__()
self.config = {
"exponent_bits": exponent_bits,
"exponent_bias": exponent_bias,
"max_position": (1 << ((1 << exponent_bits) - 2 - exponent_bias))
* (2 - 2 ** (exponent_bits - 7)),
} # type: Dict[str, Any]
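        # For example, the default FP8QuantizationConfig(4, 7) yields
        # max_position = (1 << (16 - 2 - 7)) * (2 - 2 ** (4 - 7)) = 128 * 1.875 = 240.0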
def get(self, name: str) -> int:
if name not in self.config:
raise RuntimeError("{} must be set in config".format(name))
return self.config[name]
@enum.unique
class SparseType(enum.Enum):
FP32 = "fp32"
FP16 = "fp16"
FP8 = "fp8"
INT8 = "int8"
INT4 = "int4"
INT2 = "int2"
BF16 = "bf16"
def __str__(self) -> str:
return self.value
@staticmethod
def from_int(ty: int) -> "SparseType":
if ty == 0:
return SparseType("fp32")
elif ty == 1:
return SparseType("fp16")
elif ty == 2:
return SparseType("int8")
elif ty == 3:
return SparseType("int4")
elif ty == 4:
return SparseType("int2")
elif ty == 5:
return SparseType("bf16")
elif ty == 6:
return SparseType("fp8")
else:
raise ValueError(f"Unsupported sparse type: {ty}")
def as_int(self) -> int:
return {
SparseType.FP32.value: 0,
SparseType.FP16.value: 1,
SparseType.INT8.value: 2,
SparseType.INT4.value: 3,
SparseType.INT2.value: 4,
SparseType.BF16.value: 5,
SparseType.FP8.value: 6,
}[self.value]
@staticmethod
def from_dtype(dtype: torch.dtype) -> "SparseType":
if dtype == torch.float32:
return SparseType("fp32")
elif dtype == torch.float16:
return SparseType("fp16")
elif dtype == torch.int8 or dtype == torch.uint8:
return SparseType("int8")
elif dtype == torch.quint4x2:
return SparseType("int4")
elif dtype == torch.quint2x4:
return SparseType("int2")
elif dtype == torch.bfloat16:
return SparseType("bf16")
else:
raise ValueError(f"Unsupported sparse dtype: {dtype}")
def as_dtype(self) -> torch.dtype:
return {
SparseType.FP32.value: torch.float32,
SparseType.FP16.value: torch.float16,
SparseType.FP8.value: torch.uint8,
SparseType.INT8.value: torch.uint8,
SparseType.INT4.value: torch.quint4x2,
SparseType.INT2.value: torch.quint2x4,
SparseType.BF16.value: torch.bfloat16,
}[self.value]
def bit_rate(self) -> int:
return {
SparseType.FP32.value: 32,
SparseType.FP16.value: 16,
SparseType.FP8.value: 8,
SparseType.INT8.value: 8,
SparseType.INT4.value: 4,
SparseType.INT2.value: 2,
SparseType.BF16.value: 16,
}[self.value]
def align_size(self) -> int:
return {
SparseType.FP32.value: 1,
SparseType.FP16.value: 2,
SparseType.FP8.value: 4,
SparseType.INT8.value: 4,
SparseType.INT4.value: 8,
SparseType.INT2.value: 16,
SparseType.BF16.value: 2,
}[self.value]
def is_float(self) -> bool:
if (
self.value == SparseType.FP32.value
or self.value == SparseType.FP16.value
or self.value == SparseType.FP8.value
or self.value == SparseType.BF16.value
):
return True
else:
return False
def default_config(self) -> QuantizationConfig:
if self.value == SparseType.FP8.value:
return FP8QuantizationConfig(4, 7)
else:
return QuantizationConfig()
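# For example: SparseType.INT4.bit_rate() == 4, SparseType.INT4.as_dtype() is
# torch.quint4x2, and SparseType.from_dtype(torch.bfloat16) is SparseType.BF16.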
ELEMENT_SIZE: Dict[SparseType, int] = {
SparseType.FP32: 4,
SparseType.FP16: 2,
SparseType.FP8: 1,
SparseType.INT8: 1,
SparseType.BF16: 2,
# SparseType.INT4: 0.5,
}
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Callable
import torch
class BatchAuc(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
def forward(
self,
n_tasks: int,
predictions: torch.Tensor,
labels: torch.Tensor,
weights: torch.Tensor,
) -> torch.Tensor:
_, sorted_indices = torch.sort(predictions, descending=True, dim=-1)
sorted_labels = torch.gather(labels, 1, sorted_indices)
sorted_weights = torch.gather(weights, 1, sorted_indices)
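        # cum_fp / cum_tp trace an (unnormalized) ROC curve per task; the
        # trapezoid rule integrates TP over FP, dividing by fac normalizes both
        # axes, and 0.5 is used as the no-signal default when fac == 0.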
cum_fp = torch.cumsum(sorted_weights * (1.0 - sorted_labels), dim=-1)
cum_tp = torch.cumsum(sorted_weights * sorted_labels, dim=-1)
fac = cum_fp[:, -1] * cum_tp[:, -1]
auc = torch.where(fac == 0, 0.5, torch.trapz(cum_tp, cum_fp, dim=-1) / fac)
return auc
class Auc(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
def forward(
self,
n_tasks: int,
predictions: torch.Tensor,
labels: torch.Tensor,
weights: torch.Tensor,
) -> torch.Tensor:
_, sorted_indices = torch.sort(predictions, descending=True, dim=-1)
aucs = []
for sorted_indices_i, labels_i, weights_i in zip(
sorted_indices, labels, weights
):
sorted_labels = torch.index_select(labels_i, dim=0, index=sorted_indices_i)
sorted_weights = torch.index_select(
weights_i, dim=0, index=sorted_indices_i
)
cum_fp = torch.cumsum(sorted_weights * (1.0 - sorted_labels), dim=0)
cum_tp = torch.cumsum(sorted_weights * sorted_labels, dim=0)
auc = torch.where(
cum_fp[-1] * cum_tp[-1] == 0,
0.5, # 0.5 is the no-signal default value for auc.
torch.trapz(cum_tp, cum_fp) / cum_fp[-1] / cum_tp[-1],
)
aucs.append(auc.view(1))
return torch.cat(aucs)
class AucJiterator(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
# Jiterator only works with elementwise kernels
fp_code_string = """
template <typename T> T fp(T weights, T labels) {
return weights * (1.0 - labels);
}"""
tp_code_string = """
template <typename T> T tp(T weights, T labels) {
return weights * labels;
}"""
# pyre-ignore [4]
self.jitted_fp: Callable[..., Any] = torch.cuda.jiterator._create_jit_fn(
fp_code_string
)
# pyre-ignore [4]
self.jitted_tp: Callable[..., Any] = torch.cuda.jiterator._create_jit_fn(
tp_code_string
)
def forward(
self,
n_tasks: int,
predictions: torch.Tensor,
labels: torch.Tensor,
weights: torch.Tensor,
) -> torch.Tensor:
_, sorted_indices = torch.sort(predictions, descending=True, dim=-1)
aucs = []
for sorted_indices_i, labels_i, weights_i in zip(
sorted_indices, labels, weights
):
sorted_labels = torch.index_select(labels_i, dim=0, index=sorted_indices_i)
sorted_weights = torch.index_select(
weights_i, dim=0, index=sorted_indices_i
)
cum_fp = torch.cumsum(self.jitted_fp(sorted_weights, sorted_labels), dim=0)
cum_tp = torch.cumsum(self.jitted_tp(sorted_weights, sorted_labels), dim=0)
auc = torch.where(
cum_fp[-1] * cum_tp[-1] == 0,
0.5, # 0.5 is the no-signal default value for auc.
torch.trapz(cum_tp, cum_fp) / cum_fp[-1] / cum_tp[-1],
)
aucs.append(auc.view(1))
return torch.cat(aucs)
class BatchAucJiterator(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
# Jiterator only works with elementwise kernels
fp_code_string = """
template <typename T> T fp(T weights, T labels) {
return weights * (1.0 - labels);
}"""
tp_code_string = """
template <typename T> T tp(T weights, T labels) {
return weights * labels;
}"""
# pyre-ignore [4]
self.jitted_fp: Callable[..., Any] = torch.cuda.jiterator._create_jit_fn(
fp_code_string
)
# pyre-ignore [4]
self.jitted_tp: Callable[..., Any] = torch.cuda.jiterator._create_jit_fn(
tp_code_string
)
def forward(
self,
n_tasks: int,
predictions: torch.Tensor,
labels: torch.Tensor,
weights: torch.Tensor,
) -> torch.Tensor:
_, sorted_indices = torch.sort(predictions, descending=True, dim=-1)
sorted_labels = torch.gather(labels, 1, sorted_indices)
sorted_weights = torch.gather(weights, 1, sorted_indices)
cum_fp = torch.cumsum(self.jitted_fp(sorted_weights, sorted_labels), dim=-1)
cum_tp = torch.cumsum(self.jitted_tp(sorted_weights, sorted_labels), dim=-1)
fac = cum_fp[:, -1] * cum_tp[:, -1]
auc = torch.where(fac == 0, 0.5, torch.trapz(cum_tp, cum_fp, dim=-1) / fac)
return auc
def auc(
n_tasks: int, predictions: torch.Tensor, labels: torch.Tensor, weights: torch.Tensor
) -> torch.Tensor:
_, sorted_indices = torch.sort(predictions, descending=True, dim=-1)
return torch.ops.fbgemm.batch_auc(n_tasks, sorted_indices, labels, weights)
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import logging
from itertools import accumulate
from typing import List, Optional
import torch
from torch import nn
try:
# pyre-ignore[21]
from fbgemm_gpu import open_source # noqa: F401
except Exception:
torch.ops.load_library(
"//deeplearning/fbgemm/fbgemm_gpu:permute_pooled_embedding_ops_gpu"
)
class PermutePooledEmbeddings(nn.Module):
def __init__(
self,
embs_dims: List[int],
permute: List[int],
device: Optional[torch.device] = None,
) -> None:
super(PermutePooledEmbeddings, self).__init__()
logging.info("Using Permute Pooled Embeddings")
self.register_buffer(
"_offset_dim_list",
torch.tensor(
[0] + list(accumulate(embs_dims)), device=device, dtype=torch.int64
),
)
self.register_buffer(
"_permute", torch.tensor(permute, device=device, dtype=torch.int64)
)
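        # Build the inverse permutation (inv_permute[p] = i) so the op can map
        # permuted outputs back to their original slots, e.g. in the backward pass.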
inv_permute: List[int] = [0] * len(permute)
for i, p in enumerate(permute):
inv_permute[p] = i
self.register_buffer(
"_inv_permute", torch.tensor(inv_permute, device=device, dtype=torch.int64)
)
# `Union[BoundMethod[typing.Callable(torch.Tensor.tolist)[[Named(self,
# torch.Tensor)], List[typing.Any]], torch.Tensor], nn.Module, torch.Tensor]`
# is not a function.
inv_embs_dims = [embs_dims[i] for i in permute]
self.register_buffer(
"_inv_offset_dim_list",
torch.tensor(
[0] + list(accumulate(inv_embs_dims)), device=device, dtype=torch.int64
),
)
def forward(self, pooled_embs: torch.Tensor) -> torch.Tensor:
result = torch.ops.fbgemm.permute_pooled_embs_auto_grad(
pooled_embs,
self._offset_dim_list.to(device=pooled_embs.device),
self._permute.to(device=pooled_embs.device),
self._inv_offset_dim_list.to(device=pooled_embs.device),
self._inv_permute.to(device=pooled_embs.device),
)
return result
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
# flake8: noqa F401
from fbgemm_gpu.split_embedding_optimizer_codegen.optimizer_args import (
SplitEmbeddingArgs,
SplitEmbeddingOptimizerParams,
)
from fbgemm_gpu.split_embedding_optimizer_codegen.split_embedding_optimizer_rowwise_adagrad import (
SplitEmbeddingRowwiseAdagrad,
)
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import enum
import typing
from typing import Any, Callable, List, Tuple
# Create enums in given namespace with information from query_op
def create_enums(
namespace: typing.Dict[str, Any],
query_op: Callable[[], List[Tuple[str, List[Tuple[str, int]]]]],
) -> None:
for enum_name, items in query_op():
# Create matching python enumeration
# pyre-fixme[6]: For 2nd argument expected `None` but got `List[Tuple[str,
# int]]`.
new_enum = enum.Enum(enum_name, items)
# and store it in the module
namespace[enum_name] = new_enum
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import logging
from typing import Callable, List, Optional, Tuple, TypeVar
import numpy as np
import torch
from fbgemm_gpu.split_embedding_configs import (
FP8QuantizationConfig,
SparseType,
) # usort:skip
# pyre-fixme[21]: Could not find name `default_rng` in `numpy.random` (stubbed).
from numpy.random import default_rng
logging.basicConfig(level=logging.DEBUG)
Deviceable = TypeVar(
"Deviceable", torch.nn.EmbeddingBag, torch.nn.Embedding, torch.Tensor
)
def round_up(a: int, b: int) -> int:
return int((a + b - 1) // b) * b
def get_device() -> torch.device:
# pyre-fixme[7]: Expected `device` but got `Union[int, device]`.
return (
torch.cuda.current_device()
if torch.cuda.is_available()
else torch.device("cpu")
)
def to_device(t: Deviceable, use_cpu: bool) -> Deviceable:
# pyre-fixme[7]: Expected `Deviceable` but got `Union[Tensor,
# torch.nn.EmbeddingBag]`.
return t.cpu() if use_cpu else t.cuda()
# Merged indices with shape (T, B, L) -> (flattened indices with shape
# (T * B * L), offsets with shape (T * B + 1))
def get_table_batched_offsets_from_dense(
merged_indices: torch.Tensor,
L: Optional[int] = None,
total_B: Optional[int] = None,
use_cpu: bool = False,
) -> Tuple[torch.Tensor, torch.Tensor]:
if L is None and total_B is None:
(T, B, L) = merged_indices.size()
total_B = T * B
lengths = np.ones(total_B) * L
return (
to_device(merged_indices.contiguous().view(-1), use_cpu),
to_device(
torch.tensor(([0] + np.cumsum(lengths).tolist())).long(),
use_cpu,
),
)
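# For example, merged indices of shape (T=2, B=2, L=3) flatten to 12 indices with
# offsets [0, 3, 6, 9, 12].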
def get_offsets_from_dense(indices: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
(B, L) = indices.size()
return (
indices.contiguous().view(-1),
torch.tensor(
np.cumsum(np.asarray([0] + [L for _ in range(B)])[:-1]).astype(np.int64)
),
)
def b_indices(
b: Callable[..., torch.Tensor],
x: torch.Tensor,
per_sample_weights: Optional[torch.Tensor] = None,
use_cpu: bool = False,
do_pooling: bool = True,
) -> torch.Tensor:
(indices, offsets) = get_offsets_from_dense(x)
if do_pooling:
return b(
to_device(indices, use_cpu),
to_device(offsets, use_cpu),
per_sample_weights=per_sample_weights,
)
else:
return b(to_device(indices, use_cpu))
def generate_requests( # noqa C901
iters: int,
B: int,
T: int,
L: int,
E: int,
# inter-batch indices reuse rate
reuse: float = 0.0,
# alpha <= 1.0: use uniform distribution
# alpha > 1.0: use zipf distribution
alpha: float = 1.0,
zipf_oversample_ratio: int = 3,
weighted: bool = False,
requests_data_file: Optional[str] = None,
# Comma-separated list of table numbers
tables: Optional[str] = None,
# If sigma_L is not None, treat L as mu_L and generate Ls from sigma_L
# and mu_L
sigma_L: Optional[int] = None,
emulate_pruning: bool = False,
use_cpu: bool = False,
    deterministic_output: bool = False,  # By default, generate_requests uses numpy.random.default_rng without a fixed seed, so the indices tensor varies between calls; set deterministic_output=True to use a fixed seed for repeatable outputs
length_dist: str = "normal", # distribution of embedding sequence lengths
) -> List[Tuple[torch.IntTensor, torch.IntTensor, Optional[torch.Tensor]]]:
# TODO: refactor and split into helper functions to separate load from file,
# generate from distribution, and other future methods of generating data
if requests_data_file is not None:
indices_tensor, offsets_tensor, lengths_tensor = torch.load(requests_data_file)
average_L = 0
if tables is not None:
emb_tables = tuple(int(x) for x in tables.split(","))
indices = torch.zeros(0, dtype=indices_tensor.dtype)
offsets = torch.zeros(1, dtype=offsets_tensor.dtype)
total_L = 0
for t in emb_tables:
t_offsets = offsets_tensor[B * t : B * (t + 1) + 1]
total_L += t_offsets[-1] - t_offsets[0]
indices = torch.cat(
(indices, indices_tensor[t_offsets[0] : t_offsets[-1]])
)
offsets = torch.cat(
(
offsets,
t_offsets[1:] - t_offsets[0] + offsets[-1],
)
)
indices_tensor = indices
offsets_tensor = offsets
average_L = int(total_L / B)
assert np.prod(offsets_tensor.size()) - 1 == np.prod((T, B)), (
f"Requested tables: {emb_tables} "
f"does not conform to inputs (T, B) = ({T}, {B})."
)
logging.warning(
f"Using (indices = {indices_tensor.size()}, offsets = {offsets_tensor.size()}) based "
f"on tables: {emb_tables}"
)
else:
average_L = int((offsets_tensor[-1] - offsets_tensor[0]) / B)
assert (np.prod(offsets_tensor.size()) - 1) == np.prod((T, B)), (
f"Data file (indices = {indices_tensor.size()}, "
f"offsets = {offsets_tensor.size()}, lengths = {lengths_tensor.size()}) "
f"does not conform to inputs (T, B) = ({T}, {B})."
)
assert (
L == average_L
), f"Requested L does not align with provided data file ({L} vs. {average_L})"
assert E > max(indices_tensor), (
f"Number of embeddings is not enough to support maximum index "
f"provided by data file {E} vs. {max(indices_tensor)}"
)
weights_tensor = (
None
if not weighted
else torch.randn(indices_tensor.size(), device=get_device())
)
rs = []
for _ in range(iters):
rs.append(
(
indices_tensor.to(get_device()),
offsets_tensor.to(get_device()),
weights_tensor,
)
)
return rs
# Generate L from stats
if sigma_L is not None:
use_variable_L = True
if length_dist == "uniform":
# TODO: either make these separate parameters or make a separate version of
# generate_requests to handle the uniform dist case once whole
# generate_requests function is refactored to split into helper functions
# for each use case.
# L represents the lower bound when the uniform distribution is used
lower_bound = L
            # sigma_L represents the upper bound when the uniform distribution is used
upper_bound = sigma_L + 1
Ls = np.random.randint(
lower_bound,
upper_bound,
(T, B),
dtype=np.int32,
)
else: # normal dist
Ls = np.random.normal(loc=L, scale=sigma_L, size=T * B).astype(int)
            # Clamp Ls so they are non-negative
Ls[Ls < 0] = 0
# Use the same L distribution across iters
Ls = np.tile(Ls, iters)
L = Ls.max()
# Make it exclusive cumsum
L_offsets = torch.from_numpy(np.insert(Ls.cumsum(), 0, 0)).to(torch.long)
else:
use_variable_L = False
# Init to suppress the pyre error
L_offsets = torch.empty(1)
if alpha <= 1.0:
all_indices = torch.randint(
low=0,
high=E,
size=(iters, T, B, L),
device="cpu" if use_variable_L else get_device(),
dtype=torch.int32,
)
# each bag is usually sorted
(all_indices, _) = torch.sort(all_indices)
if use_variable_L:
all_indices = torch.ops.fbgemm.bottom_k_per_row(
all_indices.to(torch.long), L_offsets, False
)
all_indices = all_indices.to(get_device()).int()
else:
all_indices = all_indices.reshape(iters, T, B * L)
else:
        assert E >= L, "num-embeddings must be greater than or equal to bag-size"
# oversample and then remove duplicates to obtain sampling without
# replacement
zipf_shape = (iters, T, B, zipf_oversample_ratio * L)
if torch.cuda.is_available():
zipf_shape_total_len = np.prod(zipf_shape)
all_indices_list = []
# process 8 GB at a time on GPU
chunk_len = int(1e9)
for chunk_begin in range(0, zipf_shape_total_len, chunk_len):
all_indices_gpu = torch.ops.fbgemm.zipf_cuda(
alpha,
min(zipf_shape_total_len - chunk_begin, chunk_len),
seed=torch.randint(2**31 - 1, (1,))[0],
)
all_indices_list.append(all_indices_gpu.cpu())
all_indices = torch.cat(all_indices_list).reshape(zipf_shape)
else:
all_indices = torch.as_tensor(np.random.zipf(a=alpha, size=zipf_shape))
all_indices = (all_indices - 1) % E
if use_variable_L:
all_indices = torch.ops.fbgemm.bottom_k_per_row(
all_indices, L_offsets, True
)
else:
all_indices = torch.ops.fbgemm.bottom_k_per_row(
all_indices, torch.tensor([0, L], dtype=torch.long), True
)
if deterministic_output:
rng = default_rng(12345)
else:
rng = default_rng()
permutation = torch.as_tensor(
rng.choice(E, size=all_indices.max().item() + 1, replace=False)
)
all_indices = permutation.gather(0, all_indices.flatten())
all_indices = all_indices.to(get_device()).int()
if not use_variable_L:
all_indices = all_indices.reshape(iters, T, B * L)
if reuse > 0.0:
assert (
not use_variable_L
), "Does not support generating Ls from stats for reuse > 0.0"
for it in range(iters - 1):
for t in range(T):
reused_indices = torch.randperm(B * L, device=get_device())[
: int(B * L * reuse)
]
all_indices[it + 1, t, reused_indices] = all_indices[
it, t, reused_indices
]
# Some indices are set to -1 for emulating pruned rows.
if emulate_pruning:
for it in range(iters):
for t in range(T):
num_negative_indices = B // 2
random_locations = torch.randint(
low=0,
high=(B * L),
size=(num_negative_indices,),
device=torch.cuda.current_device(),
dtype=torch.int32,
)
all_indices[it, t, random_locations] = -1
rs = []
for it in range(iters):
if use_variable_L:
start_offset = L_offsets[it * T * B]
it_L_offsets = torch.concat(
[
torch.zeros(1),
L_offsets[it * T * B + 1 : (it + 1) * T * B + 1] - start_offset,
]
)
weights_tensor = (
None
if not weighted
else torch.randn(
int(it_L_offsets[-1].item()), device=get_device()
) # per sample weights will always be FP32
)
rs.append(
(
all_indices[start_offset : L_offsets[(it + 1) * T * B]],
it_L_offsets.to(get_device()),
weights_tensor,
)
)
else:
weights_tensor = (
None
if not weighted
else torch.randn(
T * B * L, device=get_device()
) # per sample weights will always be FP32
)
rs.append(
get_table_batched_offsets_from_dense(
all_indices[it].view(T, B, L), use_cpu=use_cpu
)
+ (weights_tensor,)
)
return rs
def quantize_embs(
weight: torch.Tensor,
weight_ty: SparseType,
fp8_config: Optional[FP8QuantizationConfig] = None,
) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
weight = weight.detach()
if weight_ty == SparseType.FP32:
q_weight = weight.float()
res_weight = q_weight.view(torch.uint8)
return (res_weight, None)
elif weight_ty == SparseType.FP16:
q_weight = weight.half()
res_weight = q_weight.view(torch.uint8)
return (res_weight, None)
elif weight_ty == SparseType.FP8:
assert fp8_config is not None
# Quantize FP32 to HPF8
res_weight = torch.ops.fbgemm.FloatToHFP8Quantized(
weight.float(),
fp8_config.get("exponent_bits"),
fp8_config.get("exponent_bias"),
fp8_config.get("max_position"),
)
return (res_weight, None)
elif weight_ty == SparseType.INT8:
# Note that FloatToFused8BitRowwiseQuantized might have additional padding
# for alignment if embedding dimension is not a multiple of 4:
# https://fburl.com/code/z009xsy6
q_weight = torch.ops.fbgemm.FloatToFused8BitRowwiseQuantized(weight)
res_weight = q_weight[:, :-8].view(torch.uint8)
res_scale_shift = torch.tensor(
q_weight[:, -8:].view(torch.float32).to(torch.float16).view(torch.uint8)
) # [-4, -2]: scale; [-2:]: bias
return (res_weight, res_scale_shift)
elif weight_ty == SparseType.INT4 or weight_ty == SparseType.INT2:
        # Note that the FP32 -> INT4/INT2 conversion op below might have additional padding
# for alignment: https://fburl.com/code/xx9kkduf
q_weight = torch.ops.fbgemm.FloatToFusedNBitRowwiseQuantizedSBHalf(
weight,
bit_rate=weight_ty.bit_rate(),
)
res_weight = q_weight[:, :-4].view(torch.uint8)
res_scale_shift = torch.tensor(
q_weight[:, -4:].view(torch.uint8)
) # [-4, -2]: scale; [-2:]: bias
return (res_weight, res_scale_shift)
else:
raise RuntimeError("Unsupported SparseType: {}".format(weight_ty))
def dequantize_embs(
weights: torch.Tensor,
scale_shift: torch.Tensor,
weight_ty: SparseType,
use_cpu: bool,
fp8_config: Optional[FP8QuantizationConfig] = None,
) -> torch.Tensor:
print(f"weight_ty: {weight_ty}")
assert (
weights.dtype == torch.uint8
), "The input tensor for dequantize_embs function needs to be byte tensor"
th_weights = weights
if scale_shift is not None:
th_scale_shift: torch.Tensor = scale_shift.view(torch.float16).to(torch.float32)
if weight_ty == SparseType.INT4:
(E, D_2) = th_weights.shape
D = D_2 * 2
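        # Each uint8 byte packs two 4-bit values; comp(i) extracts nibble i and
        # applies the per-row fp16 scale and bias stored in scale_shift.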
def comp(i: int) -> torch.Tensor:
subs = th_weights.view(torch.uint8) >> (i * 4)
sub_mask = subs & 0xF
result = sub_mask.to(torch.float32) * th_scale_shift[:, 0].reshape(
-1, 1
).to(torch.float32) + th_scale_shift[:, 1].reshape(-1, 1).to(torch.float32)
return result.to(torch.float32)
comps = [comp(i) for i in range(2)]
comps = torch.stack(comps)
comps = comps.permute(1, 2, 0)
comps = comps.reshape(E, D)
return to_device(torch.tensor(comps), use_cpu)
elif weight_ty == SparseType.INT2:
(E, D_4) = th_weights.shape
D = D_4 * 4
# pyre-fixme[53]: Captured variable `scale_shift` is not annotated.
# pyre-fixme[53]: Captured variable `weights` is not annotated.
def comp(i: int) -> torch.Tensor:
subs = th_weights.view(torch.uint8) >> (i * 2)
sub_mask = subs & 0x3
result = sub_mask.to(torch.float32) * th_scale_shift[:, 0].reshape(
-1, 1
).to(torch.float32) + th_scale_shift[:, 1].reshape(-1, 1).to(torch.float32)
return result.to(torch.float32)
comps = [comp(i) for i in range(4)]
comps = torch.stack(comps)
comps = comps.permute(1, 2, 0)
comps = comps.reshape(E, D)
return to_device(torch.tensor(comps), use_cpu)
elif weight_ty == SparseType.INT8:
(E, D) = th_weights.shape
comps = th_weights.to(torch.float32) * th_scale_shift[:, 0].reshape(-1, 1).to(
torch.float32
) + th_scale_shift[:, 1].reshape(-1, 1).to(torch.float32)
return to_device(torch.tensor(comps), use_cpu)
elif weight_ty == SparseType.FP8:
assert fp8_config is not None
assert scale_shift is None
# Dequantize HPF8 to FP32
comps = torch.ops.fbgemm.HFP8QuantizedToFloat(
weights,
fp8_config.get("exponent_bits"),
fp8_config.get("exponent_bias"),
)
return to_device(comps, use_cpu)
elif weight_ty == SparseType.FP16:
assert scale_shift is None
comps = th_weights.view(torch.half)
return to_device(torch.tensor(comps), use_cpu)
elif weight_ty == SparseType.FP32:
assert scale_shift is None
comps = th_weights.view(torch.float32)
# pyre-fixme[7]: Expected `Tensor` but got implicit return value of `None`.
return to_device(torch.tensor(comps), use_cpu)
def fake_quantize_embs(
weights: torch.Tensor,
scale_shift: Optional[torch.Tensor],
dequant_weights: torch.Tensor,
weight_ty: SparseType,
use_cpu: bool,
fp8_config: Optional[FP8QuantizationConfig] = None,
) -> None:
assert (
weights.dtype == torch.uint8
), "The input tensor for dequantize_embs function needs to be byte tensor"
th_weights = weights
if scale_shift is not None:
th_scale_shift: torch.Tensor = (
scale_shift.contiguous().view(torch.float16).to(torch.float32)
)
if weight_ty == SparseType.INT4:
(E, D_2) = th_weights.shape
D = D_2 * 2
def comp(i: int) -> torch.Tensor:
subs = th_weights.view(torch.uint8) >> (i * 4)
sub_mask = subs & 0xF
result = sub_mask.to(torch.float32) * th_scale_shift[:, 0].reshape(
-1, 1
).to(torch.float32) + th_scale_shift[:, 1].reshape(-1, 1).to(torch.float32)
return result.to(torch.float32)
comps = [comp(i) for i in range(2)]
comps = torch.stack(comps)
comps = comps.permute(1, 2, 0)
comps = comps.reshape(E, D)
dequant_weights.copy_(to_device(comps, use_cpu))
elif weight_ty == SparseType.INT2:
(E, D_4) = th_weights.shape
D = D_4 * 4
# pyre-fixme[53]: Captured variable `scale_shift` is not annotated.
# pyre-fixme[53]: Captured variable `weights` is not annotated.
def comp(i: int) -> torch.Tensor:
subs = th_weights.view(torch.uint8) >> (i * 2)
sub_mask = subs & 0x3
result = sub_mask.to(torch.float32) * th_scale_shift[:, 0].reshape(
-1, 1
).to(torch.float32) + th_scale_shift[:, 1].reshape(-1, 1).to(torch.float32)
return result.to(torch.float32)
comps = [comp(i) for i in range(4)]
comps = torch.stack(comps)
comps = comps.permute(1, 2, 0)
comps = comps.reshape(E, D)
dequant_weights.copy_(to_device(comps, use_cpu))
elif weight_ty == SparseType.INT8:
(E, D) = th_weights.shape
comps = th_weights.to(torch.float32) * th_scale_shift[:, 0].reshape(-1, 1).to(
torch.float32
) + th_scale_shift[:, 1].reshape(-1, 1).to(torch.float32)
dequant_weights.copy_(to_device(comps, use_cpu))
elif weight_ty == SparseType.FP8:
assert fp8_config is not None
assert scale_shift is None
# Quantize FP32 to HPF8
comps = torch.ops.fbgemm.FloatToHFP8Quantized(
dequant_weights.detach().float(),
fp8_config.get("exponent_bits"),
fp8_config.get("exponent_bias"),
fp8_config.get("max_position"),
)
weights.copy_(comps)
# Dequantize HPF8 to FP32
comps = torch.ops.fbgemm.HFP8QuantizedToFloat(
comps,
fp8_config.get("exponent_bits"),
fp8_config.get("exponent_bias"),
)
dequant_weights.copy_(to_device(comps, use_cpu))
elif weight_ty == SparseType.FP16:
assert scale_shift is None
comps = dequant_weights.detach().half().view(torch.uint8)
weights.copy_(comps)
elif weight_ty == SparseType.FP32:
assert scale_shift is None
comps = dequant_weights.detach().float().view(torch.uint8)
weights.copy_(comps)
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
# pyre-unsafe
import logging
import math
from typing import cast, Optional, Tuple
import torch
from fbgemm_gpu.split_embedding_configs import QuantizationConfig, SparseType
from fbgemm_gpu.split_embedding_utils import FP8QuantizationConfig, quantize_embs
from fbgemm_gpu.split_table_batched_embeddings_ops_common import EmbeddingLocation
from fbgemm_gpu.split_table_batched_embeddings_ops_inference import (
IntNBitTableBatchedEmbeddingBagsCodegen,
)
from fbgemm_gpu.split_table_batched_embeddings_ops_training import (
ComputeDevice,
SplitTableBatchedEmbeddingBagsCodegen,
)
from torch import Tensor # usort:skip
# TODO: add per-feature based converter option (based on embedding_specs during inference)
# TODO: optimize embedding pruning and quantization latency.
class SplitEmbInferenceConverter:
def __init__(
self,
quantize_type: SparseType,
pruning_ratio: Optional[float],
use_array_for_index_remapping: bool = True,
quantization_config: Optional[QuantizationConfig] = None,
):
self.quantize_type = quantize_type
# TODO(yingz): Change the pruning ratio to per-table settings.
self.pruning_ratio = pruning_ratio
self.use_array_for_index_remapping = use_array_for_index_remapping
self.quantization_config = quantization_config
def convert_model(self, model: torch.nn.Module) -> torch.nn.Module:
self._process_split_embs(model)
return model
def _prune_by_weights_l2_norm(self, new_num_rows, weights) -> Tuple[Tensor, float]:
assert new_num_rows > 0
from numpy.linalg import norm
indicators = []
for row in weights:
indicators.append(norm(row.cpu().numpy(), ord=2))
sorted_indicators = sorted(indicators, reverse=True)
threshold = None
for i in range(new_num_rows, len(sorted_indicators)):
if sorted_indicators[i] < sorted_indicators[new_num_rows - 1]:
threshold = sorted_indicators[i]
break
if threshold is None:
threshold = sorted_indicators[-1] - 1
return (torch.tensor(indicators), threshold)
def _prune_embs(
self,
idx: int,
num_rows: int,
module: SplitTableBatchedEmbeddingBagsCodegen,
) -> Tuple[Tensor, Optional[Tensor]]:
# TODO(yingz): Avoid DtoH / HtoD overhead.
weights = module.split_embedding_weights()[idx].cpu()
if self.pruning_ratio is None:
return (weights, None)
new_num_rows = int(math.ceil(num_rows * (1.0 - self.pruning_ratio))) # type: ignore
if new_num_rows == num_rows:
return (weights, None)
(indicators, threshold) = self._prune_by_weights_l2_norm(new_num_rows, weights)
return torch.ops.fbgemm.embedding_bag_rowwise_prune(
weights, indicators, threshold, torch.int32
)
def _get_quantization_config(self, name):
quantization_config = self.quantization_config
if quantization_config is None:
raise RuntimeError("quantization_config must be set for FP8 weight")
return quantization_config.get(name)
def _quantize_embs(
self, weight: Tensor, weight_ty: SparseType
) -> Tuple[Tensor, Optional[Tensor]]:
fp8_quant_config = cast(FP8QuantizationConfig, self.quantization_config)
return quantize_embs(weight, weight_ty, fp8_quant_config)
def _process_split_embs(self, model: torch.nn.Module) -> None:
for name, child in model.named_children():
if isinstance(
child,
SplitTableBatchedEmbeddingBagsCodegen,
):
embedding_specs = []
use_cpu = child.embedding_specs[0][3] == ComputeDevice.CPU
for E, D, _, _ in child.embedding_specs:
weights_ty = self.quantize_type
if D % weights_ty.align_size() != 0:
logging.warning(
f"Embedding dim {D} couldn't be divided by align size {weights_ty.align_size()}!"
)
assert D % 4 == 0
weights_ty = (
SparseType.FP16
) # fall back to FP16 if dimension couldn't be aligned with the required size
embedding_specs.append(("", E, D, weights_ty))
weight_lists = []
new_embedding_specs = []
index_remapping_list = []
for t, (_, E, D, weight_ty) in enumerate(embedding_specs):
# Try to prune embeddings.
(pruned_weight, index_remapping) = self._prune_embs(t, E, child)
new_embedding_specs.append(
(
"",
pruned_weight.size()[0],
D,
weight_ty,
EmbeddingLocation.HOST
if use_cpu
else EmbeddingLocation.DEVICE,
)
)
index_remapping_list.append(index_remapping)
# Try to quantize embeddings.
weight_lists.append(self._quantize_embs(pruned_weight, weight_ty))
is_fp8_weight = self.quantize_type == SparseType.FP8
q_child = IntNBitTableBatchedEmbeddingBagsCodegen(
embedding_specs=new_embedding_specs,
index_remapping=index_remapping_list
if self.pruning_ratio is not None
else None,
pooling_mode=child.pooling_mode,
device="cpu" if use_cpu else torch.cuda.current_device(),
weight_lists=weight_lists,
use_array_for_index_remapping=self.use_array_for_index_remapping,
fp8_exponent_bits=self._get_quantization_config("exponent_bits")
if is_fp8_weight
else None,
fp8_exponent_bias=self._get_quantization_config("exponent_bias")
if is_fp8_weight
else None,
)
setattr(model, name, q_child)
else:
self._process_split_embs(child)
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import os
import torch
try:
torch.ops.load_library(os.path.join(os.path.dirname(__file__), "fbgemm_gpu_py.so"))
except Exception as e:
print(e)
# __init__.py is only used in OSS
# Use existence to check if fbgemm_gpu_py.so has already been loaded
open_source: bool = True
# Re-export docs
from . import _fbgemm_gpu_docs # noqa: F401, E402
# Re-export the version string from the auto-generated version file
from ._fbgemm_gpu_version import __version__ # noqa: F401, E402
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import logging
from itertools import accumulate
from typing import List, Optional
import torch
from torch import nn
try:
# pyre-ignore[21]
from fbgemm_gpu import open_source # noqa: F401
except Exception:
torch.ops.load_library(
"//deeplearning/fbgemm/fbgemm_gpu:permute_pooled_embedding_ops_split_gpu"
)
torch.ops.load_library(
"//deeplearning/fbgemm/fbgemm_gpu:permute_pooled_embedding_ops_split_cpu"
)
@torch.fx.wrap
def _fx_wrap_tensor_to_device(t: torch.Tensor, device: torch.device) -> torch.Tensor:
return t.to(device=device)
class PermutePooledEmbeddingsSplit(nn.Module):
def __init__(
self,
embs_dims: List[int],
permute: List[int],
device: Optional[torch.device] = None,
) -> None:
super(PermutePooledEmbeddingsSplit, self).__init__()
logging.info("Using Permute Pooled Embeddings")
self.register_buffer(
"_offset_dim_list",
torch.tensor(
[0] + list(accumulate(embs_dims)), device=device, dtype=torch.int64
),
)
self.register_buffer(
"_permute", torch.tensor(permute, device=device, dtype=torch.int64)
)
inv_permute: List[int] = [0] * len(permute)
for i, p in enumerate(permute):
inv_permute[p] = i
self.register_buffer(
"_inv_permute", torch.tensor(inv_permute, device=device, dtype=torch.int64)
)
# `Union[BoundMethod[typing.Callable(torch.Tensor.tolist)[[Named(self,
# torch.Tensor)], List[typing.Any]], torch.Tensor], nn.Module, torch.Tensor]`
# is not a function.
inv_embs_dims = [embs_dims[i] for i in permute]
self.register_buffer(
"_inv_offset_dim_list",
torch.tensor(
[0] + list(accumulate(inv_embs_dims)), device=device, dtype=torch.int64
),
)
def forward(self, pooled_embs: torch.Tensor) -> torch.Tensor:
result = torch.ops.fbgemm.permute_pooled_embs_auto_grad_split(
pooled_embs,
_fx_wrap_tensor_to_device(self._offset_dim_list, device=pooled_embs.device),
_fx_wrap_tensor_to_device(self._permute, device=pooled_embs.device),
_fx_wrap_tensor_to_device(
self._inv_offset_dim_list, device=pooled_embs.device
),
_fx_wrap_tensor_to_device(self._inv_permute, device=pooled_embs.device),
)
return result
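# Hedged reference sketch: the permutation semantics in plain PyTorch.
# PermutePooledEmbeddingsSplit itself dispatches to
# torch.ops.fbgemm.permute_pooled_embs_auto_grad_split, which requires the fbgemm_gpu
# native library; the slicing below only illustrates the expected result.
if __name__ == "__main__":
    embs_dims = [2, 3, 4]                    # pooled dim of each feature
    permute = [2, 0, 1]                      # output feature order
    pooled = torch.arange(9.0).view(1, 9)    # [B, sum(embs_dims)]
    chunks = pooled.split(embs_dims, dim=1)  # per-feature blocks
    reference = torch.cat([chunks[p] for p in permute], dim=1)
    # PermutePooledEmbeddingsSplit(embs_dims, permute)(pooled) is expected to match `reference`.
    print(reference)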
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
# The code in this file is refactored from https://fburl.com/code/p2gy2gxb
# based on "Amy Yang et al., Training Deep Learning Recommendation Model with
# Quantized Collective Communications", DLP-KDD 2020.
import logging
from typing import Optional, TypeVar
import torch
from fbgemm_gpu.quantize_utils import (
bf16_to_fp32,
fp16_to_fp32,
fp32_to_bf16_with_clamp,
fp32_to_fp16_with_clamp,
fp32_to_hfp8_with_clamp,
hfp8_to_fp32,
)
from fbgemm_gpu.split_embedding_configs import SparseType
from torch.autograd.profiler import record_function # usort:skip
logger: logging.Logger = logging.getLogger()
# FP8 configurations
ebits, mbits, bias = 4, 3, 15
max_pos: float = (2 ** ((1 << ebits) - 2 - bias)) * (2 - 2 ** (-mbits))
# INT8 configurations
ROW_DIM_DEFAULT = 32
_T = TypeVar("_T")
def none_throws(optional: Optional[_T], message: str = "Unexpected `None`") -> _T:
if optional is None:
raise AssertionError(message)
return optional
class QuantizationContext:
def __init__(self, row_dim: int = ROW_DIM_DEFAULT) -> None:
self.row_dim = row_dim
self.row_dim_quant: int = -1
def _quantize_tensor(
input_tensor: torch.Tensor,
comm_precision: SparseType,
ctx: Optional[QuantizationContext] = None,
is_fwd: bool = True,
) -> torch.Tensor:
if comm_precision == SparseType.FP32:
return input_tensor
elif comm_precision == SparseType.FP16:
return fp32_to_fp16_with_clamp(input_tensor)
elif comm_precision == SparseType.BF16:
return fp32_to_bf16_with_clamp(input_tensor)
elif comm_precision == SparseType.FP8:
# return fp32_to_hfp8_with_clamp(input_tensor, ebits, mbits, bias)
if ctx is not None and ctx.row_dim > 0:
ctx = none_throws(ctx)
row_dim = ctx.row_dim
input_2d = input_tensor.view((-1, row_dim)) if row_dim > 0 else input_tensor
input_2d_quant = torch.ops.fbgemm.FloatToFP8RowwiseQuantized(
input_2d, is_fwd
)
row_dim_quant = input_2d_quant.shape[1]
input_quant_all2all = input_2d_quant.view((-1))
ctx.row_dim_quant = row_dim_quant
return input_quant_all2all
else:
return fp32_to_hfp8_with_clamp(input_tensor, ebits, mbits, bias)
elif comm_precision == SparseType.INT8:
ctx = none_throws(ctx)
row_dim = ctx.row_dim
input_2d = input_tensor.view((-1, row_dim)) if row_dim > 0 else input_tensor
input_2d_quant = torch.ops.fbgemm.FloatToFused8BitRowwiseQuantized(input_2d)
row_dim_quant = input_2d_quant.shape[1]
input_quant_all2all = input_2d_quant.view((-1))
ctx.row_dim_quant = row_dim_quant
return input_quant_all2all
else:
raise ValueError(f"comm_precision={comm_precision} is not supported")
def _dequantize_tensor(
quantized_tensor: torch.Tensor,
comm_precision: SparseType,
ctx: Optional[QuantizationContext] = None,
is_fwd: bool = True,
) -> torch.Tensor:
if comm_precision == SparseType.FP32:
assert quantized_tensor.dtype == torch.float
return quantized_tensor
elif comm_precision == SparseType.FP16:
assert quantized_tensor.dtype == torch.half
return fp16_to_fp32(quantized_tensor)
elif comm_precision == SparseType.BF16:
assert quantized_tensor.dtype == torch.bfloat16
return bf16_to_fp32(quantized_tensor)
elif comm_precision == SparseType.FP8:
if ctx is not None and ctx.row_dim > 0:
row_dim_quant = ctx.row_dim_quant
quantized_tensor_2d = quantized_tensor.view((-1, row_dim_quant))
dequant_tensor = torch.ops.fbgemm.FP8RowwiseQuantizedToFloat(
quantized_tensor_2d, is_fwd
)
return dequant_tensor.view(-1)
else:
assert quantized_tensor.dtype == torch.uint8
return hfp8_to_fp32(quantized_tensor, ebits, bias)
elif comm_precision == SparseType.INT8:
ctx = none_throws(ctx)
row_dim_quant = ctx.row_dim_quant
quantized_tensor_2d = quantized_tensor.view((-1, row_dim_quant))
dequant_tensor = torch.ops.fbgemm.Fused8BitRowwiseQuantizedToFloat(
quantized_tensor_2d
)
return dequant_tensor.view(-1)
else:
raise ValueError(f"comm_precision={comm_precision} is not supported")
class QuantizedCommCodec:
# Concrete implementation of QuantizedCommCodec provided by FBGEMM functions.
def __init__(
self,
comm_precision: SparseType,
loss_scale: Optional[float] = None,
row_dim: Optional[int] = None,
is_fwd: bool = True,
) -> None:
if loss_scale is not None:
if comm_precision not in [SparseType.FP16, SparseType.BF16]:
logger.warning(
f"Setting loss scale for comm_precision={comm_precision} is not supported. Overriding to None"
)
loss_scale = None
logger.info(
f"Creating QuantizedCommsCodec comm_precision:{comm_precision}, loss_scale:{loss_scale}"
)
self._comm_precision = comm_precision
self._loss_scale = loss_scale
self._is_fwd = is_fwd
self._row_dim: int = -1 if row_dim is None else row_dim
def encode(
self, input_tensor: torch.Tensor, ctx: Optional[QuantizationContext] = None
) -> torch.Tensor:
if self._loss_scale is not None:
input_tensor = self._loss_scale * input_tensor
with record_function(
f"## encoder {self._comm_precision} {self._loss_scale} ##"
):
output = _quantize_tensor(
input_tensor,
self._comm_precision,
ctx,
self._is_fwd,
)
return output
def decode(
self, input_tensor: torch.Tensor, ctx: Optional[QuantizationContext] = None
) -> torch.Tensor:
if self._loss_scale is not None:
input_tensor = input_tensor / self._loss_scale
with record_function(
f"## decoder {self._comm_precision} {self._loss_scale} ##"
):
dequantized_tensor = _dequantize_tensor(
input_tensor, self._comm_precision, ctx, self._is_fwd
)
return dequantized_tensor
def calc_quantized_size(
self, input_len: int, ctx: Optional[QuantizationContext] = None
) -> int:
# Use the same logic as in _float_to_fused8bitrowwise_gpu_t()
if self._comm_precision == SparseType.INT8 or (
self._comm_precision == SparseType.FP8 and self._row_dim > 0
):
ctx = none_throws(ctx)
assert input_len % ctx.row_dim == 0, (
f"input_len {input_len} is not a multiple of row dim {ctx.row_dim} "
"Please check your batch size (power of 2 batch size is recommended)"
)
nrows = input_len // ctx.row_dim
ncols = (ctx.row_dim + 3) // 4 * 4 + 2 * 4
return nrows * ncols
else:
return input_len
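# Worked example (comments only): for INT8 with ctx.row_dim = 32 and input_len = 64,
# nrows = 64 // 32 = 2 and ncols = (32 + 3) // 4 * 4 + 2 * 4 = 40 (32 data bytes plus
# a 4-byte fp32 scale and a 4-byte fp32 bias per row), so the quantized size is 80 bytes.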
@property
def quantized_dtype(self) -> torch.dtype:
return self._comm_precision.as_dtype()
def create_context(self) -> Optional[QuantizationContext]:
# fp8 rowwise is activated when row_dim > 0
if self._comm_precision == SparseType.FP8:
return QuantizationContext(self._row_dim)
# int8 rowwise is default
return QuantizationContext()
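# Hedged usage sketch, assuming fbgemm_gpu is installed so the imports above succeed.
# The FP16 path is pure PyTorch (clamp + cast), so this round trip runs without a GPU.
if __name__ == "__main__":
    codec = QuantizedCommCodec(SparseType.FP16)
    x = torch.randn(8) * 1.0e6                # values outside the fp16 range get clamped
    q = codec.encode(x)                       # clamp to the fp16 range, cast to half
    y = codec.decode(q)                       # cast back to float32
    assert q.dtype == codec.quantized_dtype == torch.half
    assert codec.calc_quantized_size(x.numel()) == x.numel()
    print(y.dtype)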
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import logging
import torch
logger: logging.Logger = logging.getLogger()
try:
# pyre-ignore[21]
from fbgemm_gpu import open_source # noqa: F401
except Exception:
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops")
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops_cpu")
TORCH_HALF_MIN: float = torch.finfo(torch.float16).min
TORCH_HALF_MAX: float = torch.finfo(torch.float16).max
TORCH_BFLOAT16_MIN: float = torch.finfo(torch.bfloat16).min
TORCH_BFLOAT16_MAX: float = torch.finfo(torch.bfloat16).max
def fp32_to_fp16_with_clamp(tensor: torch.Tensor) -> torch.Tensor:
return torch.clamp(tensor, TORCH_HALF_MIN, TORCH_HALF_MAX).half()
def fp32_to_bf16_with_clamp(tensor: torch.Tensor) -> torch.Tensor:
return torch.clamp(tensor, TORCH_BFLOAT16_MIN, TORCH_BFLOAT16_MAX).bfloat16()
def fp32_to_hfp8_with_clamp(
tensor: torch.Tensor, ebits: int = 4, mbits: int = 3, bias: int = 15
) -> torch.Tensor:
max_pos: float = (2 ** ((1 << ebits) - 2 - bias)) * (2 - 2 ** (-mbits))
return torch.ops.fbgemm.FloatToHFP8Quantized(
tensor.contiguous(),
ebits,
bias,
max_pos,
)
def fp16_to_fp32(tensor: torch.Tensor) -> torch.Tensor:
return tensor.float()
def bf16_to_fp32(tensor: torch.Tensor) -> torch.Tensor:
return tensor.view(torch.bfloat16).float()
def hfp8_to_fp32(tensor: torch.Tensor, ebits: int = 4, bias: int = 15) -> torch.Tensor:
return torch.ops.fbgemm.HFP8QuantizedToFloat(
tensor.contiguous().view(torch.uint8),
ebits,
bias,
)
def measure_fp16_quant_error(input_tensor: torch.Tensor) -> None:
# TODO: log to tensorboard
num_nan_fp32_tensor = torch.numel(input_tensor[torch.isnan(input_tensor)])
logger.info(
"num NaN in fp32 tensor: {}, ratio: {}.".format(
num_nan_fp32_tensor, num_nan_fp32_tensor / torch.numel(input_tensor)
)
)
logger.info(
"fp32 tensor profile: min: {}, max: {}, min abs:{}, max abs:{}.".format(
torch.min(input_tensor),
torch.max(input_tensor),
torch.min(torch.abs(input_tensor)),
torch.max(torch.abs(input_tensor)),
)
)
fp16_tensor = fp32_to_fp16_with_clamp(input_tensor)
num_nan_fp16_tensor = torch.numel(fp16_tensor[torch.isnan(fp16_tensor)])
logger.info(
"num NaN in fp16 tensor: {}, ratio: {}.".format(
num_nan_fp16_tensor, num_nan_fp16_tensor / torch.numel(input_tensor)
)
)
diff = torch.abs(input_tensor - fp16_tensor.float())
rel_diff = diff / torch.abs(input_tensor)
logger.info(
"fp32_to_fp16 abs error: min={}, max={}, avg={}.".format(
torch.min(diff), torch.max(diff), torch.mean(diff)
)
)
rel_diff_not_nan = rel_diff[torch.logical_not(torch.isnan(rel_diff))]
logger.info(
"fp32_to_fp16 rel error: min={}, max={}, avg={}.".format(
torch.min(rel_diff_not_nan),
torch.max(rel_diff_not_nan),
torch.mean(rel_diff_not_nan),
)
)
rel_diff_1_idx = torch.where(rel_diff == 1.0)
fp32_rel_err_1_vals = input_tensor[rel_diff_1_idx]
if torch.numel(fp32_rel_err_1_vals) > 0:
fp32_rel_err_1_vals = torch.abs(fp32_rel_err_1_vals)
logger.info(
"fp32_to_fp16 rel error == 1: fp32 min:{}, fp32 max:{}, fp32 avg:{}.".format(
torch.min(fp32_rel_err_1_vals),
torch.max(fp32_rel_err_1_vals),
torch.mean(fp32_rel_err_1_vals),
)
)
subrange_ratio = torch.numel(fp16_tensor[rel_diff_1_idx]) / torch.numel(
fp16_tensor
)
logger.info("sub fp16 range ratio: {}".format(subrange_ratio))
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from enum import Enum
from typing import Optional
import torch
from fbgemm_gpu.enums import create_enums
try:
# pyre-ignore[21]
from fbgemm_gpu import open_source # noqa: F401
except Exception:
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:cumem_utils")
# Import all uvm enums from c++ library
create_enums(globals(), torch.ops.fbgemm.fbgemm_gpu_uvm_enum_query)
def cudaMemAdvise(
t: torch.Tensor,
advice: Enum,
) -> None:
torch.ops.fbgemm.cuda_mem_advise(t, advice.value)
def cudaMemPrefetchAsync(
t: torch.Tensor,
device_t: Optional[torch.Tensor] = None,
) -> None:
torch.ops.fbgemm.cuda_mem_prefetch_async(t, device_t)
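# Hedged usage sketch (comments only): these wrappers operate on UVM tensors, e.g. ones
# created via torch.ops.fbgemm.new_managed_tensor. The enum member name below is an
# assumption about what create_enums() exposes (expected to mirror CUDA's cudaMemoryAdvise
# values); verify against the generated enums before relying on it.
#
#   t = torch.ops.fbgemm.new_managed_tensor(torch.zeros(1, device="cuda"), [1024])
#   cudaMemAdvise(t, cudaMemoryAdvise.cudaMemAdviseSetReadMostly)  # assumed member name
#   cudaMemPrefetchAsync(t)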
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
# pyre-unsafe
from math import sqrt
from typing import List
import torch
try:
# pyre-ignore[21]
from fbgemm_gpu import open_source # noqa: F401
except Exception:
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops")
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops_cpu")
def wrap_weight_to_parameter(weights: List[torch.Tensor]) -> List[torch.Tensor]:
for i, v in enumerate(weights):
if not isinstance(v, torch.nn.Parameter):
weights[i] = torch.nn.Parameter(v)
return weights
class BatchedUnaryEmbeddingBag(torch.nn.Module):
def __init__(self, num_tasks: int, hash_sizes: List[int], long_index: bool = False):
super().__init__()
self.num_tasks = num_tasks
self.hash_sizes = hash_sizes
# [N][sum(E)][1]
embedding_data = torch.randn(size=(num_tasks, sum(self.hash_sizes), 1))
self.weight = torch.nn.Parameter(embedding_data)
index_dtype = torch.int64 if long_index else torch.int32
table_offsets_tensor = torch.cat(
[
torch.tensor([0], dtype=index_dtype),
torch.cumsum(
torch.tensor(hash_sizes),
dim=0,
dtype=index_dtype,
),
]
)
self.register_buffer("table_offsets_tensor", table_offsets_tensor)
self.init_parameters()
def forward(self, offsets: torch.Tensor, input: torch.Tensor):
# output is [N][B][T]
return torch.ops.fbgemm.batched_unary_embeddings(
self.weight,
self.table_offsets_tensor,
offsets,
input,
)
@torch.jit.export
def split_embedding_weights(self):
embedding_weights = []
for n in range(self.num_tasks):
for t in range(len(self.hash_sizes)):
embedding_weights.append(
self.weight.detach()[
n,
self.table_offsets_tensor[t] : self.table_offsets_tensor[t + 1],
:,
]
)
return embedding_weights
@torch.jit.export
def init_parameters(self):
for num_emb, param in zip(
self.hash_sizes * self.num_tasks,
wrap_weight_to_parameter(self.split_embedding_weights()),
):
assert param.shape == (num_emb, 1)
param.data.uniform_(-sqrt(1 / num_emb), sqrt(1 / num_emb))
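# Hedged construction sketch: building the module and inspecting its per-task, per-table
# weight slices only needs plain PyTorch. forward() dispatches to
# torch.ops.fbgemm.batched_unary_embeddings, so calling it requires the fbgemm_gpu native
# library; the offsets/indices bag layout is assumed to follow the usual TBE convention.
if __name__ == "__main__":
    emb = BatchedUnaryEmbeddingBag(num_tasks=2, hash_sizes=[10, 20])
    slices = emb.split_embedding_weights()
    assert len(slices) == 2 * 2 and slices[0].shape == (10, 1)
    # output = emb(offsets, indices)  # expected shape: [num_tasks, batch_size, num_tables]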
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
# pyre-ignore-all-errors[56]
import enum
from dataclasses import dataclass
from typing import List, NamedTuple
# Maximum number of times prefetch() can be called without
# a corresponding forward() call
MAX_PREFETCH_DEPTH = 100
# GPU and CPU use 16-bit scale and bias for quantized embedding bags in TBE
# The total size is 2 + 2 = 4 bytes
DEFAULT_SCALE_BIAS_SIZE_IN_BYTES = 4
class EmbeddingLocation(enum.IntEnum):
DEVICE = 0
MANAGED = 1
MANAGED_CACHING = 2
HOST = 3
class CacheAlgorithm(enum.Enum):
LRU = 0
LFU = 1
class PoolingMode(enum.IntEnum):
SUM = 0
MEAN = 1
NONE = 2
class BoundsCheckMode(enum.IntEnum):
# Raise an exception (CPU) or device-side assert (CUDA)
FATAL = 0
# Log the first out-of-bounds instance per kernel, and set to zero.
WARNING = 1
# Set to zero.
IGNORE = 2
# No bounds checks.
NONE = 3
RecordCacheMetrics: NamedTuple = NamedTuple(
"RecordCacheMetrics",
[("record_cache_miss_counter", bool), ("record_tablewise_cache_miss", bool)],
)
SplitState: NamedTuple = NamedTuple(
"SplitState",
[
("dev_size", int),
("host_size", int),
("uvm_size", int),
("placements", List[EmbeddingLocation]),
("offsets", List[int]),
],
)
@dataclass
class CacheState:
# T + 1 elements and cache_hash_size_cumsum[-1] == total_cache_hash_size
cache_hash_size_cumsum: List[int]
cache_index_table_map: List[int]
total_cache_hash_size: int
def construct_cache_state(
row_list: List[int],
location_list: List[EmbeddingLocation],
feature_table_map: List[int],
) -> CacheState:
_cache_hash_size_cumsum = [0]
total_cache_hash_size = 0
for num_embeddings, location in zip(row_list, location_list):
if location == EmbeddingLocation.MANAGED_CACHING:
total_cache_hash_size += num_embeddings
_cache_hash_size_cumsum.append(total_cache_hash_size)
# [T], -1: non-cached table
cache_hash_size_cumsum = []
# [total_cache_hash_size], linear cache index -> table index
cache_index_table_map = [-1] * total_cache_hash_size
unique_feature_table_map = {}
for t, t_ in enumerate(feature_table_map):
unique_feature_table_map[t_] = t
for t_, t in unique_feature_table_map.items():
start, end = _cache_hash_size_cumsum[t_], _cache_hash_size_cumsum[t_ + 1]
cache_index_table_map[start:end] = [t] * (end - start)
cache_hash_size_cumsum = [
_cache_hash_size_cumsum[t_]
if location_list[t_] == EmbeddingLocation.MANAGED_CACHING
else -1
for t_ in feature_table_map
]
cache_hash_size_cumsum.append(total_cache_hash_size)
s = CacheState(
cache_hash_size_cumsum=cache_hash_size_cumsum,
cache_index_table_map=cache_index_table_map,
total_cache_hash_size=total_cache_hash_size,
)
return s
# NOTE: This is also defined in fbgemm_gpu.split_embedding_utils, but declaring
# target dependency on :split_embedding_utils will result in compatibility
# breakage with Caffe2 module_factory because it will pull in numpy
def round_up(a: int, b: int) -> int:
return int((a + b - 1) // b) * b
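# Hedged worked example: what construct_cache_state() and round_up() produce for a tiny
# setup; runs with the standard library only.
if __name__ == "__main__":
    rows = [10, 20, 30]
    locations = [
        EmbeddingLocation.DEVICE,
        EmbeddingLocation.MANAGED_CACHING,
        EmbeddingLocation.MANAGED_CACHING,
    ]
    cs = construct_cache_state(rows, locations, feature_table_map=[0, 1, 2])
    # Only the two MANAGED_CACHING tables contribute to the cache hash space.
    assert cs.total_cache_hash_size == 50
    # Non-cached tables are marked with -1; the trailing entry is the total size.
    assert cs.cache_hash_size_cumsum == [-1, 0, 20, 50]
    # Linear cache indices 0..19 map to table 1, 20..49 map to table 2.
    assert cs.cache_index_table_map[0] == 1 and cs.cache_index_table_map[-1] == 2
    assert round_up(10, 4) == 12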
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
# pyre-ignore-all-errors[56]
import enum
import functools
import logging
import os
from dataclasses import dataclass, field
from itertools import accumulate
from math import log2
from typing import Callable, Dict, List, Optional, Tuple, Type, Union
import torch # usort:skip
from torch import nn, Tensor # usort:skip
import fbgemm_gpu.split_embedding_codegen_lookup_invokers as invokers
from fbgemm_gpu.split_embedding_configs import EmbOptimType as OptimType, SparseType
from fbgemm_gpu.split_table_batched_embeddings_ops_common import (
BoundsCheckMode,
CacheAlgorithm,
CacheState,
construct_cache_state,
EmbeddingLocation,
MAX_PREFETCH_DEPTH,
PoolingMode,
RecordCacheMetrics,
SplitState,
)
try:
torch.ops.load_library(
"//deeplearning/fbgemm/fbgemm_gpu/codegen:embedding_ops_cuda_training"
)
torch.ops.load_library(
"//deeplearning/fbgemm/fbgemm_gpu/codegen:embedding_ops_cpu_training"
)
except Exception:
pass
DEFAULT_ASSOC = 32 if torch.version.hip is None else 64
INT8_EMB_ROW_DIM_OFFSET = 8
class DoesNotHavePrefix(Exception):
pass
class ComputeDevice(enum.IntEnum):
CPU = 0
CUDA = 1
MTIA = 2
class WeightDecayMode(enum.IntEnum):
NONE = 0
L2 = 1
DECOUPLE = 2
COUNTER = 3
class CounterWeightDecayMode(enum.IntEnum):
NONE = 0
L2 = 1
DECOUPLE = 2
class LearningRateMode(enum.IntEnum):
EQUAL = -1
TAIL_ID_LR_INCREASE = 0
TAIL_ID_LR_DECREASE = 1
COUNTER_SGD = 2
class GradSumDecay(enum.IntEnum):
NO_DECAY = -1
CTR_DECAY = 0
@dataclass
class TailIdThreshold:
val: float = 0
is_ratio: bool = False
@dataclass
class CounterBasedRegularizationDefinition:
counter_weight_decay_mode: CounterWeightDecayMode = CounterWeightDecayMode.NONE
counter_halflife: int = -1
adjustment_iter: int = -1
adjustment_ub: float = 1.0
learning_rate_mode: LearningRateMode = LearningRateMode.EQUAL
grad_sum_decay: GradSumDecay = GradSumDecay.NO_DECAY
tail_id_threshold: TailIdThreshold = field(default_factory=TailIdThreshold)
max_counter_update_freq: int = 1000
def construct_split_state(
embedding_specs: List[Tuple[int, int, EmbeddingLocation, ComputeDevice]],
rowwise: bool,
cacheable: bool,
precision: SparseType = SparseType.FP32,
int8_emb_row_dim_offset: int = INT8_EMB_ROW_DIM_OFFSET,
placement: Optional[EmbeddingLocation] = None,
) -> SplitState:
placements: List[EmbeddingLocation] = []
offsets: List[int] = []
dev_size: int = 0
host_size: int = 0
uvm_size: int = 0
for num_embeddings, embedding_dim, location, _ in embedding_specs:
assert (
embedding_dim % 4 == 0
), f"embedding_dim must be a multiple of 4, but got {embedding_dim}"
if precision == SparseType.INT8:
embedding_dim += int8_emb_row_dim_offset
state_size = num_embeddings * embedding_dim if not rowwise else num_embeddings
location = placement if placement is not None else location
if location == EmbeddingLocation.HOST:
placements.append(EmbeddingLocation.HOST)
offsets.append(host_size)
host_size += state_size
# If the table is on device, then the optimizer state is on device.
# If the table is managed, then the optimizer state is on device when it is rowwise, otherwise it is managed.
elif location == EmbeddingLocation.DEVICE or rowwise:
placements.append(EmbeddingLocation.DEVICE)
offsets.append(dev_size)
dev_size += state_size
else:
if cacheable and location == EmbeddingLocation.MANAGED_CACHING:
placements.append(EmbeddingLocation.MANAGED_CACHING)
else:
placements.append(EmbeddingLocation.MANAGED)
offsets.append(uvm_size)
uvm_size += state_size
assert len(placements) == len(offsets)
return SplitState(
dev_size=dev_size,
host_size=host_size,
uvm_size=uvm_size,
placements=placements,
offsets=offsets,
)
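# Worked example (comments only), assuming two (100, 8) tables on CUDA:
#   specs = [(100, 8, EmbeddingLocation.DEVICE, ComputeDevice.CUDA),
#            (100, 8, EmbeddingLocation.MANAGED, ComputeDevice.CUDA)]
#   construct_split_state(specs, rowwise=False, cacheable=True) yields
#   dev_size=800, host_size=0, uvm_size=800, placements=[DEVICE, MANAGED],
#   offsets=[0, 0] (each tensor starts at offset 0 of its own memory pool).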
def apply_split_helper(
persistent_state_fn: Callable[[str, Tensor], None],
set_attr_fn: Callable[
[str, Union[Tensor, List[int], List[EmbeddingLocation]]], None
],
current_device: torch.device,
use_cpu: bool,
feature_table_map: List[int],
split: SplitState,
prefix: str,
dtype: Type[torch.dtype],
enforce_hbm: bool = False,
make_dev_param: bool = False,
dev_reshape: Optional[Tuple[int, ...]] = None,
) -> None:
set_attr_fn(f"{prefix}_physical_placements", split.placements)
set_attr_fn(f"{prefix}_physical_offsets", split.offsets)
offsets = [split.offsets[t] for t in feature_table_map]
placements = [split.placements[t] for t in feature_table_map]
persistent_state_fn(
f"{prefix}_offsets",
torch.tensor(offsets, device=current_device, dtype=torch.int64),
)
persistent_state_fn(
f"{prefix}_placements",
torch.tensor(placements, device=current_device, dtype=torch.int32),
)
if split.dev_size > 0:
dev_buffer = torch.zeros(
split.dev_size,
device=current_device,
# pyre-fixme[6]
dtype=dtype,
)
dev_buffer = (
dev_buffer.view(*dev_reshape) if dev_reshape is not None else dev_buffer
)
else:
# pyre-fixme[6]
dev_buffer = torch.empty(0, device=current_device, dtype=dtype)
if make_dev_param:
set_attr_fn(f"{prefix}_dev", nn.Parameter(dev_buffer))
else:
persistent_state_fn(f"{prefix}_dev", dev_buffer)
if split.host_size > 0:
if dtype == torch.uint8:
persistent_state_fn(
f"{prefix}_host",
torch.zeros(
split.host_size,
device=current_device,
# pyre-fixme[6]: Expected `Optional[Type[torch._dtype]]` for
# 3rd param but got `Type[Type[torch._dtype]]`.
dtype=dtype,
),
)
else:
set_attr_fn(
f"{prefix}_host",
nn.Parameter(
torch.zeros(
split.host_size,
device=current_device,
# pyre-fixme[6]: Expected `Optional[Type[torch._dtype]]`
# for 3rd param but got `Type[Type[torch._dtype]]`.
dtype=dtype,
)
),
)
else:
persistent_state_fn(
f"{prefix}_host",
# pyre-fixme[6]: For 3rd param expected `dtype` but got `Type[dtype]`.
torch.empty(0, device=current_device, dtype=dtype),
)
if split.uvm_size > 0:
assert not use_cpu
if enforce_hbm:
logging.info("Enforce hbm for the cache location")
persistent_state_fn(
f"{prefix}_uvm",
torch.zeros(
split.uvm_size,
device=current_device,
# pyre-fixme[6]: Expected `Optional[Type[torch._dtype]]` for
# 3rd param but got `Type[Type[torch._dtype]]`.
dtype=dtype,
),
)
else:
persistent_state_fn(
f"{prefix}_uvm",
torch.zeros(
split.uvm_size,
out=torch.ops.fbgemm.new_managed_tensor(
# pyre-fixme[6]: Expected `Optional[Type[torch._dtype]]`
# for 3rd param but got `Type[Type[torch._dtype]]`.
torch.zeros(1, device=current_device, dtype=dtype),
[split.uvm_size],
),
),
)
else:
persistent_state_fn(
f"{prefix}_uvm",
# pyre-fixme[6]: For 3rd param expected `dtype` but got `Type[dtype]`.
torch.empty(0, device=current_device, dtype=dtype),
)
# pyre-fixme[13]: Attribute `uvm_cache_stats` is never initialized.
# pyre-fixme[13]: Attribute `local_uvm_cache_stats` is never initialized.
class SplitTableBatchedEmbeddingBagsCodegen(nn.Module):
"""
Multiple sparse features can share one embedding table.
'feature_table_map' specifies the feature-table mapping.
T: number of logical tables
T_: number of physical tables
T >= T_
For supported optimizer hyperparams, see inline comments below
"""
embedding_specs: List[Tuple[int, int, EmbeddingLocation, ComputeDevice]]
optimizer_args: invokers.lookup_args.OptimizerArgs
lxu_cache_locations_list: List[Tensor]
lxu_cache_locations_empty: Tensor
timesteps_prefetched: List[int]
record_cache_metrics: RecordCacheMetrics
uvm_cache_stats: torch.Tensor
local_uvm_cache_stats: torch.Tensor
linear_cache_indices_list: List[Tensor]
def __init__( # noqa C901
self,
embedding_specs: List[
Tuple[int, int, EmbeddingLocation, ComputeDevice]
], # tuple of (rows, dims, placements, compute_devices)
feature_table_map: Optional[List[int]] = None, # [T]
cache_algorithm: CacheAlgorithm = CacheAlgorithm.LRU,
cache_load_factor: float = 0.2,
cache_sets: int = 0,
cache_reserved_memory: float = 0.0,
cache_precision: SparseType = SparseType.FP32,
weights_precision: SparseType = SparseType.FP32,
output_dtype: SparseType = SparseType.FP32,
enforce_hbm: bool = False, # place all weights/momentums in HBM when using cache
optimizer: OptimType = OptimType.EXACT_SGD,
record_cache_metrics: Optional[RecordCacheMetrics] = None,
gather_uvm_cache_stats: Optional[bool] = False,
# General Optimizer args
stochastic_rounding: bool = True,
gradient_clipping: bool = False,
max_gradient: float = 1.0,
learning_rate: float = 0.01,
# used by EXACT_ADAGRAD, EXACT_ROWWISE_ADAGRAD, EXACT_ROWWISE_WEIGHTED_ADAGRAD, LAMB, and ADAM only
# NOTE: the default differs from the torch.optim.Adagrad default of 1e-10
eps: float = 1.0e-8,
momentum: float = 0.9, # used by LARS-SGD
# EXACT_ADAGRAD, SGD, EXACT_SGD do not support weight decay
# LAMB, ADAM, PARTIAL_ROWWISE_ADAM, PARTIAL_ROWWISE_LAMB, LARS_SGD support decoupled weight decay
# EXACT_ROWWISE_WEIGHTED_ADAGRAD supports L2 weight decay
# EXACT_ROWWISE_ADAGRAD support both L2 and decoupled weight decay (via weight_decay_mode)
weight_decay: float = 0.0,
weight_decay_mode: WeightDecayMode = WeightDecayMode.NONE,
eta: float = 0.001, # used by LARS-SGD,
beta1: float = 0.9, # used by LAMB and ADAM
beta2: float = 0.999, # used by LAMB and ADAM
counter_based_regularization: Optional[
CounterBasedRegularizationDefinition
] = None, # used by Rowwise Adagrad
pooling_mode: PoolingMode = PoolingMode.SUM,
device: Optional[Union[str, int, torch.device]] = None,
bounds_check_mode: BoundsCheckMode = BoundsCheckMode.WARNING,
uvm_non_rowwise_momentum: bool = False, # place non-rowwise momentum on UVM
use_experimental_tbe: bool = False, # set to True to use TBE v2 (only supports NVIDIA GPUs)
# Set to True to enable the prefetch pipeline; currently only the LRU cache policy is supported.
# If a separate stream is used for prefetch, the optional forward_stream arg of
# the prefetch() function should be set.
prefetch_pipeline: bool = False,
) -> None:
super(SplitTableBatchedEmbeddingBagsCodegen, self).__init__()
self.pooling_mode = pooling_mode
self.bounds_check_mode_int: int = bounds_check_mode.value
self.weights_precision = weights_precision
self.output_dtype: int = output_dtype.as_int()
assert (
not prefetch_pipeline or cache_algorithm == CacheAlgorithm.LRU
), "Only LRU cache policy supports prefetch_pipeline."
self.prefetch_pipeline: bool = prefetch_pipeline
self.lock_cache_line: bool = self.prefetch_pipeline
if record_cache_metrics is not None:
self.record_cache_metrics = record_cache_metrics
else:
self.record_cache_metrics = RecordCacheMetrics(False, False)
self.embedding_specs = embedding_specs
(rows, dims, locations, compute_devices) = zip(*embedding_specs)
T_ = len(self.embedding_specs)
self.dims: List[int] = dims
assert T_ > 0
# Mixed D is not supported by the no-bag kernels.
mixed_D = False
D = self.dims[0]
for d in self.dims:
if d != D:
mixed_D = True
break
if mixed_D:
assert (
self.pooling_mode != PoolingMode.NONE
), "Mixed dimension tables only supported for pooling tables."
assert all(
cd == compute_devices[0] for cd in compute_devices
), "Heterogenous compute_devices are NOT supported!"
# Split TBE has different function schemas for CUDA and CPU.
# For MTIA device type, it uses the CPU one.
self.use_cpu: bool = (
compute_devices[0] == ComputeDevice.CPU
or compute_devices[0] == ComputeDevice.MTIA
)
assert not self.use_cpu or all(
loc == EmbeddingLocation.HOST for loc in locations
), "ComputeDevice.CPU is only for EmbeddingLocation.HOST!"
assert self.use_cpu or all(
loc != EmbeddingLocation.HOST for loc in locations
), "EmbeddingLocation.HOST doesn't work for CUDA device!"
if self.use_cpu or self.pooling_mode == PoolingMode.NONE:
assert output_dtype in [
SparseType.FP32,
SparseType.FP16,
SparseType.BF16,
], "Fused pooled embedding quantization only supported for cuda."
if optimizer == OptimType.NONE:
assert all(
loc == EmbeddingLocation.DEVICE for loc in locations
), "OptimType.NONE supports only EmbeddingLocation.DEVICE"
assert all(
cd == ComputeDevice.CUDA for cd in compute_devices
), "OptimType.NONE supports only ComputeDevice.CUDA"
assert (
not mixed_D
), "OptimType.NONE does not support mixed embedding dimension"
if device is None:
self.current_device: torch.device = (
torch.device("cpu")
if self.use_cpu
else torch.device(torch.cuda.current_device())
)
elif isinstance(device, torch.device):
self.current_device = device
else:
self.current_device = torch.device(device)
# Add a placeholder requires_grad parameter tensor to enable autograd with int8 weights.
self.placeholder_autograd_tensor = nn.Parameter(
torch.zeros(0, device=self.current_device, dtype=torch.float)
)
self.gather_uvm_cache_stats = gather_uvm_cache_stats
# Define the size of uvm cache stats as class variable
# to make it work with torch jit script.
self.uvm_cache_stats_size = 6
# 0: N_calls, 1: N_requested_indices, 2: N_unique_indices, 3: N_unique_misses,
# 4: N_conflict_unique_misses, 5: N_conflict_misses
self.int8_emb_row_dim_offset: int = INT8_EMB_ROW_DIM_OFFSET
self.feature_table_map: List[int] = (
feature_table_map if feature_table_map is not None else list(range(T_))
)
T = len(self.feature_table_map)
assert T_ <= T
table_has_feature = [False] * T_
for t in self.feature_table_map:
table_has_feature[t] = True
assert all(table_has_feature), "Each table must have at least one feature!"
feature_dims = [dims[t] for t in self.feature_table_map]
D_offsets = [0] + list(accumulate(feature_dims))
self.total_D: int = D_offsets[-1]
self.max_D: int = max(dims)
cached_dims = [
embedding_spec[1]
for embedding_spec in embedding_specs
if embedding_spec[2] == EmbeddingLocation.MANAGED_CACHING
]
self.max_D_cache: int = max(cached_dims) if len(cached_dims) > 0 else 0
self.register_buffer(
"D_offsets",
torch.tensor(D_offsets, device=self.current_device, dtype=torch.int32),
)
hash_size_cumsum = [0] + list(accumulate(rows))
self.total_hash_size: int = int(hash_size_cumsum[-1])
if self.total_hash_size == 0:
self.total_hash_size_bits: int = 0
else:
self.total_hash_size_bits: int = int(log2(float(self.total_hash_size)) + 1)
# The last element is to easily access # of rows of each table by
# hash_size_cumsum[t + 1] - hash_size_cumsum[t]
hash_size_cumsum = [hash_size_cumsum[t] for t in self.feature_table_map] + [
self.total_hash_size
]
self.register_buffer(
"hash_size_cumsum",
torch.tensor(
hash_size_cumsum, device=self.current_device, dtype=torch.int64
),
)
self.register_buffer(
"rows_per_table",
torch.tensor(
[rows[t] for t in self.feature_table_map],
device=self.current_device,
dtype=torch.int64,
),
)
self.register_buffer(
"bounds_check_warning",
torch.tensor([0], device=self.current_device, dtype=torch.int64),
)
# Required for VBE
self.register_buffer(
"feature_dims",
torch.tensor(feature_dims, device="cpu", dtype=torch.int64),
)
weight_split = construct_split_state(
embedding_specs,
rowwise=False,
cacheable=True,
precision=weights_precision,
)
table_embedding_dtype = weights_precision.as_dtype()
self._apply_split(
weight_split,
prefix="weights",
# pyre-fixme[6]: For 3rd param expected `Type[Type[_dtype]]` but got
# `Type[_dtype]`.
dtype=table_embedding_dtype,
enforce_hbm=enforce_hbm,
make_dev_param=optimizer == OptimType.NONE,
dev_reshape=(-1, self.max_D) if optimizer == OptimType.NONE else None,
)
assert optimizer not in (
OptimType.SGD,
OptimType.ROWWISE_ADAGRAD,
), f"Optimizer {optimizer} is deprecated in the CPU + GPU modes."
if self.use_cpu:
# Construct optimizer states
assert optimizer in (
OptimType.EXACT_ADAGRAD,
OptimType.EXACT_ROWWISE_ADAGRAD,
OptimType.EXACT_ROWWISE_WEIGHTED_ADAGRAD,
OptimType.EXACT_SGD,
), f"Optimizer {optimizer} is not supported in CPU mode."
else:
assert optimizer in (
OptimType.ADAM,
OptimType.EXACT_ADAGRAD,
OptimType.EXACT_ROWWISE_ADAGRAD,
OptimType.EXACT_ROWWISE_WEIGHTED_ADAGRAD,
OptimType.EXACT_SGD,
OptimType.LAMB,
OptimType.LARS_SGD,
OptimType.PARTIAL_ROWWISE_ADAM,
OptimType.PARTIAL_ROWWISE_LAMB,
OptimType.NONE,
), f"Optimizer {optimizer} is not supported."
self.stochastic_rounding = stochastic_rounding
self.optimizer = optimizer
self.weight_decay_mode = weight_decay_mode
if (
weight_decay_mode == WeightDecayMode.COUNTER
and counter_based_regularization is None
):
raise AssertionError(
"weight_decay_mode is set to WeightDecayMode.COUNTER but counter_based_regularization is None"
)
if (
weight_decay_mode != WeightDecayMode.COUNTER
and counter_based_regularization is not None
):
raise AssertionError(
"Need to set weight_decay_mode to WeightDecayMode.COUNTER together with counter_based_regularization"
)
self._used_rowwise_adagrad_with_counter: bool = (
optimizer == OptimType.EXACT_ROWWISE_ADAGRAD
and weight_decay_mode == WeightDecayMode.COUNTER
and counter_based_regularization is not None
)
if counter_based_regularization is None:
counter_based_regularization = CounterBasedRegularizationDefinition()
self._max_counter_update_freq: int = -1
if self._used_rowwise_adagrad_with_counter:
self._max_counter_update_freq = (
counter_based_regularization.max_counter_update_freq
)
opt_arg_weight_decay_mode = (
counter_based_regularization.counter_weight_decay_mode
)
else:
opt_arg_weight_decay_mode = weight_decay_mode
self.optimizer_args = invokers.lookup_args.OptimizerArgs(
stochastic_rounding=stochastic_rounding,
gradient_clipping=gradient_clipping,
max_gradient=max_gradient,
learning_rate=learning_rate,
eps=eps,
beta1=beta1,
beta2=beta2,
weight_decay=weight_decay,
weight_decay_mode=opt_arg_weight_decay_mode.value,
eta=eta,
momentum=momentum,
counter_halflife=counter_based_regularization.counter_halflife,
adjustment_iter=counter_based_regularization.adjustment_iter,
adjustment_ub=counter_based_regularization.adjustment_ub,
learning_rate_mode=counter_based_regularization.learning_rate_mode.value,
grad_sum_decay=counter_based_regularization.grad_sum_decay.value,
tail_id_threshold=counter_based_regularization.tail_id_threshold.val,
is_tail_id_thresh_ratio=int(
counter_based_regularization.tail_id_threshold.is_ratio
),
total_hash_size=self.total_hash_size,
)
if optimizer != OptimType.NONE:
if optimizer in (OptimType.EXACT_SGD,):
# NOTE: make TorchScript work!
self._register_nonpersistent_buffers("momentum1")
else:
rowwise = optimizer in [
OptimType.EXACT_ROWWISE_ADAGRAD,
OptimType.EXACT_ROWWISE_WEIGHTED_ADAGRAD,
]
self._apply_split(
construct_split_state(
embedding_specs,
rowwise=rowwise,
cacheable=False,
placement=EmbeddingLocation.MANAGED
if ((not rowwise) and uvm_non_rowwise_momentum)
else None,
),
prefix="momentum1",
# pyre-fixme[6]: Expected `Type[Type[torch._dtype]]` for 3rd param
# but got `Type[torch.float32]`.
dtype=torch.float32,
enforce_hbm=enforce_hbm,
)
if optimizer in (
OptimType.ADAM,
OptimType.PARTIAL_ROWWISE_ADAM,
OptimType.LAMB,
OptimType.PARTIAL_ROWWISE_LAMB,
):
rowwise = optimizer in (
OptimType.PARTIAL_ROWWISE_ADAM,
OptimType.PARTIAL_ROWWISE_LAMB,
)
self._apply_split(
construct_split_state(
embedding_specs,
rowwise=rowwise,
cacheable=False,
placement=EmbeddingLocation.MANAGED
if ((not rowwise) and uvm_non_rowwise_momentum)
else None,
),
prefix="momentum2",
# pyre-fixme[6]: Expected `Type[Type[torch._dtype]]` for 3rd param
# but got `Type[torch.float32]`.
dtype=torch.float32,
)
else:
# NOTE: make TorchScript work!
self._register_nonpersistent_buffers("momentum2")
if self._used_rowwise_adagrad_with_counter:
self._apply_split(
construct_split_state(
embedding_specs,
rowwise=True,
cacheable=False,
),
prefix="prev_iter",
# TODO: ideally we should use int64 to track iter but it failed to compile.
# It may be related to low precision training code. Currently using float32
# as a workaround while investigating the issue.
# pyre-fixme[6]: Expected `Type[Type[torch._dtype]]` for 3rd param
# but got `Type[torch.float32]`.
dtype=torch.float32,
)
self._apply_split(
construct_split_state(
embedding_specs,
rowwise=True,
cacheable=False,
),
prefix="row_counter",
# pyre-fixme[6]: Expected `Type[Type[torch._dtype]]` for 3rd param
# but got `Type[torch.float32]`.
dtype=torch.float32,
)
self.register_buffer(
"max_counter", torch.tensor([1], dtype=torch.float32)
)
else:
self._register_nonpersistent_buffers("prev_iter")
self._register_nonpersistent_buffers("row_counter")
self.register_buffer(
"max_counter",
torch.ones(1, dtype=torch.float32, device=self.current_device),
persistent=False,
)
if optimizer in (
OptimType.ADAM,
OptimType.EXACT_ROWWISE_WEIGHTED_ADAGRAD,
OptimType.LAMB,
OptimType.PARTIAL_ROWWISE_ADAM,
OptimType.PARTIAL_ROWWISE_LAMB,
):
self.register_buffer(
"iter",
torch.zeros(1, dtype=torch.int64, device=self.current_device),
)
else:
self.register_buffer(
"iter",
torch.zeros(1, dtype=torch.int64, device=self.current_device),
persistent=False,
)
cache_state = construct_cache_state(rows, locations, self.feature_table_map)
# Add table-wise cache miss counter
if self.record_cache_metrics.record_tablewise_cache_miss:
num_tables = len(cache_state.cache_hash_size_cumsum) - 1
self.register_buffer(
"table_wise_cache_miss",
torch.zeros(
num_tables,
device=self.current_device,
dtype=torch.int64,
),
)
# NOTE: make TorchScript work!
else:
self.register_buffer(
"table_wise_cache_miss",
torch.zeros(
0,
device=self.current_device,
dtype=torch.int64,
),
)
if cache_precision == SparseType.FP32:
cache_embedding_dtype = torch.float32
elif cache_precision == SparseType.FP16:
cache_embedding_dtype = torch.float16
else:
raise AssertionError(f"cache_precision {cache_precision} not supported!")
self._apply_cache_state(
cache_state,
cache_algorithm,
cache_load_factor,
cache_sets,
cache_reserved_memory,
dtype=cache_embedding_dtype,
)
logging.info(
f"Using fused {optimizer} with optimizer_args={self.optimizer_args if optimizer != OptimType.NONE else None}\n"
f"Using rowwise_adagrad_with_counter={self._used_rowwise_adagrad_with_counter}"
)
self.step = 0
# Check whether to use TBE v2
is_experimental = False
fbgemm_exp_tbe = os.environ.get("FBGEMM_EXPERIMENTAL_TBE")
if use_experimental_tbe:
is_experimental = True
logging.info(
"use_experimental_tbe is set to True; Use experimental TBE: True"
)
elif fbgemm_exp_tbe is not None:
is_experimental = int(fbgemm_exp_tbe) == 1
logging.info(
f"FBGEMM_EXPERIMENTAL_TBE is set to {fbgemm_exp_tbe}; "
f"Use experimental TBE: {is_experimental}"
)
self.is_experimental: bool = is_experimental
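# Hedged construction sketch (comments only): a single CUDA table with rowwise Adagrad,
# mirroring the embedding_specs tuple layout (rows, dim, location, compute_device)
# documented above. Requires a CUDA device and the fbgemm_gpu native ops.
#   tbe = SplitTableBatchedEmbeddingBagsCodegen(
#       embedding_specs=[(1000, 16, EmbeddingLocation.DEVICE, ComputeDevice.CUDA)],
#       optimizer=OptimType.EXACT_ROWWISE_ADAGRAD,
#       learning_rate=0.05,
#   )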
def _register_nonpersistent_buffers(self, prefix: str) -> None:
# NOTE: make TorchScript work!
self.register_buffer(
f"{prefix}_dev",
torch.zeros(1, dtype=torch.int64, device=self.current_device),
persistent=False,
)
self.register_buffer(
f"{prefix}_host",
torch.zeros(1, dtype=torch.int64, device=self.current_device),
persistent=False,
)
self.register_buffer(
f"{prefix}_uvm",
torch.zeros(1, dtype=torch.int64, device=self.current_device),
persistent=False,
)
self.register_buffer(
f"{prefix}_placements",
torch.zeros(1, dtype=torch.int64, device=self.current_device),
persistent=False,
)
self.register_buffer(
f"{prefix}_offsets",
torch.zeros(1, dtype=torch.int64, device=self.current_device),
persistent=False,
)
def get_states(self, prefix: str) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]:
if not hasattr(self, f"{prefix}_physical_placements"):
raise DoesNotHavePrefix()
dev_param = getattr(self, f"{prefix}_dev")
host_param = getattr(self, f"{prefix}_host")
uvm_param = getattr(self, f"{prefix}_uvm")
placements = getattr(self, f"{prefix}_physical_placements")
offsets = getattr(self, f"{prefix}_physical_offsets")
return (
dev_param,
host_param,
uvm_param,
torch.tensor(placements, dtype=torch.int32),
torch.tensor(offsets, dtype=torch.int64),
)
def get_all_states(self) -> List[Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]]:
all_states = []
for prefix in ["weights", "momentum1", "momentum2", "prev_iter", "row_counter"]:
try:
all_states.append(self.get_states(prefix))
except DoesNotHavePrefix:
pass
return all_states
@torch.jit.export
def get_cache_miss_counter(self) -> Tensor:
# cache_miss_counter contains two items:
# The first one is cache_miss_forward_count, which records the total number of forwards that had at least one cache miss.
# The second one is unique_cache_miss_count, which records the total number of unique (deduplicated) cache misses.
return self.cache_miss_counter
@torch.jit.export
def get_table_wise_cache_miss(self) -> Tensor:
# table_wise_cache_miss contains the per-table cache miss counts for this embedding table object.
return self.table_wise_cache_miss
def forward( # noqa: C901
self,
indices: Tensor,
offsets: Tensor,
per_sample_weights: Optional[Tensor] = None,
feature_requires_grad: Optional[Tensor] = None,
# 2D tensor of batch size for each rank and feature.
# Shape (number of features, number of ranks)
batch_size_per_feature_per_rank: Optional[List[List[int]]] = None,
total_unique_indices: Optional[int] = None,
) -> Tensor:
if batch_size_per_feature_per_rank is not None:
assert (
self.optimizer == OptimType.EXACT_ROWWISE_ADAGRAD
or self.optimizer == OptimType.EXACT_SGD
), "Variable batch size TBE support is enabled for OptimType.EXACT_ROWWISE_ADAGRAD only"
assert (
self.pooling_mode != PoolingMode.NONE.value
), "Variable batch size TBE support is not enabled for PoolingMode.NONE"
# TODO: Add input check
zero_tensor = torch.zeros(1, device="cpu", dtype=torch.int32)
# Create B offsets
total_batch_size_per_feature = torch.tensor(
[sum(batch_sizes) for batch_sizes in batch_size_per_feature_per_rank],
device="cpu",
dtype=torch.int32,
)
max_B = int(total_batch_size_per_feature.max().item())
Bs = torch.concat([zero_tensor, total_batch_size_per_feature])
B_offsets = Bs.cumsum(dim=0).to(torch.int)
# Create output offsets
B_feature_rank = torch.tensor(
batch_size_per_feature_per_rank,
device="cpu",
dtype=torch.int64,
)
max_B_feature_rank = int(B_feature_rank.max().item())
# D->H only once
self.feature_dims = self.feature_dims.cpu()
output_sizes_feature_rank = B_feature_rank.transpose(
0, 1
) * self.feature_dims.view(1, -1)
output_offsets_feature_rank = torch.concat(
[
zero_tensor.to(torch.int64),
output_sizes_feature_rank.flatten().cumsum(dim=0),
]
)
output_size = int(output_offsets_feature_rank[-1].item())
# TODO: Support INT8 output
# B_offsets_rank_per_feature is for rank and (b, t) mapping
B_offsets_rank_per_feature = (
torch.tensor(
[
[0] + batch_size_per_feature
for batch_size_per_feature in batch_size_per_feature_per_rank
],
device="cpu",
dtype=torch.int32,
)
.cumsum(dim=1)
.to(torch.int)
)
B_offsets = B_offsets.to(self.current_device, non_blocking=True)
output_offsets_feature_rank = output_offsets_feature_rank.to(
self.current_device, non_blocking=True
)
B_offsets_rank_per_feature = B_offsets_rank_per_feature.to(
self.current_device, non_blocking=True
)
# TODO: Use int32 for B_offsets and int64 for output_offsets_feature_rank
vbe_metadata = invokers.lookup_args.VBEMetadata(
B_offsets=B_offsets,
output_offsets_feature_rank=output_offsets_feature_rank,
B_offsets_rank_per_feature=B_offsets_rank_per_feature,
max_B=max_B,
max_B_feature_rank=max_B_feature_rank,
output_size=output_size,
)
else:
vbe_metadata = invokers.lookup_args.VBEMetadata(
B_offsets=None,
output_offsets_feature_rank=None,
B_offsets_rank_per_feature=None,
max_B=-1,
max_B_feature_rank=-1,
output_size=-1,
)
(indices, offsets) = indices.long(), offsets.long()
if self.bounds_check_mode_int != BoundsCheckMode.NONE.value:
torch.ops.fbgemm.bounds_check_indices(
self.rows_per_table,
indices,
offsets,
self.bounds_check_mode_int,
self.bounds_check_warning,
per_sample_weights,
B_offsets=vbe_metadata.B_offsets,
max_B=vbe_metadata.max_B,
)
self.step += 1
if len(self.timesteps_prefetched) == 0:
self._prefetch(indices, offsets)
self.timesteps_prefetched.pop(0)
self.lxu_cache_locations = (
self.lxu_cache_locations_empty
if len(self.lxu_cache_locations_list) == 0
else self.lxu_cache_locations_list.pop(0)
)
common_args = invokers.lookup_args.CommonArgs(
placeholder_autograd_tensor=self.placeholder_autograd_tensor,
dev_weights=self.weights_dev,
host_weights=self.weights_host,
uvm_weights=self.weights_uvm,
lxu_cache_weights=self.lxu_cache_weights,
weights_placements=self.weights_placements,
weights_offsets=self.weights_offsets,
D_offsets=self.D_offsets,
total_D=self.total_D,
max_D=self.max_D,
hash_size_cumsum=self.hash_size_cumsum,
total_hash_size_bits=self.total_hash_size_bits,
indices=indices,
offsets=offsets,
pooling_mode=self.pooling_mode,
indice_weights=per_sample_weights,
feature_requires_grad=feature_requires_grad,
lxu_cache_locations=self.lxu_cache_locations,
output_dtype=self.output_dtype,
vbe_metadata=vbe_metadata,
is_experimental=self.is_experimental,
)
if self.optimizer == OptimType.NONE:
assert (
total_unique_indices is not None
and total_unique_indices <= indices.numel()
), f"OptimType.NONE requires total_unique_indices. Please pass it or check the value (total_unique_indices = {total_unique_indices})"
return invokers.lookup_none.invoke(
common_args, self.optimizer_args, total_unique_indices
)
elif self.optimizer == OptimType.EXACT_SGD:
return invokers.lookup_sgd.invoke(common_args, self.optimizer_args)
momentum1 = invokers.lookup_args.Momentum(
dev=self.momentum1_dev,
host=self.momentum1_host,
uvm=self.momentum1_uvm,
offsets=self.momentum1_offsets,
placements=self.momentum1_placements,
)
if self.optimizer == OptimType.LARS_SGD:
return invokers.lookup_lars_sgd.invoke(
common_args, self.optimizer_args, momentum1
)
if self.optimizer == OptimType.EXACT_ADAGRAD:
return invokers.lookup_adagrad.invoke(
common_args, self.optimizer_args, momentum1
)
momentum2 = invokers.lookup_args.Momentum(
dev=self.momentum2_dev,
host=self.momentum2_host,
uvm=self.momentum2_uvm,
offsets=self.momentum2_offsets,
placements=self.momentum2_placements,
)
# Ensure iter is always on CPU so the increment doesn't synchronize.
if not self.iter.is_cpu:
self.iter = self.iter.cpu()
self.iter[0] += 1
if self.optimizer == OptimType.EXACT_ROWWISE_WEIGHTED_ADAGRAD:
return invokers.lookup_rowwise_weighted_adagrad.invoke(
common_args,
self.optimizer_args,
momentum1,
# pyre-fixme[6]: Expected `int` for 4th param but got `Union[float,
# int]`.
self.iter.item(),
)
if self.optimizer == OptimType.ADAM:
return invokers.lookup_adam.invoke(
common_args,
self.optimizer_args,
momentum1,
momentum2,
# pyre-fixme[6]: Expected `int` for 5th param but got `Union[float,
# int]`.
self.iter.item(),
)
if self.optimizer == OptimType.PARTIAL_ROWWISE_ADAM:
return invokers.lookup_partial_rowwise_adam.invoke(
common_args,
self.optimizer_args,
momentum1,
momentum2,
# pyre-fixme[6]: Expected `int` for 5th param but got `Union[float,
# int]`.
self.iter.item(),
)
if self.optimizer == OptimType.LAMB:
return invokers.lookup_lamb.invoke(
common_args,
self.optimizer_args,
momentum1,
momentum2,
# pyre-fixme[6]: Expected `int` for 5th param but got `Union[float,
# int]`.
self.iter.item(),
)
if self.optimizer == OptimType.PARTIAL_ROWWISE_LAMB:
return invokers.lookup_partial_rowwise_lamb.invoke(
common_args,
self.optimizer_args,
momentum1,
momentum2,
# pyre-fixme[6]: Expected `int` for 5th param but got `Union[float,
# int]`.
self.iter.item(),
)
prev_iter = invokers.lookup_args.Momentum(
dev=self.prev_iter_dev,
host=self.prev_iter_host,
uvm=self.prev_iter_uvm,
offsets=self.prev_iter_offsets,
placements=self.prev_iter_placements,
)
row_counter = invokers.lookup_args.Momentum(
dev=self.row_counter_dev,
host=self.row_counter_host,
uvm=self.row_counter_uvm,
offsets=self.row_counter_offsets,
placements=self.row_counter_placements,
)
if self._used_rowwise_adagrad_with_counter:
if self.iter.item() % self._max_counter_update_freq == 0:
row_counter_dev = self.row_counter_dev.detach()
if row_counter_dev.numel() > 0:
self.max_counter[0] = torch.max(row_counter_dev).cpu().item() + 1
else:
self.max_counter[0] = 1
if self.optimizer == OptimType.EXACT_ROWWISE_ADAGRAD:
if self._used_rowwise_adagrad_with_counter:
return invokers.lookup_rowwise_adagrad_with_counter.invoke(
common_args,
self.optimizer_args,
momentum1,
prev_iter,
row_counter,
# pyre-fixme[6]: Expected `int` for 6th param but got `Union[float, int]`.
self.iter.item(),
self.max_counter.item(),
)
else:
return invokers.lookup_rowwise_adagrad.invoke(
common_args, self.optimizer_args, momentum1
)
raise ValueError(f"Invalid OptimType: {self.optimizer}")
def reset_uvm_cache_stats(self) -> None:
assert (
self.gather_uvm_cache_stats
), "gather_uvm_cache_stats should be set to true to access uvm cache stats."
self.uvm_cache_stats.zero_()
self.local_uvm_cache_stats.zero_()
def get_uvm_cache_stats(self) -> Tensor:
assert (
self.gather_uvm_cache_stats
), "gather_uvm_cache_stats should be set to true to access uvm cache stats."
return self.uvm_cache_stats
def print_uvm_cache_stats(self) -> None:
assert (
self.gather_uvm_cache_stats
), "gather_uvm_cache_stats should be set to true to access uvm cache stats."
uvm_cache_stats = self.uvm_cache_stats.tolist()
logging.info(
f"N_called: {uvm_cache_stats[0]}\n"
f"N_requested_indices: {uvm_cache_stats[1]}\n"
f"N_unique_indices: {uvm_cache_stats[2]}\n"
f"N_unique_misses: {uvm_cache_stats[3]}\n"
f"N_conflict_unique_misses: {uvm_cache_stats[4]}\n"
f"N_conflict_misses: {uvm_cache_stats[5]}\n"
)
if uvm_cache_stats[1]:
logging.info(
f"unique indices / requested indices: {uvm_cache_stats[2]/uvm_cache_stats[1]}\n"
f"unique misses / requested indices: {uvm_cache_stats[3]/uvm_cache_stats[1]}\n"
)
def prefetch(
self,
indices: Tensor,
offsets: Tensor,
forward_stream: Optional[torch.cuda.Stream] = None,
) -> None:
if self.prefetch_stream is None and forward_stream is not None:
self.prefetch_stream = torch.cuda.current_stream()
assert (
self.prefetch_stream != forward_stream
), "prefetch_stream and forward_stream should not be the same stream"
self._prefetch(indices, offsets)
if forward_stream is not None:
self._prefetch_tensors_record_stream(forward_stream)
def _prefetch(self, indices: Tensor, offsets: Tensor) -> None:
self.timestep += 1
self.timesteps_prefetched.append(self.timestep)
if not self.lxu_cache_weights.numel():
return
(indices, offsets) = indices.long(), offsets.long()
linear_cache_indices = torch.ops.fbgemm.linearize_cache_indices(
self.cache_hash_size_cumsum,
indices,
offsets,
)
if (
self.record_cache_metrics.record_cache_miss_counter
or self.record_cache_metrics.record_tablewise_cache_miss
):
lxu_cache_locations = torch.ops.fbgemm.lxu_cache_lookup(
linear_cache_indices,
self.lxu_cache_state,
self.total_cache_hash_size,
self.gather_uvm_cache_stats,
self.local_uvm_cache_stats,
)
if self.record_cache_metrics.record_cache_miss_counter:
self._update_cache_miss_counter(
lxu_cache_locations, linear_cache_indices
)
if self.record_cache_metrics.record_tablewise_cache_miss:
self._update_tablewise_cache_miss(
lxu_cache_locations, linear_cache_indices, offsets
)
if self.cache_algorithm == CacheAlgorithm.LRU:
torch.ops.fbgemm.lru_cache_populate(
self.weights_uvm,
self.cache_hash_size_cumsum,
self.total_cache_hash_size,
self.cache_index_table_map,
self.weights_offsets,
self.D_offsets,
linear_cache_indices,
self.lxu_cache_state,
self.lxu_cache_weights,
self.timestep,
self.lxu_state,
self.stochastic_rounding,
self.gather_uvm_cache_stats,
self.local_uvm_cache_stats,
self.lock_cache_line,
self.lxu_cache_locking_counter,
)
elif self.cache_algorithm == CacheAlgorithm.LFU:
torch.ops.fbgemm.lfu_cache_populate(
self.weights_uvm,
self.cache_hash_size_cumsum,
self.total_cache_hash_size,
self.cache_index_table_map,
self.weights_offsets,
self.D_offsets,
linear_cache_indices,
self.lxu_cache_state,
self.lxu_cache_weights,
self.lxu_state,
self.stochastic_rounding,
)
assert (
len(self.lxu_cache_locations_list) < self.max_prefetch_depth
), f"self.lxu_cache_locations_list has grown to size: {len(self.lxu_cache_locations_list)}, this exceeds the maximum: {self.max_prefetch_depth}. This probably indicates an error in logic where prefetch() is being called more frequently than forward()"
lxu_cache_locations = torch.ops.fbgemm.lxu_cache_lookup(
linear_cache_indices,
self.lxu_cache_state,
self.total_cache_hash_size,
self.gather_uvm_cache_stats,
self.local_uvm_cache_stats,
)
self.lxu_cache_locations_list.append(lxu_cache_locations)
if self.prefetch_pipeline:
self.linear_cache_indices_list.append(linear_cache_indices)
if self.gather_uvm_cache_stats:
# Accumulate local_uvm_cache_stats (int32) into uvm_cache_stats (int64).
# We may want to do this accumulation atomically, but since the stats are only
# used for monitoring, a slightly inaccurate result is acceptable.
self.uvm_cache_stats = torch.add(
self.uvm_cache_stats, self.local_uvm_cache_stats
)
self.local_uvm_cache_stats.zero_()
def _prefetch_tensors_record_stream(
self, forward_stream: torch.cuda.Stream
) -> None:
# Record the tensors created by prefetch stream and consumed by forward/backward
# to the forward stream. In PyTorch, each backward CUDA op runs on the same
# stream that was used for its corresponding forward op.
for t in self.lxu_cache_locations_list:
# pyre-fixme[6]: For 1st param expected `_C.Stream` but got `streams.Stream`
t.record_stream(forward_stream)
for t in self.linear_cache_indices_list:
# pyre-fixme[6]: For 1st param expected `_C.Stream` but got `streams.Stream`
t.record_stream(forward_stream)
def _update_cache_miss_counter(
self,
lxu_cache_locations: Tensor,
linear_cache_indices: Tensor,
) -> None:
CACHE_MISS = -1
CACHE_HIT = -2
cache_missed_locations = torch.where(
lxu_cache_locations == CACHE_MISS, linear_cache_indices, CACHE_HIT
)
unique_ids_list = torch.unique(cache_missed_locations)
unique_ids_count_list = torch.where(unique_ids_list == CACHE_HIT, 0, 1)
miss_count = torch.sum(unique_ids_count_list)
self.cache_miss_counter[0] += (miss_count > 0).to(torch.int64)
self.cache_miss_counter[1] += miss_count
def _update_tablewise_cache_miss(
self,
lxu_cache_locations: Tensor,
linear_cache_indices: Tensor,
offsets: Tensor,
) -> None:
CACHE_MISS = -1
CACHE_HIT = -2
num_tables = len(self.cache_hash_size_cumsum) - 1
num_offsets_per_table = (len(offsets) - 1) // num_tables
cache_missed_locations = torch.where(
lxu_cache_locations == CACHE_MISS, linear_cache_indices, CACHE_HIT
)
for i in range(num_tables):
start = offsets[i * num_offsets_per_table]
end = offsets[(i + 1) * num_offsets_per_table]
current_cache_missed_locations = cache_missed_locations[start:end]
unique_ids_list = torch.unique(current_cache_missed_locations)
unique_ids_count_list = torch.where(unique_ids_list == CACHE_HIT, 0, 1)
miss_count = torch.sum(unique_ids_count_list)
self.table_wise_cache_miss[i] += miss_count
def init_embedding_weights_uniform(self, min_val: float, max_val: float) -> None:
splits = self.split_embedding_weights()
if self.weights_precision == SparseType.INT8:
# TODO: add in-place FloatToFused8BitRowwiseQuantized conversion
for emb in splits:
assert (
len(emb.shape) == 2
), "Int8 embedding only supported for 2D weight tensors."
shape = [emb.shape[0], emb.shape[1] - self.int8_emb_row_dim_offset]
tmp_emb = torch.zeros(shape, device=self.current_device)
tmp_emb.uniform_(min_val, max_val)
tmp_emb_i8 = torch.ops.fbgemm.FloatToFused8BitRowwiseQuantized(tmp_emb)
emb.data.copy_(tmp_emb_i8)
else:
for param in splits:
param.uniform_(min_val, max_val)
@torch.jit.ignore
def split_embedding_weights(self) -> List[Tensor]:
"""
Returns a list of weights, split by table
"""
splits = []
for t, (rows, dim, _, _) in enumerate(self.embedding_specs):
if self.weights_precision == SparseType.INT8:
dim += self.int8_emb_row_dim_offset
placement = self.weights_physical_placements[t]
offset = self.weights_physical_offsets[t]
if placement == EmbeddingLocation.DEVICE.value:
weights = self.weights_dev
elif placement == EmbeddingLocation.HOST.value:
weights = self.weights_host
else:
weights = self.weights_uvm
if weights.dim() == 2:
weights = weights.flatten()
splits.append(
weights.detach()[offset : offset + rows * dim].view(rows, dim)
)
return splits
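    # Usage sketch (hypothetical, non-INT8 specs): with two tables of
    # (rows=100, dim=8) and (rows=50, dim=16), split_embedding_weights() returns
    # two tensors of shape (100, 8) and (50, 16) that view into the flattened
    # dev/host/uvm weight pools, so in-place writes update the module's weights.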
@torch.jit.ignore
def get_optimizer_buffer(self, state: str) -> torch.Tensor:
if self.optimizer == OptimType.NONE:
raise NotImplementedError(
f"Getting optimizer buffer is not supported for {self.optimizer}"
)
for name, buffer in self.named_buffers():
if name == state:
return buffer
return torch.tensor(0)
@torch.jit.export
def get_optimizer_state(self) -> List[Dict[str, torch.Tensor]]:
r"""
Get the optimizer state dict that matches the OSS Pytorch optims
TODO: populate the supported list of optimizers
"""
split_optimizer_states = self.split_optimizer_states()
if (
self.optimizer == OptimType.EXACT_ROWWISE_ADAGRAD
or self.optimizer == OptimType.EXACT_ROWWISE_WEIGHTED_ADAGRAD
or self.optimizer == OptimType.EXACT_ADAGRAD
):
list_of_state_dict = [
{"sum": states[0], "prev_iter": states[1], "row_counter": states[2]}
if self._used_rowwise_adagrad_with_counter
else {"sum": states[0]}
for states in split_optimizer_states
]
elif self.optimizer == OptimType.SGD or self.optimizer == OptimType.EXACT_SGD:
list_of_state_dict = [
{"momentum_buffer": states[0]} for states in split_optimizer_states
]
elif (
self.optimizer == OptimType.ADAM
or self.optimizer == OptimType.PARTIAL_ROWWISE_ADAM
or self.optimizer == OptimType.LAMB
or self.optimizer == OptimType.PARTIAL_ROWWISE_LAMB
):
list_of_state_dict = [
{"exp_avg": states[0], "exp_avg_sq": states[1]}
for states in split_optimizer_states
]
else:
raise NotImplementedError(
f"Getting optimizer state {self.optimizer} is not implmeneted"
)
return list_of_state_dict
@torch.jit.ignore
def split_optimizer_states(
self,
) -> List[List[torch.Tensor]]:
"""
Returns a list of states, split by table
"""
if self.optimizer == OptimType.NONE:
raise NotImplementedError(
f"Getting optimizer states is not supported for {self.optimizer}"
)
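        # The helper below slices the flattened per-placement state tensors
        # (dev/host/uvm) into one view per table: rowwise states are viewed as
        # (rows,) and element-wise states as (rows, dim).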
def get_optimizer_states(
state_dev: Tensor,
state_host: Tensor,
state_uvm: Tensor,
state_offsets: Tensor,
state_placements: Tensor,
rowwise: bool,
) -> List[torch.Tensor]:
splits = []
for t, (rows, dim, _, _) in enumerate(self.embedding_specs):
offset = state_offsets[t]
placement = state_placements[t]
if placement == EmbeddingLocation.DEVICE:
state = state_dev
elif placement == EmbeddingLocation.HOST:
state = state_host
else:
state = state_uvm
if not rowwise:
splits.append(
state.detach()[offset : offset + rows * dim].view(rows, dim)
)
else:
splits.append(state.detach()[offset : offset + rows].view(rows))
return splits
states: List[List[torch.Tensor]] = []
if self.optimizer not in (OptimType.EXACT_SGD,):
states.append(
get_optimizer_states(
self.momentum1_dev,
self.momentum1_host,
self.momentum1_uvm,
self.momentum1_physical_offsets,
self.momentum1_physical_placements,
rowwise=self.optimizer
in [
OptimType.EXACT_ROWWISE_ADAGRAD,
OptimType.EXACT_ROWWISE_WEIGHTED_ADAGRAD,
],
)
)
if self.optimizer in (
OptimType.ADAM,
OptimType.PARTIAL_ROWWISE_ADAM,
OptimType.LAMB,
OptimType.PARTIAL_ROWWISE_LAMB,
):
states.append(
get_optimizer_states(
self.momentum2_dev,
self.momentum2_host,
self.momentum2_uvm,
self.momentum2_physical_offsets,
self.momentum2_physical_placements,
rowwise=self.optimizer
in (OptimType.PARTIAL_ROWWISE_ADAM, OptimType.PARTIAL_ROWWISE_LAMB),
)
)
if self._used_rowwise_adagrad_with_counter:
states.append(
get_optimizer_states(
self.prev_iter_dev,
self.prev_iter_host,
self.prev_iter_uvm,
self.prev_iter_physical_offsets,
self.prev_iter_physical_placements,
rowwise=True,
)
)
states.append(
get_optimizer_states(
self.row_counter_dev,
self.row_counter_host,
self.row_counter_uvm,
self.row_counter_physical_offsets,
self.row_counter_physical_placements,
rowwise=True,
)
)
return_states = [list(s) for s in zip(*states)]
return return_states
@torch.jit.export
def set_learning_rate(self, lr: float) -> None:
"""
Sets the learning rate.
"""
if self.optimizer == OptimType.NONE:
raise NotImplementedError(
f"Setting learning rate is not supported for {self.optimizer}"
)
self._set_learning_rate(lr)
@torch.jit.ignore
def _set_learning_rate(self, lr: float) -> float:
"""
Helper function to script `set_learning_rate`.
Note that returning None does not work.
"""
self.optimizer_args = self.optimizer_args._replace(learning_rate=lr)
return 0.0
@torch.jit.export
def set_optimizer_step(self, step: int) -> None:
"""
Sets the optimizer step.
"""
if self.optimizer == OptimType.NONE:
raise NotImplementedError(
f"Setting optimizer step is not supported for {self.optimizer}"
)
self.iter[0] = step
@torch.jit.export
def flush(self) -> None:
if not self.lxu_cache_weights.numel():
return
torch.ops.fbgemm.lxu_cache_flush(
self.weights_uvm,
self.cache_hash_size_cumsum,
self.cache_index_table_map,
self.weights_offsets,
self.D_offsets,
self.total_D,
self.lxu_cache_state,
self.lxu_cache_weights,
self.stochastic_rounding,
)
def _apply_split(
self,
split: SplitState,
prefix: str,
dtype: Type[torch.dtype],
enforce_hbm: bool = False,
make_dev_param: bool = False,
dev_reshape: Optional[Tuple[int, ...]] = None,
) -> None:
apply_split_helper(
self.register_buffer,
functools.partial(setattr, self),
self.current_device,
self.use_cpu,
self.feature_table_map,
split,
prefix,
dtype,
enforce_hbm,
make_dev_param,
dev_reshape,
)
def _apply_cache_state(
self,
cache_state: CacheState,
cache_algorithm: CacheAlgorithm,
cache_load_factor: float,
cache_sets: int,
cache_reserved_memory: float,
dtype: torch.dtype,
) -> None:
self.cache_algorithm = cache_algorithm
self.timestep = 1
self.timesteps_prefetched = []
self.max_prefetch_depth = MAX_PREFETCH_DEPTH
self.lxu_cache_locations_list = []
self.lxu_cache_locations_empty = torch.empty(
0, device=self.current_device, dtype=torch.int32
).fill_(-1)
self.lxu_cache_locations = self.lxu_cache_locations_empty
self.prefetch_stream: Optional[torch.cuda.Stream] = None
self.linear_cache_indices_list = []
self._init_uvm_cache_stats()
# NOTE: no cache for CPU mode!
if cache_state.total_cache_hash_size == 0 or self.use_cpu:
self.register_buffer(
"lxu_cache_weights",
torch.zeros(0, 0, device=self.current_device, dtype=dtype),
)
# NOTE: make TorchScript work!
self.register_buffer(
"cache_hash_size_cumsum",
torch.zeros(1, dtype=torch.int64, device=self.current_device),
persistent=False,
)
self.register_buffer(
"total_cache_hash_size",
torch.zeros(1, dtype=torch.int64, device=self.current_device),
persistent=False,
)
self.register_buffer(
"cache_index_table_map",
torch.zeros(1, dtype=torch.int64, device=self.current_device),
persistent=False,
)
self.register_buffer(
"lxu_cache_state",
torch.zeros(1, dtype=torch.int64, device=self.current_device),
persistent=False,
)
self.register_buffer(
"lxu_state",
torch.zeros(1, dtype=torch.int64, device=self.current_device),
persistent=False,
)
self.register_buffer(
"cache_miss_counter",
torch.tensor([0, 0], dtype=torch.int64),
persistent=False,
)
self._init_uvm_cache_counter(cache_sets, persistent=False)
return
assert cache_load_factor > 0
element_size = 2 if dtype == torch.float16 else 4
if cache_sets <= 0:
total_memory = torch.cuda.get_device_properties(
self.current_device
).total_memory
free_memory = (
total_memory
- torch.cuda.memory_reserved(self.current_device)
- int(cache_reserved_memory)
)
assert free_memory > 0
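            # Size the cache to hold roughly (cache_load_factor * total hash size)
            # rows; the "+ DEFAULT_ASSOC - 1" implements a ceiling division by the
            # set associativity. E.g. (hypothetical numbers) with 1,000,000 rows,
            # load factor 0.2, and 32-way sets: ceil(200,000 / 32) = 6,250 sets.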
cache_sets = (
int(cache_state.total_cache_hash_size * cache_load_factor)
+ DEFAULT_ASSOC
- 1
) // DEFAULT_ASSOC
cache_sets = 1 if cache_sets == 0 else cache_sets
cache_size = cache_sets * DEFAULT_ASSOC * element_size * self.max_D_cache
if cache_size > free_memory:
cache_sets = (
int(1.0 * free_memory / self.max_D_cache / element_size)
+ DEFAULT_ASSOC
- 1
) // DEFAULT_ASSOC
cache_load_factor = (
1.0 * cache_sets * DEFAULT_ASSOC / int(cache_state.total_cache_hash_size)
)
assert cache_sets > 0
if cache_algorithm == CacheAlgorithm.LFU:
assert cache_sets < 2**24 - 1
cache_size = cache_sets * DEFAULT_ASSOC * element_size * self.max_D_cache
logging.info(
f"Using on-device cache with admission algorithm "
f"{cache_algorithm}, {cache_sets} sets, "
f"load_factor: {cache_load_factor : .3f}, "
f"{cache_size / 1024.0 / 1024.0 / 1024.0 : .2f}GB"
)
self.total_cache_hash_size = cache_state.total_cache_hash_size
self.register_buffer(
"cache_hash_size_cumsum",
torch.tensor(
cache_state.cache_hash_size_cumsum,
device=self.current_device,
dtype=torch.int64,
),
)
self.register_buffer(
"cache_index_table_map",
torch.tensor(
cache_state.cache_index_table_map,
device=self.current_device,
dtype=torch.int32,
),
)
self.register_buffer(
"lxu_cache_state",
torch.zeros(
cache_sets, DEFAULT_ASSOC, device=self.current_device, dtype=torch.int64
).fill_(-1),
)
self.register_buffer(
"lxu_cache_weights",
torch.zeros(
cache_sets * DEFAULT_ASSOC,
self.max_D_cache,
device=self.current_device,
dtype=dtype,
),
)
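        # lxu_state holds the cache replacement metadata: for LFU a per-index
        # access-frequency counter (hence size total_cache_hash_size + 1), and for
        # LRU a per-slot timestamp of shape (cache_sets, DEFAULT_ASSOC).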
self.register_buffer(
"lxu_state",
torch.zeros(
size=(self.total_cache_hash_size + 1,)
if cache_algorithm == CacheAlgorithm.LFU
else (cache_sets, DEFAULT_ASSOC),
device=self.current_device,
dtype=torch.int64,
),
)
self.register_buffer(
"cache_miss_counter",
torch.tensor([0, 0], device=self.current_device, dtype=torch.int64),
)
self._init_uvm_cache_counter(cache_sets, persistent=True)
if self.prefetch_pipeline:
            # Use placeholder_autograd_tensor to make sure the hook is executed
            # after the backward pass. We do not use
            # register_module_full_backward_hook due to
            # https://github.com/pytorch/pytorch/issues/100528.
self.placeholder_autograd_tensor.register_hook(
self._sync_stream_post_backward
)
self.register_full_backward_pre_hook(
self._update_cache_counter_and_locations
)
if cache_algorithm not in (CacheAlgorithm.LFU, CacheAlgorithm.LRU):
raise ValueError(
f"cache_algorithm must be {CacheAlgorithm.LRU} "
f"or {CacheAlgorithm.LFU}"
)
def _sync_stream_post_backward(
self,
grad: Tensor,
) -> None:
"""
backward hook function when prefetch_pipeline is enabled.
With the pipeline, prefetch(batch_{i+2}) may overlap with backward(batch_{i}).
There is race condition that backward(batch_i) writes to UVM memory and
at the same time prefetch(batch_{i+2}) loads UVM memory to cache. This stream sync forces
backward(batch_i) to finish before prefetch(batch_{i+2}).
"""
if self.prefetch_stream is not None:
self.prefetch_stream.wait_stream(torch.cuda.current_stream())
def _update_cache_counter_and_locations(
self,
module: nn.Module,
grad_input: Union[Tuple[Tensor, ...], Tensor],
) -> None:
"""
Backward prehook function when prefetch_pipeline is enabled.
This function does 3 things:
1. backward stream waits for prefetch stream to finish.
Otherwise the prefetch(batch_{i+1}) might overlap with backward(batch_i).
If an idx is not in cache in batch_i, but it is being inserted in batch_{i+1},
there is race condition that backward(batch_i) writes to UVM memory and
at the same time prefetch(batch_{i+1}) loads UVM memory to cache.
2. decrement the lxu_cache_locking_counter to indicate the current batch is finished.
The lxu_cache_locking_counter is updated in both prefetch and TBE backward.
As there is no overlap between prefetch and backward, we can decrement either before or
after backward. It's better to decrement before lxu_cache_locations gets updated.
3. update lxu_cache_locations to address the cache inconsistency issue.
In the case that the same index is not inserted into cache in batch_i,
but it is inserted in batch_{i+1}, the cache can be invalid in
the sense that the cached weight for this index does not have the
backward update of batch_i.
Example of the issue is as follows:
idx is in batch_i, batch_{i+1}
prefetch(batch_i)
- failed to insert idx into cache, cache_locations_batch_i of idx is -1 (cache miss)
forward(batch_i)
prefetch(batch_{i+1})
- insert idx into cache, cache is loaded from host memory
backward(batch_i)
- cache_locations_batch_i of idx is -1, the host memory is updated
forward(batch_{i+1})
- OUTPUT IS WRONG. the weight for idx is fetched from cache, but the cache is outdated.
The fix to this cache inconsistency is to update the cache_locations_batch_i before backward of batch_i,
so that the cache gets updated correctly by the backward pass of TBE.
"""
if self.prefetch_stream is not None:
# need to wait for the prefetch of next batch,
# so that cache states are valid
torch.cuda.current_stream().wait_stream(self.prefetch_stream)
torch.ops.fbgemm.lxu_cache_locking_counter_decrement(
self.lxu_cache_locking_counter,
self.lxu_cache_locations,
)
linear_cache_indices = self.linear_cache_indices_list.pop(0)
lxu_cache_locations_new = torch.ops.fbgemm.lxu_cache_lookup(
linear_cache_indices,
self.lxu_cache_state,
self.total_cache_hash_size,
False, # not collecting cache stats
self.local_uvm_cache_stats,
)
# self.lxu_cache_locations is updated inplace
torch.ops.fbgemm.lxu_cache_locations_update(
self.lxu_cache_locations,
lxu_cache_locations_new,
)
def _init_uvm_cache_counter(self, cache_sets: int, persistent: bool) -> None:
if self.prefetch_pipeline and persistent:
self.register_buffer(
"lxu_cache_locking_counter",
torch.zeros(
cache_sets,
DEFAULT_ASSOC,
device=self.current_device,
dtype=torch.int32,
),
)
else:
self.register_buffer(
"lxu_cache_locking_counter",
torch.zeros([0, 0], dtype=torch.int32, device=self.current_device),
persistent=persistent,
)
def _init_uvm_cache_stats(self) -> None:
if not self.gather_uvm_cache_stats:
            # If uvm_cache_stats is not enabled, register stub entries via buffers
            # so that TorchScript can JIT properly. Since we are not using these
            # variables, we minimize the tensor size to keep the state_dict small.
self.register_buffer(
"uvm_cache_stats",
torch.zeros(
1,
device=self.current_device,
dtype=torch.int64,
),
persistent=False,
)
self.register_buffer(
"local_uvm_cache_stats",
torch.zeros(
1,
device=self.current_device,
dtype=torch.int32,
),
persistent=False,
)
else:
self.register_buffer(
"uvm_cache_stats",
torch.zeros(
size=(self.uvm_cache_stats_size,),
device=self.current_device,
dtype=torch.int64,
),
)
self.register_buffer(
"local_uvm_cache_stats",
torch.zeros(
size=(self.uvm_cache_stats_size,),
device=self.current_device,
dtype=torch.int32,
),
)
self.reset_uvm_cache_stats()
def reset_cache_states(self) -> None:
if not self.lxu_cache_weights.numel():
return
self.lxu_cache_state.fill_(-1)
self.lxu_state.fill_(0)
self.timestep = 1
def reset_embedding_weight_momentum(
self,
pruned_indices: Tensor,
pruned_indices_offsets: Tensor,
logical_table_ids: Tensor,
buffer_ids: Tensor,
) -> None:
if self.optimizer == OptimType.NONE:
raise NotImplementedError(
f"Resetting embedding weight momentum is not supported for {self.optimizer}"
)
total_cache_hash_size = 0
if isinstance(self.total_cache_hash_size, Tensor):
total_cache_hash_size = self.total_cache_hash_size.item()
else:
total_cache_hash_size = self.total_cache_hash_size
rowwise = self.optimizer in [
OptimType.EXACT_ROWWISE_ADAGRAD,
OptimType.EXACT_ROWWISE_WEIGHTED_ADAGRAD,
]
if rowwise:
torch.ops.fbgemm.reset_weight_momentum(
dev_weights=self.weights_dev,
uvm_weights=self.weights_uvm,
lxu_cache_weights=self.lxu_cache_weights,
weights_placements=self.weights_placements,
weights_offsets=self.weights_offsets,
momentum1_dev=self.momentum1_dev,
momentum1_uvm=self.momentum1_uvm,
momentum1_placements=self.momentum1_placements,
momentum1_offsets=self.momentum1_offsets,
D_offsets=self.D_offsets,
pruned_indices=pruned_indices.to(device=self.current_device),
pruned_indices_offsets=pruned_indices_offsets.to(
device=self.current_device
),
logical_table_ids=logical_table_ids.to(device=self.current_device),
buffer_ids=buffer_ids.to(device=self.current_device),
cache_hash_size_cumsum=self.cache_hash_size_cumsum,
lxu_cache_state=self.lxu_cache_state,
total_cache_hash_size=total_cache_hash_size,
)
class DenseTableBatchedEmbeddingBagsCodegen(nn.Module):
"""
Table-batched version of nn.EmbeddingBag(sparse=False)
"""
weights: Tensor
weights_offsets: Tensor
D_offsets: Tensor
total_D: int
max_D: int
hash_size_cumsum: Tensor
total_hash_size_bits: int
embedding_specs: List[Tuple[int, int]]
def __init__(
self,
embedding_specs: List[Tuple[int, int]], # tuple of (rows, dims)
feature_table_map: Optional[List[int]] = None, # [T]
weights_precision: SparseType = SparseType.FP32,
pooling_mode: PoolingMode = PoolingMode.SUM,
use_cpu: bool = False,
output_dtype: SparseType = SparseType.FP32,
) -> None: # noqa C901 # tuple of (rows, dims,)
super(DenseTableBatchedEmbeddingBagsCodegen, self).__init__()
self.pooling_mode = pooling_mode
self.weights_precision = weights_precision
self.output_dtype: int = output_dtype.as_int()
table_embedding_dtype = weights_precision.as_dtype()
self.use_cpu = use_cpu
if self.use_cpu or self.pooling_mode == PoolingMode.NONE:
assert output_dtype in [
SparseType.FP32,
SparseType.FP16,
SparseType.BF16,
], "Fused pooled embedding quantization only supported for cuda."
# pyre-fixme[8]: Attribute has type `device`; used as `Union[int, device]`.
self.current_device: torch.device = (
torch.device("cpu") if self.use_cpu else torch.cuda.current_device()
)
self.embedding_specs = embedding_specs
(rows, dims) = zip(*embedding_specs)
T_ = len(self.embedding_specs)
assert T_ > 0
feature_table_map = (
feature_table_map if feature_table_map is not None else list(range(T_))
)
T = len(feature_table_map)
assert T_ <= T
D_offsets = [dims[t] for t in feature_table_map]
D_offsets = [0] + list(accumulate(D_offsets))
self.total_D = D_offsets[-1]
self.max_D = max(dims)
self.register_buffer(
"D_offsets",
torch.tensor(D_offsets, device=self.current_device, dtype=torch.int32),
)
assert self.D_offsets.numel() == T + 1
hash_size_cumsum = [0] + list(accumulate(rows))
if hash_size_cumsum[-1] == 0:
self.total_hash_size_bits: int = 0
else:
self.total_hash_size_bits: int = int(log2(float(hash_size_cumsum[-1])) + 1)
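        # total_hash_size_bits is the number of bits needed to address any
        # linearized (global) row index, e.g. (hypothetical) 1000 total rows ->
        # int(log2(1000) + 1) = 10 bits.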
# The last element is to easily access # of rows of each table by
# hash_size_cumsum[t + 1] - hash_size_cumsum[t]
hash_size_cumsum = [hash_size_cumsum[t] for t in feature_table_map] + [
hash_size_cumsum[-1]
]
self.register_buffer(
"hash_size_cumsum",
torch.tensor(
hash_size_cumsum, device=self.current_device, dtype=torch.int64
),
)
weights_offsets = [0] + list(
accumulate([row * dim for (row, dim) in embedding_specs])
)
self.weights = nn.Parameter(
torch.randn(
weights_offsets[-1],
device=self.current_device,
dtype=table_embedding_dtype,
)
)
for feature in range(T):
t = feature_table_map[feature]
row, dim = embedding_specs[t]
if (
self.weights[weights_offsets[t] : weights_offsets[t + 1]].numel()
!= row * dim
):
logging.info(
f"row {row} dim {dim} feature {feature} t {t} {self.weights[weights_offsets[t] : weights_offsets[t + 1]].numel()}"
)
assert (
self.weights[weights_offsets[t] : weights_offsets[t + 1]].numel()
== row * dim
)
assert self.hash_size_cumsum[feature] == sum(
row for (row, _) in embedding_specs[:t]
)
self.weights_physical_offsets: List[int] = weights_offsets
weights_offsets = [weights_offsets[t] for t in feature_table_map]
self.register_buffer(
"weights_offsets",
torch.tensor(
weights_offsets, device=self.current_device, dtype=torch.int64
),
)
def forward(
self,
indices: Tensor,
offsets: Tensor,
per_sample_weights: Optional[Tensor] = None,
feature_requires_grad: Optional[Tensor] = None,
) -> Tensor:
(indices, offsets) = indices.long(), offsets.long()
return torch.ops.fbgemm.dense_embedding_codegen_lookup_function(
dev_weights=self.weights,
weights_offsets=self.weights_offsets,
D_offsets=self.D_offsets,
total_D=self.total_D,
max_D=self.max_D,
hash_size_cumsum=self.hash_size_cumsum,
total_hash_size_bits=self.total_hash_size_bits,
indices=indices,
offsets=offsets,
pooling_mode=self.pooling_mode,
indice_weights=per_sample_weights,
feature_requires_grad=feature_requires_grad,
output_dtype=self.output_dtype,
)
@torch.jit.export
def split_embedding_weights(self) -> List[Tensor]:
"""
Returns a list of weights, split by table
"""
splits = []
for t, (rows, dim) in enumerate(self.embedding_specs):
offset = self.weights_physical_offsets[t]
splits.append(
self.weights.detach()[offset : offset + rows * dim].view(rows, dim)
)
return splits
def init_embedding_weights_uniform(self, min_val: float, max_val: float) -> None:
splits = self.split_embedding_weights()
for param in splits:
param.uniform_(min_val, max_val)
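# Minimal usage sketch for DenseTableBatchedEmbeddingBagsCodegen (hypothetical
# shapes; not part of the library):
#
#   emb = DenseTableBatchedEmbeddingBagsCodegen(
#       embedding_specs=[(1000, 8), (2000, 16)],  # (rows, dims) per table
#       use_cpu=True,
#   )
#   indices = torch.tensor([1, 5, 7, 3], dtype=torch.int64)
#   offsets = torch.tensor([0, 2, 4, 4, 4], dtype=torch.int64)  # T * B + 1 entries
#   pooled = emb(indices, offsets)  # shape (B, total_D) == (2, 24)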
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
# pyre-ignore-all-errors[56]
import logging
from itertools import accumulate
from typing import List, Optional, Tuple, Union
import torch # usort:skip
from torch import nn, Tensor # usort:skip
from fbgemm_gpu.split_embedding_configs import SparseType
from fbgemm_gpu.split_table_batched_embeddings_ops_common import (
BoundsCheckMode,
CacheAlgorithm,
CacheState,
construct_cache_state,
DEFAULT_SCALE_BIAS_SIZE_IN_BYTES,
EmbeddingLocation,
MAX_PREFETCH_DEPTH,
PoolingMode,
RecordCacheMetrics,
round_up,
SplitState,
)
try:
torch.ops.load_library(
"//deeplearning/fbgemm/fbgemm_gpu/codegen:embedding_ops_cuda_inference"
)
torch.ops.load_library(
"//deeplearning/fbgemm/fbgemm_gpu/codegen:embedding_ops_cpu_inference"
)
except Exception:
pass
def rounded_row_size_in_bytes(
dim: int,
weight_ty: SparseType,
row_alignment: int,
scale_bias_size_in_bytes: int = DEFAULT_SCALE_BIAS_SIZE_IN_BYTES,
) -> int:
r = unpadded_row_size_in_bytes(dim, weight_ty, scale_bias_size_in_bytes)
    # Align each row to a `row_alignment`-byte boundary.
return round_up(r, row_alignment)
def unpadded_row_size_in_bytes(
dim: int,
weight_ty: SparseType,
scale_bias_size_in_bytes: int = DEFAULT_SCALE_BIAS_SIZE_IN_BYTES,
) -> int:
r = {
SparseType.FP32.value: dim * 4,
SparseType.FP16.value: dim * 2,
SparseType.FP8.value: dim,
SparseType.INT8.value: dim + scale_bias_size_in_bytes,
SparseType.INT4.value: dim // 2 + scale_bias_size_in_bytes,
SparseType.INT2.value: dim // 4 + scale_bias_size_in_bytes,
}[weight_ty.value]
return r
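# Worked example (assuming the default 4-byte scale/bias header): an INT4 row with
# dim=128 occupies 128 // 2 + 4 = 68 unpadded bytes, which rounds up to 80 bytes
# with a 16-byte row_alignment.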
def align_to_cacheline(a: int) -> int:
    # Align each table to a 128-byte cache line boundary.
return round_up(a, 128)
def nbit_construct_split_state(
embedding_specs: List[Tuple[str, int, int, SparseType, EmbeddingLocation]],
cacheable: bool,
row_alignment: int,
scale_bias_size_in_bytes: int = DEFAULT_SCALE_BIAS_SIZE_IN_BYTES,
cacheline_alignment: bool = True,
) -> SplitState:
placements = torch.jit.annotate(List[EmbeddingLocation], [])
offsets = torch.jit.annotate(List[int], [])
dev_size = 0
host_size = 0
uvm_size = 0
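    # For each table, compute its (optionally cacheline-aligned) byte size and
    # accumulate it into exactly one of the host/device/UVM pools, recording the
    # placement and the byte offset within that pool.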
for _, num_embeddings, embedding_dim, weight_ty, location in embedding_specs:
embedding_dim = rounded_row_size_in_bytes(
embedding_dim, weight_ty, row_alignment, scale_bias_size_in_bytes
)
state_size = num_embeddings * embedding_dim
if cacheline_alignment:
state_size = align_to_cacheline(state_size)
if location == EmbeddingLocation.HOST:
placements.append(EmbeddingLocation.HOST)
offsets.append(host_size)
host_size += state_size
elif location == EmbeddingLocation.DEVICE:
placements.append(EmbeddingLocation.DEVICE)
offsets.append(dev_size)
dev_size += state_size
else:
if cacheable and location == EmbeddingLocation.MANAGED_CACHING:
placements.append(EmbeddingLocation.MANAGED_CACHING)
else:
placements.append(EmbeddingLocation.MANAGED)
offsets.append(uvm_size)
uvm_size += state_size
assert len(placements) == len(offsets)
return SplitState(
dev_size=dev_size,
host_size=host_size,
uvm_size=uvm_size,
placements=placements,
offsets=offsets,
)
def random_quant_scaled_tensor(shape: torch.Size, device: torch.device) -> torch.Tensor:
return torch.randint(
0,
255,
size=shape,
dtype=torch.uint8,
device=device,
)
# pyre-fixme[13]: Attribute `cache_miss_counter` is never initialized.
class IntNBitTableBatchedEmbeddingBagsCodegen(nn.Module):
"""
Table-batched version of nn.EmbeddingBag(sparse=False)
Inference version, with FP32/FP16/FP8/INT8/INT4/INT2 supports
"""
embedding_specs: List[Tuple[str, int, int, SparseType, EmbeddingLocation]]
record_cache_metrics: RecordCacheMetrics
cache_miss_counter: torch.Tensor
uvm_cache_stats: torch.Tensor
local_uvm_cache_stats: torch.Tensor
weights_offsets: torch.Tensor
weights_placements: torch.Tensor
def __init__( # noqa C901
self,
embedding_specs: List[
Tuple[str, int, int, SparseType, EmbeddingLocation]
], # tuple of (feature_names, rows, dims, SparseType, EmbeddingLocation/placement)
feature_table_map: Optional[List[int]] = None, # [T]
index_remapping: Optional[List[Tensor]] = None,
pooling_mode: PoolingMode = PoolingMode.SUM,
device: Optional[Union[str, int, torch.device]] = None,
bounds_check_mode: BoundsCheckMode = BoundsCheckMode.WARNING,
weight_lists: Optional[List[Tuple[Tensor, Optional[Tensor]]]] = None,
pruning_hash_load_factor: float = 0.5,
use_array_for_index_remapping: bool = True,
output_dtype: SparseType = SparseType.FP16,
cache_algorithm: CacheAlgorithm = CacheAlgorithm.LRU,
cache_load_factor: float = 0.2,
cache_sets: int = 0,
cache_reserved_memory: float = 0.0,
enforce_hbm: bool = False, # place all weights/momentums in HBM when using cache
record_cache_metrics: Optional[RecordCacheMetrics] = None,
gather_uvm_cache_stats: Optional[bool] = False,
row_alignment: Optional[int] = None,
fp8_exponent_bits: Optional[int] = None,
fp8_exponent_bias: Optional[int] = None,
cache_assoc: int = 32,
scale_bias_size_in_bytes: int = DEFAULT_SCALE_BIAS_SIZE_IN_BYTES,
cacheline_alignment: bool = True,
uvm_host_mapped: bool = False, # True to use cudaHostAlloc; False to use cudaMallocManaged.
) -> None: # noqa C901 # tuple of (rows, dims,)
super(IntNBitTableBatchedEmbeddingBagsCodegen, self).__init__()
        # Use 64-way cache associativity on AMD (HIP) devices.
if cache_assoc == 32 and torch.version.hip is not None:
cache_assoc = 64
if device is None:
self.current_device: torch.device = torch.device(
torch.cuda.current_device()
)
elif isinstance(device, torch.device):
self.current_device = device
else:
self.current_device = torch.device(device)
self.use_cpu: bool = self.current_device.type == "cpu"
self.scale_bias_size_in_bytes = scale_bias_size_in_bytes
self.pooling_mode = pooling_mode
self.bounds_check_mode_int: int = bounds_check_mode.value
self.embedding_specs = embedding_specs
self.output_dtype: int = output_dtype.as_int()
self.uvm_host_mapped = uvm_host_mapped
# (feature_names, rows, dims, weights_tys, locations) = zip(*embedding_specs)
# Pyre workaround
self.feature_names: List[str] = [e[0] for e in embedding_specs]
rows: List[int] = [e[1] for e in embedding_specs]
dims: List[int] = [e[2] for e in embedding_specs]
weights_tys: List[SparseType] = [e[3] for e in embedding_specs]
locations: List[EmbeddingLocation] = [e[4] for e in embedding_specs]
# if target device is meta then we set use_cpu based on the embedding location
# information in embedding_specs.
if self.current_device.type == "meta":
self.use_cpu = all(loc == EmbeddingLocation.HOST for loc in locations)
if row_alignment is None:
self.row_alignment: int = 1 if self.use_cpu else 16
else:
self.row_alignment = row_alignment
if record_cache_metrics is not None:
self.record_cache_metrics = record_cache_metrics
else:
self.record_cache_metrics = RecordCacheMetrics(False, False)
self.gather_uvm_cache_stats = gather_uvm_cache_stats
# Define the size of uvm cache stats as class variable
# to make it work with torch jit script.
self.uvm_cache_stats_size = 6
# 0: N_calls, 1: N_requested_indices, 2: N_unique_indices, 3: N_unique_misses,
# 4: N_conflict_unique_misses, 5: N_conflict_misses
# mixed D is not supported by no bag kernels
mixed_D = not all(d == dims[0] for d in dims)
if mixed_D:
assert (
self.pooling_mode != PoolingMode.NONE
), "Mixed dimension tables are only supported for pooling tables."
assert not self.use_cpu or all(
loc == EmbeddingLocation.HOST for loc in locations
), "CPU device requires EmbeddingLocation.HOST for location!"
assert self.use_cpu or all(
loc != EmbeddingLocation.HOST for loc in locations
), "EmbeddingLocation.HOST doesn't work for CUDA device!"
T_ = len(self.embedding_specs)
assert T_ > 0
self.feature_table_map: List[int] = (
feature_table_map if feature_table_map is not None else list(range(T_))
)
T = len(self.feature_table_map)
assert T_ <= T
table_has_feature = [False] * T_
for t in self.feature_table_map:
table_has_feature[t] = True
assert all(table_has_feature), "Each table must have at least one feature!"
D_offsets = [dims[t] for t in self.feature_table_map]
D_offsets = [0] + list(accumulate(D_offsets))
self.total_D: int = D_offsets[-1]
for dim, weight_ty in zip(dims, weights_tys):
if not weight_ty.is_float():
assert (
dim % (8 / weight_ty.bit_rate()) == 0
), f"For quantized types we need to at least pack at byte granularity, dim: {dim}, weight_ty: {weight_ty}"
def max_ty_D(ty: SparseType) -> int:
return max(
[dim for dim, weight_ty in zip(dims, weights_tys) if weight_ty == ty],
default=0,
)
self.max_int2_D: int = max_ty_D(SparseType.INT2)
self.max_int4_D: int = max_ty_D(SparseType.INT4)
self.max_int8_D: int = max_ty_D(SparseType.INT8)
self.max_float8_D: int = max_ty_D(SparseType.FP8)
self.max_float16_D: int = max_ty_D(SparseType.FP16)
self.max_float32_D: int = max_ty_D(SparseType.FP32)
self.register_buffer(
"D_offsets",
torch.tensor(D_offsets, device=self.current_device, dtype=torch.int32),
)
assert self.D_offsets.numel() == T + 1
self.register_buffer(
"rows_per_table",
torch.tensor(
[rows[t] for t in self.feature_table_map],
device=self.current_device,
dtype=torch.int64,
),
)
self.register_buffer(
"bounds_check_warning",
torch.tensor([0], device=self.current_device, dtype=torch.int64),
)
weights_tys_int = [weights_tys[t].as_int() for t in self.feature_table_map]
self.register_buffer(
"weights_tys",
torch.tensor(
weights_tys_int, device=self.current_device, dtype=torch.uint8
),
)
self.weight_initialized: bool = False
self.weights_dev: torch.Tensor = torch.zeros(
0,
device=self.current_device,
dtype=torch.uint8,
)
self.weights_host: torch.Tensor = torch.zeros(
0, device=self.current_device, dtype=torch.uint8
)
self.weights_uvm: torch.Tensor = torch.empty(0, dtype=torch.uint8).to(
self.current_device
)
cached_dims = [
rounded_row_size_in_bytes(
embedding_spec[2], embedding_spec[3], 16, self.scale_bias_size_in_bytes
)
for embedding_spec in self.embedding_specs
if embedding_spec[4] == EmbeddingLocation.MANAGED_CACHING
]
self.max_D_cache: int = max(cached_dims) if len(cached_dims) > 0 else 0
self.initialize_physical_weights_placements_and_offsets(cacheline_alignment)
self.enforce_hbm: bool = enforce_hbm
# Assign weights after weights and weights_offsets are initialized.
if weight_lists:
self._apply_split(
self.dev_size,
self.host_size,
self.uvm_size,
self.weights_physical_placements,
self.weights_physical_offsets,
self.enforce_hbm,
)
self.assign_embedding_weights(weight_lists)
# Handle index remapping for embedding pruning.
self.register_buffer(
"index_remappings_array_offsets",
torch.empty(0, device=self.current_device, dtype=torch.int64),
)
self.register_buffer(
"index_remappings_array",
torch.empty(0, device=self.current_device, dtype=torch.int32),
)
self.register_buffer(
"index_remapping_hash_table_offsets",
torch.empty(0, device=self.current_device, dtype=torch.int64),
)
self.register_buffer(
"index_remapping_hash_table",
torch.empty(0, device=self.current_device, dtype=torch.int32),
)
self.register_buffer(
"original_rows_per_table",
torch.empty(0, device=self.current_device, dtype=torch.int64),
)
# pyre-fixme[4]: Attribute must be annotated.
self.index_remapping_hash_table_cpu = None
if index_remapping:
self.set_index_remappings(
index_remapping, pruning_hash_load_factor, use_array_for_index_remapping
)
# Currently only support cache_precision == embedding_precision.
# Both are represented as uint8_t
cache_state = construct_cache_state(rows, locations, self.feature_table_map)
if self.record_cache_metrics.record_tablewise_cache_miss:
num_tables = len(cache_state.cache_hash_size_cumsum) - 1
self.register_buffer(
"table_wise_cache_miss",
torch.zeros(
num_tables,
device=self.current_device,
dtype=torch.int64,
),
)
# NOTE: make TorchScript work!
else:
self.register_buffer(
"table_wise_cache_miss",
torch.zeros(
0,
device=self.current_device,
dtype=torch.int64,
),
)
self.cache_assoc = cache_assoc
self._apply_cache_state(
cache_state,
cache_algorithm,
cache_load_factor,
cache_sets,
cache_reserved_memory,
)
if self.max_float8_D > 0:
default_config = SparseType.FP8.default_config()
self.fp8_exponent_bits: int = (
default_config.get("exponent_bits")
if fp8_exponent_bits is None
else fp8_exponent_bits
)
self.fp8_exponent_bias: int = (
default_config.get("exponent_bias")
if fp8_exponent_bias is None
else fp8_exponent_bias
)
else:
self.fp8_exponent_bits = -1
self.fp8_exponent_bias = -1
def get_cache_miss_counter(self) -> Tensor:
        # cache_miss_counter[0]: cache_miss_forward_count - the total number of forwards with at least one cache miss
        # cache_miss_counter[1]: unique_cache_miss_count - the total number of unique (deduplicated) cache misses
        # cache_miss_counter[2]: the total number of unique (deduplicated) accesses
        # cache_miss_counter[3]: the total number of non-deduplicated accesses
        # Cache miss ratios can be derived as:
        #   misses / unique requests: cache_miss_counter[1] / cache_miss_counter[2]
        #   misses / total accesses:  cache_miss_counter[1] / cache_miss_counter[3]
assert (
self.record_cache_metrics.record_cache_miss_counter
), "record_cache_miss_counter should be true to access counter values"
return self.cache_miss_counter
@torch.jit.export
def get_table_wise_cache_miss(self) -> Tensor:
assert (
self.record_cache_metrics.record_tablewise_cache_miss
), "record_tablewise_cache_miss should be true to access counter values"
# table_wise_cache_miss contains all the cache miss count for each table in this embedding table object:
return self.table_wise_cache_miss
def reset_cache_miss_counter(self) -> None:
assert (
self.record_cache_metrics.record_cache_miss_counter
), "record_cache_miss_counter should be true to access counter values"
self.cache_miss_counter = torch.tensor(
[0, 0, 0, 0], device=self.current_device, dtype=torch.int64
)
def reset_uvm_cache_stats(self) -> None:
assert (
self.gather_uvm_cache_stats
), "gather_uvm_cache_stats should be set to true to access uvm cache stats."
self.uvm_cache_stats.zero_()
self.local_uvm_cache_stats.zero_()
def print_cache_miss_counter(self) -> None:
assert (
self.record_cache_metrics.record_cache_miss_counter
), "record_cache_miss_counter should be true to access counter values"
logging.info(
f"\n"
f"Miss counter value [0] - # of miss occured iters : {self.cache_miss_counter[0]}, \n"
f"Miss counter value [1] - # of unique misses : {self.cache_miss_counter[1]}, \n"
f"Miss counter value [2] - # of unique requested indices : {self.cache_miss_counter[2]}, \n"
f"Miss counter value [3] - # of total requested indices : {self.cache_miss_counter[3]}, "
)
logging.info(
f"unique_miss_rate using counter : {self.cache_miss_counter[1]/self.cache_miss_counter[2]}, \n"
)
logging.info(
f"total_miss_rate using counter : {self.cache_miss_counter[1]/self.cache_miss_counter[3]}, \n"
)
def get_uvm_cache_stats(self) -> Tensor:
assert (
self.gather_uvm_cache_stats
), "gather_uvm_cache_stats should be set to true to access uvm cache stats."
return self.uvm_cache_stats
def print_uvm_cache_stats(self) -> None:
assert (
self.gather_uvm_cache_stats
), "gather_uvm_cache_stats should be set to true to access uvm cache stats."
uvm_cache_stats = self.uvm_cache_stats.tolist()
logging.info(
f"N_called: {uvm_cache_stats[0]}\n"
f"N_requested_indices: {uvm_cache_stats[1]}\n"
f"N_unique_indices: {uvm_cache_stats[2]}\n"
f"N_unique_misses: {uvm_cache_stats[3]}\n"
f"N_conflict_unique_misses: {uvm_cache_stats[4]}\n"
f"N_conflict_misses: {uvm_cache_stats[5]}\n"
)
if uvm_cache_stats[1]:
logging.info(
f"unique indices / requested indices: {uvm_cache_stats[2]/uvm_cache_stats[1]}\n"
f"unique misses / requested indices: {uvm_cache_stats[3]/uvm_cache_stats[1]}\n"
)
@torch.jit.export
def prefetch(self, indices: Tensor, offsets: Tensor) -> None:
self.timestep_counter.increment()
self.timestep_prefetch_size.increment()
if not self.lxu_cache_weights.numel():
return
# FIXME: check the int32_t range failure in https://fburl.com/gdoc/kcdnrnvg .
# The real failure should be in cache handling in https://fburl.com/ox3f26r0 .
indices, offsets = indices.long(), offsets.long()
linear_cache_indices = torch.ops.fbgemm.linearize_cache_indices(
self.cache_hash_size_cumsum,
indices,
offsets,
)
if (
self.record_cache_metrics.record_cache_miss_counter
or self.record_cache_metrics.record_tablewise_cache_miss
):
lxu_cache_locations = (
torch.ops.fbgemm.lxu_cache_lookup(
linear_cache_indices,
self.lxu_cache_state,
self.total_cache_hash_size,
)
if self.cache_assoc in [32, 64]
else torch.ops.fbgemm.direct_mapped_lxu_cache_lookup(
linear_cache_indices,
self.lxu_cache_state,
self.total_cache_hash_size,
)
)
if self.record_cache_metrics.record_cache_miss_counter:
self._update_cache_miss_counter(
lxu_cache_locations, linear_cache_indices
)
if self.record_cache_metrics.record_tablewise_cache_miss:
self._update_tablewise_cache_miss(
lxu_cache_locations, linear_cache_indices, offsets
)
if self.cache_assoc in [32, 64]:
# 64 for AMD
self.prefetch_32way(linear_cache_indices)
elif self.cache_assoc == 1:
self.prefetch_1way(linear_cache_indices)
else:
raise ValueError(f"{self.cache_assoc} not in [1, 32, 64]")
def prefetch_32way(self, linear_cache_indices: Tensor) -> None:
if self.cache_algorithm == CacheAlgorithm.LRU:
torch.ops.fbgemm.lru_cache_populate_byte(
self.weights_uvm,
self.cache_hash_size_cumsum,
self.total_cache_hash_size,
self.cache_index_table_map,
self.weights_offsets,
self.weights_tys,
self.D_offsets,
linear_cache_indices,
self.lxu_cache_state,
self.lxu_cache_weights,
self.timestep_counter.get(),
self.lxu_state,
16, # row_alignment; using default value.
self.gather_uvm_cache_stats,
self.local_uvm_cache_stats,
)
elif self.cache_algorithm == CacheAlgorithm.LFU:
torch.ops.fbgemm.lfu_cache_populate_byte(
self.weights_uvm,
self.cache_hash_size_cumsum,
self.total_cache_hash_size,
self.cache_index_table_map,
self.weights_offsets,
self.weights_tys,
self.D_offsets,
linear_cache_indices,
self.lxu_cache_state,
self.lxu_cache_weights,
self.lxu_state,
)
assert (
self.lxu_cache_locations_list.size() < self.max_prefetch_depth
), f"self.lxu_cache_locations_list has grown to size: {self.lxu_cache_locations_list.size()}, this exceeds the maximum: {self.max_prefetch_depth}. This probably indicates an error in logic where prefetch() is being called more frequently than forward()"
self.lxu_cache_locations_list.push(
torch.ops.fbgemm.lxu_cache_lookup(
linear_cache_indices,
self.lxu_cache_state,
self.total_cache_hash_size,
self.gather_uvm_cache_stats,
self.local_uvm_cache_stats,
)
)
if self.gather_uvm_cache_stats:
self._accumulate_uvm_cache_stats()
def prefetch_1way(self, linear_cache_indices: Tensor) -> None:
if self.cache_algorithm == CacheAlgorithm.LRU:
torch.ops.fbgemm.direct_mapped_lru_cache_populate_byte(
self.weights_uvm,
self.cache_hash_size_cumsum,
self.total_cache_hash_size,
self.cache_index_table_map,
self.weights_offsets,
self.weights_tys,
self.D_offsets,
linear_cache_indices,
self.lxu_cache_state,
self.lxu_cache_weights,
self.timestep_counter.get(),
self.lxu_state,
self.lxu_cache_miss_timestamp,
16, # row_alignment; using default value.
self.gather_uvm_cache_stats,
self.local_uvm_cache_stats,
)
else:
raise ValueError("Direct Mapped for LRU only")
assert (
self.lxu_cache_locations_list.size() < self.max_prefetch_depth
), f"self.lxu_cache_locations_list has grown to size: {self.lxu_cache_locations_list.size()}, this exceeds the maximum: {self.max_prefetch_depth}. This probably indicates an error in logic where prefetch() is being called more frequently than forward()"
self.lxu_cache_locations_list.push(
torch.ops.fbgemm.direct_mapped_lxu_cache_lookup(
linear_cache_indices,
self.lxu_cache_state,
self.total_cache_hash_size,
self.gather_uvm_cache_stats,
self.local_uvm_cache_stats,
)
)
if self.gather_uvm_cache_stats:
self._accumulate_uvm_cache_stats()
def _accumulate_uvm_cache_stats(self) -> None:
# Accumulate local_uvm_cache_stats (int32) into uvm_cache_stats (int64).
        # We may want to do this accumulation atomically, but since it is only
        # used for monitoring, slightly inaccurate results are acceptable.
self.uvm_cache_stats = torch.add(
self.uvm_cache_stats, self.local_uvm_cache_stats
)
self.local_uvm_cache_stats.zero_()
def _update_cache_miss_counter(
self,
lxu_cache_locations: Tensor,
linear_cache_indices: Tensor,
) -> None:
CACHE_MISS = torch.tensor([-1], device=self.current_device, dtype=torch.int32)
CACHE_HIT = torch.tensor([-2], device=self.current_device, dtype=torch.int32)
cache_missed_locations = torch.where(
lxu_cache_locations == CACHE_MISS, linear_cache_indices, CACHE_HIT
)
unique_ids_list = torch.unique(cache_missed_locations)
unique_ids_count_list = torch.where(unique_ids_list == CACHE_HIT, 0, 1)
miss_count = torch.sum(unique_ids_count_list)
self.cache_miss_counter[0] += (miss_count > 0).to(torch.int64)
self.cache_miss_counter[1] += miss_count
# Number of unique requests
        assert (
            len(linear_cache_indices.size()) == 1
        ), f"linear_cache_indices should be 1-D but was {len(linear_cache_indices.size())}-D"
        assert (
            self.cache_miss_counter.size()[0] == 4
        ), f"self.cache_miss_counter should have 4 elements but has {self.cache_miss_counter.size()[0]}"
self.cache_miss_counter[2] += torch.unique(linear_cache_indices).size()[0]
# Number of total requests
self.cache_miss_counter[3] += linear_cache_indices.size()[0]
def _update_tablewise_cache_miss(
self,
lxu_cache_locations: Tensor,
linear_cache_indices: Tensor,
offsets: Tensor,
) -> None:
CACHE_MISS = torch.tensor([-1], device=self.current_device, dtype=torch.int32)
CACHE_HIT = torch.tensor([-2], device=self.current_device, dtype=torch.int32)
num_tables = len(self.cache_hash_size_cumsum) - 1
num_offsets_per_table = (len(offsets) - 1) // num_tables
cache_missed_locations = torch.where(
lxu_cache_locations == CACHE_MISS, linear_cache_indices, CACHE_HIT
)
for i in range(num_tables):
start = offsets[i * num_offsets_per_table]
end = offsets[(i + 1) * num_offsets_per_table]
current_cache_missed_locations = cache_missed_locations[start:end]
unique_ids_list = torch.unique(current_cache_missed_locations)
unique_ids_count_list = torch.where(unique_ids_list == CACHE_HIT, 0, 1)
miss_count = torch.sum(unique_ids_count_list)
self.table_wise_cache_miss[i] += miss_count
def forward(
self,
indices: Tensor,
offsets: Tensor,
per_sample_weights: Optional[Tensor] = None,
) -> Tensor:
assert (
self.weight_initialized
), "weight needs to be initialized before forward function"
# First bound check: check if the indices/offsets are within the boundary
# of the original embedding rows before pruning.
# Note that this is only applied when we enable pruning (if the perf becomes
# an issue, we can fuse it inside the remapping kernel).
if (
self.index_remapping_hash_table_cpu is not None
or self.index_remapping_hash_table.numel() > 0
or self.index_remappings_array.numel() > 0
):
if self.bounds_check_mode_int != BoundsCheckMode.NONE.value:
torch.ops.fbgemm.bounds_check_indices(
self.original_rows_per_table,
indices,
offsets,
self.bounds_check_mode_int,
self.bounds_check_warning,
per_sample_weights,
)
        # Index remapping changes the input indices, and some of them become -1 (pruned rows).
# Hence, remapping should be done before prefetch and emb lookup
# so that these operations are with the remapped indices.
if self.index_remapping_hash_table_cpu is not None:
indices = self.index_remapping_hash_table_cpu.lookup(indices, offsets)
elif self.index_remapping_hash_table.numel() > 0:
# Convert from raw indices to pruned indices
indices = torch.ops.fbgemm.pruned_hashmap_lookup(
indices,
offsets,
self.index_remapping_hash_table,
self.index_remapping_hash_table_offsets,
)
elif self.index_remappings_array.numel() > 0:
indices = torch.ops.fbgemm.pruned_array_lookup(
indices,
offsets,
self.index_remappings_array,
self.index_remappings_array_offsets,
)
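        # If prefetch() has not been called for this step (the prefetch-depth
        # counter is not positive), run the prefetch here so that the cache
        # locations queue always has an entry to pop for the lookup below.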
if self.timestep_prefetch_size.get() <= 0:
self.prefetch(indices, offsets)
self.timestep_prefetch_size.decrement()
lxu_cache_locations = self.lxu_cache_locations_list.pop()
# Second bound check: check if the indices/offsets are within the boundary
# of the pruned embedding rows after pruning.
# Note: we cast to int as a TorchScript workaround.
if self.bounds_check_mode_int != BoundsCheckMode.NONE.value:
torch.ops.fbgemm.bounds_check_indices(
self.rows_per_table,
indices,
offsets,
self.bounds_check_mode_int,
self.bounds_check_warning,
per_sample_weights,
)
# Note: CPU and CUDA ops use the same interface to facilitate JIT IR
# generation for CUDA/CPU. For CPU op, we don't need weights_uvm and
# weights_placements
return torch.ops.fbgemm.int_nbit_split_embedding_codegen_lookup_function(
dev_weights=self.weights_host if self.host_size > 0 else self.weights_dev,
uvm_weights=self.weights_uvm,
weights_placements=self.weights_placements,
weights_offsets=self.weights_offsets,
weights_tys=self.weights_tys,
D_offsets=self.D_offsets,
total_D=self.total_D,
max_int2_D=self.max_int2_D,
max_int4_D=self.max_int4_D,
max_int8_D=self.max_int8_D,
max_float16_D=self.max_float16_D,
max_float32_D=self.max_float32_D,
indices=indices,
offsets=offsets,
pooling_mode=int(self.pooling_mode),
indice_weights=per_sample_weights,
output_dtype=self.output_dtype,
lxu_cache_weights=self.lxu_cache_weights,
lxu_cache_locations=lxu_cache_locations,
row_alignment=self.row_alignment,
max_float8_D=self.max_float8_D,
fp8_exponent_bits=self.fp8_exponent_bits,
fp8_exponent_bias=self.fp8_exponent_bias,
)
def initialize_logical_weights_placements_and_offsets(
self,
) -> None:
assert len(self.weights_physical_offsets) == len(self.embedding_specs)
assert len(self.weights_physical_offsets) == len(
self.weights_physical_placements
)
offsets = [self.weights_physical_offsets[t] for t in self.feature_table_map]
placements = [
self.weights_physical_placements[t] for t in self.feature_table_map
]
self.weights_offsets = torch.tensor(
offsets, device=self.current_device, dtype=torch.int64
)
self.weights_placements = torch.tensor(
placements, device=self.current_device, dtype=torch.int32
)
def initialize_physical_weights_placements_and_offsets(
self,
cacheline_alignment: bool = True,
) -> None:
# Initialize physical weights placements and offsets
# and host/dev/uvm sizes
weight_split: SplitState = nbit_construct_split_state(
self.embedding_specs,
cacheable=True,
row_alignment=self.row_alignment,
scale_bias_size_in_bytes=self.scale_bias_size_in_bytes,
cacheline_alignment=cacheline_alignment,
)
self.weights_physical_placements = [t.value for t in weight_split.placements]
self.weights_physical_offsets = weight_split.offsets
self.host_size = weight_split.host_size
self.dev_size = weight_split.dev_size
self.uvm_size = weight_split.uvm_size
@torch.jit.export
def reset_weights_placements_and_offsets(
self, device: torch.device, location: int
) -> None:
# Reset device/location denoted in embedding specs
self.reset_embedding_spec_location(device, location)
# Initialize all physical/logical weights placements and offsets without initializing large dev weights tensor
self.initialize_physical_weights_placements_and_offsets()
self.initialize_logical_weights_placements_and_offsets()
def reset_embedding_spec_location(
self, device: torch.device, location: int
) -> None:
# Overwrite location in embedding_specs with new location
        # Use a map since TorchScript cannot script the enum constructor call
        # (i.e., EmbeddingLocation(value)).
INT_TO_EMBEDDING_LOCATION = {
0: EmbeddingLocation.DEVICE,
1: EmbeddingLocation.MANAGED,
2: EmbeddingLocation.MANAGED_CACHING,
3: EmbeddingLocation.HOST,
}
target_location = INT_TO_EMBEDDING_LOCATION[location]
self.current_device = device
self.row_alignment = 1 if target_location == EmbeddingLocation.HOST else 16
self.embedding_specs = [
(spec[0], spec[1], spec[2], spec[3], target_location)
for spec in self.embedding_specs
]
def _apply_split(
self,
dev_size: int,
host_size: int,
uvm_size: int,
placements: List[int],
offsets: List[int],
enforce_hbm: bool,
) -> None:
assert not self.weight_initialized, "Weights have already been initialized."
self.weight_initialized = True
self.weights_physical_placements = placements
self.weights_physical_offsets = offsets
self.host_size = host_size
self.dev_size = dev_size
self.uvm_size = uvm_size
self.initialize_logical_weights_placements_and_offsets()
if dev_size > 0:
self.weights_dev = torch.zeros(
dev_size,
device=self.current_device,
dtype=torch.uint8,
)
if host_size > 0:
self.weights_host = torch.zeros(
host_size, device=self.current_device, dtype=torch.uint8
)
if uvm_size > 0:
assert not self.use_cpu
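            # When enforce_hbm is set, place the "UVM" weights in regular device
            # (HBM) memory; otherwise allocate a unified/managed tensor, optionally
            # host-mapped (cudaHostAlloc) depending on uvm_host_mapped.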
if enforce_hbm:
if not torch.jit.is_scripting():
logging.info("Enforce hbm for the cache location")
self.weights_uvm = torch.zeros(
uvm_size,
device=self.current_device,
dtype=torch.uint8,
)
else:
self.weights_uvm = torch.zeros(
uvm_size,
out=torch.ops.fbgemm.new_unified_tensor(
torch.zeros(1, device=self.D_offsets.device, dtype=torch.uint8),
[uvm_size],
self.uvm_host_mapped,
),
)
def _apply_cache_state(
self,
cache_state: CacheState,
cache_algorithm: CacheAlgorithm,
cache_load_factor: float,
cache_sets: int,
cache_reserved_memory: float,
) -> None:
assert self.cache_assoc in [
1,
32,
64,
], "Only 1-way or 32-way(64-way for AMD) implmeneted for now"
self.cache_algorithm = cache_algorithm
self.timestep_counter = torch.classes.fbgemm.AtomicCounter()
self.timestep_prefetch_size = torch.classes.fbgemm.AtomicCounter()
self.max_prefetch_depth = MAX_PREFETCH_DEPTH
if self.current_device.type == "meta":
            # To resolve the "Cannot copy out of meta tensor; no data!" error
lxu_cache_locations_empty = torch.empty(0, dtype=torch.int32).fill_(-1)
else:
lxu_cache_locations_empty = torch.empty(
0, device=self.current_device, dtype=torch.int32
).fill_(-1)
self.lxu_cache_locations_list = torch.classes.fbgemm.TensorQueue(
lxu_cache_locations_empty
)
# NOTE: no cache for CPU mode!
if cache_state.total_cache_hash_size == 0 or self.use_cpu:
self.register_buffer(
"lxu_cache_weights",
torch.zeros(0, 0, device=self.current_device, dtype=torch.uint8),
)
# NOTE: make TorchScript work!
self.register_buffer(
"cache_hash_size_cumsum",
torch.zeros(1, dtype=torch.int64, device=self.current_device),
persistent=False,
)
self.register_buffer(
"total_cache_hash_size",
torch.zeros(1, dtype=torch.int64, device=self.current_device),
persistent=False,
)
self.register_buffer(
"cache_index_table_map",
torch.zeros(1, dtype=torch.int64, device=self.current_device),
persistent=False,
)
self.register_buffer(
"lxu_cache_state",
torch.zeros(1, dtype=torch.int64, device=self.current_device),
persistent=False,
)
self.register_buffer(
"lxu_state",
torch.zeros(1, dtype=torch.int64, device=self.current_device),
persistent=False,
)
self.register_buffer(
"lxu_cache_miss_timestamp",
torch.zeros(1, dtype=torch.int64, device=self.current_device),
persistent=False,
)
self.register_buffer(
"cache_miss_counter",
torch.tensor([0, 0, 0, 0], dtype=torch.int64),
persistent=False,
)
self.register_buffer(
"uvm_cache_stats",
torch.zeros(
size=(self.uvm_cache_stats_size,),
device=self.current_device,
dtype=torch.int64,
),
persistent=False,
)
self.register_buffer(
"local_uvm_cache_stats",
torch.zeros(
size=(self.uvm_cache_stats_size,),
device=self.current_device,
dtype=torch.int32,
),
persistent=False,
)
return
assert cache_load_factor > 0
if cache_sets <= 0:
total_memory = torch.cuda.get_device_properties(
self.current_device
).total_memory
free_memory = (
total_memory
- torch.cuda.memory_reserved(self.current_device)
- int(cache_reserved_memory)
)
assert free_memory > 0
cache_sets = (
int(cache_state.total_cache_hash_size * cache_load_factor)
+ self.cache_assoc
- 1
) // self.cache_assoc
            # Note that the element size is already included in max_D_cache (which is in bytes).
cache_size = cache_sets * self.cache_assoc * self.max_D_cache
if cache_size > free_memory:
cache_sets = (
int(1.0 * free_memory / self.max_D_cache) + self.cache_assoc - 1
) // self.cache_assoc
cache_sets = 1 if cache_sets == 0 else cache_sets
cache_load_factor = (
1.0 * cache_sets * self.cache_assoc / int(cache_state.total_cache_hash_size)
)
assert cache_sets > 0
if cache_algorithm == CacheAlgorithm.LFU:
assert cache_sets < 2**24 - 1
cache_size = cache_sets * self.cache_assoc * self.max_D_cache
logging.info(
f"Using on-device cache with admission algorithm "
f"{cache_algorithm}, {cache_sets} sets, "
f"cache_load_factor: {cache_load_factor : .3f}, "
f"{cache_size / 1024.0 / 1024.0 / 1024.0 : .2f}GB"
)
self.total_cache_hash_size = cache_state.total_cache_hash_size
self.register_buffer(
"cache_hash_size_cumsum",
torch.tensor(
cache_state.cache_hash_size_cumsum,
device=self.current_device,
dtype=torch.int64,
),
)
self.register_buffer(
"cache_index_table_map",
torch.tensor(
cache_state.cache_index_table_map,
device=self.current_device,
dtype=torch.int32,
),
)
self.register_buffer(
"lxu_cache_state",
torch.zeros(
cache_sets,
self.cache_assoc,
device=self.current_device,
dtype=torch.int64,
).fill_(-1),
)
self.register_buffer(
"lxu_cache_weights",
torch.zeros(
cache_sets * self.cache_assoc,
self.max_D_cache,
device=self.current_device,
dtype=torch.uint8,
),
)
self.register_buffer(
"lxu_state",
torch.zeros(
size=(self.total_cache_hash_size + 1,)
if cache_algorithm == CacheAlgorithm.LFU
else (cache_sets, self.cache_assoc),
device=self.current_device,
dtype=torch.int64,
),
)
if self.cache_assoc == 1:
self.register_buffer(
"lxu_cache_miss_timestamp",
torch.zeros(
cache_sets,
self.cache_assoc,
device=self.current_device,
dtype=torch.int64,
),
)
else:
# make TorchScript work
self.register_buffer(
"lxu_cache_miss_timestamp",
torch.zeros(1, device=self.current_device, dtype=torch.int64),
persistent=False,
)
self.register_buffer(
"cache_miss_counter",
torch.tensor([0, 0, 0, 0], device=self.current_device, dtype=torch.int64),
)
self.register_buffer(
"uvm_cache_stats",
torch.zeros(
size=(self.uvm_cache_stats_size,),
device=self.current_device,
dtype=torch.int64,
),
persistent=False,
)
self.register_buffer(
"local_uvm_cache_stats",
torch.zeros(
size=(self.uvm_cache_stats_size,),
device=self.current_device,
dtype=torch.int32,
),
persistent=False,
)
if cache_algorithm not in (CacheAlgorithm.LFU, CacheAlgorithm.LRU):
raise ValueError(
f"cache_algorithm must be {CacheAlgorithm.LRU} "
f"or {CacheAlgorithm.LFU}"
)
if self.gather_uvm_cache_stats:
self.reset_uvm_cache_stats()
def reset_cache_states(self) -> None:
if not self.lxu_cache_weights.numel():
return
self.lxu_cache_state.fill_(-1)
self.lxu_state.fill_(0)
self.timestep_counter.reset()
@torch.jit.export
def split_embedding_weights_with_scale_bias(
self, split_scale_bias_mode: int = 1
) -> List[Tuple[Tensor, Optional[Tensor], Optional[Tensor]]]:
"""
Returns a list of weights, split by table
split_scale_bias_mode:
0: return one row;
1: return weights + scale_bias;
2: return weights, scale, bias.
"""
assert self.weight_initialized
splits: List[Tuple[Tensor, Optional[Tensor], Optional[Tensor]]] = []
for t, (_, rows, dim, weight_ty, _) in enumerate(self.embedding_specs):
placement = self.weights_physical_placements[t]
if placement == EmbeddingLocation.DEVICE.value:
weights = self.weights_dev
elif placement == EmbeddingLocation.HOST.value:
weights = self.weights_host
else:
weights = self.weights_uvm
offset = self.weights_physical_offsets[t]
weights_shifts = weights.detach()[
offset : offset
+ rows
* rounded_row_size_in_bytes(
dim, weight_ty, self.row_alignment, self.scale_bias_size_in_bytes
)
].view(
rows,
rounded_row_size_in_bytes(
dim, weight_ty, self.row_alignment, self.scale_bias_size_in_bytes
),
)
if split_scale_bias_mode == 1 or split_scale_bias_mode == 2:
# remove the padding at the end of each row.
weights_shifts = weights_shifts[
:,
: unpadded_row_size_in_bytes(
dim, weight_ty, self.scale_bias_size_in_bytes
),
]
if (
weight_ty == SparseType.INT8
or weight_ty == SparseType.INT4
or weight_ty == SparseType.INT2
):
if split_scale_bias_mode == 1:
splits.append(
(
weights_shifts[:, self.scale_bias_size_in_bytes :],
weights_shifts[:, : self.scale_bias_size_in_bytes],
None,
)
)
else: # 2
# weights_shifts: [0:2] is scale; [2:4] is bias; [4:] is real weights
splits.append(
(
weights_shifts[:, self.scale_bias_size_in_bytes :],
weights_shifts[
:, : self.scale_bias_size_in_bytes // 2
].view(torch.float16),
weights_shifts[
:,
self.scale_bias_size_in_bytes
// 2 : self.scale_bias_size_in_bytes,
].view(torch.float16),
)
)
elif (
weight_ty == SparseType.FP8
or weight_ty == SparseType.FP16
or weight_ty == SparseType.FP32
):
splits.append(
(
weights_shifts,
None,
None,
)
)
else:
raise ValueError("weight_ty is not supported")
else: # split_scale_bias_mode == 0:
splits.append((weights_shifts, None, None))
return splits
@torch.jit.export
def split_embedding_weights(
self,
split_scale_shifts: bool = True
# When true, return list of two tensors, the first with weights and
# the second with scale_bias.
# This argument should have been named split_scale_bias.
# Keep as is for backward compatibility.
) -> List[Tuple[Tensor, Optional[Tensor]]]:
"""
Returns a list of weights, split by table
"""
splits: List[
Tuple[Tensor, Optional[Tensor], Optional[Tensor]]
] = self.split_embedding_weights_with_scale_bias(
split_scale_bias_mode=(1 if split_scale_shifts else 0)
)
return [
(split_weight_scale_bias[0], split_weight_scale_bias[1])
for split_weight_scale_bias in splits
]
@torch.jit.export
def initialize_weights(self) -> None:
if not self.weight_initialized:
self._apply_split(
self.dev_size,
self.host_size,
self.uvm_size,
self.weights_physical_placements,
self.weights_physical_offsets,
self.enforce_hbm,
)
self.weight_initialized = True
def fill_random_weights(self) -> None:
"""
Fill the buffer with random weights, table by table
FIXME: make it in-place fill.
"""
self.initialize_weights()
weights = self.split_embedding_weights()
for dest_weight in weights:
dest_weight[0].copy_(
random_quant_scaled_tensor(
shape=dest_weight[0].shape, device=self.current_device
)
)
def assign_embedding_weights(
self, q_weight_list: List[Tuple[Tensor, Optional[Tensor]]]
) -> None:
"""
Copies values from the input list of (weights, scale_shifts) tuples into the tensors returned by self.split_embedding_weights().
"""
weights = self.split_embedding_weights()
assert len(q_weight_list) == len(weights)
for dest_weight, input_weight in zip(weights, q_weight_list):
dest_weight[0].copy_(input_weight[0])
if input_weight[1] is not None:
assert dest_weight[1] is not None
dest_weight[1].copy_(input_weight[1])
else:
assert dest_weight[1] is None
@torch.jit.export
def set_index_remappings_array(
self,
index_remapping: List[Tensor],
) -> None:
rows: List[int] = [e[1] for e in self.embedding_specs]
index_remappings_array_offsets = [0]
original_feature_rows = torch.jit.annotate(List[int], [])
last_offset = 0
for t, mapping in enumerate(index_remapping):
if mapping is not None:
current_original_row = mapping.numel()
last_offset += current_original_row
original_feature_rows.append(current_original_row)
else:
original_feature_rows.append(rows[t])
index_remappings_array_offsets.append(last_offset)
self.index_remappings_array_offsets = torch.tensor(
index_remappings_array_offsets,
device=self.current_device,
dtype=torch.int64,
)
if len(original_feature_rows) == 0:
original_feature_rows = rows
self.original_rows_per_table = torch.tensor(
[original_feature_rows[t] for t in self.feature_table_map],
device=self.current_device,
dtype=torch.int64,
)
if self.index_remappings_array_offsets[-1] == 0:
self.index_remappings_array = torch.empty(
0, dtype=torch.int32, device=self.current_device
)
else:
index_remappings_filter_nones = []
for mapping in index_remapping:
if mapping is not None:
index_remappings_filter_nones.append(mapping)
self.index_remappings_array = torch.cat(index_remappings_filter_nones).to(
self.current_device
)
def set_index_remappings(
self,
index_remapping: List[Tensor],
pruning_hash_load_factor: float = 0.5,
use_array_for_index_remapping: bool = True,
) -> None:
rows: List[int] = [e[1] for e in self.embedding_specs]
T = len(self.embedding_specs)
# Hash mapping pruning
if not use_array_for_index_remapping:
capacities = [
round_up(int(row * 1.0 / pruning_hash_load_factor), 32)
if index_remap is not None
else 0
for (index_remap, row) in zip(index_remapping, rows)
]
hash_table = torch.empty(
(sum(capacities), 2),
dtype=torch.int32,
)
hash_table[:, :] = -1
hash_table_offsets = torch.tensor([0] + list(accumulate(capacities))).long()
merged_index_remappings = [
mapping if mapping is not None else Tensor(list(range(row)))
for (mapping, row) in zip(index_remapping, rows)
]
original_feature_rows = [
mapping.numel() for mapping in merged_index_remappings
]
if len(original_feature_rows) == 0:
original_feature_rows = rows
self.original_rows_per_table = torch.tensor(
[original_feature_rows[t] for t in self.feature_table_map],
device=self.current_device,
dtype=torch.int64,
)
dense_indices = torch.cat(merged_index_remappings, dim=0).int()
indices = torch.cat(
[torch.arange(row) for row in original_feature_rows], dim=0
).int()
offsets = torch.tensor([0] + list(accumulate(original_feature_rows))).int()
if self.use_cpu:
self.index_remapping_hash_table_cpu = (
torch.classes.fbgemm.PrunedMapCPU()
)
self.index_remapping_hash_table_cpu.insert(
indices, dense_indices, offsets, T
)
else:
# pruned_hashmap_insert only has cpu implementation: Move dense_indices to CPU
torch.ops.fbgemm.pruned_hashmap_insert(
indices,
dense_indices.cpu(),
offsets,
hash_table,
hash_table_offsets,
)
self.index_remapping_hash_table = hash_table.to(self.current_device)
self.index_remapping_hash_table_offsets = hash_table_offsets.to(
self.current_device
)
self.index_remapping_hash_table_cpu = None
# Array mapping pruning
else:
self.set_index_remappings_array(index_remapping)
def _embedding_inplace_update_per_table(
self,
update_table_idx: int,
update_row_indices: List[int],
update_weights: Tensor,
) -> None:
row_size = len(update_row_indices)
if row_size == 0:
return
# pyre-fixme[9]: update_row_indices has type `List[int]`; used as `Tensor`.
update_row_indices = torch.tensor(
update_row_indices,
device=self.current_device,
dtype=torch.int64,
)
table_values = self.split_embedding_weights(split_scale_shifts=False)[
update_table_idx
]
table_values[0].scatter_(
dim=0,
# pyre-fixme[16]: `List` has no attribute `view`.
index=update_row_indices.view(row_size, 1).expand_as(update_weights),
src=update_weights,
)
@torch.jit.export
def embedding_inplace_update(
self,
update_table_indices: List[int],
update_row_indices: List[List[int]],
update_weights: List[Tensor],
) -> None:
for i in range(len(update_table_indices)):
self._embedding_inplace_update_per_table(
update_table_indices[i],
update_row_indices[i],
update_weights[i],
)
def embedding_inplace_update_internal(
self,
update_table_indices: List[int],
update_row_indices: List[int],
update_weights: Tensor,
) -> None:
assert len(update_table_indices) == len(update_row_indices)
update_offsets = []
update_offset = 0
for table_idx in update_table_indices:
D_bytes = rounded_row_size_in_bytes(
self.embedding_specs[table_idx][2],
self.embedding_specs[table_idx][3],
self.row_alignment,
self.scale_bias_size_in_bytes,
)
update_offsets.append(update_offset)
update_offset += D_bytes
update_offsets.append(update_offset)
# pyre-fixme[9]: update_table_indices has type `List[int]`; used as `Tensor`.
update_table_indices = torch.tensor(
update_table_indices,
device=self.current_device,
dtype=torch.int32,
)
# pyre-fixme[9]: update_row_indices has type `List[int]`; used as `Tensor`.
update_row_indices = torch.tensor(
update_row_indices,
device=self.current_device,
dtype=torch.int64,
)
update_offsets = torch.tensor(
update_offsets,
device=self.current_device,
dtype=torch.int64,
)
# Only support array based pruning for now.
assert self.index_remapping_hash_table_cpu is None
assert self.index_remapping_hash_table.numel() == 0
assert self.index_remappings_array.numel() >= 0
if self.index_remappings_array.numel() > 0:
update_row_indices = torch.ops.fbgemm.pruned_array_lookup_from_row_idx(
update_row_indices,
update_table_indices,
self.index_remappings_array,
self.index_remappings_array_offsets,
)
lxu_cache_locations = None
if self.lxu_cache_weights.numel() > 0:
linear_cache_indices = (
torch.ops.fbgemm.linearize_cache_indices_from_row_idx(
self.cache_hash_size_cumsum,
update_table_indices,
update_row_indices,
)
)
if self.cache_assoc in [32, 64]:
# 64 for AMD
self.prefetch_32way(linear_cache_indices)
elif self.cache_assoc == 1:
self.prefetch_1way(linear_cache_indices)
else:
raise ValueError(f"{self.cache_assoc} not in [1, 32, 64]")
lxu_cache_locations = self.lxu_cache_locations_list.pop()
torch.ops.fbgemm.emb_inplace_update(
dev_weights=self.weights_host if self.host_size > 0 else self.weights_dev,
uvm_weights=self.weights_uvm,
weights_placements=self.weights_placements,
weights_offsets=self.weights_offsets,
weights_tys=self.weights_tys,
D_offsets=self.D_offsets,
update_weights=update_weights,
update_table_indices=update_table_indices,
update_row_indices=update_row_indices,
update_offsets=update_offsets,
row_alignment=self.row_alignment,
lxu_cache_weights=self.lxu_cache_weights,
lxu_cache_locations=lxu_cache_locations,
)
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import fbgemm_gpu
import fbgemm_gpu.split_table_batched_embeddings_ops_training
import torch # usort:skip
Tensor = torch.Tensor
def add_docs(method, docstr):
method.__doc__ = docstr
add_docs(
torch.ops.fbgemm.jagged_2d_to_dense,
"""
jagged_2d_to_dense(values, x_offsets, max_sequence_length) -> Tensor
Converts a jagged tensor with a 2D values array into a dense tensor, padding with zeros.
Args:
values (Tensor): 2D tensor containing the values of the jagged tensor.
x_offsets (Tensor): 1D tensor containing the starting point of each jagged row in the values tensor.
max_sequence_length (int): Maximum length of any row in the jagged dimension.
Returns:
Tensor: The padded dense tensor
Example:
>>> values = torch.tensor([[1,1],[2,2],[3,3],[4,4]])
>>> x_offsets = torch.tensor([0, 1, 3])
>>> torch.ops.fbgemm.jagged_2d_to_dense(values, x_offsets, 3)
tensor([[[1, 1],
[0, 0],
[0, 0]],
[[2, 2],
[3, 3],
[0, 0]]])
""",
)
add_docs(
torch.ops.fbgemm.jagged_1d_to_dense,
"""
jagged_1d_to_dense(values, offsets, max_sequence_length, padding_value) -> Tensor
Converts a jagged tensor with a 1D values array into a dense tensor, padding with a specified padding value.
Args:
values (Tensor): 1D tensor containing the values of the jagged tensor.
offsets (Tensor): 1D tensor containing the starting point of each jagged row in the values tensor.
max_sequence_length (int): Maximum length of any row in the jagged dimension.
padding_value (int): Value to set in the empty areas of the dense output, outside of the jagged tensor coverage.
Returns:
Tensor: the padded dense tensor
Example:
>>> values = torch.tensor([1,2,3,4])
>>> offsets = torch.tensor([0, 1, 3])
>>> torch.ops.fbgemm.jagged_1d_to_dense(values, offsets, 3, 0)
tensor([[1, 0, 0],
[2, 3, 0]])
""",
)
add_docs(
torch.ops.fbgemm.dense_to_jagged,
"""
dense_to_jagged(dense, x_offsets, total_L) -> (Tensor, Tensor[])
Converts a dense tensor into a jagged tensor, given the desired offsets of the resulting jagged tensor.
Args:
dense (Tensor): A dense input tensor to be converted
x_offsets (Tensor[]): A list of jagged offset tensors, one for each jagged dimension.
total_L (int, Optional): Total number of values in the resulting jagged tensor.
Returns:
(Tensor, Tensor[]): Values and offsets of the resulting jagged tensor. Offsets are identical to those that were input.
Example:
>>> dense = torch.tensor([[[1, 1], [0, 0], [0, 0]], [[2, 2], [3, 3], [0, 0]]])
>>> x_offsets = torch.tensor([0, 1, 3])
>>> torch.ops.fbgemm.dense_to_jagged(dense, [x_offsets])
(tensor([[1, 1],
[2, 2],
[3, 3]]), [tensor([0, 1, 3])])
""",
)
add_docs(
torch.ops.fbgemm.jagged_to_padded_dense,
"""
jagged_to_padded_dense(values, offsets, max_lengths, padding_value=0) -> Tensor
Converts a jagged tensor into a dense tensor, padding with a specified padding value.
Args:
values (Tensor): Jagged tensor values
offsets (Tensor[]): A list of jagged offset tensors, one for each jagged dimension.
max_lengths (int[]): A list with max_length for each jagged dimension.
padding_value (float): Value to set in the empty areas of the dense output, outside of the jagged tensor coverage.
Returns:
Tensor: the padded dense tensor
Example:
>>> values = torch.tensor([[1,1],[2,2],[3,3],[4,4]])
>>> offsets = torch.tensor([0, 1, 3])
>>> torch.ops.fbgemm.jagged_to_padded_dense(values, [offsets], [3], 7)
tensor([[[1, 1],
[7, 7],
[7, 7]],
[[2, 2],
[3, 3],
[7, 7]]])
""",
)
add_docs(
torch.ops.fbgemm.jagged_dense_elementwise_add,
"""
jagged_dense_elementwise_add(x_values, x_offsets, y) -> Tensor
Adds a jagged tensor to a dense tensor, resulting in a dense tensor. The jagged
input is padded with zeros for the purposes of the addition.
Args:
x_values (Tensor): Jagged tensor values
x_offsets (Tensor[]): A list of jagged offset tensors, one for each jagged dimension.
y (Tensor): A dense tensor
Returns:
Tensor: The sum of jagged input tensor + y
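Example (illustrative sketch; the expected result is described rather than
shown verbatim, and the inputs below are arbitrary):
>>> x_values = torch.tensor([[1., 1.], [2., 2.], [3., 3.]])
>>> x_offsets = torch.tensor([0, 1, 3])
>>> y = torch.ones(2, 3, 2)
>>> # The jagged input densifies (with zero padding) to
>>> # [[[1, 1], [0, 0], [0, 0]], [[2, 2], [3, 3], [0, 0]]],
>>> # so the result is expected to equal that dense tensor plus y.
>>> out = torch.ops.fbgemm.jagged_dense_elementwise_add(x_values, [x_offsets], y)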
""",
)
add_docs(
torch.ops.fbgemm.jagged_dense_elementwise_add_jagged_output,
"""
jagged_dense_elementwise_add_jagged_output(x_values, x_offsets, y) -> (Tensor, Tensor[])
Adds a jagged tensor to a dense tensor, resulting in a jagged tensor with the same structure as the input jagged tensor.
Args:
x_values (Tensor): Jagged tensor values
x_offsets (Tensor[]): A list of jagged offset tensors, one for each jagged dimension.
y (Tensor): A dense tensor
Returns:
(Tensor, Tensor[]): Values and offsets of the resulting jagged tensor. Offsets are identical to those that were input.
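Example (illustrative sketch; the expected result is described rather than
shown verbatim, and the inputs below are arbitrary):
>>> x_values = torch.tensor([[1., 1.], [2., 2.], [3., 3.]])
>>> x_offsets = torch.tensor([0, 1, 3])
>>> y = torch.ones(2, 3, 2)
>>> # Only positions covered by the jagged tensor participate, so the returned
>>> # values are expected to be x_values + 1 with the same offsets.
>>> out_values, out_offsets = torch.ops.fbgemm.jagged_dense_elementwise_add_jagged_output(
>>>     x_values, [x_offsets], y
>>> )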
""",
)
add_docs(
torch.ops.fbgemm.jagged_dense_dense_elementwise_add_jagged_output,
"""
jagged_dense_dense_elementwise_add_jagged_output(x_values, x_offsets, y_0, y_1) -> (Tensor, Tensor[])
Adds a jagged tensor to the sum of two dense tensors, resulting in a jagged tensor with the same structure as the input jagged tensor.
Args:
x_values (Tensor): Jagged tensor values
x_offsets (Tensor[]): A list of jagged offset tensors, one for each jagged dimension.
y_0 (Tensor): A dense tensor
y_1 (Tensor): A dense tensor
Returns:
(Tensor, Tensor[]): Values and offsets of the resulting jagged tensor. Offsets are identical to those that were input.
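Example (illustrative sketch; the expected result is described rather than
shown verbatim, and the inputs below are arbitrary):
>>> x_values = torch.tensor([[1., 1.], [2., 2.], [3., 3.]])
>>> x_offsets = torch.tensor([0, 1, 3])
>>> y_0 = torch.ones(2, 3, 2)
>>> y_1 = torch.ones(2, 3, 2)
>>> # y_0 and y_1 are summed first, so the returned jagged values are expected
>>> # to be x_values + 2 with unchanged offsets.
>>> out_values, out_offsets = torch.ops.fbgemm.jagged_dense_dense_elementwise_add_jagged_output(
>>>     x_values, [x_offsets], y_0, y_1
>>> )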
""",
)
add_docs(
torch.ops.fbgemm.jagged_dense_elementwise_mul,
"""
jagged_dense_elementwise_mul(x_values, x_offsets, y) -> (Tensor, Tensor[])
Elementwise-multiplies a jagged tensor with a dense tensor, resulting in a jagged tensor with the same structure as the input jagged tensor.
Args:
x_values (Tensor): Jagged tensor values
x_offsets (Tensor[]): A list of jagged offset tensors, one for each jagged dimension.
y (Tensor): A dense tensor
Returns:
(Tensor, Tensor[]): Values and offsets of the resulting jagged tensor. Offsets are identical to those that were input.
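Example (illustrative sketch; the expected result is described rather than
shown verbatim, and the inputs below are arbitrary):
>>> x_values = torch.tensor([[1., 1.], [2., 2.], [3., 3.]])
>>> x_offsets = torch.tensor([0, 1, 3])
>>> y = torch.full((2, 3, 2), 2.)
>>> # Each jagged value is multiplied by the dense value at the same position,
>>> # so the returned values are expected to be 2 * x_values with unchanged offsets.
>>> out_values, out_offsets = torch.ops.fbgemm.jagged_dense_elementwise_mul(
>>>     x_values, [x_offsets], y
>>> )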
""",
)
add_docs(
fbgemm_gpu.split_table_batched_embeddings_ops_training.SplitTableBatchedEmbeddingBagsCodegen,
"""
SplitTableBatchedEmbeddingBagsCodegen(embedding_specs, feature_table_map=None, cache_algorithm=CacheAlgorithm.LRU, cache_load_factor=0.2, cache_sets=0, cache_reserved_memory=0.0, cache_precision=SparseType.FP32, weights_precision=SparseType.FP32, output_dtype=SparseType.FP32, enforce_hbm=False, optimizer=OptimType.EXACT_SGD, record_cache_metrics=None, stochastic_rounding=True, gradient_clipping=False, max_gradient=1.0, learning_rate=0.01, eps=1.0e-8, momentum=0.9, weight_decay=0.0, weight_decay_mode=WeightDecayMode.NONE, eta=0.001, beta1=0.9, beta2=0.999, pooling_mode=PoolingMode.SUM, device=None, bounds_check_mode=BoundsCheckMode.WARNING) -> None
Table-batched embedding operator. Looks up one or more embedding tables. The module is applicable for training. The backward operator is fused with the optimizer, so the embedding tables are updated during the backward pass.
Args:
embedding_specs (List[Tuple[int, int, EmbeddingLocation, ComputeDevice]]): A list of embedding specifications. Each spec is a tuple of (number of embedding rows, embedding dimension (must be a multiple of 4), table placement, compute device).
feature_table_map (List[int], optional): An optional list that specifies feature-table mapping.
cache_algorithm (CacheAlgorithm, optional): LXU cache algorithm (`CacheAlgorithm.LRU`, `CacheAlgorithm.LFU`)
cache_load_factor (float, optional): The LXU cache capacity which is `cache_load_factor` * the total number of rows in all embedding tables
cache_sets (int, optional): The number of cache sets
cache_reserved_memory (float, optional): Amount of memory reserved in HBM for non-cache purposes.
cache_precision (SparseType, optional): Data type of LXU cache (`SparseType.FP32`, `SparseType.FP16`)
weights_precision (SparseType, optional): Data type of embedding tables (also known as weights) (`SparseType.FP32`, `SparseType.FP16`, `SparseType.INT8`)
output_dtype (SparseType, optional): Data type of an output tensor (`SparseType.FP32`, `SparseType.FP16`, `SparseType.INT8`)
enforce_hbm (bool, optional): If True, place all weights/momentums in HBM when using cache
optimizer (OptimType, optional): An optimizer to use for embedding table update in the backward pass. (`OptimType.ADAM`, `OptimType.EXACT_ADAGRAD`, `OptimType.EXACT_ROWWISE_ADAGRAD`, `OptimType.EXACT_ROWWISE_WEIGHTED_ADAGRAD`, `OptimType.EXACT_SGD`, `OptimType.LAMB`, `OptimType.LARS_SGD`, `OptimType.PARTIAL_ROWWISE_ADAM`, `OptimType.PARTIAL_ROWWISE_LAMB`, `OptimType.SGD`)
record_cache_metrics (RecordCacheMetrics, optional): Record the number of cache hits, number of requests, etc. if RecordCacheMetrics.record_cache_miss_counter is True, and record the same metrics per table if RecordCacheMetrics.record_tablewise_cache_miss is True (default: None).
stochastic_rounding (bool, optional): If True, apply stochastic rounding for weight type that is not `SparseType.FP32`
gradient_clipping (bool, optional): If True, apply gradient clipping
max_gradient (float, optional): The value for gradient clipping
learning_rate (float, optional): The learning rate
eps (float, optional): The epsilon value used by Adagrad, LAMB, and Adam
momentum (float, optional): Momentum used by LARS-SGD
weight_decay (float, optional): Weight decay used by LARS-SGD, LAMB, ADAM, and Rowwise Adagrad
weight_decay_mode (WeightDecayMode, optional): Weight decay mode (`WeightDecayMode.NONE`, `WeightDecayMode.L2`, `WeightDecayMode.DECOUPLE`)
eta (float, optional): The eta value used by LARS-SGD
beta1 (float, optional): The beta1 value used by LAMB and ADAM
beta2 (float, optional): The beta2 value used by LAMB and ADAM
pooling_mode (PoolingMode, optional): Pooling mode (`PoolingMode.SUM`, `PoolingMode.MEAN`, `PoolingMode.NONE`)
device (torch.device, optional): The current device to place tensors on
bounds_check_mode (BoundsCheckMode, optional): If not set to `BoundsCheckMode.NONE`, apply boundary check for indices (`BoundsCheckMode.NONE`, `BoundsCheckMode.FATAL`, `BoundsCheckMode.WARNING`, `BoundsCheckMode.IGNORE`)
Inputs:
indices (torch.Tensor): A 1D-tensor that contains indices to be accessed in all embedding table
offsets (torch.Tensor): A 1D-tensor that contains offsets of indices. Shape `(B * T + 1)` where `B` = batch size and `T` = number of tables. `offsets[t * B + b + 1] - offsets[t * B + b]` is the length of bag `b` of table `t`
per_sample_weights (torch.Tensor, optional): An optional 1D-tensor that contains positional weights. Shape `(max(bag length))`. Positional weight `i` is multiplied with all columns of row `i` in each bag after it is read from the embedding table and before pooling (if pooling mode is not PoolingMode.NONE).
feature_requires_grad (torch.Tensor, optional): An optional tensor for checking if `per_sample_weights` requires gradient
Returns:
A 2D-tensor containing looked up data. Shape `(B, total_D)` where `B` = batch size and `total_D` = the sum of all embedding dimensions in the table
Example:
>>> import torch
>>>
>>> from fbgemm_gpu.split_table_batched_embeddings_ops_common import (
>>> EmbeddingLocation,
>>> )
>>> from fbgemm_gpu.split_table_batched_embeddings_ops_training import (
>>> SplitTableBatchedEmbeddingBagsCodegen,
>>> ComputeDevice,
>>> )
>>>
>>> # Two tables
>>> embedding_specs = [
>>> (3, 8, EmbeddingLocation.DEVICE, ComputeDevice.CUDA),
>>> (5, 4, EmbeddingLocation.MANAGED, ComputeDevice.CUDA)
>>> ]
>>>
>>> tbe = SplitTableBatchedEmbeddingBagsCodegen(embedding_specs)
>>> tbe.init_embedding_weights_uniform(-1, 1)
>>>
>>> print(tbe.split_embedding_weights())
[tensor([[-0.9426, 0.7046, 0.4214, -0.0419, 0.1331, -0.7856, -0.8124, -0.2021],
[-0.5771, 0.5911, -0.7792, -0.1068, -0.6203, 0.4813, -0.1677, 0.4790],
[-0.5587, -0.0941, 0.5754, 0.3475, -0.8952, -0.1964, 0.0810, -0.4174]],
device='cuda:0'), tensor([[-0.2513, -0.4039, -0.3775, 0.3273],
[-0.5399, -0.0229, -0.1455, -0.8770],
[-0.9520, 0.4593, -0.7169, 0.6307],
[-0.1765, 0.8757, 0.8614, 0.2051],
[-0.0603, -0.9980, -0.7958, -0.5826]], device='cuda:0')]
>>> # Batch size = 3
>>> indices = torch.tensor([0, 1, 2, 0, 1, 2, 0, 3, 1, 4, 2, 0, 0],
>>> device="cuda",
>>> dtype=torch.long)
>>> offsets = torch.tensor([0, 2, 5, 7, 9, 12, 13],
>>> device="cuda",
>>> dtype=torch.long)
>>>
>>> output = tbe(indices, offsets)
>>>
>>> # Batch size = 3, total embedding dimension = 12
>>> print(output.shape)
torch.Size([3, 12])
>>> print(output)
tensor([[-1.5197, 1.2957, -0.3578, -0.1487, -0.4873, -0.3044, -0.9801, 0.2769,
-0.7164, 0.8528, 0.7159, -0.6719],
[-2.0784, 1.2016, 0.2176, 0.1988, -1.3825, -0.5008, -0.8991, -0.1405,
-1.2637, -0.9427, -1.8902, 0.3754],
[-1.5013, 0.6105, 0.9968, 0.3057, -0.7621, -0.9821, -0.7314, -0.6195,
-0.2513, -0.4039, -0.3775, 0.3273]], device='cuda:0',
grad_fn=<CppNode<SplitLookupFunction_sgd_Op>>)
""",
)
add_docs(
torch.ops.fbgemm.batched_dense_vec_jagged_2d_mul,
"""
batched_dense_vec_jagged_2d_mul(Tensor v, Tensor a_values, Tensor a_offsets) -> Tensor
Batched vector-matrix multiplication of a batched dense vector with a jagged tensor. The dense vector has
size (B * H, max_N) and the jagged tensor has size (B, max_N, H * D), where max_N is the maximum size of the
jagged dimension. B * H is the batch size, and each multiplication is between a vector of length max_N and a matrix of shape [max_N, D].
Args:
v (Tensor): dense vector tensor
a_values (Tensor): Jagged tensor values
a_offsets (Tensor): The jagged offsets tensor (see the signature above).
Returns:
Tensor: output of batch matmul in size (B * H, D)
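Example (illustrative sketch with B = 2, H = 1, max_N = 3, D = 2; the expected
result is described rather than shown verbatim):
>>> v = torch.ones(2, 3)
>>> a_values = torch.tensor([[1., 1.], [2., 2.], [3., 3.]])
>>> a_offsets = torch.tensor([0, 1, 3])
>>> # Batch 0 multiplies [1, 1, 1] with the zero-padded rows [[1, 1], [0, 0], [0, 0]]
>>> # and batch 1 with [[2, 2], [3, 3], [0, 0]], so the output is expected to be
>>> # [[1., 1.], [5., 5.]].
>>> out = torch.ops.fbgemm.batched_dense_vec_jagged_2d_mul(v, a_values, a_offsets)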
""",
)
#
#
# add_docs(
# torch.ops.fbgemm.stacked_jagged_1d_to_dense,
# """Args:
# {input}
# Keyword args:
# {out}""",
# )
#
#
# add_docs(
# torch.ops.fbgemm.stacked_jagged_2d_to_dense,
# """Args:
# {input}
# Keyword args:
# {out}""",
# )
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
# pyre-ignore-all-errors[56]
import itertools
import logging
from math import log2
from typing import List, Optional, Tuple
import torch # usort:skip
import fbgemm_gpu.split_embedding_codegen_lookup_invokers as invokers
from fbgemm_gpu.split_embedding_configs import SparseType
from fbgemm_gpu.split_table_batched_embeddings_ops_common import (
CacheAlgorithm,
DEFAULT_SCALE_BIAS_SIZE_IN_BYTES,
EmbeddingLocation,
PoolingMode,
)
from fbgemm_gpu.split_table_batched_embeddings_ops_inference import (
align_to_cacheline,
rounded_row_size_in_bytes,
unpadded_row_size_in_bytes,
)
from fbgemm_gpu.split_table_batched_embeddings_ops_training import (
CounterBasedRegularizationDefinition,
WeightDecayMode,
)
from torch import nn, Tensor # usort:skip
from torch.autograd.profiler import record_function
try:
torch.ops.load_library(
"//deeplearning/fbgemm/fbgemm_gpu:ssd_split_table_batched_embeddings"
)
except OSError:
# Keep for BC: will be deprecated soon.
torch.ops.load_library(
"//deeplearning/fbgemm/fbgemm_gpu/fb:ssd_split_table_batched_embeddings"
)
ASSOC = 32
class SSDTableBatchedEmbeddingBags(nn.Module):
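"""
Table-batched embedding bags whose rows live in an SSD-backed key/value store
(EmbeddingRocksDBWrapper), fronted by an LRU-managed cache held in HBM or UVM.
prefetch() pulls the rows needed for the next lookup from SSD into the cache and
schedules write-back of evicted rows on a side CUDA stream; forward() then runs
the fused lookup with rowwise Adagrad applied in the backward pass.
"""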
D_offsets: Tensor
lxu_cache_weights: Tensor
lru_state: Tensor
lxu_cache_state: Tensor
momentum1_dev: Tensor
momentum1_uvm: Tensor
momentum1_host: Tensor
momentum1_placements: Tensor
momentum1_offsets: Tensor
weights_dev: Tensor
weights_uvm: Tensor
weights_host: Tensor
weights_placements: Tensor
weights_offsets: Tensor
def __init__(
self,
embedding_specs: List[Tuple[int, int]], # tuple of (rows, dims)
feature_table_map: Optional[List[int]], # [T]
cache_sets: int,
ssd_storage_directory: str,
ssd_shards: int = 1,
ssd_memtable_flush_period: int = -1,
ssd_memtable_flush_offset: int = -1,
ssd_l0_files_per_compact: int = 4,
ssd_rate_limit_mbps: int = 0,
ssd_size_ratio: int = 10,
ssd_compaction_trigger: int = 8,
ssd_write_buffer_size: int = 2 * 1024 * 1024 * 1024,
ssd_max_write_buffer_num: int = 16,
ssd_cache_location: EmbeddingLocation = EmbeddingLocation.MANAGED,
ssd_uniform_init_lower: float = -0.01,
ssd_uniform_init_upper: float = 0.01,
# General Optimizer args
stochastic_rounding: bool = True,
gradient_clipping: bool = False,
max_gradient: float = 1.0,
learning_rate: float = 0.01,
eps: float = 1.0e-8, # used by Adagrad, LAMB, and Adam
momentum: float = 0.9, # used by LARS-SGD
weight_decay: float = 0.0, # used by LARS-SGD, LAMB, ADAM, and Rowwise Adagrad
weight_decay_mode: WeightDecayMode = WeightDecayMode.NONE, # used by Rowwise Adagrad
eta: float = 0.001, # used by LARS-SGD,
beta1: float = 0.9, # used by LAMB and ADAM
beta2: float = 0.999, # used by LAMB and ADAM
counter_based_regularization: Optional[
CounterBasedRegularizationDefinition
] = None, # used by Rowwise Adagrad
pooling_mode: PoolingMode = PoolingMode.SUM,
) -> None:
super(SSDTableBatchedEmbeddingBags, self).__init__()
self.pooling_mode = pooling_mode
self.embedding_specs = embedding_specs
(rows, dims) = zip(*embedding_specs)
T_ = len(self.embedding_specs)
assert T_ > 0
# pyre-fixme[8]: Attribute has type `device`; used as `int`.
self.current_device: torch.device = torch.cuda.current_device()
self.feature_table_map: List[int] = (
feature_table_map if feature_table_map is not None else list(range(T_))
)
T = len(self.feature_table_map)
assert T_ <= T
table_has_feature = [False] * T_
for t in self.feature_table_map:
table_has_feature[t] = True
assert all(table_has_feature), "Each table must have at least one feature!"
D_offsets = [dims[t] for t in self.feature_table_map]
D_offsets = [0] + list(itertools.accumulate(D_offsets))
self.total_D: int = D_offsets[-1]
self.max_D: int = max(dims)
self.register_buffer(
"D_offsets",
torch.tensor(D_offsets, device=self.current_device, dtype=torch.int32),
)
assert self.D_offsets.numel() == T + 1
hash_size_cumsum = [0] + list(itertools.accumulate(rows))
if hash_size_cumsum[-1] == 0:
self.total_hash_size_bits: int = 0
else:
self.total_hash_size_bits: int = int(log2(float(hash_size_cumsum[-1])) + 1)
self.total_hash_size: int = hash_size_cumsum[-1]
# The last element is to easily access # of rows of each table by
# hash_size_cumsum[t + 1] - hash_size_cumsum[t]
hash_size_cumsum = [hash_size_cumsum[t] for t in self.feature_table_map] + [
hash_size_cumsum[-1]
]
self.register_buffer(
"hash_size_cumsum",
torch.tensor(
hash_size_cumsum, device=self.current_device, dtype=torch.int64
),
)
element_size = 4
cache_size = cache_sets * ASSOC * element_size * self.max_D
logging.info(
f"Using cache for SSD with admission algorithm "
f"{CacheAlgorithm.LRU}, {cache_sets} sets, stored on {'DEVICE' if ssd_cache_location is EmbeddingLocation.DEVICE else 'MANAGED'} with {ssd_shards} shards, "
f"Memtable Flush Period: {ssd_memtable_flush_period}, "
f"Memtable Flush Offset: {ssd_memtable_flush_offset}, "
f"Desired L0 files per compaction: {ssd_l0_files_per_compact}, "
f"{cache_size / 1024.0 / 1024.0 / 1024.0 : .2f}GB"
)
self.register_buffer(
"lxu_cache_state",
torch.zeros(cache_sets, ASSOC, dtype=torch.int64).fill_(-1),
)
self.register_buffer(
"lru_state", torch.zeros(cache_sets, ASSOC, dtype=torch.int64)
)
assert ssd_cache_location in (
EmbeddingLocation.MANAGED,
EmbeddingLocation.DEVICE,
)
if ssd_cache_location == EmbeddingLocation.MANAGED:
self.register_buffer(
"lxu_cache_weights",
torch.ops.fbgemm.new_managed_tensor(
torch.zeros(1, device=self.current_device, dtype=torch.float32),
[cache_sets * ASSOC, self.max_D],
),
)
else:
self.register_buffer(
"lxu_cache_weights",
torch.zeros(
cache_sets * ASSOC,
self.max_D,
device=self.current_device,
dtype=torch.float32,
),
)
self.timestep = 0
import os
os.makedirs(ssd_storage_directory, exist_ok=True)
import tempfile
ssd_directory = tempfile.mkdtemp(
prefix="ssd_table_batched_embeddings", dir=ssd_storage_directory
)
# pyre-fixme[4]: Attribute must be annotated.
self.ssd_db = torch.classes.fbgemm.EmbeddingRocksDBWrapper(
ssd_directory,
ssd_shards,
ssd_shards,
ssd_memtable_flush_period,
ssd_memtable_flush_offset,
ssd_l0_files_per_compact,
self.max_D,
ssd_rate_limit_mbps,
ssd_size_ratio,
ssd_compaction_trigger,
ssd_write_buffer_size,
ssd_max_write_buffer_num,
ssd_uniform_init_lower,
ssd_uniform_init_upper,
32, # row_storage_bitwidth
)
# pyre-fixme[20]: Argument `self` expected.
(low_priority, high_priority) = torch.cuda.Stream.priority_range()
self.ssd_stream = torch.cuda.Stream(priority=low_priority)
self.ssd_set_start = torch.cuda.Event()
self.ssd_set_end = torch.cuda.Event()
self.timesteps_prefetched: List[int] = []
if weight_decay_mode == WeightDecayMode.COUNTER or counter_based_regularization:
raise AssertionError(
"weight_decay_mode = WeightDecayMode.COUNTER is not supported for SSD TBE."
)
counter_based_regularization = CounterBasedRegularizationDefinition()
self.optimizer_args = invokers.lookup_args.OptimizerArgs(
stochastic_rounding=stochastic_rounding,
gradient_clipping=gradient_clipping,
max_gradient=max_gradient,
learning_rate=learning_rate,
eps=eps,
beta1=beta1,
beta2=beta2,
weight_decay=weight_decay,
weight_decay_mode=weight_decay_mode.value,
eta=eta,
momentum=momentum,
counter_halflife=counter_based_regularization.counter_halflife,
adjustment_iter=counter_based_regularization.adjustment_iter,
adjustment_ub=counter_based_regularization.adjustment_ub,
learning_rate_mode=counter_based_regularization.learning_rate_mode.value,
grad_sum_decay=counter_based_regularization.grad_sum_decay.value,
tail_id_threshold=counter_based_regularization.tail_id_threshold.val,
is_tail_id_thresh_ratio=int(
counter_based_regularization.tail_id_threshold.is_ratio
),
total_hash_size=-1, # Unused
)
self.weights_dev = nn.Parameter(
torch.empty((0,), device=self.current_device, dtype=torch.float32)
)
self.register_buffer(
"weights_uvm",
torch.tensor((0,), device=self.current_device, dtype=torch.float32),
)
self.register_buffer(
"weights_host",
torch.empty(0),
)
self.register_buffer(
"weights_placements",
torch.tensor(
[EmbeddingLocation.MANAGED_CACHING for _ in range(T_)],
dtype=torch.int32,
),
)
weights_offsets = [0] + list(
itertools.accumulate([row * dim for (row, dim) in zip(rows, dims)])
)
self.register_buffer(
"weights_offsets",
torch.tensor(
weights_offsets[:-1],
device=self.current_device,
dtype=torch.int64,
),
)
self.register_buffer(
"momentum1_dev",
torch.zeros(
self.total_hash_size,
device=self.current_device,
dtype=torch.float32,
),
)
self.register_buffer(
"momentum1_uvm",
torch.empty((0,), device=self.current_device, dtype=torch.float32),
)
self.register_buffer(
"momentum1_host",
torch.empty(0),
)
self.register_buffer(
"momentum1_placements",
torch.tensor(
[EmbeddingLocation.DEVICE for _ in range(T_)], dtype=torch.int32
),
)
momentum1_offsets = [0] + list(itertools.accumulate(rows))
self.register_buffer(
"momentum1_offsets",
torch.tensor(
momentum1_offsets[:-1],
device=self.current_device,
dtype=torch.int64,
),
)
# add placeholder require_grad param to enable autograd without nn.parameter
# this is needed to enable int8 embedding weights for SplitTableBatchedEmbedding
self.placeholder_autograd_tensor = nn.Parameter(
torch.zeros(0, device=self.current_device, dtype=torch.float)
)
def prefetch(self, indices: Tensor, offsets: Tensor) -> Optional[Tensor]:
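"""
Linearizes (indices, offsets) into cache indices, asks
ssd_cache_populate_actions which rows to insert/evict for this timestep,
reads the inserted rows from the SSD store into the LXU cache, and schedules
write-back of the evicted rows to SSD on self.ssd_stream. Returns the
linearized cache indices so forward() can look up cache locations.
"""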
(indices, offsets) = indices.long(), offsets.long()
linear_cache_indices = torch.ops.fbgemm.linearize_cache_indices(
self.hash_size_cumsum,
indices,
offsets,
)
self.timestep += 1
self.timesteps_prefetched.append(self.timestep)
(
inserted_indices,
evicted_indices,
assigned_cache_slots,
actions_count_gpu,
) = torch.ops.fbgemm.ssd_cache_populate_actions(
linear_cache_indices,
self.total_hash_size,
self.lxu_cache_state,
self.timestep,
1, # for now assume prefetch_dist == 1
self.lru_state,
)
def to_pinned_cpu(t: torch.Tensor) -> torch.Tensor:
t_cpu = torch.empty(t.shape, pin_memory=True, dtype=t.dtype)
t_cpu.copy_(t, non_blocking=True)
return t_cpu
actions_count_cpu = to_pinned_cpu(actions_count_gpu)
assigned_cache_slots = assigned_cache_slots.long()
evicted_rows = self.lxu_cache_weights[
assigned_cache_slots.clamp_(min=0).long(), :
]
inserted_rows = torch.empty(
evicted_rows.shape,
dtype=self.lxu_cache_weights.dtype,
pin_memory=True,
)
current_stream = torch.cuda.current_stream()
# Ensure the previous iteration's ssd_db.set(..) has completed.
current_stream.wait_event(self.ssd_set_end)
self.ssd_db.get_cuda(
to_pinned_cpu(inserted_indices), inserted_rows, actions_count_cpu
)
current_stream.record_event(self.ssd_set_start)
# TODO: T123943415 T123943414 this is a big copy that is (mostly) unnecessary with a decent cache hit rate.
# Should we allocate on HBM?
inserted_rows_gpu = inserted_rows.cuda(non_blocking=True)
# self.lxu_cache_weights[assigned_cache_slots, :] = inserted_rows.cuda(non_blocking=True)
torch.ops.fbgemm.masked_index_put(
self.lxu_cache_weights,
assigned_cache_slots,
inserted_rows_gpu,
actions_count_gpu,
)
with torch.cuda.stream(self.ssd_stream):
self.ssd_stream.wait_event(self.ssd_set_start)
evicted_rows_cpu = to_pinned_cpu(evicted_rows)
evicted_indices_cpu = to_pinned_cpu(evicted_indices)
# pyre-fixme[6]: For 1st param expected `Stream` but got `Stream`.
evicted_rows.record_stream(self.ssd_stream)
evicted_indices.record_stream(self.ssd_stream)
self.ssd_db.set_cuda(
evicted_indices_cpu, evicted_rows_cpu, actions_count_cpu, self.timestep
)
# TODO: is this needed?
# Need a way to synchronize
# actions_count_cpu.record_stream(self.ssd_stream)
self.ssd_stream.record_event(self.ssd_set_end)
return linear_cache_indices
def forward(
self,
indices: Tensor,
offsets: Tensor,
per_sample_weights: Optional[Tensor] = None,
feature_requires_grad: Optional[Tensor] = None,
) -> Tensor:
(indices, offsets) = indices.long(), offsets.long()
if len(self.timesteps_prefetched) == 0:
with record_function("## prefetch ##"):
linear_cache_indices = self.prefetch(indices, offsets)
else:
linear_cache_indices = torch.ops.fbgemm.linearize_cache_indices(
self.hash_size_cumsum,
indices,
offsets,
)
lxu_cache_locations = torch.ops.fbgemm.lxu_cache_lookup(
linear_cache_indices,
self.lxu_cache_state,
self.hash_size_cumsum[-1].item(),
)
common_args = invokers.lookup_args.CommonArgs(
placeholder_autograd_tensor=self.placeholder_autograd_tensor,
output_dtype=SparseType.FP32.as_int(),
dev_weights=self.weights_dev,
host_weights=self.weights_host,
uvm_weights=self.weights_uvm,
lxu_cache_weights=self.lxu_cache_weights,
weights_placements=self.weights_placements,
weights_offsets=self.weights_offsets,
D_offsets=self.D_offsets,
total_D=self.total_D,
max_D=self.max_D,
hash_size_cumsum=self.hash_size_cumsum,
total_hash_size_bits=self.total_hash_size_bits,
indices=indices,
offsets=offsets,
pooling_mode=self.pooling_mode,
indice_weights=per_sample_weights,
feature_requires_grad=feature_requires_grad,
lxu_cache_locations=lxu_cache_locations,
vbe_metadata=invokers.lookup_args.VBEMetadata(
B_offsets=None,
output_offsets_feature_rank=None,
B_offsets_rank_per_feature=None,
max_B=-1,
max_B_feature_rank=-1,
output_size=-1,
),
is_experimental=False,
)
momentum1 = invokers.lookup_args.Momentum(
dev=self.momentum1_dev,
host=self.momentum1_host,
uvm=self.momentum1_uvm,
offsets=self.momentum1_offsets,
placements=self.momentum1_placements,
)
self.timesteps_prefetched.pop(0)
return invokers.lookup_rowwise_adagrad.invoke(
common_args, self.optimizer_args, momentum1
)
@torch.jit.ignore
def debug_split_optimizer_states(self) -> List[Tuple[torch.Tensor]]:
"""
Returns a list of states, split by table
Testing only
"""
(rows, _) = zip(*self.embedding_specs)
rows_cumsum = [0] + list(itertools.accumulate(rows))
return [
(
self.momentum1_dev.detach()[rows_cumsum[t] : rows_cumsum[t + 1]].view(
row
),
)
for t, row in enumerate(rows)
]
@torch.jit.export
def debug_split_embedding_weights(self) -> List[torch.Tensor]:
"""
Returns a list of weights, split by table.
Testing only, very slow.
"""
(rows, _) = zip(*self.embedding_specs)
rows_cumsum = [0] + list(itertools.accumulate(rows))
splits = []
for t, (row, dim) in enumerate(self.embedding_specs):
weights = torch.empty((row, dim), dtype=torch.float32)
self.ssd_db.get_cuda(
torch.arange(rows_cumsum[t], rows_cumsum[t + 1]).to(torch.int64),
weights,
torch.as_tensor([row]),
)
splits.append(weights)
torch.cuda.synchronize(self.current_device)
return splits
@torch.jit.export
def set_learning_rate(self, lr: float) -> None:
"""
Sets the learning rate.
"""
self._set_learning_rate(lr)
@torch.jit.ignore
def _set_learning_rate(self, lr: float) -> float:
"""
Helper function to script `set_learning_rate`.
Note that returning None does not work.
"""
self.optimizer_args = self.optimizer_args._replace(learning_rate=lr)
return 0.0
def flush(self) -> None:
active_slots_mask = self.lxu_cache_state != -1
active_weights = self.lxu_cache_weights.masked_select(
active_slots_mask.view(-1, 1)
).view(-1, self.max_D)
active_ids = self.lxu_cache_state.view(-1).masked_select(
active_slots_mask.view(-1)
)
torch.cuda.current_stream().wait_stream(self.ssd_stream)
self.ssd_db.set_cuda(
active_ids.cpu(),
active_weights.cpu(),
torch.tensor([active_ids.numel()]),
self.timestep,
)
class SSDIntNBitTableBatchedEmbeddingBags(nn.Module):
"""
SSD Table-batched version of nn.EmbeddingBag(sparse=False)
Inference version, with FP32/FP16/FP8/INT8/INT4/INT2 support
"""
embedding_specs: List[Tuple[str, int, int, SparseType]]
def __init__(
self,
embedding_specs: List[
Tuple[str, int, int, SparseType]
], # tuple of (feature_names, rows, dims, SparseType)
feature_table_map: Optional[List[int]] = None, # [T]
pooling_mode: PoolingMode = PoolingMode.SUM,
output_dtype: SparseType = SparseType.FP16,
row_alignment: Optional[int] = None,
fp8_exponent_bits: Optional[int] = None,
fp8_exponent_bias: Optional[int] = None,
cache_assoc: int = 32,
scale_bias_size_in_bytes: int = DEFAULT_SCALE_BIAS_SIZE_IN_BYTES,
cache_sets: int = 0,
ssd_storage_directory: str = "/tmp",
ssd_shards: int = 1,
ssd_memtable_flush_period: int = -1,
ssd_memtable_flush_offset: int = -1,
ssd_l0_files_per_compact: int = 4,
ssd_rate_limit_mbps: int = 0,
ssd_size_ratio: int = 10,
ssd_compaction_trigger: int = 8,
ssd_write_buffer_size: int = 2 * 1024 * 1024 * 1024,
ssd_max_write_buffer_num: int = 16,
ssd_cache_location: EmbeddingLocation = EmbeddingLocation.MANAGED,
ssd_uniform_init_lower: float = -0.01,
ssd_uniform_init_upper: float = 0.01,
) -> None: # noqa C901 # tuple of (rows, dims,)
super(SSDIntNBitTableBatchedEmbeddingBags, self).__init__()
assert cache_assoc == 32, "Only 32-way cache is supported now"
self.scale_bias_size_in_bytes = scale_bias_size_in_bytes
self.pooling_mode = pooling_mode
self.embedding_specs = embedding_specs
T_ = len(self.embedding_specs)
assert T_ > 0
device = torch.cuda.current_device()
if device is None:
self.current_device: torch.device = torch.device(
torch.cuda.current_device()
)
elif isinstance(device, torch.device):
self.current_device = device
else:
self.current_device = torch.device(device)
self.use_cpu: bool = self.current_device.type == "cpu"
self.feature_table_map: List[int] = (
feature_table_map if feature_table_map is not None else list(range(T_))
)
T = len(self.feature_table_map)
assert T_ <= T
table_has_feature = [False] * T_
for t in self.feature_table_map:
table_has_feature[t] = True
assert all(table_has_feature), "Each table must have at least one feature!"
self.output_dtype: int = output_dtype.as_int()
# (feature_names, rows, dims, weights_tys) = zip(*embedding_specs)
# Pyre workaround
rows: List[int] = [e[1] for e in embedding_specs]
dims: List[int] = [e[2] for e in embedding_specs]
weights_tys: List[SparseType] = [e[3] for e in embedding_specs]
D_offsets = [dims[t] for t in self.feature_table_map]
D_offsets = [0] + list(itertools.accumulate(D_offsets))
self.total_D: int = D_offsets[-1]
self.register_buffer(
"D_offsets",
torch.tensor(D_offsets, device=self.current_device, dtype=torch.int32),
)
if row_alignment is None:
self.row_alignment: int = 1 if self.use_cpu else 16
else:
self.row_alignment = row_alignment
for dim, weight_ty in zip(dims, weights_tys):
if not weight_ty.is_float():
assert (
dim % (8 / weight_ty.bit_rate()) == 0
), f"For quantized types we need to at least pack at byte granularity, dim: {dim}, weight_ty: {weight_ty}"
def max_ty_D(ty: SparseType) -> int:
return max(
[dim for dim, weight_ty in zip(dims, weights_tys) if weight_ty == ty],
default=0,
)
self.max_int2_D: int = max_ty_D(SparseType.INT2)
self.max_int4_D: int = max_ty_D(SparseType.INT4)
self.max_int8_D: int = max_ty_D(SparseType.INT8)
self.max_float8_D: int = max_ty_D(SparseType.FP8)
self.max_float16_D: int = max_ty_D(SparseType.FP16)
self.max_float32_D: int = max_ty_D(SparseType.FP32)
cached_dims = [
rounded_row_size_in_bytes(
embedding_spec[2], embedding_spec[3], 16, self.scale_bias_size_in_bytes
)
for embedding_spec in self.embedding_specs
]
self.max_D_cache: int = max(cached_dims) if len(cached_dims) > 0 else 0
placements = []
offsets = []
uvm_size = 0
for _, num_embeddings, embedding_dim, weight_ty in embedding_specs:
embedding_dim = rounded_row_size_in_bytes(
embedding_dim, weight_ty, self.row_alignment, scale_bias_size_in_bytes
)
state_size = num_embeddings * embedding_dim
state_size = align_to_cacheline(state_size)
placements.append(EmbeddingLocation.MANAGED_CACHING)
offsets.append(uvm_size)
uvm_size += state_size
self.weights_physical_offsets: List[int] = offsets
weights_tys_int = [weights_tys[t].as_int() for t in self.feature_table_map]
self.register_buffer(
"weights_tys",
torch.tensor(
weights_tys_int, device=self.current_device, dtype=torch.uint8
),
)
self.weight_initialized: bool = True
assert self.D_offsets.numel() == T + 1
hash_size_cumsum = [0] + list(itertools.accumulate(rows))
if hash_size_cumsum[-1] == 0:
self.total_hash_size_bits: int = 0
else:
self.total_hash_size_bits: int = int(log2(float(hash_size_cumsum[-1])) + 1)
self.total_hash_size: int = hash_size_cumsum[-1]
# The last element is to easily access # of rows of each table by
# hash_size_cumsum[t + 1] - hash_size_cumsum[t]
hash_size_cumsum = [hash_size_cumsum[t] for t in self.feature_table_map] + [
hash_size_cumsum[-1]
]
self.register_buffer(
"hash_size_cumsum",
torch.tensor(
hash_size_cumsum, device=self.current_device, dtype=torch.int64
),
)
element_size = 1
cache_size = cache_sets * ASSOC * element_size * self.max_D_cache
logging.info(
f"Using cache for SSD with admission algorithm "
f"{CacheAlgorithm.LRU}, {cache_sets} sets, stored on {'DEVICE' if ssd_cache_location is EmbeddingLocation.DEVICE else 'MANAGED'} with {ssd_shards} shards, "
f"Memtable Flush Period: {ssd_memtable_flush_period}, "
f"Memtable Flush Offset: {ssd_memtable_flush_offset}, "
f"Desired L0 files per compaction: {ssd_l0_files_per_compact}, "
f"{cache_size / 1024.0 / 1024.0 / 1024.0 : .2f}GB"
)
self.register_buffer(
"lxu_cache_state",
torch.zeros(cache_sets, ASSOC, dtype=torch.int64).fill_(-1),
)
self.register_buffer(
"lru_state", torch.zeros(cache_sets, ASSOC, dtype=torch.int64)
)
assert ssd_cache_location in (
EmbeddingLocation.MANAGED,
EmbeddingLocation.DEVICE,
)
if ssd_cache_location == EmbeddingLocation.MANAGED:
self.register_buffer(
"lxu_cache_weights",
torch.ops.fbgemm.new_managed_tensor(
torch.zeros(1, device=self.current_device, dtype=torch.uint8),
[cache_sets * ASSOC, self.max_D_cache],
),
)
else:
self.register_buffer(
"lxu_cache_weights",
torch.zeros(
cache_sets * ASSOC,
self.max_D_cache,
device=self.current_device,
dtype=torch.uint8,
),
)
import os
os.makedirs(ssd_storage_directory, exist_ok=True)
import tempfile
ssd_directory = tempfile.mkdtemp(
prefix="ssd_table_batched_embeddings", dir=ssd_storage_directory
)
# pyre-fixme[4]: Attribute must be annotated.
self.ssd_db = torch.classes.fbgemm.EmbeddingRocksDBWrapper(
ssd_directory,
ssd_shards,
ssd_shards,
ssd_memtable_flush_period,
ssd_memtable_flush_offset,
ssd_l0_files_per_compact,
self.max_D_cache,
ssd_rate_limit_mbps,
ssd_size_ratio,
ssd_compaction_trigger,
ssd_write_buffer_size,
ssd_max_write_buffer_num,
ssd_uniform_init_lower,
ssd_uniform_init_upper,
8, # row_storage_bitwidth
)
# pyre-fixme[20]: Argument `self` expected.
(low_priority, high_priority) = torch.cuda.Stream.priority_range()
self.ssd_stream = torch.cuda.Stream(priority=low_priority)
self.ssd_set_start = torch.cuda.Event()
self.ssd_set_end = torch.cuda.Event()
# pyre-fixme[4]: Attribute must be annotated.
self.timestep_counter = torch.classes.fbgemm.AtomicCounter()
# pyre-fixme[4]: Attribute must be annotated.
self.timestep_prefetch_size = torch.classes.fbgemm.AtomicCounter()
self.weights_dev: torch.Tensor = torch.empty(
0,
device=self.current_device,
dtype=torch.uint8,
)
self.register_buffer(
"weights_uvm",
torch.tensor((0,), device=self.current_device, dtype=torch.uint8),
)
self.register_buffer(
"weights_host",
torch.empty(0),
)
self.register_buffer(
"weights_placements",
torch.tensor(
[EmbeddingLocation.MANAGED_CACHING for _ in range(T_)],
dtype=torch.int32,
),
)
weights_offsets = [0] + list(
itertools.accumulate([row * dim for (row, dim) in zip(rows, dims)])
)
self.register_buffer(
"weights_offsets",
torch.tensor(
weights_offsets[:-1],
device=self.current_device,
dtype=torch.int64,
),
)
if self.max_float8_D > 0:
default_config = SparseType.FP8.default_config()
self.fp8_exponent_bits: int = (
default_config.get("exponent_bits")
if fp8_exponent_bits is None
else fp8_exponent_bits
)
self.fp8_exponent_bias: int = (
default_config.get("exponent_bias")
if fp8_exponent_bias is None
else fp8_exponent_bias
)
else:
self.fp8_exponent_bits = -1
self.fp8_exponent_bias = -1
@torch.jit.export
def prefetch(self, indices: Tensor, offsets: Tensor) -> Tensor:
(indices, offsets) = indices.long(), offsets.long()
linear_cache_indices = torch.ops.fbgemm.linearize_cache_indices(
self.hash_size_cumsum,
indices,
offsets,
)
self.timestep_counter.increment()
self.timestep_prefetch_size.increment()
(
inserted_indices,
evicted_indices,
assigned_cache_slots,
actions_count_gpu,
) = torch.ops.fbgemm.ssd_cache_populate_actions(
linear_cache_indices,
self.total_hash_size,
self.lxu_cache_state,
self.timestep_counter.get(),
1, # for now assume prefetch_dist == 1
self.lru_state,
)
actions_count_cpu = torch.empty(
actions_count_gpu.shape, pin_memory=True, dtype=actions_count_gpu.dtype
)
actions_count_cpu.copy_(actions_count_gpu, non_blocking=True)
assigned_cache_slots = assigned_cache_slots.long()
evicted_rows = self.lxu_cache_weights[
assigned_cache_slots.clamp_(min=0).long(), :
]
inserted_rows = torch.empty(
evicted_rows.shape,
dtype=self.lxu_cache_weights.dtype,
pin_memory=True,
)
current_stream = torch.cuda.current_stream()
# Ensure the previous iteration's ssd_db.set(..) has completed.
current_stream.wait_event(self.ssd_set_end)
inserted_indices_cpu = torch.empty(
inserted_indices.shape, pin_memory=True, dtype=inserted_indices.dtype
)
inserted_indices_cpu.copy_(inserted_indices, non_blocking=True)
self.ssd_db.get_cuda(
inserted_indices_cpu,
inserted_rows,
actions_count_cpu,
)
current_stream.record_event(self.ssd_set_start)
# TODO: T123943415 T123943414 this is a big copy that is (mostly) unnecessary with a decent cache hit rate.
# Should we allocate on HBM?
inserted_rows_gpu = inserted_rows.to(self.current_device, non_blocking=True)
# self.lxu_cache_weights[assigned_cache_slots, :] = inserted_rows.cuda(non_blocking=True)
torch.ops.fbgemm.masked_index_put(
self.lxu_cache_weights,
assigned_cache_slots,
inserted_rows_gpu,
actions_count_gpu,
)
with torch.cuda.stream(self.ssd_stream):
self.ssd_stream.wait_event(self.ssd_set_start)
evicted_rows_cpu = torch.empty(
evicted_rows.shape, pin_memory=True, dtype=evicted_rows.dtype
)
evicted_rows_cpu.copy_(evicted_rows, non_blocking=True)
evicted_indices_cpu = torch.empty(
evicted_indices.shape, pin_memory=True, dtype=evicted_indices.dtype
)
evicted_indices_cpu.copy_(evicted_indices, non_blocking=True)
# pyre-fixme[6]: For 1st param expected `Stream` but got `Stream`.
evicted_rows.record_stream(self.ssd_stream)
evicted_indices.record_stream(self.ssd_stream)
self.ssd_db.set_cuda(
evicted_indices_cpu,
evicted_rows_cpu,
actions_count_cpu,
self.timestep_counter.get(),
)
# TODO: is this needed?
# Need a way to synchronize
# actions_count_cpu.record_stream(self.ssd_stream)
self.ssd_stream.record_event(self.ssd_set_end)
return linear_cache_indices
def forward(
self,
indices: Tensor,
offsets: Tensor,
per_sample_weights: Optional[Tensor] = None,
) -> Tensor:
if self.timestep_prefetch_size.get() <= 0:
with record_function("## prefetch ##"):
linear_cache_indices = self.prefetch(indices, offsets)
else:
linear_cache_indices = torch.ops.fbgemm.linearize_cache_indices(
self.hash_size_cumsum,
indices,
offsets,
)
lxu_cache_locations = torch.ops.fbgemm.lxu_cache_lookup(
linear_cache_indices,
self.lxu_cache_state,
self.hash_size_cumsum[-1].item(),
)
self.timestep_prefetch_size.decrement()
assert (
self.weight_initialized
), "weight needs to be initialized before forward function"
# Note: CPU and CUDA ops use the same interface to facilitate JIT IR
# generation for CUDA/CPU. For CPU op, we don't need weights_uvm and
# weights_placements
return torch.ops.fbgemm.int_nbit_split_embedding_codegen_lookup_function(
dev_weights=self.weights_dev,
uvm_weights=self.weights_uvm,
weights_placements=self.weights_placements,
weights_offsets=self.weights_offsets,
weights_tys=self.weights_tys,
D_offsets=self.D_offsets,
total_D=self.total_D,
max_int2_D=self.max_int2_D,
max_int4_D=self.max_int4_D,
max_int8_D=self.max_int8_D,
max_float16_D=self.max_float16_D,
max_float32_D=self.max_float32_D,
indices=indices,
offsets=offsets,
pooling_mode=int(self.pooling_mode),
indice_weights=per_sample_weights,
output_dtype=self.output_dtype,
lxu_cache_weights=self.lxu_cache_weights,
lxu_cache_locations=lxu_cache_locations,
row_alignment=self.row_alignment,
max_float8_D=self.max_float8_D,
fp8_exponent_bits=self.fp8_exponent_bits,
fp8_exponent_bias=self.fp8_exponent_bias,
)
@torch.jit.export
def split_embedding_weights(
self, split_scale_shifts: bool = True
) -> List[Tuple[Tensor, Optional[Tensor]]]:
"""
Returns a list of weights, split by table.
Testing only, very slow.
"""
splits: List[Tuple[Tensor, Optional[Tensor]]] = []
rows_cumsum = 0
for _, row, dim, weight_ty in self.embedding_specs:
weights = torch.empty(
(
row,
rounded_row_size_in_bytes(
dim,
weight_ty,
self.row_alignment,
self.scale_bias_size_in_bytes,
),
),
dtype=torch.uint8,
)
self.ssd_db.get_cuda(
torch.arange(rows_cumsum, rows_cumsum + row).to(torch.int64),
weights,
torch.as_tensor([row]),
)
rows_cumsum += row
torch.cuda.synchronize(self.current_device)
weights_shifts = weights.detach()
if split_scale_shifts:
# remove the padding at the end of each row.
weights_shifts = weights_shifts[
:,
: unpadded_row_size_in_bytes(
dim, weight_ty, self.scale_bias_size_in_bytes
),
]
if (
weight_ty == SparseType.INT8
or weight_ty == SparseType.INT4
or weight_ty == SparseType.INT2
):
splits.append(
(
weights_shifts[:, self.scale_bias_size_in_bytes :],
weights_shifts[:, : self.scale_bias_size_in_bytes],
)
)
else:
assert (
weight_ty == SparseType.FP8
or weight_ty == SparseType.FP16
or weight_ty == SparseType.FP32
)
splits.append(
(
weights_shifts,
None,
)
)
else:
splits.append((weights_shifts, None))
torch.cuda.synchronize(self.current_device)
return splits
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
# pyre-ignore-all-errors[56]
# flake8: noqa F401
import torch # usort:skip
import warnings
# This module is a compatibility wrapper that re-exports the symbols from:
# fbgemm_gpu.split_table_batched_embeddings_ops_common
# fbgemm_gpu.split_table_batched_embeddings_ops_inference
# fbgemm_gpu.split_table_batched_embeddings_ops_training
from fbgemm_gpu.split_embedding_configs import EmbOptimType as OptimType, SparseType
from fbgemm_gpu.split_table_batched_embeddings_ops_common import (
BoundsCheckMode,
CacheAlgorithm,
CacheState,
DEFAULT_SCALE_BIAS_SIZE_IN_BYTES,
EmbeddingLocation,
PoolingMode,
RecordCacheMetrics,
round_up,
SplitState,
)
from fbgemm_gpu.split_table_batched_embeddings_ops_inference import (
align_to_cacheline,
IntNBitTableBatchedEmbeddingBagsCodegen,
rounded_row_size_in_bytes,
unpadded_row_size_in_bytes,
)
from fbgemm_gpu.split_table_batched_embeddings_ops_training import (
ComputeDevice,
CounterBasedRegularizationDefinition,
CounterWeightDecayMode,
DEFAULT_ASSOC,
DenseTableBatchedEmbeddingBagsCodegen,
GradSumDecay,
INT8_EMB_ROW_DIM_OFFSET,
LearningRateMode,
SplitTableBatchedEmbeddingBagsCodegen,
TailIdThreshold,
WeightDecayMode,
)
try:
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu/codegen:embedding_ops")
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu/codegen:embedding_ops_cpu")
except Exception:
pass
warnings.warn(
f"""\033[93m
The Python module {__name__} is now DEPRECATED and will be removed in the
future. Users should instead declare dependencies on
//deeplearning/fbgemm/fbgemm_gpu/split_table_batched_embeddings_ops_{{training, inference}}
in their TARGETS file and import the
fbgemm_gpu.split_table_batched_embeddings_ops_{{training, inference}}
modules as needed in their scripts.
\033[0m""",
DeprecationWarning,
)
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import math
import os
import struct
import subprocess
import unittest
from functools import wraps
from typing import Any, Callable, List, Tuple
import hypothesis.strategies as st
import numpy as np
import torch
TEST_WITH_ROCM: bool = os.getenv("FBGEMM_TEST_WITH_ROCM", "0") == "1"
# Eigen/Python round 0.5 away from 0, Numpy rounds to even
round_to_nearest: Callable[[np.ndarray], np.ndarray] = np.vectorize(round)
def bytes_to_floats(byte_matrix: np.ndarray) -> np.ndarray:
floats = np.empty([np.shape(byte_matrix)[0], 1], dtype=np.float32)
for i, byte_values in enumerate(byte_matrix):
(floats[i],) = struct.unpack("f", bytearray(byte_values))
return floats
def floats_to_bytes(floats: np.ndarray) -> np.ndarray:
byte_matrix = np.empty([np.shape(floats)[0], 4], dtype=np.uint8)
for i, value in enumerate(floats):
assert isinstance(value, np.float32), (value, floats)
as_bytes = struct.pack("f", value)
# In Python3 bytes will be a list of int, in Python2 a list of string
if isinstance(as_bytes[0], int):
byte_matrix[i] = list(as_bytes)
else:
byte_matrix[i] = list(map(ord, as_bytes))
return byte_matrix
def bytes_to_half_floats(byte_matrix: np.ndarray) -> np.ndarray:
floats = np.empty([np.shape(byte_matrix)[0], 1], dtype=np.float16)
for i, byte_values in enumerate(byte_matrix):
(floats[i],) = np.frombuffer(
memoryview(byte_values).tobytes(), dtype=np.float16
)
return floats
def half_floats_to_bytes(floats: np.ndarray) -> np.ndarray:
byte_matrix = np.empty([np.shape(floats)[0], 2], dtype=np.uint8)
for i, value in enumerate(floats):
assert isinstance(value, np.float16), (value, floats)
byte_matrix[i] = np.frombuffer(
memoryview(value.tobytes()).tobytes(), dtype=np.uint8
)
return byte_matrix
def fused_rowwise_8bit_quantize_reference(data: np.ndarray) -> np.ndarray:
minimum = np.min(data, axis=-1, keepdims=True)
maximum = np.max(data, axis=-1, keepdims=True)
span = maximum - minimum
bias = minimum
scale = span / 255.0
inverse_scale = 255.0 / (span + 1e-8)
quantized_data = round_to_nearest((data - bias) * inverse_scale)
scale_bytes = floats_to_bytes(scale.reshape(-1))
scale_bytes = scale_bytes.reshape(data.shape[:-1] + (scale_bytes.shape[-1],))
bias_bytes = floats_to_bytes(bias.reshape(-1))
bias_bytes = bias_bytes.reshape(data.shape[:-1] + (bias_bytes.shape[-1],))
return np.concatenate([quantized_data, scale_bytes, bias_bytes], axis=-1)
def fused_rowwise_8bit_dequantize_reference(fused_quantized: np.ndarray) -> np.ndarray:
scale = bytes_to_floats(fused_quantized[..., -8:-4].astype(np.uint8).reshape(-1, 4))
scale = scale.reshape(fused_quantized.shape[:-1] + (scale.shape[-1],))
bias = bytes_to_floats(fused_quantized[..., -4:].astype(np.uint8).reshape(-1, 4))
bias = bias.reshape(fused_quantized.shape[:-1] + (bias.shape[-1],))
quantized_data = fused_quantized[..., :-8]
return quantized_data * scale + bias
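# Illustrative sketch (not part of the original tests): the fused row-wise
# 8-bit layout produced above stores, per row, the quantized values followed
# by a 4-byte float scale and a 4-byte float bias, so dequantization should
# reconstruct the input to within roughly one quantization step (span / 255).
def _example_fused_8bit_roundtrip() -> None:
    data = np.random.rand(3, 8).astype(np.float32)
    fused = fused_rowwise_8bit_quantize_reference(data)
    assert fused.shape == (3, 8 + 4 + 4)  # values + scale bytes + bias bytes
    recovered = fused_rowwise_8bit_dequantize_reference(fused)
    span = data.max(axis=-1, keepdims=True) - data.min(axis=-1, keepdims=True)
    assert np.all(np.abs(recovered - data) <= span / 255.0 + 1e-6)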
def fused_rowwise_8bit_dequantize_reference_half(
fused_quantized: np.ndarray,
) -> np.ndarray:
scale = bytes_to_half_floats(
fused_quantized[..., -8:-4].astype(np.uint8).reshape(-1, 4)
)
scale = scale.reshape(fused_quantized.shape[:-1] + (scale.shape[-1],))
bias = bytes_to_half_floats(
fused_quantized[..., -4:].astype(np.uint8).reshape(-1, 4)
)
bias = bias.reshape(fused_quantized.shape[:-1] + (bias.shape[-1],))
quantized_data = fused_quantized[..., :-8]
return quantized_data * scale + bias
def fused_rowwise_nbit_quantize_reference(data: np.ndarray, bit: int) -> np.ndarray:
minimum = np.min(data, axis=1).astype(np.float16).astype(np.float32)
maximum = np.max(data, axis=1)
span = maximum - minimum
qmax = (1 << bit) - 1
scale = (span / qmax).astype(np.float16).astype(np.float32)
bias = np.zeros(data.shape[0])
quantized_data = np.zeros(data.shape).astype(np.uint8)
for i in range(data.shape[0]):
bias[i] = minimum[i]
inverse_scale = 1.0 if scale[i] == 0.0 else 1 / scale[i]
if scale[i] == 0.0 or math.isinf(inverse_scale):
scale[i] = 1.0
inverse_scale = 1.0
quantized_data[i] = np.clip(
np.round((data[i, :] - minimum[i]) * inverse_scale), 0, qmax
)
# pack
assert 8 % bit == 0
num_elem_per_byte = 8 // bit
packed_dim = (data.shape[1] + num_elem_per_byte - 1) // num_elem_per_byte
packed_data = np.zeros([data.shape[0], packed_dim]).astype(np.uint8)
for i in range(data.shape[0]):
for j in range(data.shape[1]):
if j % num_elem_per_byte == 0:
packed_data[i, j // num_elem_per_byte] = quantized_data[i, j]
else:
packed_data[i, j // num_elem_per_byte] += quantized_data[i, j] << (
(j % num_elem_per_byte) * bit
)
scale_bytes = half_floats_to_bytes(scale.astype(np.float16))
bias_bytes = half_floats_to_bytes(bias.astype(np.float16))
return np.concatenate([packed_data, scale_bytes, bias_bytes], axis=1)
def fused_rowwise_nbit_quantize_dequantize_reference(
data: np.ndarray, bit: int
) -> np.ndarray:
fused_quantized = fused_rowwise_nbit_quantize_reference(data, bit)
scale = bytes_to_half_floats(fused_quantized[:, -4:-2].astype(np.uint8)).astype(
np.float32
)
bias = bytes_to_half_floats(fused_quantized[:, -2:].astype(np.uint8)).astype(
np.float32
)
quantized_data = fused_quantized[:, :-4]
# unpack
packed_dim = fused_quantized.shape[1] - 4
assert 8 % bit == 0
num_elem_per_byte = 8 // bit
assert packed_dim == ((data.shape[1] + num_elem_per_byte - 1) // num_elem_per_byte)
unpacked_data = np.zeros(data.shape).astype(np.uint8)
for i in range(data.shape[0]):
for j in range(data.shape[1]):
unpacked_data[i, j] = (
quantized_data[i, j // num_elem_per_byte]
>> ((j % num_elem_per_byte) * bit)
) & ((1 << bit) - 1)
return scale * unpacked_data + bias
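# Illustrative sketch (not part of the original tests): with bit=4 the packed
# layout stores two 4-bit values per byte (low nibble first), followed by a
# 2-byte fp16 scale and a 2-byte fp16 bias per row.
def _example_fused_4bit_packing() -> None:
    data = np.random.rand(2, 6).astype(np.float32)
    fused = fused_rowwise_nbit_quantize_reference(data, bit=4)
    # 6 values pack into 3 bytes, plus 2 scale bytes and 2 bias bytes per row.
    assert fused.shape == (2, 3 + 2 + 2)
    assert fused.dtype == np.uint8
    recovered = fused_rowwise_nbit_quantize_dequantize_reference(data, bit=4)
    assert recovered.shape == data.shape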
# Used for `@unittest.skipIf`
gpu_unavailable: Tuple[bool, str] = (
not torch.cuda.is_available() or torch.cuda.device_count() == 0,
"CUDA is not available or no GPUs detected",
)
# Used for `if` statements inside tests
gpu_available: bool = not gpu_unavailable[0]
# Used for `@unittest.skipIf` for tests that pass in internal CI, but fail on the GitHub runners
running_on_github: Tuple[bool, str] = (
os.getenv("GITHUB_ENV") is not None,
"Test is currently known to fail or hang when run in the GitHub runners",
)
# Used for `@unittest.skipIf` for tests that currently fail on ARM platform
on_arm_platform: Tuple[bool, str] = (
subprocess.run(["uname", "-m"], stdout=subprocess.PIPE)
.stdout.decode("utf-8")
.strip()
== "aarch64",
"Test is currently known to fail when running on ARM platform",
)
def cpu_and_maybe_gpu() -> st.SearchStrategy[List[torch.device]]:
gpu_available = torch.cuda.is_available() and torch.cuda.device_count() > 0
    # st.sampled_from is not guaranteed to test every value passed to it.
    # However, Hypothesis generates 100 test cases per test by default, so a
    # strategy this small is effectively exhausted. If st.sampled_from held
    # more than 100 items, or were combined with other strategies, some values
    # might never be drawn. This remains a stopgap until we find a way to
    # parameterize the unittest TestCase.
return st.sampled_from(
[torch.device("cpu")] + ([torch.device("cuda")] if gpu_available else [])
)
def cpu_only() -> st.SearchStrategy[List[torch.device]]:
return st.sampled_from([torch.device("cpu")])
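# Illustrative sketch (not part of the original helpers): the tuples above are
# (condition, reason) pairs meant to be unpacked into `unittest.skipIf`, and
# the device strategies plug directly into `hypothesis.given`. A minimal,
# hypothetical usage follows; the method name deliberately avoids the `test_`
# prefix so it is never collected as a real test.
from hypothesis import given as _example_given, settings as _example_settings
class _ExampleDeviceUsage(unittest.TestCase):
    @unittest.skipIf(*gpu_unavailable)  # unpacks to skipIf(condition, reason)
    @_example_given(device=cpu_and_maybe_gpu())
    @_example_settings(deadline=None, max_examples=5)
    def example_roundtrip(self, device: torch.device) -> None:
        x = torch.arange(4, device=device)
        self.assertTrue(torch.equal(x.cpu(), torch.arange(4)))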
# pyre-fixme[3]: Return annotation cannot be `Any`.
def skipIfRocm(reason: str = "Test currently doesn't work on the ROCm stack") -> Any:
# pyre-fixme[3]: Return annotation cannot be `Any`.
# pyre-fixme[24]: Generic type `Callable` expects 2 type parameters.
def skipIfRocmDecorator(fn: Callable) -> Any:
@wraps(fn)
# pyre-fixme[3]: Return annotation cannot be `Any`.
def wrapper(*args: Any, **kwargs: Any) -> Any:
if TEST_WITH_ROCM:
raise unittest.SkipTest(reason)
else:
fn(*args, **kwargs)
return wrapper
return skipIfRocmDecorator
def symint_vector_unsupported() -> Tuple[bool, str]:
major, minor = torch.__version__.split(".")[0:2]
return (
int(major) < 2 or (int(major) == 2 and int(minor) < 1),
"""
dynamic shape support for this op needs to be on PyTorch 2.1 or
newer with https://github.com/pytorch/pytorch/pull/101056
""",
)
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import math
import random
import unittest
from itertools import accumulate
from typing import List, Tuple
import hypothesis.strategies as st
import torch
from hypothesis import given, HealthCheck, settings
try:
# pyre-ignore[21]
from fbgemm_gpu import open_source # noqa: F401
from test_utils import gpu_unavailable # pyre-ignore[21]
except Exception:
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops")
from fbgemm_gpu.test.test_utils import gpu_unavailable
def gen_inputs(
hash_sizes: List[int],
batch_size: int,
max_len: int,
) -> Tuple[torch.Tensor, torch.Tensor]:
"""
the lengths of bags are chosen from
a uniform distribution from [0, max_len]
"""
T = len(hash_sizes)
offsets = [0]
indices_per_table = []
for t in range(T):
len_sum = 0
for _ in range(batch_size):
length = random.randint(0, max_len)
len_sum += length
offsets.append(offsets[-1] + length)
n_rows = hash_sizes[t]
indices_per_table.append(torch.randint(n_rows, [len_sum], dtype=torch.int64))
indices = torch.cat(indices_per_table, dim=0)
offsets = torch.tensor(offsets, dtype=torch.int64)
return indices, offsets
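# Illustrative sketch (not part of the original tests): for T tables and batch
# size B, gen_inputs returns one flat index tensor plus T * B + 1 cumulative
# offsets, and the last offset equals the total number of indices.
def _example_gen_inputs_shapes() -> None:
    indices, offsets = gen_inputs(hash_sizes=[10, 20], batch_size=3, max_len=5)
    assert offsets.numel() == 2 * 3 + 1
    assert offsets[0].item() == 0
    assert indices.numel() == offsets[-1].item()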
def transpose_embedding_input_ref(
hash_size_cumsum: torch.Tensor,
indices: torch.Tensor,
offsets: torch.Tensor,
info_B_num_bits: int,
) -> Tuple[
torch.Tensor,
torch.Tensor,
torch.Tensor,
torch.Tensor,
torch.Tensor,
torch.Tensor,
torch.Tensor,
]:
"""
reference implementation of torch.ops.fbgemm.transpose_embedding_input
"""
T = hash_size_cumsum.numel() - 1
B = (offsets.numel() - 1) // T
linear_indices = torch.zeros_like(indices)
infos = torch.zeros_like(indices)
for b_t in range(B * T):
t = b_t // B
b = b_t % B
start = int(offsets[b_t].item())
end = int(offsets[b_t + 1].item())
for i in range(start, end):
linear_indices[i] = indices[i] + hash_size_cumsum[t]
infos[i] = (t << info_B_num_bits) | b
linear_indices_sorted, sorted_idx = torch.sort(linear_indices, stable=True)
infos_sorted = infos[sorted_idx]
(
sorted_linear_indices_run,
sorted_linear_indices_run_lengths,
) = torch.unique_consecutive(linear_indices_sorted, return_counts=True)
sorted_linear_indices_num_runs = torch.tensor(
sorted_linear_indices_run.numel(), dtype=torch.int64
)
sorted_linear_indices_cumulative_run_lengths = torch.tensor(
[0] + list(accumulate(sorted_linear_indices_run_lengths.tolist())),
dtype=torch.int64,
)
return (
linear_indices,
linear_indices_sorted,
infos_sorted,
sorted_linear_indices_run,
sorted_linear_indices_run_lengths,
sorted_linear_indices_num_runs,
sorted_linear_indices_cumulative_run_lengths,
)
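# Illustrative sketch (not part of the original reference): each `info` value
# packs the table id `t` into the high bits and the sample id `b` into the low
# `info_B_num_bits` bits, so both can be recovered with a shift and a mask.
def _example_info_packing() -> None:
    info_B_num_bits = 4  # up to 16 samples per batch in this toy setup
    t, b = 3, 9  # table 3, sample 9
    info = (t << info_B_num_bits) | b
    assert info == 0b11_1001  # == 57
    assert info >> info_B_num_bits == t
    assert info & ((1 << info_B_num_bits) - 1) == b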
class SplitEmbeddingsUtilsTest(unittest.TestCase):
@unittest.skipIf(*gpu_unavailable)
# pyre-ignore [56]: Invalid decoration, was not able to infer the type of argument
@given(
B=st.integers(min_value=10, max_value=25),
T=st.integers(min_value=5, max_value=20),
E=st.integers(min_value=10, max_value=50),
)
@settings(deadline=30000, suppress_health_check=[HealthCheck.filter_too_much])
def test_transpose(self, B: int, T: int, E: int) -> None:
hash_sizes = [random.randint(E, 2 * E) for _ in range(T)]
batch_size = B
max_len = 3 * E
total_hash_size_bits: int = int(math.log2(sum(hash_sizes)) + 1)
hash_size_cumsum = torch.tensor(
[0] + list(accumulate(hash_sizes)), dtype=torch.int64
)
indices, offsets = gen_inputs(hash_sizes, batch_size, max_len)
hash_size_cumsum_cuda = hash_size_cumsum.cuda()
info_B_num_bits, _ = torch.ops.fbgemm.get_infos_metadata(
hash_size_cumsum_cuda, B, T
)
(
linear_indices,
linear_indices_sorted,
infos_sorted,
sorted_linear_indices_run,
sorted_linear_indices_run_lengths,
sorted_linear_indices_num_runs,
sorted_linear_indices_cumulative_run_lengths,
) = torch.ops.fbgemm.transpose_embedding_input(
hash_size_cumsum_cuda,
total_hash_size_bits,
indices.cuda(),
offsets.cuda(),
info_B_num_bits=info_B_num_bits,
)
(
linear_indices_ref,
linear_indices_sorted_ref,
infos_sorted_ref,
sorted_linear_indices_run_ref,
sorted_linear_indices_run_lengths_ref,
sorted_linear_indices_num_runs_ref,
sorted_linear_indices_cumulative_run_lengths_ref,
) = transpose_embedding_input_ref(
hash_size_cumsum, indices, offsets, info_B_num_bits
)
self.assertTrue(torch.equal(linear_indices.cpu(), linear_indices_ref))
self.assertTrue(
torch.equal(linear_indices_sorted.cpu(), linear_indices_sorted_ref)
)
self.assertTrue(torch.equal(infos_sorted.cpu(), infos_sorted_ref))
        # The fbgemm implementation pads its outputs, so slice before comparing
num = sorted_linear_indices_run_ref.numel()
self.assertTrue(
torch.equal(
sorted_linear_indices_run.cpu()[:num], sorted_linear_indices_run_ref
)
)
self.assertTrue(
torch.equal(
sorted_linear_indices_run_lengths.cpu()[:num],
sorted_linear_indices_run_lengths_ref,
)
)
self.assertEqual(
sorted_linear_indices_num_runs.item(),
sorted_linear_indices_num_runs_ref.item(),
)
self.assertTrue(
torch.equal(
sorted_linear_indices_cumulative_run_lengths.cpu()[: num + 1],
sorted_linear_indices_cumulative_run_lengths_ref,
)
)
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
# pyre-ignore-all-errors[56]
import random
import unittest
from typing import List
import fbgemm_gpu
import hypothesis.strategies as st
import torch
from hypothesis import given, settings, Verbosity
# pyre-fixme[16]: Module `fbgemm_gpu` has no attribute `open_source`.
open_source: bool = getattr(fbgemm_gpu, "open_source", False)
if open_source:
# pyre-ignore[21]
from test_utils import gpu_available, gpu_unavailable, skipIfRocm
else:
from fbgemm_gpu.test.test_utils import gpu_available, gpu_unavailable, skipIfRocm
if gpu_available:
# pyre-ignore[21]
from fbgemm_gpu.uvm import cudaMemAdvise, cudaMemoryAdvise, cudaMemPrefetchAsync
MAX_EXAMPLES = 40
class UvmTest(unittest.TestCase):
@unittest.skipIf(*gpu_unavailable)
@given(
sizes=st.lists(st.integers(min_value=1, max_value=8), min_size=1, max_size=4),
uvm_op=st.sampled_from(
[
torch.ops.fbgemm.new_unified_tensor,
torch.ops.fbgemm.new_managed_tensor,
torch.ops.fbgemm.new_host_mapped_tensor,
torch.ops.fbgemm.new_vanilla_managed_tensor,
]
),
)
@settings(verbosity=Verbosity.verbose, max_examples=MAX_EXAMPLES, deadline=None)
# pyre-fixme[2]: Parameter must be annotated.
def test_is_uvm_tensor(self, sizes: List[int], uvm_op) -> None:
if uvm_op is torch.ops.fbgemm.new_unified_tensor:
is_host_mapped = random.choice([True, False])
uvm_t = uvm_op(
torch.empty(0, device="cuda:0", dtype=torch.float),
sizes,
is_host_mapped,
)
else:
uvm_t = uvm_op(torch.empty(0, device="cuda:0", dtype=torch.float), sizes)
assert torch.ops.fbgemm.is_uvm_tensor(uvm_t)
assert torch.ops.fbgemm.uvm_storage(uvm_t)
@unittest.skipIf(*gpu_unavailable)
@given(
sizes=st.lists(st.integers(min_value=1, max_value=8), min_size=1, max_size=4),
uvm_op=st.sampled_from(
[
torch.ops.fbgemm.new_unified_tensor,
torch.ops.fbgemm.new_managed_tensor,
torch.ops.fbgemm.new_vanilla_managed_tensor,
]
),
)
@settings(verbosity=Verbosity.verbose, max_examples=MAX_EXAMPLES, deadline=None)
# pyre-fixme[2]: Parameter must be annotated.
def test_uvm_to_cpu(self, sizes: List[int], uvm_op) -> None:
if uvm_op is torch.ops.fbgemm.new_unified_tensor:
is_host_mapped = False
uvm_t = uvm_op(
torch.empty(0, device="cuda:0", dtype=torch.float),
sizes,
is_host_mapped,
)
else:
uvm_t = uvm_op(torch.empty(0, device="cuda:0", dtype=torch.float), sizes)
cpu_t = torch.ops.fbgemm.uvm_to_cpu(uvm_t)
assert not torch.ops.fbgemm.is_uvm_tensor(cpu_t)
assert torch.ops.fbgemm.uvm_storage(cpu_t)
uvm_t.copy_(cpu_t)
assert torch.ops.fbgemm.is_uvm_tensor(uvm_t)
assert torch.ops.fbgemm.uvm_storage(uvm_t)
        # Test using the CPU tensor after the UVM tensor has been freed
del uvm_t
cpu_t.mul_(42)
@unittest.skipIf(*gpu_unavailable)
def test_enum(self) -> None:
# pyre-ignore[16]
assert cudaMemoryAdvise.cudaMemAdviseSetAccessedBy.value == 5
@skipIfRocm()
@unittest.skipIf(*gpu_unavailable)
@given(
sizes=st.lists(
st.integers(min_value=1, max_value=(1024)), min_size=1, max_size=4
),
uvm_op=st.sampled_from(
[
torch.ops.fbgemm.new_unified_tensor,
torch.ops.fbgemm.new_managed_tensor,
torch.ops.fbgemm.new_vanilla_managed_tensor,
]
),
)
@settings(verbosity=Verbosity.verbose, max_examples=MAX_EXAMPLES, deadline=None)
# pyre-fixme[2]: Parameter must be annotated.
def test_cudaMemAdvise(self, sizes: List[int], uvm_op) -> None:
if uvm_op is torch.ops.fbgemm.new_unified_tensor:
is_host_mapped = False
uvm_t = uvm_op(
torch.empty(0, device="cuda:0", dtype=torch.float),
sizes,
is_host_mapped,
)
else:
uvm_t = uvm_op(torch.empty(0, device="cuda:0", dtype=torch.float), sizes)
assert torch.ops.fbgemm.is_uvm_tensor(uvm_t)
assert torch.ops.fbgemm.uvm_storage(uvm_t)
# pyre-ignore[16]
cudaMemAdvise(uvm_t, cudaMemoryAdvise.cudaMemAdviseSetAccessedBy)
@unittest.skipIf(*gpu_unavailable)
@given(
sizes=st.lists(
st.integers(min_value=1, max_value=(1024)), min_size=1, max_size=3
),
uvm_op=st.sampled_from(
[
torch.ops.fbgemm.new_unified_tensor,
torch.ops.fbgemm.new_managed_tensor,
torch.ops.fbgemm.new_vanilla_managed_tensor,
]
),
)
@settings(verbosity=Verbosity.verbose, max_examples=MAX_EXAMPLES, deadline=None)
# pyre-fixme[2]: Parameter must be annotated.
def test_cudaMemPrefetchAsync(self, sizes: List[int], uvm_op) -> None:
if uvm_op is torch.ops.fbgemm.new_unified_tensor:
is_host_mapped = False
uvm_t = uvm_op(
torch.empty(0, device="cuda:0", dtype=torch.float),
sizes,
is_host_mapped,
)
else:
uvm_t = uvm_op(torch.empty(0, device="cuda:0", dtype=torch.float), sizes)
assert torch.ops.fbgemm.is_uvm_tensor(uvm_t)
assert torch.ops.fbgemm.uvm_storage(uvm_t)
cudaMemPrefetchAsync(uvm_t)
torch.cuda.synchronize(torch.device("cuda:0"))
@skipIfRocm()
@unittest.skipIf(
not torch.cuda.is_available() or torch.cuda.device_count() < 2,
"Skip unless two CUDA devices are detected",
)
@given(
sizes=st.lists(
st.integers(min_value=1, max_value=(1024)), min_size=1, max_size=4
),
uvm_op=st.sampled_from(
[
torch.ops.fbgemm.new_unified_tensor,
torch.ops.fbgemm.new_managed_tensor,
torch.ops.fbgemm.new_vanilla_managed_tensor,
]
),
)
@settings(verbosity=Verbosity.verbose, max_examples=MAX_EXAMPLES, deadline=None)
# pyre-fixme[2]: Parameter must be annotated.
def test_uvm_to_device(self, sizes: List[int], uvm_op) -> None:
if uvm_op is torch.ops.fbgemm.new_unified_tensor:
is_host_mapped = False
uvm_t = uvm_op(
torch.empty(0, device="cuda:0", dtype=torch.float),
sizes,
is_host_mapped,
)
else:
uvm_t = uvm_op(torch.empty(0, device="cuda:0", dtype=torch.float), sizes)
assert torch.ops.fbgemm.is_uvm_tensor(uvm_t)
assert torch.ops.fbgemm.uvm_storage(uvm_t)
# Reference uvm tensor from second cuda device
try:
device_prototype = torch.empty(0, device="cuda:1")
except RuntimeError:
# Skip the tests if there is no "cuda:1" device
return
second_t = torch.ops.fbgemm.uvm_to_device(uvm_t, device_prototype)
assert torch.ops.fbgemm.is_uvm_tensor(second_t)
assert torch.ops.fbgemm.uvm_storage(second_t)
assert second_t.device == device_prototype.device
@skipIfRocm()
@unittest.skipIf(*gpu_unavailable)
@given(
sizes=st.lists(
st.integers(min_value=1, max_value=(1024)), min_size=1, max_size=4
),
uvm_op=st.sampled_from(
[
torch.ops.fbgemm.new_unified_tensor,
torch.ops.fbgemm.new_managed_tensor,
torch.ops.fbgemm.new_vanilla_managed_tensor,
]
),
)
@settings(verbosity=Verbosity.verbose, max_examples=MAX_EXAMPLES, deadline=None)
# pyre-fixme[2]: Parameter must be annotated.
def test_uvm_slice(self, sizes: List[int], uvm_op) -> None:
if uvm_op is torch.ops.fbgemm.new_unified_tensor:
is_host_mapped = False
uvm_t = uvm_op(
torch.empty(0, device="cuda:0", dtype=torch.float),
sizes,
is_host_mapped,
)
else:
uvm_t = uvm_op(torch.empty(0, device="cuda:0", dtype=torch.float), sizes)
assert torch.ops.fbgemm.is_uvm_tensor(uvm_t)
assert torch.ops.fbgemm.uvm_storage(uvm_t)
for i in range(sizes[0]):
uvm_slice = uvm_t[i]
cpu_slice = torch.ops.fbgemm.uvm_to_cpu(uvm_slice)
assert uvm_slice.storage_offset() == cpu_slice.storage_offset()
assert uvm_slice.storage().data_ptr() == uvm_t.storage().data_ptr()
assert cpu_slice.storage().data_ptr() == uvm_t.storage().data_ptr()
assert torch.ops.fbgemm.is_uvm_tensor(uvm_slice)
assert torch.ops.fbgemm.uvm_storage(cpu_slice)
@skipIfRocm()
@unittest.skipIf(*gpu_unavailable)
@given(
sizes=st.lists(
st.integers(min_value=1, max_value=(1024)), min_size=1, max_size=4
),
uvm_op=st.sampled_from(
[
torch.ops.fbgemm.new_unified_tensor,
torch.ops.fbgemm.new_managed_tensor,
torch.ops.fbgemm.new_vanilla_managed_tensor,
]
),
)
@settings(verbosity=Verbosity.verbose, max_examples=MAX_EXAMPLES, deadline=None)
# pyre-fixme[2]: Parameter must be annotated.
def test_uvm_memadviceDontFork(self, sizes: List[int], uvm_op) -> None:
if uvm_op is torch.ops.fbgemm.new_unified_tensor:
is_host_mapped = False
uvm_t = uvm_op(
torch.empty(0, device="cuda:0", dtype=torch.float),
sizes,
is_host_mapped,
)
else:
uvm_t = uvm_op(torch.empty(0, device="cuda:0", dtype=torch.float), sizes)
assert torch.ops.fbgemm.is_uvm_tensor(uvm_t)
assert torch.ops.fbgemm.uvm_storage(uvm_t)
cpu_t = torch.ops.fbgemm.uvm_to_cpu(uvm_t)
torch.ops.fbgemm.uvm_mem_advice_dont_fork(cpu_t)
@unittest.skipIf(*gpu_unavailable)
@given(
sizes=st.lists(
st.integers(min_value=1, max_value=(512)), min_size=1, max_size=3
),
uvm_op=st.sampled_from(
[
torch.ops.fbgemm.new_unified_tensor,
torch.ops.fbgemm.new_managed_tensor,
torch.ops.fbgemm.new_vanilla_managed_tensor,
]
),
)
@settings(verbosity=Verbosity.verbose, max_examples=MAX_EXAMPLES, deadline=None)
# pyre-fixme[2]: Parameter must be annotated.
def test_uvm_to_cpu_clone(self, sizes: List[int], uvm_op) -> None:
if uvm_op is torch.ops.fbgemm.new_unified_tensor:
is_host_mapped = False
uvm_t = uvm_op(
torch.empty(0, device="cuda:0", dtype=torch.float),
sizes,
is_host_mapped,
)
else:
uvm_t = uvm_op(torch.empty(0, device="cuda:0", dtype=torch.float), sizes)
assert torch.ops.fbgemm.is_uvm_tensor(uvm_t)
assert torch.ops.fbgemm.uvm_storage(uvm_t)
cpu_clone = torch.ops.fbgemm.uvm_to_cpu_clone(uvm_t)
assert not torch.ops.fbgemm.is_uvm_tensor(cpu_clone)
assert not torch.ops.fbgemm.uvm_storage(cpu_clone)
@unittest.skipIf(*gpu_unavailable)
@given(
sizes=st.lists(
st.integers(min_value=1, max_value=(512)), min_size=1, max_size=3
),
)
@settings(verbosity=Verbosity.verbose, max_examples=MAX_EXAMPLES, deadline=None)
def test_new_managed_tensor_meta(self, sizes: List[int]) -> None:
cpu_tensor = torch.empty(sizes).to("meta")
cpu_tensor_meta = torch.ops.fbgemm.new_managed_tensor(cpu_tensor, sizes)
assert cpu_tensor.shape == cpu_tensor_meta.shape
if __name__ == "__main__":
unittest.main()
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
# pyre-unsafe
import logging
import math
import random
import unittest
from typing import Optional, Tuple
import fbgemm_gpu
import hypothesis.strategies as st
import numpy as np
import torch
from fbgemm_gpu.split_embedding_configs import (
EmbOptimType as OptimType,
FP8QuantizationConfig,
QuantizationConfig,
SparseType,
)
from fbgemm_gpu.split_embedding_inference_converter import SplitEmbInferenceConverter
from fbgemm_gpu.split_table_batched_embeddings_ops_common import (
EmbeddingLocation,
PoolingMode,
)
from fbgemm_gpu.split_table_batched_embeddings_ops_inference import (
IntNBitTableBatchedEmbeddingBagsCodegen,
)
from fbgemm_gpu.split_table_batched_embeddings_ops_training import (
ComputeDevice,
SplitTableBatchedEmbeddingBagsCodegen,
)
from hypothesis import given, settings, Verbosity
from torch import nn
# pyre-fixme[16]: Module `fbgemm_gpu` has no attribute `open_source`.
open_source: bool = getattr(fbgemm_gpu, "open_source", False)
if open_source:
# pyre-ignore[21]
from test_utils import gpu_available, on_arm_platform
else:
from fbgemm_gpu.test.test_utils import gpu_available, on_arm_platform
EMB_WEIGHT_UNIFORM_INIT_BOUND = 0.000316
MAX_EXAMPLES = 40
def div_round_up(a: int, b: int) -> int:
return int((a + b - 1) // b) * b
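# Illustrative sketch (not part of the original helpers): div_round_up rounds
# `a` up to the next multiple of `b`; the tests below use it to align the
# embedding dimension, e.g. D = 10 with 8-element alignment becomes 16.
assert div_round_up(10, 8) == 16
assert div_round_up(16, 8) == 16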
def to_device(t: torch.Tensor, use_cpu: bool) -> torch.Tensor:
return t.cpu() if use_cpu else t.cuda()
def get_table_batched_offsets_from_dense(
merged_indices: torch.Tensor, use_cpu: bool = False
) -> Tuple[torch.Tensor, torch.Tensor]:
(T, B, L) = merged_indices.size()
lengths = np.ones((T, B)) * L
flat_lengths = lengths.flatten()
return (
to_device(merged_indices.contiguous().view(-1), use_cpu),
to_device(
torch.tensor(([0] + np.cumsum(flat_lengths).tolist())).long(),
use_cpu,
),
)
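# Illustrative sketch (not part of the original helpers): a dense (T, B, L)
# index tensor flattens to T * B * L indices plus T * B + 1 cumulative
# offsets, since every bag here holds exactly L entries.
def _example_table_batched_offsets() -> None:
    dense = torch.arange(2 * 3 * 4).view(2, 3, 4)  # T=2, B=3, L=4
    indices, offsets = get_table_batched_offsets_from_dense(dense, use_cpu=True)
    assert indices.numel() == 2 * 3 * 4
    assert offsets.tolist() == [0, 4, 8, 12, 16, 20, 24]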
class SparseArch(nn.Module):
"""
The testing module with split table batched embedding op
"""
def __init__(
self,
emb_dim,
num_tables,
num_rows,
use_cpu,
) -> None:
super().__init__()
pooling_mode = PoolingMode.SUM
Ds = [emb_dim] * num_tables
Es = [num_rows] * num_tables
device = ComputeDevice.CPU if use_cpu else ComputeDevice.CUDA
loc = EmbeddingLocation.HOST if use_cpu else EmbeddingLocation.DEVICE
self.emb_module = SplitTableBatchedEmbeddingBagsCodegen(
embedding_specs=[
(
E,
D,
loc,
device,
)
for (E, D) in zip(Es, Ds)
],
weights_precision=SparseType.FP32,
optimizer=OptimType.EXACT_SGD,
learning_rate=0.05,
pooling_mode=pooling_mode,
)
self.emb_module.init_embedding_weights_uniform(
-EMB_WEIGHT_UNIFORM_INIT_BOUND, +EMB_WEIGHT_UNIFORM_INIT_BOUND
)
def forward(self, indices, offsets):
return self.emb_module(indices, offsets)
class QuantizedSplitEmbeddingsTest(unittest.TestCase):
@given(
T=st.integers(min_value=1, max_value=10),
D=st.integers(min_value=2, max_value=128),
B=st.integers(min_value=1, max_value=128),
log_E=st.integers(min_value=3, max_value=5),
L=st.integers(min_value=0, max_value=20),
pooling_mode=st.sampled_from(
[
PoolingMode.SUM,
PoolingMode.MEAN,
]
),
quantize_type=st.sampled_from(
[
SparseType.FP32,
SparseType.FP16,
SparseType.FP8,
SparseType.INT8,
SparseType.INT4,
SparseType.INT2,
]
),
use_cpu=st.booleans() if gpu_available else st.just(True),
pruning_ratio=st.sampled_from([None, 0.0]),
)
@settings(verbosity=Verbosity.verbose, max_examples=MAX_EXAMPLES, deadline=None)
def test_quantize_workflow(
self,
T: int,
D: int,
B: int,
log_E: int,
L: int,
pooling_mode: PoolingMode,
quantize_type: SparseType,
pruning_ratio: Optional[float],
use_cpu: bool,
) -> None:
E = int(10**log_E)
Es = [E] * T
        D_alignment = 16 if quantize_type == SparseType.INT2 else 8
D = div_round_up(D, D_alignment)
xs = [torch.randint(low=0, high=e, size=(B, L)) for e in Es]
x = torch.cat([x.view(1, B, L) for x in xs], dim=0)
# indices: T, B, L; offsets: T * B + 1
(indices, offsets) = get_table_batched_offsets_from_dense(x, use_cpu=use_cpu)
sparse_arch = SparseArch(emb_dim=D, num_tables=T, num_rows=E, use_cpu=use_cpu)
quantization_config = QuantizationConfig()
        # Fake-quantize so that the original FP32 weights are exactly
        # representable by INT8 row-wise quantized values
        if quantize_type == SparseType.INT8:
for t in range(T):
sparse_arch.emb_module.split_embedding_weights()[t].data.copy_(
torch.ops.fbgemm.Fused8BitRowwiseQuantizedToFloat(
torch.ops.fbgemm.FloatToFused8BitRowwiseQuantized(
sparse_arch.emb_module.split_embedding_weights()[t].data
)
)
)
        elif quantize_type in (SparseType.INT4, SparseType.INT2):
for t in range(T):
sparse_arch.emb_module.split_embedding_weights()[t].data.copy_(
torch.ops.fbgemm.FusedNBitRowwiseQuantizedSBHalfToFloat(
torch.ops.fbgemm.FloatToFusedNBitRowwiseQuantizedSBHalf(
sparse_arch.emb_module.split_embedding_weights()[t].data,
bit_rate=quantize_type.bit_rate(),
),
bit_rate=quantize_type.bit_rate(),
)
)
elif quantize_type == SparseType.FP8:
quantization_config = FP8QuantizationConfig(random.choice([4, 5]), 7)
for t in range(T):
sparse_arch.emb_module.split_embedding_weights()[t].data.copy_(
torch.ops.fbgemm.HFP8QuantizedToFloat(
torch.ops.fbgemm.FloatToHFP8Quantized(
sparse_arch.emb_module.split_embedding_weights()[t].data,
quantization_config.get("exponent_bits"),
quantization_config.get("exponent_bias"),
quantization_config.get("max_position"),
),
quantization_config.get("exponent_bits"),
quantization_config.get("exponent_bias"),
)
)
emb_out = sparse_arch(indices, offsets) # B, T, D
# Apply the quantization transformations on the model!
split_emb_infer_converter = SplitEmbInferenceConverter(
quantize_type=quantize_type,
pruning_ratio=pruning_ratio,
quantization_config=quantization_config,
)
split_emb_infer_converter.convert_model(sparse_arch)
assert type(sparse_arch.emb_module) is IntNBitTableBatchedEmbeddingBagsCodegen
assert sparse_arch.emb_module.use_cpu == use_cpu
quantized_emb_out = sparse_arch(indices.int(), offsets.int()) # B, T, D
        # Compare the FP32 emb module against the quantized (quantize_type) emb module
torch.testing.assert_close(
emb_out.float().cpu(),
quantized_emb_out.float().cpu(),
atol=1.0e-1,
rtol=1.0e-1,
)
@unittest.skipIf(*on_arm_platform)
@given(
use_cpu=st.booleans() if gpu_available else st.just(True),
use_array_for_index_remapping=st.booleans(),
quantize_type=st.sampled_from(
[
SparseType.FP32,
SparseType.FP16,
SparseType.INT8,
SparseType.INT4,
SparseType.INT2,
]
),
)
@settings(verbosity=Verbosity.verbose, max_examples=MAX_EXAMPLES, deadline=None)
def test_l2_norm_pruning_workflow(
self,
use_cpu: bool,
use_array_for_index_remapping: bool,
quantize_type: SparseType,
) -> None:
D = 128
T = 2
E = 5
indices = torch.Tensor([3, 0, 2, 2, 3, 4, 2]).int()
offsets = torch.Tensor([0, 1, 4, 6, 7]).int()
weights = [
(torch.Tensor([0.4, 0.1, -0.2, 0.2, 0.3]).float().view(E, 1))
* (torch.Tensor([1.0] * E * D).view(E, D)),
(torch.Tensor([-0.8, 0.2, 0.5, -0.1, 0.9]).float().view(E, 1))
* (torch.Tensor([1.0] * E * D).view(E, D)),
]
# Inputs for 3 test cases. Each row is used in one test case.
pruning_ratios = [0.9, 0.5, 0.1]
remapped_indices = [
torch.Tensor([0, 4]).int(),
torch.Tensor([3, 0, 2, 2, 4, 2]).int(),
indices,
]
remapped_offsets = [
torch.Tensor([0, 0, 1, 2, 2]).int(),
torch.Tensor([0, 1, 4, 5, 6]).int(),
offsets,
]
# Start to test.
logging.info("use cpu = {}".format(use_cpu))
for pruning_ratio, remapped_index, remapped_offset in zip(
pruning_ratios, remapped_indices, remapped_offsets
):
logging.info("pruning ratio = {}.".format(pruning_ratio))
sparse_arch = SparseArch(
emb_dim=D, num_tables=T, num_rows=E, use_cpu=use_cpu
)
for idx in range(T):
sparse_arch.emb_module.split_embedding_weights()[idx].copy_(
weights[idx]
)
emb_out = sparse_arch(
to_device(remapped_index, use_cpu), to_device(remapped_offset, use_cpu)
) # B, T, D
# Apply pruning / quantization transformations on the model!
split_emb_infer_converter = SplitEmbInferenceConverter(
quantize_type=quantize_type,
pruning_ratio=pruning_ratio,
use_array_for_index_remapping=use_array_for_index_remapping,
)
split_emb_infer_converter.convert_model(sparse_arch)
assert (
type(sparse_arch.emb_module) is IntNBitTableBatchedEmbeddingBagsCodegen
)
assert sparse_arch.emb_module.use_cpu == use_cpu
pruned_emb_out = sparse_arch(
to_device(indices, use_cpu), to_device(offsets, use_cpu)
) # B, T, D
            # Compare the FP32 emb module fed remapped indices against the quantized, pruned emb module
torch.testing.assert_close(
emb_out.float().cpu(),
pruned_emb_out.float().cpu(),
atol=1.0e-1,
rtol=1.0e-1,
)
@given(
T=st.integers(min_value=1, max_value=10),
D=st.integers(min_value=2, max_value=128),
log_E=st.integers(min_value=3, max_value=5),
pruning_ratio=st.floats(min_value=0.0, max_value=1.0, exclude_max=True),
use_cpu=st.booleans() if gpu_available else st.just(True),
use_array_for_index_remapping=st.booleans(),
)
@settings(verbosity=Verbosity.verbose, max_examples=MAX_EXAMPLES, deadline=None)
def test_pruning_workflow_large_scale(
self,
T: int,
D: int,
log_E: int,
pruning_ratio: Optional[float],
use_cpu: bool,
use_array_for_index_remapping: bool,
) -> None:
E = int(10**log_E)
D_alignment = 8
D = div_round_up(D, D_alignment)
sparse_arch = SparseArch(emb_dim=D, num_tables=T, num_rows=E, use_cpu=use_cpu)
# Make sure that each row has a unique L2 norm.
embedding_weights_before = sparse_arch.emb_module.split_embedding_weights()
for weights in embedding_weights_before:
for i in range(weights.size()[0]):
weights[i].uniform_(i * 0.01, (i + 1) * 0.01)
# Collect #rows before pruning.
num_rows_before = [weight.size()[0] for weight in embedding_weights_before]
# Apply pruning / quantization transformations on the model!
split_emb_infer_converter = SplitEmbInferenceConverter(
quantize_type=SparseType.FP16,
pruning_ratio=pruning_ratio,
use_array_for_index_remapping=use_array_for_index_remapping,
)
split_emb_infer_converter.convert_model(sparse_arch)
embedding_weights_after = sparse_arch.emb_module.split_embedding_weights()
assert type(sparse_arch.emb_module) is IntNBitTableBatchedEmbeddingBagsCodegen
assert sparse_arch.emb_module.use_cpu == use_cpu
# Collect #rows after pruning.
embedding_weights_after = sparse_arch.emb_module.split_embedding_weights()
num_rows_after = [weight[0].size()[0] for weight in embedding_weights_after]
# Check #rows after pruning aligns with the specified pruning ratio.
self.assertEqual(len(num_rows_before), len(num_rows_after))
for before, after in zip(num_rows_before, num_rows_after):
self.assertEqual(
math.ceil(before * (1.0 - pruning_ratio)), # type: ignore
after,
msg="original_num_rows = {}, pruning ratio = {}".format(
before, pruning_ratio
),
)
if __name__ == "__main__":
unittest.main()
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import random
import unittest
from ctypes import c_float, c_int32, cast, POINTER, pointer
from typing import Dict, Tuple
import hypothesis.strategies as st
import numpy as np
import torch
from fbgemm_gpu.split_embedding_configs import SparseType
from hypothesis import assume, given, HealthCheck, settings, Verbosity
from torch import Tensor
try:
# pyre-ignore[21]
from fbgemm_gpu import open_source # noqa: F401
# pyre-ignore[21]
from test_utils import ( # noqa: F401
bytes_to_half_floats,
fused_rowwise_8bit_dequantize_reference,
fused_rowwise_8bit_quantize_reference,
fused_rowwise_nbit_quantize_dequantize_reference,
fused_rowwise_nbit_quantize_reference,
gpu_available,
gpu_unavailable,
symint_vector_unsupported,
)
except Exception:
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops")
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops_cpu")
from fbgemm_gpu.test.test_utils import (
bytes_to_half_floats,
fused_rowwise_8bit_dequantize_reference,
fused_rowwise_8bit_quantize_reference,
fused_rowwise_nbit_quantize_dequantize_reference,
fused_rowwise_nbit_quantize_reference,
gpu_available,
gpu_unavailable,
symint_vector_unsupported,
)
no_long_tests: bool = False
class TestFused8BitRowwiseQuantizationConversion(unittest.TestCase):
# pyre-fixme[56]: Pyre was not able to infer the type of argument
# `hypothesis.strategies.integers($parameter$min_value = 0, $parameter$max_value =
# 100)` to decorator factory `hypothesis.given`.
@given(
nrows=st.integers(min_value=0, max_value=100),
ncols=st.integers(min_value=0, max_value=100),
is_half=st.booleans(),
test_float_or_half_op=st.booleans(),
)
@settings(deadline=10000, suppress_health_check=[HealthCheck.filter_too_much])
def test_quantize_op(
self,
nrows: int,
ncols: int,
is_half: bool,
test_float_or_half_op: bool,
) -> None:
input_data = torch.rand(nrows, ncols).float()
if is_half:
input_data = torch.rand(nrows, ncols).half()
if test_float_or_half_op:
quantized_data = torch.ops.fbgemm.FloatOrHalfToFused8BitRowwiseQuantized(
input_data
)
else:
if not is_half:
quantized_data = torch.ops.fbgemm.FloatToFused8BitRowwiseQuantized(
input_data
)
else:
quantized_data = torch.ops.fbgemm.HalfToFused8BitRowwiseQuantized(
input_data
)
if nrows == 0 or ncols == 0:
assert quantized_data.numel() == nrows * ((ncols + 3) // 4 * 4 + 8)
return
reference = fused_rowwise_8bit_quantize_reference(input_data.float().numpy())
np.testing.assert_array_almost_equal(quantized_data.numpy(), reference)
if gpu_available:
input_data_gpu = input_data.cuda()
if test_float_or_half_op:
quantized_data_gpu = (
torch.ops.fbgemm.FloatOrHalfToFused8BitRowwiseQuantized(
input_data_gpu
)
)
else:
if not is_half:
quantized_data_gpu = (
torch.ops.fbgemm.FloatToFused8BitRowwiseQuantized(
input_data_gpu
)
)
else:
quantized_data_gpu = (
torch.ops.fbgemm.HalfToFused8BitRowwiseQuantized(input_data_gpu)
)
quantized_data_numpy = quantized_data_gpu.cpu().numpy()
ncols_aligned = (ncols + 4 - 1) // 4 * 4
# compare quantized data
np.testing.assert_allclose(
quantized_data_numpy[:, :ncols],
reference[:, :ncols],
# Allow 1 mantissa bit difference (LSB)
atol=1,
)
# compare scales
np.testing.assert_array_almost_equal(
quantized_data_numpy[:, ncols_aligned : ncols_aligned + 4],
reference[:, ncols : ncols + 4],
)
# compare zero points
np.testing.assert_array_equal(
quantized_data_numpy[:, ncols_aligned + 4 : ncols_aligned + 8],
reference[:, ncols + 4 : ncols + 8],
)
# pyre-ignore [56]: Invalid decoration, was not able to infer the type of argument
@given(
nrows=st.integers(min_value=0, max_value=100),
ncols=st.integers(min_value=0, max_value=100),
is_output_half=st.booleans(),
test_float_or_half_op=st.booleans(),
)
@settings(deadline=10000, suppress_health_check=[HealthCheck.filter_too_much])
def test_quantize_and_dequantize_op(
self,
nrows: int,
ncols: int,
is_output_half: bool,
test_float_or_half_op: bool,
) -> None:
num_elem_per_byte = 1
input_data = torch.rand(nrows, ncols).float()
if is_output_half:
input_data = input_data.half()
assume(ncols % (2 * num_elem_per_byte) == 0)
if test_float_or_half_op:
quantized_data = torch.ops.fbgemm.FloatOrHalfToFused8BitRowwiseQuantized(
input_data
)
dequantized_data = torch.ops.fbgemm.Fused8BitRowwiseQuantizedToFloatOrHalf(
quantized_data,
output_dtype=1 if is_output_half else 0,
)
else:
if not is_output_half:
quantized_data = torch.ops.fbgemm.FloatToFused8BitRowwiseQuantized(
input_data
)
dequantized_data = torch.ops.fbgemm.Fused8BitRowwiseQuantizedToFloat(
quantized_data
)
else:
quantized_data = torch.ops.fbgemm.HalfToFused8BitRowwiseQuantized(
input_data
)
dequantized_data = torch.ops.fbgemm.Fused8BitRowwiseQuantizedToHalf(
quantized_data
)
if nrows == 0 or ncols == 0:
assert dequantized_data.numel() == 0
return
reference = torch.from_numpy(
fused_rowwise_8bit_dequantize_reference(quantized_data.numpy())
)
if not is_output_half:
torch.testing.assert_close(dequantized_data.float(), reference.float())
else:
torch.testing.assert_close(dequantized_data.half(), reference.half())
if gpu_available:
input_data_gpu = input_data.cuda()
if test_float_or_half_op:
quantized_data_gpu = (
torch.ops.fbgemm.FloatOrHalfToFused8BitRowwiseQuantized(
input_data_gpu
)
)
dequantized_data_gpu = (
torch.ops.fbgemm.Fused8BitRowwiseQuantizedToFloatOrHalf(
quantized_data_gpu,
output_dtype=1 if is_output_half else 0,
)
)
else:
if not is_output_half:
quantized_data_gpu = (
torch.ops.fbgemm.FloatToFused8BitRowwiseQuantized(
input_data_gpu
)
)
dequantized_data_gpu = (
torch.ops.fbgemm.Fused8BitRowwiseQuantizedToFloat(
quantized_data_gpu
)
)
else:
quantized_data_gpu = (
torch.ops.fbgemm.HalfToFused8BitRowwiseQuantized(input_data_gpu)
)
dequantized_data_gpu = (
torch.ops.fbgemm.Fused8BitRowwiseQuantizedToHalf(
quantized_data_gpu
)
)
dequantized_data_numpy = dequantized_data_gpu.cpu().numpy()
dequantized_data_trimmed = torch.from_numpy(
dequantized_data_numpy[:, :ncols]
)
if not is_output_half:
torch.testing.assert_close(
dequantized_data_trimmed.float(), reference.float()
)
else:
torch.testing.assert_close(
dequantized_data_trimmed.half(), reference.half()
)
@unittest.skipIf(no_long_tests, "Slow test, requires buck build to run.") # noqa
def test_quantize_and_dequantize_op_cuda_large_nrows(self) -> None:
ncols = 256
nrows = 65540
input_data = torch.rand(nrows, ncols).float()
quantized_data = torch.ops.fbgemm.FloatToFused8BitRowwiseQuantized(input_data)
reference = torch.from_numpy(
fused_rowwise_8bit_dequantize_reference(quantized_data.numpy())
)
if gpu_available:
input_data_gpu = input_data.cuda()
quantized_data_gpu = torch.ops.fbgemm.FloatToFused8BitRowwiseQuantized(
input_data_gpu
)
dequantized_data_gpu = torch.ops.fbgemm.Fused8BitRowwiseQuantizedToFloat(
quantized_data_gpu
)
reference = torch.from_numpy(
fused_rowwise_8bit_dequantize_reference(
quantized_data_gpu.cpu().numpy()
)
)
            # compare dequantized data against the reference
torch.testing.assert_close(dequantized_data_gpu.cpu(), reference)
class TestMixedDimInt8DequantizationConversion(unittest.TestCase):
# pyre-fixme[56]: Pyre was not able to infer the type of argument
# Pyre was not able to infer the type of argument `not torch.cuda.is_available()`
# to decorator factory `unittest.skipIf`.
@unittest.skipIf(*gpu_unavailable)
def test_mixed_dim_8bit_dequantize_op_empty(self) -> None:
        # Assert that the kernel returns an empty tensor instead of failing with a CUDA error
input_refs = torch.empty((0, 0), dtype=torch.uint8).cuda()
D_offsets = torch.tensor([0]).cuda()
mixed_dim_dequant_output = (
torch.ops.fbgemm.Fused8BitRowwiseQuantizedToFloatMixedDim(
input_refs, D_offsets, SparseType.FP32.as_int()
)
)
assert mixed_dim_dequant_output.numel() == 0
@unittest.skipIf(*gpu_unavailable)
# pyre-fixme[56]: Pyre was not able to infer the type of argument
# `hypothesis.strategies.integers($parameter$min_value = 0, $parameter$max_value =
# 100)` to decorator factory `hypothesis.given`.
@given(
B=st.integers(min_value=1, max_value=100),
T=st.integers(min_value=1, max_value=100),
output_dtype=st.sampled_from([SparseType.FP32, SparseType.FP16]),
min_dim=st.just(1),
max_dim=st.just(100),
)
@settings(deadline=10000, suppress_health_check=[HealthCheck.filter_too_much])
def test_mixed_dim_8bit_dequantize_op(
self,
B: int,
T: int,
output_dtype: SparseType,
min_dim: int,
max_dim: int,
) -> None:
self.run_mixed_dim_8bit_dequantize_op_test(B, T, output_dtype, min_dim, max_dim)
@unittest.skipIf(*gpu_unavailable)
# pyre-fixme[56]: Pyre was not able to infer the type of argument
# `hypothesis.strategies.integers($parameter$min_value = 0, $parameter$max_value =
# 100)` to decorator factory `hypothesis.given`.
@given(
B=st.integers(min_value=1, max_value=100),
T=st.integers(min_value=1, max_value=100),
output_dtype=st.sampled_from([SparseType.FP32, SparseType.FP16]),
min_dim=st.just(100),
max_dim=st.just(1000),
)
@settings(deadline=10000, suppress_health_check=[HealthCheck.filter_too_much])
def test_mixed_dim_8bit_dequantize_op_large_dims(
self,
B: int,
T: int,
output_dtype: SparseType,
min_dim: int,
max_dim: int,
) -> None:
self.run_mixed_dim_8bit_dequantize_op_test(B, T, output_dtype, min_dim, max_dim)
@unittest.skipIf(*gpu_unavailable)
# pyre-fixme[56]: Pyre was not able to infer the type of argument
# `hypothesis.strategies.integers($parameter$min_value = 0, $parameter$max_value =
# 100)` to decorator factory `hypothesis.given`.
@given(
B=st.just(65540),
T=st.just(5),
output_dtype=st.just(SparseType.FP32),
min_dim=st.just(1),
max_dim=st.just(100),
)
@settings(deadline=10000, suppress_health_check=[HealthCheck.filter_too_much])
def test_mixed_dim_8bit_dequantize_op_large_rows(
self,
B: int,
T: int,
output_dtype: SparseType,
min_dim: int,
max_dim: int,
) -> None:
self.run_mixed_dim_8bit_dequantize_op_test(B, T, output_dtype, min_dim, max_dim)
def run_mixed_dim_8bit_dequantize_op_test(
self,
B: int,
T: int,
output_dtype: SparseType,
min_dim: int,
max_dim: int,
) -> None:
table_dims = [
random.randint(min_dim, max_dim) * 8 for _ in range(T)
] # assume table dimensions are multiples of 8
table_dims_with_qparams = [d + 8 for d in table_dims]
D_offsets = (
torch.cumsum(torch.tensor([0] + table_dims_with_qparams), dim=0)
.to(torch.int)
.cuda()
)
input_refs = [torch.randn((B, d)).cuda() for d in table_dims]
input_refs_int8 = [
torch.ops.fbgemm.FloatToFused8BitRowwiseQuantized(t) for t in input_refs
]
input_data = torch.concat(input_refs_int8, dim=1).contiguous()
mixed_dim_dequant_output = (
torch.ops.fbgemm.Fused8BitRowwiseQuantizedToFloatMixedDim(
input_data, D_offsets, output_dtype.as_int()
)
)
table_output_split = [t + 8 for t in table_dims]
output_ref = []
for output_i8 in torch.split(input_data, table_output_split, dim=1):
output_ref.append(
torch.ops.fbgemm.Fused8BitRowwiseQuantizedToFloat(
output_i8.contiguous()
)
)
output_ref_concat = torch.cat(output_ref, dim=1)
if output_dtype == SparseType.FP16:
output_ref_concat = output_ref_concat.half()
torch.testing.assert_close(output_ref_concat, mixed_dim_dequant_output)
class TestFusedNBitRowwiseQuantizationConversion(unittest.TestCase):
# pyre-ignore [56]: Invalid decoration, was not able to infer the type of argument
@given(
nrows=st.integers(min_value=0, max_value=100),
ncols=st.integers(min_value=0, max_value=100),
bit_rate=st.sampled_from([2, 4]),
is_half=st.booleans(),
test_float_or_half_op=st.booleans(),
)
@settings(deadline=10000, suppress_health_check=[HealthCheck.filter_too_much])
def test_quantize_op(
self,
nrows: int,
ncols: int,
bit_rate: int,
is_half: bool,
test_float_or_half_op: bool,
) -> None:
assert 8 % bit_rate == 0
num_elem_per_byte = 8 // bit_rate
assume(ncols % (2 * num_elem_per_byte) == 0)
input_data = torch.rand(nrows, ncols).float()
if is_half:
input_data = input_data.half()
if test_float_or_half_op:
quantized_data = (
torch.ops.fbgemm.FloatOrHalfToFusedNBitRowwiseQuantizedSBHalf(
input_data, bit_rate
)
)
else:
if not is_half:
quantized_data = (
torch.ops.fbgemm.FloatToFusedNBitRowwiseQuantizedSBHalf(
input_data, bit_rate
)
)
else:
quantized_data = torch.ops.fbgemm.HalfToFusedNBitRowwiseQuantizedSBHalf(
input_data, bit_rate
)
if nrows == 0 or ncols == 0:
assert quantized_data.numel() == nrows * (
(ncols + bit_rate - 1) // bit_rate + 4
)
return
quantized_data = quantized_data.numpy()
reference = fused_rowwise_nbit_quantize_reference(
input_data.float().numpy(), bit_rate
)
interleaved_dim = ncols // num_elem_per_byte
# compare quantized data
np.testing.assert_array_equal(
quantized_data[:, :interleaved_dim], reference[:, :interleaved_dim]
)
# compare scales
np.testing.assert_array_almost_equal(
bytes_to_half_floats(
quantized_data[:, interleaved_dim : interleaved_dim + 2]
),
bytes_to_half_floats(reference[:, interleaved_dim : interleaved_dim + 2]),
)
# compare zero points
np.testing.assert_array_equal(
quantized_data[:, interleaved_dim + 2], reference[:, interleaved_dim + 2]
)
if gpu_available:
input_data_gpu = input_data.cuda()
if test_float_or_half_op:
quantized_data_gpu = (
torch.ops.fbgemm.FloatOrHalfToFusedNBitRowwiseQuantizedSBHalf(
input_data_gpu, bit_rate
)
)
else:
if not is_half:
quantized_data_gpu = (
torch.ops.fbgemm.FloatToFusedNBitRowwiseQuantizedSBHalf(
input_data_gpu, bit_rate
)
)
else:
quantized_data_gpu = (
torch.ops.fbgemm.HalfToFusedNBitRowwiseQuantizedSBHalf(
input_data_gpu, bit_rate
)
)
quantized_data_numpy = quantized_data_gpu.cpu().numpy()
# compare quantized data
np.testing.assert_array_equal(
quantized_data_numpy[:, :ncols], reference[:, :ncols]
)
# pyre-ignore [56]: Invalid decoration, was not able to infer the type of argument
@given(
nrows=st.integers(min_value=0, max_value=100),
ncols=st.integers(min_value=0, max_value=100),
bit_rate=st.sampled_from([2, 4]),
is_output_half=st.booleans(),
test_float_or_half_op=st.booleans(),
)
@settings(deadline=10000, suppress_health_check=[HealthCheck.filter_too_much])
def test_quantize_and_dequantize_op(
self,
nrows: int,
ncols: int,
bit_rate: int,
is_output_half: bool,
test_float_or_half_op: bool,
) -> None:
assert 8 % bit_rate == 0
num_elem_per_byte = 8 // bit_rate
input_data = torch.rand(nrows, ncols).float()
if is_output_half:
input_data = input_data.half()
assume(ncols % (2 * num_elem_per_byte) == 0)
if test_float_or_half_op:
quantized_data = (
torch.ops.fbgemm.FloatOrHalfToFusedNBitRowwiseQuantizedSBHalf(
input_data, bit_rate
)
)
dequantized_data = (
torch.ops.fbgemm.FusedNBitRowwiseQuantizedSBHalfToFloatOrHalf(
quantized_data,
bit_rate,
output_dtype=1 if is_output_half else 0,
)
)
else:
if not is_output_half:
quantized_data = (
torch.ops.fbgemm.FloatToFusedNBitRowwiseQuantizedSBHalf(
input_data, bit_rate
)
)
dequantized_data = (
torch.ops.fbgemm.FusedNBitRowwiseQuantizedSBHalfToFloat(
quantized_data, bit_rate
)
)
else:
quantized_data = torch.ops.fbgemm.HalfToFusedNBitRowwiseQuantizedSBHalf(
input_data, bit_rate
)
dequantized_data = (
torch.ops.fbgemm.FusedNBitRowwiseQuantizedSBHalfToHalf(
quantized_data, bit_rate
)
)
if nrows == 0 or ncols == 0:
assert dequantized_data.numel() == 0
return
if not is_output_half:
reference = torch.from_numpy(
fused_rowwise_nbit_quantize_dequantize_reference(
input_data.float().numpy(), bit_rate
)
)
else:
reference = torch.from_numpy(
fused_rowwise_nbit_quantize_dequantize_reference(
input_data.float().numpy(), bit_rate
)
).half()
torch.testing.assert_close(dequantized_data, reference)
if gpu_available:
input_data_gpu = input_data.cuda()
if test_float_or_half_op:
quantized_data_gpu = (
torch.ops.fbgemm.FloatOrHalfToFusedNBitRowwiseQuantizedSBHalf(
input_data_gpu, bit_rate
)
)
dequantized_data_gpu = (
torch.ops.fbgemm.FusedNBitRowwiseQuantizedSBHalfToFloatOrHalf(
quantized_data_gpu,
bit_rate,
output_dtype=1 if is_output_half else 0,
)
)
else:
if not is_output_half:
quantized_data_gpu = (
torch.ops.fbgemm.FloatToFusedNBitRowwiseQuantizedSBHalf(
input_data_gpu, bit_rate
)
)
dequantized_data_gpu = (
torch.ops.fbgemm.FusedNBitRowwiseQuantizedSBHalfToFloat(
quantized_data_gpu, bit_rate
)
)
else:
quantized_data_gpu = (
torch.ops.fbgemm.HalfToFusedNBitRowwiseQuantizedSBHalf(
input_data_gpu, bit_rate
)
)
dequantized_data_gpu = (
torch.ops.fbgemm.FusedNBitRowwiseQuantizedSBHalfToHalf(
quantized_data_gpu, bit_rate
)
)
            # compare GPU dequantized data against the CPU result
torch.testing.assert_close(
dequantized_data_gpu.cpu().float(), dequantized_data.float()
)
@unittest.skipIf(no_long_tests, "Slow test, requires buck build to run.") # noqa
def test_quantize_and_dequantize_op_cuda_large_nrows(self) -> None:
ncols = 256
bit_rate = 4
nrows = 65540
num_elem_per_byte = 8 // bit_rate
input_data = torch.rand(nrows, ncols).float()
assume(ncols % (2 * num_elem_per_byte) == 0)
reference = torch.from_numpy(
fused_rowwise_nbit_quantize_dequantize_reference(
input_data.numpy(), bit_rate
)
)
if gpu_available:
input_data_gpu = input_data.cuda()
quantized_data_gpu = (
torch.ops.fbgemm.FloatToFusedNBitRowwiseQuantizedSBHalf(
input_data_gpu, bit_rate
)
)
dequantized_data_gpu = (
torch.ops.fbgemm.FusedNBitRowwiseQuantizedSBHalfToFloat(
quantized_data_gpu, bit_rate
)
)
            # compare dequantized data against the reference
torch.testing.assert_close(dequantized_data_gpu.cpu(), reference)
class TestHFP8QuantizationConversion(unittest.TestCase):
    # min_pos is the smallest positive denormal number
    # min_normal_pos is the smallest positive normal number
def _get_hfp8_dynamic_range(
self, ebits: int, mbits: int, bias: int
) -> Tuple[int, int, int]:
max_pos = (1 << ((1 << ebits) - 2 - bias)) * (2 - 2 ** (-mbits))
min_pos = 2 ** (1 - bias - mbits)
min_normal_pos = 2 ** (1 - bias)
return min_pos, max_pos, min_normal_pos
def _get_hfp8_config(
self,
) -> Tuple[int, int, Dict[int, int], Dict[int, int], Dict[int, int]]:
# TODO: set up test for 1-5-2 format
# TODO: parameterize ebits and mbits in unit test
ebits = 4
mbits = 3
max_pos_dict = {}
min_pos_dict = {}
min_normal_pos_dict = {}
for bias in [4, 5, 6, 7]:
min_pos, max_pos, min_normal_pos = self._get_hfp8_dynamic_range(
ebits, mbits, bias
)
min_pos_dict[bias] = min_pos
max_pos_dict[bias] = max_pos
min_normal_pos_dict[bias] = min_normal_pos
return ebits, mbits, min_pos_dict, max_pos_dict, min_normal_pos_dict
def _test_conversion(
self,
input_data: Tensor,
reference_data: Tensor,
ebits: int,
exponent_bias: int,
max_pos: float,
atol: float = 0.0,
rtol: float = 1e-7,
) -> None:
if torch.cuda.is_available():
input_data_gpu = input_data.cuda()
quantized_data_gpu = torch.ops.fbgemm.FloatToHFP8Quantized(
input_data_gpu, ebits, exponent_bias, max_pos
)
dequantized_data_gpu = torch.ops.fbgemm.HFP8QuantizedToFloat(
quantized_data_gpu, ebits, exponent_bias
)
torch.testing.assert_close(
dequantized_data_gpu.cpu(), reference_data, rtol=rtol, atol=atol
)
# pyre-ignore [56]
@given(
nrows=st.integers(min_value=1, max_value=100),
ncols=st.integers(min_value=1, max_value=100),
exponent_bias=st.integers(min_value=4, max_value=7),
)
@settings(deadline=10000, suppress_health_check=[HealthCheck.filter_too_much])
def test_quantize_and_dequantize_op(
self, nrows: int, ncols: int, exponent_bias: int
) -> None:
ebits, mbits, min_pos, max_pos, min_normal_pos = self._get_hfp8_config()
# test positive normal range
        input_data = torch.FloatTensor(nrows, ncols).uniform_(
min_normal_pos[exponent_bias], max_pos[exponent_bias]
)
self._test_conversion(
input_data,
input_data,
ebits,
exponent_bias,
max_pos[exponent_bias],
rtol=(2 ** (-mbits - 1)),
atol=0,
)
# test positive denormal range
        input_data = torch.FloatTensor(nrows, ncols).uniform_(
min_pos[exponent_bias], min_normal_pos[exponent_bias]
)
self._test_conversion(
input_data,
input_data,
ebits,
exponent_bias,
max_pos[exponent_bias],
rtol=0.0,
atol=(2 ** (1 - exponent_bias - mbits)),
)
# test negative normal range
        input_data = torch.FloatTensor(nrows, ncols).uniform_(
-max_pos[exponent_bias], -min_normal_pos[exponent_bias]
)
self._test_conversion(
input_data,
input_data,
ebits,
exponent_bias,
max_pos[exponent_bias],
rtol=(2 ** (-mbits - 1)),
atol=0,
)
# test negative denormal range
        input_data = torch.FloatTensor(nrows, ncols).uniform_(
-min_normal_pos[exponent_bias], -min_pos[exponent_bias]
)
self._test_conversion(
input_data,
input_data,
ebits,
exponent_bias,
max_pos[exponent_bias],
rtol=0.0,
atol=(2 ** (1 - exponent_bias - mbits)),
)
# test positive underflow
        input_data = torch.FloatTensor(nrows, ncols).uniform_(
0, 0.5 * min_pos[exponent_bias]
)
self._test_conversion(
input_data,
input_data.new_full(input_data.shape, 0),
ebits,
exponent_bias,
max_pos[exponent_bias],
)
# test negative underflow
        input_data = torch.FloatTensor(nrows, ncols).uniform_(
-0.5 * min_pos[exponent_bias], 0
)
self._test_conversion(
input_data,
input_data.new_full(input_data.shape, 0),
ebits,
exponent_bias,
max_pos[exponent_bias],
)
# test positive overflow
        input_data = torch.FloatTensor(nrows, ncols).uniform_(
max_pos[exponent_bias], max_pos[exponent_bias] * 2
)
self._test_conversion(
input_data,
input_data.new_full(input_data.shape, max_pos[exponent_bias]),
ebits,
exponent_bias,
max_pos[exponent_bias],
)
# test negative overflow
        input_data = torch.FloatTensor(nrows, ncols).uniform_(
-max_pos[exponent_bias] * 2, -max_pos[exponent_bias]
)
self._test_conversion(
input_data,
input_data.new_full(input_data.shape, -max_pos[exponent_bias]),
ebits,
exponent_bias,
max_pos[exponent_bias],
)
class TestDenseMLPQuantizationConversion(unittest.TestCase):
@unittest.skipIf(*gpu_unavailable)
# pyre-ignore [56]: Invalid decoration, was not able to infer the type of argument
@given(
nrows=st.integers(min_value=0, max_value=100),
ncols=st.integers(min_value=0, max_value=100),
)
@settings(deadline=10000, suppress_health_check=[HealthCheck.filter_too_much])
def test_quantize_op(self, nrows: int, ncols: int) -> None:
ebits = 8
mbits = 7
bias = 127
max_pos = (1 << ((1 << ebits) - 2 - bias)) * (2 - 2 ** (-mbits))
min_pos = 2 ** (1 - bias - mbits)
bounding_box_size = 16
print("MSFP parameters", bounding_box_size, ebits, mbits, bias)
input_data = torch.rand(nrows, ncols).float()
quantized_data = torch.ops.fbgemm.FloatToMSFPQuantized(
input_data.cuda(),
bounding_box_size,
ebits,
mbits,
bias,
min_pos,
max_pos,
)
dequantized_data = torch.ops.fbgemm.MSFPQuantizedToFloat(
quantized_data.cuda(), ebits, mbits, bias
)
torch.testing.assert_close(dequantized_data.cpu(), input_data, rtol=1, atol=0)
class SparseNNOperatorsGPUTest(unittest.TestCase):
# pyre-fixme[56]: Pyre was not able to infer the type of argument
# `hypothesis.strategies.sampled_from(["BF16"])` to decorator factory
# `hypothesis.given`.
@given(
precision=st.just("BF16"),
batch_size=st.integers(min_value=1, max_value=256),
k=st.integers(min_value=2, max_value=2),
n=st.integers(min_value=2, max_value=2),
)
def test_dense_mlp_quantize_ops(
self, precision: str, batch_size: int, k: int, n: int
) -> None:
if precision == "BF16":
input_data = torch.rand((n, k), dtype=torch.float32)
quantized_data = torch.ops.fbgemm.FloatToBfloat16Quantized(input_data)
dequantized_data = torch.ops.fbgemm.Bfloat16QuantizedToFloat(quantized_data)
torch.testing.assert_close(
dequantized_data, input_data, rtol=1e-2, atol=1e-2
)
def bfloat_quantize(x_float: float) -> np.uint16:
bits = cast(pointer(c_float(x_float)), POINTER(c_int32)).contents.value
bits += 1 << 15
bits = bits >> 16
bits = np.uint16(bits)
return bits
def bfloat_dequantize(x_bfloat: np.uint16) -> float:
bits = np.int32(x_bfloat) << 16
return cast(pointer(c_int32(bits)), POINTER(c_float)).contents.value
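# Illustrative sketch (not part of the original helpers): bfloat16 keeps the
# top 16 bits of a float32 (rounding the dropped bits half away from zero for
# positive inputs), so a round trip through the ctypes helpers reproduces a
# value to within about 2**-8 relative error.
def _example_bfloat_roundtrip() -> None:
    x = 1.2345678
    y = bfloat_dequantize(bfloat_quantize(x))
    assert abs(x - y) <= abs(x) * 2.0**-8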
class TestBfloat16QuantizationConversion(unittest.TestCase):
# pyre-fixme[56]: Pyre was not able to infer the type of argument
# `hypothesis.strategies.integers($parameter$min_value = 0, $parameter$max_value =
# 100)` to decorator factory `hypothesis.given`.
@given(
nrows=st.integers(min_value=0, max_value=100),
ncols=st.integers(min_value=0, max_value=100),
)
@settings(deadline=10000, suppress_health_check=[HealthCheck.filter_too_much])
def test_quantize_op(self, nrows: int, ncols: int) -> None:
input_data = torch.rand(nrows, ncols).float()
quantized_data = torch.ops.fbgemm.FloatToBfloat16Quantized(input_data)
if nrows == 0 or ncols == 0:
assert quantized_data.numel() == 0
return
f = np.vectorize(lambda x: bfloat_quantize(x))
reference = f(input_data.numpy())
quantized_data_uint16 = quantized_data.numpy()
quantized_data_uint16.dtype = np.uint16
np.testing.assert_array_almost_equal(quantized_data_uint16, reference)
if torch.cuda.is_available():
input_data_gpu = input_data.cuda()
quantized_data_gpu = torch.ops.fbgemm.FloatToBfloat16Quantized(
input_data_gpu
)
quantized_data_numpy = quantized_data_gpu.cpu().numpy()
quantized_data_numpy.dtype = np.uint16
np.testing.assert_allclose(quantized_data_numpy, reference)
# pyre-fixme[56]: Pyre was not able to infer the type of argument
# `hypothesis.strategies.integers($parameter$min_value = 0, $parameter$max_value =
# 100)` to decorator factory `hypothesis.given`.
@given(
nrows=st.integers(min_value=0, max_value=100),
ncols=st.integers(min_value=0, max_value=100),
)
@settings(deadline=10000, suppress_health_check=[HealthCheck.filter_too_much])
def test_quantize_and_dequantize_op(self, nrows: int, ncols: int) -> None:
input_data = torch.rand(nrows, ncols).float()
quantized_data = torch.ops.fbgemm.FloatToBfloat16Quantized(input_data)
dequantized_data = torch.ops.fbgemm.Bfloat16QuantizedToFloat(quantized_data)
if nrows == 0 or ncols == 0:
assert dequantized_data.numel() == 0
return
f = np.vectorize(lambda x: bfloat_quantize(x))
ref_bfloat16 = f(input_data.numpy())
f = np.vectorize(lambda x: bfloat_dequantize(x))
ref_fp32 = torch.from_numpy(f(ref_bfloat16)).float()
torch.testing.assert_close(dequantized_data, ref_fp32)
if torch.cuda.is_available():
input_data_gpu = input_data.cuda()
quantized_data_gpu = torch.ops.fbgemm.FloatToBfloat16Quantized(
input_data_gpu
)
dequantized_data_gpu = torch.ops.fbgemm.Bfloat16QuantizedToFloat(
quantized_data_gpu
)
            # compare dequantized data against the CPU reference
torch.testing.assert_close(dequantized_data_gpu.cpu(), ref_fp32)
@unittest.skipIf(not torch.cuda.is_available(), "Skip when CUDA is not available")
# pyre-fixme[56]: Pyre was not able to infer the type of argument
# `hypothesis.strategies.sampled_from([(65540, 256), (256, 65540)])` to decorator
# factory `hypothesis.given`.
@given(
ncols_nrows=st.sampled_from([(65540, 256), (256, 65540)]),
)
@settings(deadline=10000, suppress_health_check=[HealthCheck.filter_too_much])
def test_quantize_and_dequantize_op_cuda_large_nrows_bf16(
self, ncols_nrows: Tuple[int, int]
) -> None:
ncols, nrows = ncols_nrows
input_data = torch.rand(nrows, ncols).float()
quantized_data = torch.ops.fbgemm.FloatToBfloat16Quantized(input_data)
dequantized_data = torch.ops.fbgemm.Bfloat16QuantizedToFloat(quantized_data)
if torch.cuda.is_available():
input_data_gpu = input_data.cuda()
quantized_data_gpu = torch.ops.fbgemm.FloatToBfloat16Quantized(
input_data_gpu
)
dequantized_data_gpu = torch.ops.fbgemm.Bfloat16QuantizedToFloat(
quantized_data_gpu
)
# compare quantized data
torch.testing.assert_close(dequantized_data_gpu.cpu(), dequantized_data)
class TestFP8RowwiseQuantizationConversion(unittest.TestCase):
enable_logging: bool = False
def setUp(self) -> None:
self.enable_logging = bool(os.getenv("FBGEMM_GPU_ENABLE_LOGGING", 0))
if self.enable_logging:
logging.info("Enabled logging for TestFP8RowwiseQuantizationConversion")
@unittest.skipIf(*gpu_unavailable)
# pyre-fixme[56]:
@given(
batched=st.booleans(),
bs=st.integers(min_value=1, max_value=100),
m=st.integers(min_value=0, max_value=100),
n=st.integers(min_value=0, max_value=100),
forward=st.booleans(),
given_last_dim=st.booleans(),
dtype=st.sampled_from(
[
torch.float,
torch.half,
torch.bfloat16,
],
),
        # symint_vector is not supported before PyTorch 2.1, so disable compile testing there
test_compile=st.booleans() if symint_vector_unsupported() else st.just(False),
)
@settings(verbosity=Verbosity.verbose, max_examples=10, deadline=None)
def test_quantize_and_dequantize_op_fp8_rowwise(
self,
batched: bool,
bs: int,
m: int,
n: int,
forward: bool,
given_last_dim: bool,
dtype: torch.dtype,
test_compile: bool,
) -> None:
n = n * 4 # need (n % 4 == 0)
input_data = (
torch.rand(bs, m, n, dtype=dtype)
if batched
else torch.rand(bs * m, n, dtype=dtype)
)
input_data_gpu = input_data.cuda()
quantized_data_gpu = torch.ops.fbgemm.FloatToFP8RowwiseQuantized(
input_data_gpu, forward=forward
)
quantize_func = (
torch.compile(
torch.ops.fbgemm.FP8RowwiseQuantizedToFloat,
dynamic=True,
fullgraph=True,
)
if test_compile
else torch.ops.fbgemm.FP8RowwiseQuantizedToFloat
)
if test_compile:
torch._dynamo.mark_dynamic(quantized_data_gpu, 0)
torch._dynamo.mark_dynamic(quantized_data_gpu, 1)
dequantized_data_gpu = quantize_func(
quantized_data_gpu,
forward=forward,
output_dtype=SparseType.FP32.as_int()
if dtype == torch.float
else (
SparseType.FP16.as_int()
if dtype == torch.half
else SparseType.BF16.as_int()
),
)
if m == 0 or n == 0:
assert dequantized_data_gpu.numel() == 0
return
        assert (
            dequantized_data_gpu.dtype == dtype
        ), f"result is {dequantized_data_gpu.dtype} type, but expected {dtype}"
qref = input_data_gpu.float()
dq = dequantized_data_gpu.float()
if self.enable_logging:
# Logging quantization errors
errors = (qref - dq) / (qref + 1e-5)
logging.info(f"max relative error {errors.abs().max()}")
val, idx = torch.topk(errors.flatten().abs(), k=min(10, errors.shape[-1]))
logging.info(f"top-10 errors {val}")
logging.info(f"ref data {input_data_gpu.flatten()}")
logging.info(f"dequantized data {dequantized_data_gpu.flatten()}")
logging.info(f"max relative error {errors.flatten()[idx]}")
torch.testing.assert_close(qref.cpu(), dq.cpu(), rtol=0.1, atol=0.05)
if __name__ == "__main__":
unittest.main()
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
# pyre-ignore-all-errors[56]
import copy
import random
import unittest
import fbgemm_gpu.ssd_split_table_batched_embeddings_ops as ssd_split_table_batched_embeddings_ops
import hypothesis.strategies as st
import numpy as np
import torch
from fbgemm_gpu.split_embedding_configs import SparseType
from fbgemm_gpu.split_embedding_utils import (
b_indices,
fake_quantize_embs,
get_table_batched_offsets_from_dense,
round_up,
)
from fbgemm_gpu.split_table_batched_embeddings_ops_common import PoolingMode
from fbgemm_gpu.split_table_batched_embeddings_ops_inference import (
rounded_row_size_in_bytes,
unpadded_row_size_in_bytes,
)
from hypothesis import given, settings, Verbosity
MAX_EXAMPLES = 40
@unittest.skipIf(not torch.cuda.is_available(), "Skip when CUDA is not available")
class SSDSplitTableBatchedEmbeddingsTest(unittest.TestCase):
def test_ssd(self) -> None:
import tempfile
E = int(1e4)
D = 128
N = 100
indices = torch.as_tensor(np.random.choice(E, replace=False, size=(N,)))
weights = torch.randn(N, D)
output_weights = torch.empty_like(weights)
count = torch.tensor([N])
feature_table_map = list(range(1))
emb = ssd_split_table_batched_embeddings_ops.SSDTableBatchedEmbeddingBags(
embedding_specs=[(E, D)],
feature_table_map=feature_table_map,
ssd_storage_directory=tempfile.mkdtemp(),
cache_sets=1,
ssd_uniform_init_lower=-0.1,
ssd_uniform_init_upper=0.1,
)
emb.ssd_db.get_cuda(indices, output_weights, count)
torch.cuda.synchronize()
assert (output_weights <= 0.1).all().item()
assert (output_weights >= -0.1).all().item()
emb.ssd_db.set_cuda(indices, weights, count, 1)
emb.ssd_db.get_cuda(indices, output_weights, count)
torch.cuda.synchronize()
torch.testing.assert_close(weights, output_weights)
@given(
T=st.integers(min_value=1, max_value=10),
D=st.integers(min_value=2, max_value=128),
B=st.integers(min_value=1, max_value=128),
log_E=st.integers(min_value=3, max_value=5),
L=st.integers(min_value=0, max_value=20),
weighted=st.booleans(),
)
@settings(verbosity=Verbosity.verbose, max_examples=MAX_EXAMPLES, deadline=None)
def test_ssd_forward(
self, T: int, D: int, B: int, log_E: int, L: int, weighted: bool
) -> None:
import tempfile
E = int(10**log_E)
D = D * 4
Ds = [D] * T
Es = [E] * T
feature_table_map = list(range(T))
emb = ssd_split_table_batched_embeddings_ops.SSDTableBatchedEmbeddingBags(
embedding_specs=[(E, D) for (E, D) in zip(Es, Ds)],
feature_table_map=feature_table_map,
ssd_storage_directory=tempfile.mkdtemp(),
cache_sets=max(T * B * L, 1),
ssd_uniform_init_lower=-0.1,
ssd_uniform_init_upper=0.1,
).cuda()
bs = [
torch.nn.EmbeddingBag(E, D, mode="sum", sparse=True).cuda()
for (E, D) in zip(Es, Ds)
]
torch.manual_seed(42)
xs = [torch.randint(low=0, high=e, size=(B, L)).cuda() for e in Es]
xws = [torch.randn(size=(B, L)).cuda() for _ in range(T)]
xws_acc_type = copy.deepcopy(xws)
fs = (
[b_indices(b, x) for (b, x) in zip(bs, xs)]
if not weighted
else [
b_indices(b, x, per_sample_weights=xw.view(-1))
for (b, x, xw) in zip(bs, xs, xws)
]
)
f = torch.cat([f.view(B, -1) for f in fs], dim=1)
for t in range(T):
emb.ssd_db.set_cuda(
torch.arange(t * E, (t + 1) * E).to(torch.int64),
bs[t].weight.cpu(),
torch.as_tensor([E]),
t,
)
x = torch.cat([x.view(1, B, L) for x in xs], dim=0)
xw = torch.cat([xw.view(1, B, L) for xw in xws_acc_type], dim=0)
(indices, offsets) = get_table_batched_offsets_from_dense(x)
fc2 = (
emb(indices.cuda(), offsets.cuda())
if not weighted
else emb(indices.cuda(), offsets.cuda(), xw.contiguous().view(-1).cuda())
)
torch.testing.assert_close(
fc2.float(),
f.float(),
atol=1.0e-5,
rtol=1.0e-5,
)
@given(
T=st.integers(min_value=1, max_value=5),
D=st.integers(min_value=2, max_value=128),
B=st.integers(min_value=1, max_value=128),
log_E=st.integers(min_value=3, max_value=5),
L=st.integers(min_value=0, max_value=20),
weighted=st.booleans(),
)
@settings(verbosity=Verbosity.verbose, max_examples=MAX_EXAMPLES, deadline=None)
def test_ssd_backward_adagrad(
self, T: int, D: int, B: int, log_E: int, L: int, weighted: bool
) -> None:
import tempfile
E = int(10**log_E)
D = D * 4
Ds = [D] * T
Es = [E] * T
lr = 0.5
eps = 0.2
feature_table_map = list(range(T))
emb = ssd_split_table_batched_embeddings_ops.SSDTableBatchedEmbeddingBags(
embedding_specs=[(E, D) for (E, D) in zip(Es, Ds)],
feature_table_map=feature_table_map,
ssd_storage_directory=tempfile.mkdtemp(),
cache_sets=max(T * B * L, 1),
ssd_uniform_init_lower=-0.1,
ssd_uniform_init_upper=0.1,
learning_rate=lr,
eps=eps,
ssd_shards=2,
).cuda()
bs = [
torch.nn.EmbeddingBag(E, D, mode="sum", sparse=True).cuda()
for (E, D) in zip(Es, Ds)
]
torch.manual_seed(42)
xs = [torch.randint(low=0, high=e, size=(B, L)).cuda() for e in Es]
xws = [torch.randn(size=(B, L)).cuda() for _ in range(T)]
xws_acc_type = copy.deepcopy(xws)
fs = (
[b_indices(b, x) for (b, x) in zip(bs, xs)]
if not weighted
else [
b_indices(b, x, per_sample_weights=xw.view(-1))
for (b, x, xw) in zip(bs, xs, xws)
]
)
f = torch.cat([f.view(B, -1) for f in fs], dim=1)
gos = [torch.randn_like(f) for f in fs]
[f.backward(go) for (f, go) in zip(fs, gos)]
for t in range(T):
emb.ssd_db.set_cuda(
torch.arange(t * E, (t + 1) * E).to(torch.int64),
bs[t].weight.cpu(),
torch.as_tensor([E]),
t,
)
x = torch.cat([x.view(1, B, L) for x in xs], dim=0)
xw = torch.cat([xw.view(1, B, L) for xw in xws_acc_type], dim=0)
(indices, offsets) = get_table_batched_offsets_from_dense(x)
fc2 = (
emb(indices.cuda(), offsets.cuda())
if not weighted
else emb(indices.cuda(), offsets.cuda(), xw.contiguous().view(-1).cuda())
)
torch.testing.assert_close(
fc2.float(),
f.float(),
atol=1.0e-5,
rtol=1.0e-5,
)
fc2.backward(torch.cat([go.view(B, -1) for go in gos], dim=1))
split_optimizer_states = [s for (s,) in emb.debug_split_optimizer_states()]
for t in range(T):
# pyre-fixme[16]: Optional type has no attribute `float`.
ref_optimizer_state = bs[t].weight.grad.float().to_dense().pow(2)
torch.testing.assert_close(
split_optimizer_states[t].float(),
ref_optimizer_state.mean(dim=1),
atol=1.0e-4,
rtol=1.0e-4,
)
emb.flush()
for t in range(T):
torch.testing.assert_close(
emb.debug_split_embedding_weights()[t].float().cuda(),
torch.addcdiv(
bs[t].weight.float(),
value=-lr,
tensor1=bs[t].weight.grad.float().to_dense(),
tensor2=split_optimizer_states[t]
.float()
.sqrt_()
.add_(eps)
.view(Es[t], 1),
),
atol=1.0e-4,
rtol=1.0e-4,
)
@given(
T=st.integers(min_value=1, max_value=10),
D=st.integers(min_value=2, max_value=128),
B=st.integers(min_value=1, max_value=128),
log_E=st.integers(min_value=3, max_value=5),
L=st.integers(min_value=0, max_value=20),
weighted=st.booleans(),
)
@settings(verbosity=Verbosity.verbose, max_examples=MAX_EXAMPLES, deadline=None)
def test_ssd_cache(
self, T: int, D: int, B: int, log_E: int, L: int, weighted: bool
) -> None:
# T=2
# D=2
# B=9
# log_E=3
# L=14
# weighted=False
import tempfile
E = int(10**log_E)
D = D * 4
Ds = [D] * T
Es = [E] * T
lr = 0.5
eps = 0.2
C = max(T * B * L, 1)
feature_table_map = list(range(T))
emb = ssd_split_table_batched_embeddings_ops.SSDTableBatchedEmbeddingBags(
embedding_specs=[(E, D) for (E, D) in zip(Es, Ds)],
feature_table_map=feature_table_map,
ssd_storage_directory=tempfile.mkdtemp(),
cache_sets=C,
ssd_uniform_init_lower=-0.1,
ssd_uniform_init_upper=0.1,
learning_rate=lr,
eps=eps,
ssd_shards=2,
).cuda()
bs = [
torch.nn.EmbeddingBag(E, D, mode="sum", sparse=True).cuda()
for (E, D) in zip(Es, Ds)
]
torch.manual_seed(42)
for t in range(T):
emb.ssd_db.set_cuda(
torch.arange(t * E, (t + 1) * E).to(torch.int64),
bs[t].weight.cpu(),
torch.as_tensor([E]),
t,
)
for i in range(10):
xs = [torch.randint(low=0, high=e, size=(B, L)).cuda() for e in Es]
x = torch.cat([x.view(1, B, L) for x in xs], dim=0)
xws = [torch.randn(size=(B, L)).cuda() for _ in range(T)]
xws_acc_type = copy.deepcopy(xws)
xw = torch.cat([xw.view(1, B, L) for xw in xws_acc_type], dim=0)
(indices, offsets) = get_table_batched_offsets_from_dense(x)
(indices, offsets) = indices.cuda(), offsets.cuda()
assert emb.timestep == i
emb.prefetch(indices, offsets)
linear_cache_indices = torch.ops.fbgemm.linearize_cache_indices(
emb.hash_size_cumsum,
indices,
offsets,
)
            # Verify that prefetching the same batch again triggers no further cache actions.
(
_,
_,
_,
actions_count_gpu,
) = torch.ops.fbgemm.ssd_cache_populate_actions( # noqa
linear_cache_indices,
emb.total_hash_size,
emb.lxu_cache_state,
emb.timestep,
0, # prefetch_dist
emb.lru_state,
)
assert actions_count_gpu.item() == 0
lxu_cache_locations = torch.ops.fbgemm.lxu_cache_lookup(
linear_cache_indices,
emb.lxu_cache_state,
emb.hash_size_cumsum[-1],
)
lru_state_cpu = emb.lru_state.cpu()
lxu_cache_state_cpu = emb.lxu_cache_state.cpu()
NOT_FOUND = np.iinfo(np.int32).max
ASSOC = 32
for loc, linear_idx in zip(
lxu_cache_locations.cpu().numpy().tolist(),
linear_cache_indices.cpu().numpy().tolist(),
):
assert loc != NOT_FOUND
                # every lookup must hit; check that the cache entry is consistent with the LRU state
loc_set = loc // ASSOC
loc_slot = loc % ASSOC
assert lru_state_cpu[loc_set, loc_slot] == emb.timestep
assert lxu_cache_state_cpu[loc_set, loc_slot] == linear_idx
fs = (
[b_indices(b, x) for (b, x) in zip(bs, xs)]
if not weighted
else [
b_indices(b, x, per_sample_weights=xw.view(-1))
for (b, x, xw) in zip(bs, xs, xws)
]
)
f = torch.cat([f.view(B, -1) for f in fs], dim=1)
fc2 = (
emb(indices, offsets)
if not weighted
else emb(indices, offsets, xw.contiguous().view(-1).cuda())
)
torch.testing.assert_close(
fc2.float(),
f.float(),
atol=1.0e-5,
rtol=1.0e-5,
)
@unittest.skipIf(not torch.cuda.is_available(), "Skip when CUDA is not available")
class SSDIntNBitTableBatchedEmbeddingsTest(unittest.TestCase):
def test_nbit_ssd(self) -> None:
import tempfile
E = int(1e4)
D = 128
N = 100
indices = torch.as_tensor(np.random.choice(E, replace=False, size=(N,)))
weights = torch.empty(N, D, dtype=torch.uint8)
output_weights = torch.empty_like(weights)
count = torch.tensor([N])
feature_table_map = list(range(1))
emb = (
ssd_split_table_batched_embeddings_ops.SSDIntNBitTableBatchedEmbeddingBags(
embedding_specs=[("", E, D, SparseType.FP32)],
feature_table_map=feature_table_map,
ssd_storage_directory=tempfile.mkdtemp(),
cache_sets=1,
)
)
emb.ssd_db.get_cuda(indices, output_weights, count)
torch.cuda.synchronize()
emb.ssd_db.set_cuda(indices, weights, count, 1)
emb.ssd_db.get_cuda(indices, output_weights, count)
torch.cuda.synchronize()
torch.testing.assert_close(weights, output_weights)
@given(
T=st.integers(min_value=1, max_value=10),
D=st.integers(min_value=2, max_value=128),
B=st.integers(min_value=1, max_value=128),
log_E=st.integers(min_value=3, max_value=5),
L=st.integers(min_value=0, max_value=20),
# FIXME: Disable positional weight due to numerical issues.
weighted=st.just(False),
weights_ty=st.sampled_from(
[
SparseType.FP32,
SparseType.FP16,
SparseType.INT8,
SparseType.INT4,
SparseType.INT2,
]
),
mixed_weights_ty=st.booleans(),
)
@settings(verbosity=Verbosity.verbose, max_examples=MAX_EXAMPLES, deadline=None)
def test_nbit_ssd_forward(
self,
T: int,
D: int,
B: int,
log_E: int,
L: int,
weighted: bool,
weights_ty: SparseType,
mixed_weights_ty: bool,
) -> None:
import tempfile
if not mixed_weights_ty:
weights_ty_list = [weights_ty] * T
else:
weights_ty_list = [
random.choice(
[
SparseType.FP32,
SparseType.FP16,
SparseType.INT8,
SparseType.INT4,
SparseType.INT2,
]
)
for _ in range(T)
]
D_alignment = max(
1 if ty.bit_rate() % 8 == 0 else int(8 / ty.bit_rate())
for ty in weights_ty_list
)
D = round_up(D, D_alignment)
E = int(10**log_E)
Ds = [D] * T
Es = [E] * T
row_alignment = 16
feature_table_map = list(range(T))
emb = (
ssd_split_table_batched_embeddings_ops.SSDIntNBitTableBatchedEmbeddingBags(
embedding_specs=[
("", E, D, W_TY) for (E, D, W_TY) in zip(Es, Ds, weights_ty_list)
],
feature_table_map=feature_table_map,
ssd_storage_directory=tempfile.mkdtemp(),
cache_sets=max(T * B * L, 1),
ssd_uniform_init_lower=-0.1,
ssd_uniform_init_upper=0.1,
pooling_mode=PoolingMode.SUM,
).cuda()
)
        # NOTE: uncomment to check that the module is TorchScript-compatible:
        # emb = torch.jit.script(emb)
bs = [
torch.nn.EmbeddingBag(E, D, mode="sum", sparse=True).cuda()
for (E, D) in zip(Es, Ds)
]
torch.manual_seed(42)
xs = [torch.randint(low=0, high=e, size=(B, L)).cuda() for e in Es]
xws = [torch.randn(size=(B, L)).cuda() for _ in range(T)]
xws_acc_type = copy.deepcopy(xws)
for t in range(T):
(weights, scale_shift) = emb.split_embedding_weights()[t]
if scale_shift is not None:
(E, R) = scale_shift.shape
self.assertEqual(R, 4)
scales = np.random.uniform(0.1, 1, size=(E,)).astype(np.float16)
shifts = np.random.uniform(-2, 2, size=(E,)).astype(np.float16)
scale_shift[:, :] = torch.tensor(
np.stack([scales, shifts], axis=1).astype(np.float16).view(np.uint8)
)
D_bytes = rounded_row_size_in_bytes(
Ds[t], weights_ty_list[t], row_alignment
)
copy_byte_tensor = torch.empty([E, D_bytes], dtype=torch.uint8)
fake_quantize_embs(
weights,
scale_shift,
bs[t].weight.detach(),
weights_ty_list[t],
use_cpu=False,
)
if weights_ty_list[t] in [SparseType.FP32, SparseType.FP16, SparseType.FP8]:
copy_byte_tensor[
:,
: unpadded_row_size_in_bytes(Ds[t], weights_ty_list[t]),
] = weights # q_weights
else:
copy_byte_tensor[
:,
emb.scale_bias_size_in_bytes : unpadded_row_size_in_bytes(
Ds[t], weights_ty_list[t]
),
] = weights # q_weights
copy_byte_tensor[
:, : emb.scale_bias_size_in_bytes
] = scale_shift # q_scale_shift
emb.ssd_db.set_cuda(
torch.arange(t * E, (t + 1) * E).to(torch.int64),
copy_byte_tensor,
torch.as_tensor([E]),
t,
)
torch.cuda.synchronize()
fs = (
[b_indices(b, x) for (b, x) in zip(bs, xs)]
if not weighted
else [
b_indices(b, x, per_sample_weights=xw.view(-1))
for (b, x, xw) in zip(bs, xs, xws)
]
)
f = torch.cat([f.view(B, -1) for f in fs], dim=1)
x = torch.cat([x.view(1, B, L) for x in xs], dim=0)
xw = torch.cat([xw.view(1, B, L) for xw in xws_acc_type], dim=0)
(indices, offsets) = get_table_batched_offsets_from_dense(x)
fc2 = (
emb(indices.cuda().int(), offsets.cuda().int())
if not weighted
else emb(
indices.cuda().int(),
offsets.cuda().int(),
xw.contiguous().view(-1).cuda(),
)
)
torch.testing.assert_close(
fc2.float(),
f.float(),
atol=1.0e-2,
rtol=1.0e-2,
equal_nan=True,
)
@given(
T=st.integers(min_value=1, max_value=10),
D=st.integers(min_value=2, max_value=128),
B=st.integers(min_value=1, max_value=128),
log_E=st.integers(min_value=3, max_value=5),
L=st.integers(min_value=0, max_value=20),
weighted=st.booleans(),
)
@settings(verbosity=Verbosity.verbose, max_examples=MAX_EXAMPLES, deadline=None)
def test_nbit_ssd_cache(
self, T: int, D: int, B: int, log_E: int, L: int, weighted: bool
) -> None:
import tempfile
weights_ty = random.choice(
[
SparseType.FP32,
SparseType.FP16,
SparseType.INT8,
SparseType.INT4,
SparseType.INT2,
]
)
D_alignment = (
1 if weights_ty.bit_rate() % 8 == 0 else int(8 / weights_ty.bit_rate())
)
D = round_up(D, D_alignment)
E = int(10**log_E)
Ds = [D] * T
Es = [E] * T
weights_ty_list = [weights_ty] * T
C = max(T * B * L, 1)
row_alignment = 16
feature_table_map = list(range(T))
emb = (
ssd_split_table_batched_embeddings_ops.SSDIntNBitTableBatchedEmbeddingBags(
embedding_specs=[
("", E, D, W_TY) for (E, D, W_TY) in zip(Es, Ds, weights_ty_list)
],
feature_table_map=feature_table_map,
ssd_storage_directory=tempfile.mkdtemp(),
cache_sets=C,
ssd_uniform_init_lower=-0.1,
ssd_uniform_init_upper=0.1,
ssd_shards=2,
pooling_mode=PoolingMode.SUM,
).cuda()
)
        # NOTE: uncomment to check that the module is TorchScript-compatible:
        # emb = torch.jit.script(emb)
bs = [
torch.nn.EmbeddingBag(E, D, mode="sum", sparse=True).cuda()
for (E, D) in zip(Es, Ds)
]
torch.manual_seed(42)
for t in range(T):
(weights, scale_shift) = emb.split_embedding_weights()[t]
if scale_shift is not None:
(E, R) = scale_shift.shape
self.assertEqual(R, 4)
if weights_ty_list[t] == SparseType.INT2:
scales = np.random.uniform(0.1, 1, size=(E,)).astype(np.float16)
shifts = np.random.uniform(-2, 2, size=(E,)).astype(np.float16)
if weights_ty_list[t] == SparseType.INT4:
scales = np.random.uniform(0.01, 0.1, size=(E,)).astype(np.float16)
shifts = np.random.uniform(-2, 2, size=(E,)).astype(np.float16)
if weights_ty_list[t] == SparseType.INT8:
scales = np.random.uniform(0.001, 0.01, size=(E,)).astype(
np.float16
)
shifts = np.random.uniform(-2, 2, size=(E,)).astype(np.float16)
scale_shift[:, :] = torch.tensor(
np.stack([scales, shifts], axis=1).astype(np.float16).view(np.uint8)
)
D_bytes = rounded_row_size_in_bytes(
Ds[t], weights_ty_list[t], row_alignment
)
copy_byte_tensor = torch.empty([E, D_bytes], dtype=torch.uint8)
fake_quantize_embs(
weights,
scale_shift,
bs[t].weight.detach(),
weights_ty_list[t],
use_cpu=False,
)
if weights_ty_list[t] in [SparseType.FP32, SparseType.FP16, SparseType.FP8]:
copy_byte_tensor[
:,
: unpadded_row_size_in_bytes(Ds[t], weights_ty_list[t]),
] = weights # q_weights
else:
copy_byte_tensor[
:,
emb.scale_bias_size_in_bytes : unpadded_row_size_in_bytes(
Ds[t], weights_ty_list[t]
),
] = weights # q_weights
copy_byte_tensor[
:, : emb.scale_bias_size_in_bytes
] = scale_shift # q_scale_shift
emb.ssd_db.set_cuda(
torch.arange(t * E, (t + 1) * E).to(torch.int64),
copy_byte_tensor,
torch.as_tensor([E]),
t,
)
torch.cuda.synchronize()
for i in range(10):
xs = [torch.randint(low=0, high=e, size=(B, L)).cuda() for e in Es]
x = torch.cat([x.view(1, B, L) for x in xs], dim=0)
xws = [torch.randn(size=(B, L)).cuda() for _ in range(T)]
xws_acc_type = copy.deepcopy(xws)
xw = torch.cat([xw.view(1, B, L) for xw in xws_acc_type], dim=0)
(indices, offsets) = get_table_batched_offsets_from_dense(x)
(indices, offsets) = indices.cuda(), offsets.cuda()
assert emb.timestep_counter.get() == i
emb.prefetch(indices, offsets)
linear_cache_indices = torch.ops.fbgemm.linearize_cache_indices(
emb.hash_size_cumsum,
indices,
offsets,
)
            # Verify that prefetching the same batch again triggers no further cache actions.
(
_,
_,
_,
actions_count_gpu,
) = torch.ops.fbgemm.ssd_cache_populate_actions( # noqa
linear_cache_indices,
emb.total_hash_size,
emb.lxu_cache_state,
emb.timestep_counter.get(),
0, # prefetch_dist
emb.lru_state,
)
assert actions_count_gpu.item() == 0
lxu_cache_locations = torch.ops.fbgemm.lxu_cache_lookup(
linear_cache_indices,
emb.lxu_cache_state,
emb.hash_size_cumsum[-1],
)
lru_state_cpu = emb.lru_state.cpu()
lxu_cache_state_cpu = emb.lxu_cache_state.cpu()
NOT_FOUND = np.iinfo(np.int32).max
ASSOC = 32
for loc, linear_idx in zip(
lxu_cache_locations.cpu().numpy().tolist(),
linear_cache_indices.cpu().numpy().tolist(),
):
assert loc != NOT_FOUND
                # every lookup must hit; check that the cache entry is consistent with the LRU state
loc_set = loc // ASSOC
loc_slot = loc % ASSOC
assert lru_state_cpu[loc_set, loc_slot] == emb.timestep_counter.get()
assert lxu_cache_state_cpu[loc_set, loc_slot] == linear_idx
fs = (
[b_indices(b, x) for (b, x) in zip(bs, xs)]
if not weighted
else [
b_indices(b, x, per_sample_weights=xw.view(-1))
for (b, x, xw) in zip(bs, xs, xws)
]
)
f = torch.cat([f.view(B, -1) for f in fs], dim=1)
fc2 = (
emb(indices.cuda().int(), offsets.cuda().int())
if not weighted
else emb(
indices.cuda().int(),
offsets.cuda().int(),
xw.contiguous().view(-1).cuda(),
)
)
torch.testing.assert_close(
fc2.float(),
f.float(),
atol=1.0e-2,
rtol=1.0e-2,
)
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
# pyre-unsafe
import unittest
from typing import List, Optional, Tuple
import torch
from hypothesis import given, settings
try:
# pyre-ignore[21]
from fbgemm_gpu import open_source # noqa: F401
# pyre-ignore[21]
from test_utils import cpu_and_maybe_gpu
except Exception:
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:input_combine")
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:input_combine_cpu")
from fbgemm_gpu.test.test_utils import cpu_and_maybe_gpu
DEFAULT_DEVICE = torch.device("cpu")
class TBEInputPrepareReference(torch.nn.Module):
def __init__(self, include_last_offsets: List[bool]) -> None:
super().__init__()
self.include_last_offsets = include_last_offsets
def forward( # noqa C901
self,
indices_list: List[torch.Tensor],
offsets_list: List[torch.Tensor],
per_sample_weights_list: List[torch.Tensor],
batch_size: Optional[int] = None,
) -> Tuple[torch.Tensor, torch.Tensor, Optional[torch.Tensor]]:
size = 0
assert len(indices_list) > 0
assert len(indices_list) == len(offsets_list)
assert len(indices_list) == len(per_sample_weights_list)
assert len(indices_list) == len(self.include_last_offsets)
for i in range(len(self.include_last_offsets)):
size += indices_list[i].size(0)
assert indices_list[i].dim() == 1
assert offsets_list[i].dim() == 1
if per_sample_weights_list[i].numel() > 0:
assert per_sample_weights_list[i].dim() == 1
assert indices_list[i].numel() == per_sample_weights_list[i].numel()
combined_indices = torch.empty(
size,
dtype=torch.int32,
device=indices_list[0].device,
)
torch.cat(indices_list, out=combined_indices)
offsets_starts = torch.zeros(
[len(offsets_list) + 1],
dtype=offsets_list[0].dtype,
device=offsets_list[0].device,
)
offsets_accs = torch.zeros(
[len(offsets_list) + 1],
dtype=offsets_list[0].dtype,
device=offsets_list[0].device,
)
for i, include_last_offset in enumerate(self.include_last_offsets):
if include_last_offset:
offsets_starts[i + 1] = offsets_starts[i] + offsets_list[i].size(0) - 1
else:
offsets_starts[i + 1] = offsets_starts[i] + offsets_list[i].size(0)
offsets_accs[i + 1] = offsets_accs[i] + indices_list[i].size(0)
assert offsets_accs[-1] == combined_indices.size(0)
combined_offsets_size: List[int] = (
[int(offsets_starts[-1].item()) + 1]
if batch_size is None
else [batch_size * len(offsets_list) + 1]
)
combined_offsets = torch.zeros(
combined_offsets_size,
dtype=torch.int32,
device=offsets_list[0].device,
)
if batch_size is None:
for i in range(len(self.include_last_offsets)):
combined_offsets[offsets_starts[i] : offsets_starts[i + 1]] = (
offsets_list[i][: offsets_starts[i + 1] - offsets_starts[i]]
+ offsets_accs[i]
)
else:
for i in range(len(self.include_last_offsets)):
cur_start = batch_size * i
combined_offsets[
cur_start : cur_start + offsets_starts[i + 1] - offsets_starts[i]
] = (
offsets_list[i][: offsets_starts[i + 1] - offsets_starts[i]]
+ offsets_accs[i]
)
cur_start = cur_start + offsets_starts[i + 1] - offsets_starts[i]
for j in range(batch_size - offsets_starts[i + 1] + offsets_starts[i]):
combined_offsets[cur_start + j] = (
indices_list[i].numel() + offsets_accs[i]
)
combined_offsets[-1] = offsets_accs[-1]
per_sample_weights: Optional[torch.Tensor] = None
for i in range(len(self.include_last_offsets)):
if per_sample_weights_list[i].size(0) > 0:
per_sample_weights = torch.ones(
combined_indices.size(0),
dtype=per_sample_weights_list[i].dtype,
device=per_sample_weights_list[i].device,
)
break
if per_sample_weights is not None:
for i in range(len(self.include_last_offsets)):
if per_sample_weights_list[i].size(0) > 0:
per_sample_weights[
offsets_accs[i] : offsets_accs[i + 1]
] = per_sample_weights_list[i][:]
# indices and offsets are required to be int32 for TBE
return combined_indices, combined_offsets, per_sample_weights
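# Worked example of the reference combine above (using the same inputs as
# InputCombineTest._get_inputs below): indices [1, 2, 3] with offsets [0, 2]
# (last offset not included) plus indices [1, 2, 3, 4] with offsets [0, 1, 4]
# (last offset included) combine into int32 indices [1, 2, 3, 1, 2, 3, 4] and
# int32 offsets [0, 2, 3, 4, 7].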
class InputCombineTest(unittest.TestCase):
def _get_inputs(self, dtypes, device=DEFAULT_DEVICE):
indices_list = [
torch.tensor([1, 2, 3], dtype=dtypes[0], device=device),
torch.tensor([1, 2, 3, 4], dtype=dtypes[1], device=device),
]
offsets_list = [
torch.tensor([0, 2], dtype=dtypes[0], device=device),
torch.tensor([0, 1, 4], dtype=dtypes[1], device=device),
]
include_last_offsets = [False, True]
per_sample_weights = [
torch.tensor([1, 2, 1], dtype=torch.float, device=device),
torch.tensor([1, 2, 1, 3], dtype=torch.float, device=device),
]
empty_per_sample_weights = [
torch.tensor([], dtype=torch.float, device=device),
torch.tensor([], dtype=torch.float, device=device),
]
return (
indices_list,
offsets_list,
per_sample_weights,
empty_per_sample_weights,
include_last_offsets,
)
def _run_test(self, dtypes) -> None:
(
indices_list,
offsets_list,
per_sample_weights,
empty_per_sample_weights,
include_last_offsets,
) = self._get_inputs(dtypes)
ref_mod = TBEInputPrepareReference(include_last_offsets)
outputs = torch.ops.fbgemm.tbe_input_combine(
indices_list,
offsets_list,
per_sample_weights,
torch.BoolTensor(include_last_offsets),
)
ref_outputs = ref_mod(indices_list, offsets_list, per_sample_weights)
for i, j in zip(outputs, ref_outputs):
torch.testing.assert_close(i, j)
self.assertTrue(outputs[0].dtype == torch.int32)
self.assertTrue(outputs[1].dtype == torch.int32)
outputs = torch.ops.fbgemm.tbe_input_combine(
indices_list,
offsets_list,
empty_per_sample_weights,
torch.BoolTensor(include_last_offsets),
)
ref_outputs = ref_mod(indices_list, offsets_list, empty_per_sample_weights)
for i, j in zip(outputs[:-1], ref_outputs[:-1]):
torch.testing.assert_close(i, j)
self.assertTrue(j.dtype == torch.int32)
self.assertTrue(outputs[0].dtype == torch.int32)
self.assertTrue(outputs[1].dtype == torch.int32)
self.assertTrue(outputs[-1].size(0) == 0)
def _run_padding_fused_test(self, dtypes, batch_size) -> None:
(
indices_list,
offsets_list,
per_sample_weights,
empty_per_sample_weights,
include_last_offsets,
) = self._get_inputs(dtypes)
ref_mod = TBEInputPrepareReference(include_last_offsets)
outputs = torch.ops.fbgemm.padding_fused_tbe_input_combine(
indices_list,
offsets_list,
per_sample_weights,
torch.BoolTensor(include_last_offsets),
batch_size,
)
ref_outputs = ref_mod(
indices_list, offsets_list, per_sample_weights, batch_size
)
for i, j in zip(outputs, ref_outputs):
torch.testing.assert_close(i, j)
self.assertTrue(outputs[0].dtype == torch.int32)
self.assertTrue(outputs[1].dtype == torch.int32)
outputs = torch.ops.fbgemm.padding_fused_tbe_input_combine(
indices_list,
offsets_list,
empty_per_sample_weights,
torch.BoolTensor(include_last_offsets),
batch_size,
)
ref_outputs = ref_mod(
indices_list, offsets_list, empty_per_sample_weights, batch_size
)
for i, j in zip(outputs[:-1], ref_outputs[:-1]):
torch.testing.assert_close(i, j)
self.assertTrue(j.dtype == torch.int32)
self.assertTrue(outputs[0].dtype == torch.int32)
self.assertTrue(outputs[1].dtype == torch.int32)
self.assertTrue(outputs[-1].size(0) == 0)
def _offsets_to_lengths(
self, offsets, indices, include_last_offsets, device=DEFAULT_DEVICE
):
if include_last_offsets:
offsets_complete = offsets
else:
offsets_complete = torch.cat(
[
offsets,
torch.tensor([indices.numel()], dtype=offsets.dtype, device=device),
]
)
return offsets_complete[1:] - offsets_complete[:-1]
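    # Example of the conversion above (values mirror _get_inputs): offsets [0, 1, 4]
    # with include_last_offsets=True become lengths [1, 3]; offsets [0, 2] over 3
    # indices with include_last_offsets=False become lengths [2, 1].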
def _run_test_with_length(self, dtypes, device=DEFAULT_DEVICE) -> None:
(
indices_list,
offsets_list,
per_sample_weights,
empty_per_sample_weights,
include_last_offsets,
) = self._get_inputs(dtypes, device=device)
ref_mod = TBEInputPrepareReference(include_last_offsets)
lengths_list = [
self._offsets_to_lengths(
offsets, indices, include_last_offsets, device=device
)
for offsets, indices, include_last_offsets in zip(
offsets_list, indices_list, include_last_offsets
)
]
outputs = torch.ops.fbgemm.tbe_input_combine_with_length(
indices_list, lengths_list, per_sample_weights
)
ref_outputs = ref_mod(indices_list, offsets_list, per_sample_weights)
# indices
self.assertTrue(ref_outputs[0].allclose(outputs[0]))
# per sample weights
self.assertTrue(ref_outputs[2].allclose(outputs[2]))
ref_lengths = self._offsets_to_lengths(ref_outputs[1], ref_outputs[0], True)
self.assertTrue(ref_lengths.allclose(outputs[1]))
def _run_padding_fused_test_with_length(self, dtypes, batch_size) -> None:
(
indices_list,
offsets_list,
per_sample_weights,
empty_per_sample_weights,
include_last_offsets,
) = self._get_inputs(dtypes)
ref_mod = TBEInputPrepareReference(include_last_offsets)
lengths_list = [
self._offsets_to_lengths(offsets, indices, include_last_offsets)
for offsets, indices, include_last_offsets in zip(
offsets_list, indices_list, include_last_offsets
)
]
outputs = torch.ops.fbgemm.padding_fused_tbe_input_combine_with_length(
indices_list,
lengths_list,
per_sample_weights,
batch_size,
)
ref_outputs = ref_mod(
indices_list, offsets_list, per_sample_weights, batch_size
)
# indices
self.assertTrue(ref_outputs[0].allclose(outputs[0]))
# per sample weights
self.assertTrue(ref_outputs[2].allclose(outputs[2]))
ref_lengths = self._offsets_to_lengths(ref_outputs[1], ref_outputs[0], True)
self.assertTrue(ref_lengths.allclose(outputs[1]))
def test_input_combine_int64(self) -> None:
self._run_test((torch.int64, torch.int64))
    def test_input_combine_int32(self) -> None:
        self._run_test((torch.int32, torch.int32))
def test_input_combined_mix(self) -> None:
self._run_test((torch.int64, torch.int32))
@given(device=cpu_and_maybe_gpu())
@settings(deadline=None)
def test_input_combine_int64_with_length(self, device: torch.device) -> None:
self._run_test_with_length((torch.int64, torch.int64), device=device)
@given(device=cpu_and_maybe_gpu())
@settings(deadline=None)
def test_input_combine_int32_with_length(self, device: torch.device) -> None:
self._run_test_with_length((torch.int32, torch.int32), device=device)
@given(device=cpu_and_maybe_gpu())
@settings(deadline=None)
def test_input_combine_mix_with_length(self, device: torch.device) -> None:
self._run_test_with_length((torch.int64, torch.int32), device=device)
def test_padding_fused_input_combine_int64(self) -> None:
self._run_padding_fused_test((torch.int64, torch.int64), 64)
def test_padding_fused_input_combine_int32(self) -> None:
self._run_padding_fused_test((torch.int32, torch.int32), 64)
def test_padding_fused_input_combined_mix(self) -> None:
self._run_padding_fused_test((torch.int64, torch.int32), 64)
def test_padding_fused_input_combine_int64_with_length(self) -> None:
self._run_padding_fused_test_with_length((torch.int64, torch.int64), 64)
def test_padding_fused_input_combine_int32_with_length(self) -> None:
self._run_padding_fused_test_with_length((torch.int32, torch.int32), 64)
def test_padding_fused_input_combined_mix_with_length(self) -> None:
self._run_padding_fused_test_with_length((torch.int64, torch.int32), 64)
if __name__ == "__main__":
unittest.main()
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
# pyre-ignore-all-errors[56]
import contextlib
import functools
import itertools
import logging
import random
import unittest
from itertools import accumulate
from typing import Any, Callable, Dict, List, Optional, Tuple, Type, Union
import hypothesis.strategies as st
import numpy as np
import torch
from hypothesis import given, settings, Verbosity
try:
# pyre-ignore[21]
from fbgemm_gpu import open_source # noqa: F401
# pyre-ignore[21]
from test_utils import gpu_available, gpu_unavailable, skipIfRocm
except Exception:
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops")
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops_cpu")
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu/codegen:index_select_ops")
from fbgemm_gpu.test.test_utils import gpu_available, gpu_unavailable, skipIfRocm
def unbucketize_indices_value(
bucketized_indices: torch.Tensor,
bucketized_lengths: torch.Tensor,
block_sizes: torch.Tensor,
W: int,
B: int,
) -> torch.Tensor:
block_size_expand = torch.empty_like(bucketized_indices)
bucket_expand = torch.empty_like(bucketized_indices)
T = block_sizes.size()[0]
offset = 0
for w in range(W):
for t in range(T):
for b in range(B):
seg_length = bucketized_lengths[w * T * B + t * B + b]
for i in range(offset, offset + seg_length):
block_size_expand[i] = block_sizes[t]
bucket_expand[i] = w
offset += seg_length
return bucket_expand * block_size_expand + bucketized_indices
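# The reference above inverts block bucketization: each bucketized index is mapped
# back to its global value as bucket * block_size + local_index, walking the
# (worker, table, batch) segments in the order given by the bucketized lengths.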
def get_n_rand_num_summing_to_k(n: int, k: int) -> np.ndarray:
"""Get a list of `n` integers which collectively sum to `k`, drawn
uniformly from the set of all such lists.
Args:
n - The number of integers in the result list
k - The value they should sum to
"""
# There are a lot of ways to do this wrong, probably including
# the ones you've just thought of. I think the following does
# it correctly, though.
if n == 0:
return np.array([])
return np.random.multinomial(k, np.ones(n) / n, size=1)[0]
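# Illustrative usage (an assumption, not exercised directly by the tests below):
#   parts = get_n_rand_num_summing_to_k(4, 10)
#   assert parts.sum() == 10 and parts.shape == (4,)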
@torch.jit.script
def permute_scripted(
permute: torch.Tensor, lengths: torch.Tensor, indices: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor, Optional[torch.Tensor]]:
(
permuted_lengths_cpu,
permuted_indices_cpu,
permuted_weights_cpu,
) = torch.ops.fbgemm.permute_2D_sparse_data(permute, lengths, indices, None, None)
return (
permuted_lengths_cpu,
permuted_indices_cpu,
permuted_weights_cpu,
)
class SparseOpsTest(unittest.TestCase):
@staticmethod
def permute_indices_ref_(
lengths: torch.Tensor,
indices: torch.Tensor,
weights: Optional[torch.Tensor],
permute: torch.LongTensor,
is_1D: bool = False,
) -> Tuple[torch.Tensor, torch.Tensor, Optional[torch.Tensor]]:
T = lengths.size(0)
B = lengths.size(1)
if T == 0 or B == 0:
if is_1D:
lengths = lengths.view(-1)
return lengths, indices, weights
if is_1D:
permuted_lengths = torch.index_select(lengths.view(-1), 0, permute).view(-1)
original_segment_lengths = lengths.view(-1)
original_segment_start = [0] + list(accumulate(lengths.view(-1)))
permuted_indices = []
permuted_weights = []
for i in range(permute.numel()):
start = original_segment_start[permute[i]]
end = start + original_segment_lengths[permute[i]]
permuted_indices.append(indices[start:end])
if weights is not None:
permuted_weights.append(weights[start:end])
permuted_indices = torch.cat(permuted_indices, dim=0).flatten()
if weights is None:
permuted_weights = None
else:
permuted_weights = torch.cat(permuted_weights, dim=0).flatten()
else:
permuted_lengths = torch.index_select(lengths.view(T, -1), 0, permute)
original_segment_lengths = lengths.view(T, -1).sum(dim=1, dtype=torch.int32)
original_segment_start = [0] + list(
accumulate(original_segment_lengths.view(-1))
)
permuted_indices = []
permuted_weights = []
for i in range(permute.size(0)):
start = original_segment_start[permute[i]]
end = start + original_segment_lengths[permute[i]]
permuted_indices.append(indices[start:end])
if weights is not None:
permuted_weights.append(weights[start:end])
permuted_indices = torch.cat(permuted_indices, dim=0).flatten()
if weights is None:
permuted_weights = None
else:
permuted_weights = torch.cat(permuted_weights, dim=0).flatten()
return permuted_lengths, permuted_indices, permuted_weights
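    # The reference above permutes at the feature level: rows of `lengths` are
    # reordered by `permute`, and each feature's contiguous slice of `indices`
    # (and `weights`, if given) is concatenated in the permuted order; in 1D mode
    # the permutation is applied per individual segment of the flattened lengths.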
@given(
B=st.integers(min_value=0, max_value=20),
T=st.integers(min_value=0, max_value=20),
L=st.integers(min_value=2, max_value=20),
long_index=st.booleans(),
has_weight=st.booleans(),
is_1D=st.booleans(),
W=st.integers(min_value=4, max_value=8),
)
@settings(verbosity=Verbosity.verbose, max_examples=20, deadline=None)
def test_permute_indices(
self,
B: int,
T: int,
L: int,
long_index: bool,
has_weight: bool,
is_1D: bool,
W: int,
) -> None:
index_dtype = torch.int64 if long_index else torch.int32
length_splits: Optional[List[torch.Tensor]] = None
if is_1D:
if B == 0:
batch_sizes = [0] * W
else:
batch_sizes = [random.randint(a=1, b=B) for i in range(W)]
length_splits = [
torch.randint(low=1, high=L, size=(T, batch_sizes[i])).type(index_dtype)
for i in range(W)
]
lengths = torch.cat(length_splits, dim=1)
else:
lengths = torch.randint(low=1, high=L, size=(T, B)).type(index_dtype)
# pyre-fixme[6]: For 1st param expected `Union[List[int], Size,
# typing.Tuple[int, ...]]` but got `Union[bool, float, int]`.
weights = torch.rand(lengths.sum().item()).float() if has_weight else None
indices = torch.randint(
low=1,
high=int(1e5),
# pyre-fixme[6]: Expected `Union[int, typing.Tuple[int, ...]]` for 3rd
# param but got `Tuple[typing.Union[float, int]]`.
size=(lengths.sum().item(),),
).type(index_dtype)
if is_1D:
permute_list = []
offset_w = [0] + list(
# pyre-fixme[16]
accumulate([length_split.numel() for length_split in length_splits])
)
for t in range(T):
for w in range(W):
for b in range(batch_sizes[w]):
permute_list.append(offset_w[w] + t * batch_sizes[w] + b)
else:
permute_list = list(range(T))
random.shuffle(permute_list)
permute = torch.IntTensor(permute_list)
if is_1D:
(
permuted_lengths_cpu,
permuted_indices_cpu,
permuted_weights_cpu,
) = torch.ops.fbgemm.permute_1D_sparse_data(
permute, lengths, indices, weights, None
)
else:
(
permuted_lengths_cpu,
permuted_indices_cpu,
permuted_weights_cpu,
) = torch.ops.fbgemm.permute_2D_sparse_data(
permute, lengths, indices, weights, None
)
(
permuted_lengths_ref,
permuted_indices_ref,
permuted_weights_ref,
# pyre-fixme[6]: For 4th param expected `LongTensor` but got `Tensor`.
) = self.permute_indices_ref_(lengths, indices, weights, permute.long(), is_1D)
torch.testing.assert_close(permuted_indices_cpu, permuted_indices_ref)
torch.testing.assert_close(permuted_lengths_cpu, permuted_lengths_ref)
if has_weight:
torch.testing.assert_close(permuted_weights_cpu, permuted_weights_ref)
else:
assert permuted_weights_cpu is None and permuted_weights_ref is None
if gpu_available:
if is_1D:
(
permuted_lengths_gpu,
permuted_indices_gpu,
permuted_weights_gpu,
) = torch.ops.fbgemm.permute_1D_sparse_data(
permute.cuda(),
lengths.cuda(),
indices.cuda(),
# pyre-fixme[16]: `Optional` has no attribute `cuda`.
weights.cuda() if has_weight else None,
None,
)
else:
(
permuted_lengths_gpu,
permuted_indices_gpu,
permuted_weights_gpu,
) = torch.ops.fbgemm.permute_2D_sparse_data(
permute.cuda(),
lengths.cuda(),
indices.cuda(),
weights.cuda() if has_weight else None,
None,
)
torch.testing.assert_close(permuted_indices_gpu.cpu(), permuted_indices_cpu)
torch.testing.assert_close(permuted_lengths_gpu.cpu(), permuted_lengths_cpu)
if has_weight:
torch.testing.assert_close(
permuted_weights_gpu.cpu(), permuted_weights_cpu
)
else:
assert permuted_weights_gpu is None
    # TorchScript behaves differently from eager mode: it can return an undefined
    # tensor where eager mode returns None. This unittest ensures the op returns a
    # real None, not an undefined tensor.
def test_permute_indices_scripted_with_none_weights(
self,
) -> None:
index_dtype = torch.int32
lengths = torch.randint(low=1, high=2, size=(1, 1)).type(index_dtype)
weights = None
indices = torch.randint(
low=1,
high=int(1e5),
# pyre-fixme[6]: Expected `Union[int, typing.Tuple[int, ...]]` for 3rd
# param but got `Tuple[typing.Union[float, int]]`.
size=(lengths.sum().item(),),
).type(index_dtype)
permute_list = list(range(1))
random.shuffle(permute_list)
permute = torch.IntTensor(permute_list)
(
permuted_lengths_cpu,
permuted_indices_cpu,
permuted_weights_cpu,
) = permute_scripted(permute, lengths, indices)
(
permuted_lengths_ref,
permuted_indices_ref,
permuted_weights_ref,
# pyre-fixme[6]: For 4th param expected `LongTensor` but got `Tensor`.
) = self.permute_indices_ref_(lengths, indices, weights, permute.long(), False)
self.assertTrue(torch.equal(permuted_indices_cpu, permuted_indices_ref))
self.assertTrue(torch.equal(permuted_lengths_cpu, permuted_lengths_ref))
self.assertEqual(permuted_weights_cpu, None)
self.assertEqual(permuted_weights_ref, None)
@given(
permute_size=st.integers(min_value=30, max_value=1000),
long_index=st.booleans(),
)
@settings(verbosity=Verbosity.verbose, max_examples=20, deadline=None)
def test_invert_permute(
self,
permute_size: int,
long_index: bool,
) -> None:
index_dtype = torch.int64 if long_index else torch.int32
permute_list = list(range(permute_size))
random.shuffle(permute_list)
inversed_permute_list = [0] * len(permute_list)
for i in range(permute_size):
inversed_permute_list[permute_list[i]] = i
permute = torch.IntTensor(permute_list).type(index_dtype)
inverse_permute_ref = torch.IntTensor(inversed_permute_list).type(index_dtype)
inverse_permute_cpu = torch.ops.fbgemm.invert_permute(permute)
torch.testing.assert_close(inverse_permute_cpu, inverse_permute_ref)
if gpu_available:
inverse_permute_gpu = torch.ops.fbgemm.invert_permute(permute.cuda())
torch.testing.assert_close(inverse_permute_gpu.cpu(), inverse_permute_cpu)
@given(
B=st.integers(min_value=1, max_value=20),
T=st.integers(min_value=1, max_value=20),
L=st.integers(min_value=2, max_value=20),
long_index=st.booleans(),
has_weight=st.booleans(),
)
@settings(verbosity=Verbosity.verbose, max_examples=10, deadline=None)
def test_permute_indices_with_repeats(
self, B: int, T: int, L: int, long_index: bool, has_weight: bool
) -> None:
index_dtype = torch.int64 if long_index else torch.int32
lengths = torch.randint(low=1, high=L, size=(T, B)).type(index_dtype)
# pyre-fixme[6]: For 1st param expected `Union[List[int], Size,
# typing.Tuple[int, ...]]` but got `Union[bool, float, int]`.
weights = torch.rand(lengths.sum().item()).float() if has_weight else None
indices = torch.randint(
low=1,
high=int(1e5),
# pyre-fixme[6]: Expected `Union[int, typing.Tuple[int, ...]]` for 3rd
# param but got `Tuple[typing.Union[float, int]]`.
size=(lengths.sum().item(),),
).type(index_dtype)
permute_list = list(range(T))
num_repeats = random.randint(0, T)
for _ in range(num_repeats):
permute_list.append(random.randint(0, T - 1))
random.shuffle(permute_list)
permute = torch.IntTensor(permute_list)
(
permuted_lengths_cpu,
permuted_indices_cpu,
permuted_weights_cpu,
) = torch.ops.fbgemm.permute_2D_sparse_data(permute, lengths, indices, weights)
(
permuted_lengths_ref,
permuted_indices_ref,
permuted_weights_ref,
# pyre-fixme[6]: For 4th param expected `LongTensor` but got `Tensor`.
) = self.permute_indices_ref_(lengths, indices, weights, permute.long())
torch.testing.assert_close(permuted_indices_cpu, permuted_indices_ref)
torch.testing.assert_close(permuted_lengths_cpu, permuted_lengths_ref)
if has_weight:
torch.testing.assert_close(permuted_weights_cpu, permuted_weights_ref)
else:
assert permuted_weights_cpu is None and permuted_weights_ref is None
if gpu_available:
(
permuted_lengths_gpu,
permuted_indices_gpu,
permuted_weights_gpu,
) = torch.ops.fbgemm.permute_2D_sparse_data(
permute.cuda(),
lengths.cuda(),
indices.cuda(),
# pyre-fixme[16]: `Optional` has no attribute `cuda`.
weights.cuda() if has_weight else None,
)
torch.testing.assert_close(permuted_indices_gpu.cpu(), permuted_indices_cpu)
torch.testing.assert_close(permuted_lengths_gpu.cpu(), permuted_lengths_cpu)
if has_weight:
torch.testing.assert_close(
permuted_weights_gpu.cpu(), permuted_weights_cpu
)
else:
assert permuted_weights_cpu is None
@staticmethod
def permute_embeddings_(
permute_fn: Callable[..., Tuple[torch.Tensor, ...]],
*args: Any,
) -> Tuple[torch.Tensor, torch.Tensor]:
if permute_fn == torch.ops.fbgemm.permute_2D_sparse_data:
permuted_lengths, permuted_embeddings, _ = permute_fn(*args, None)
return permuted_lengths, permuted_embeddings
else:
return permute_fn(*args)
@given(
B=st.integers(min_value=1, max_value=20),
T=st.integers(min_value=1, max_value=20),
L=st.integers(min_value=2, max_value=20),
long_index=st.booleans(),
permute_fn=st.sampled_from(
[
torch.ops.fbgemm.permute_2D_sparse_data,
torch.ops.fbgemm.permute_sequence_embeddings,
]
),
)
@settings(verbosity=Verbosity.verbose, max_examples=10, deadline=None)
def test_permute_embeddings(
self,
B: int,
T: int,
L: int,
long_index: bool,
permute_fn: Callable[..., Tuple[torch.Tensor, ...]],
) -> None:
index_dtype = torch.int64 if long_index else torch.int32
lengths = torch.randint(low=1, high=L, size=(T, B)).type(index_dtype)
# pyre-fixme[6]: For 1st param expected `Union[List[int], Size,
# typing.Tuple[int, ...]]` but got `Union[bool, float, int]`.
embeddings = torch.rand(lengths.sum().item()).float()
permute_list = list(range(T))
random.shuffle(permute_list)
permute = torch.IntTensor(permute_list)
(permuted_lengths_cpu, permuted_embeddings_cpu) = self.permute_embeddings_(
permute_fn, permute, lengths, embeddings
)
(
permuted_lengths_ref,
permuted_embeddings_ref,
_,
# pyre-fixme[6]: For 4th param expected `LongTensor` but got `Tensor`.
) = self.permute_indices_ref_(lengths, embeddings, None, permute.long())
torch.testing.assert_close(permuted_embeddings_cpu, permuted_embeddings_ref)
torch.testing.assert_close(permuted_lengths_cpu, permuted_lengths_ref)
if gpu_available:
(permuted_lengths_gpu, permuted_embeddings_gpu) = self.permute_embeddings_(
permute_fn,
permute.cuda(),
lengths.cuda(),
embeddings.cuda(),
)
torch.testing.assert_close(
permuted_embeddings_gpu.cpu(), permuted_embeddings_cpu
)
torch.testing.assert_close(permuted_lengths_gpu.cpu(), permuted_lengths_cpu)
@given(
long_indices=st.booleans(),
use_cpu=st.booleans() if gpu_available else st.just(True),
)
@settings(verbosity=Verbosity.verbose, max_examples=16, deadline=None)
def test_block_bucketize_sparse_features_long_indices(
self, long_indices: bool, use_cpu: bool
) -> None:
bucketize_pos = False
sequence = False
index_type = torch.long if long_indices else torch.int
# 3 GPUs
my_size = 3
block_sizes = torch.tensor([3, 4, 5], dtype=index_type)
if not long_indices:
lengths = torch.tensor([0, 3, 2, 0, 1, 4], dtype=index_type)
indices = torch.tensor([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], dtype=index_type)
new_lengths_ref = torch.tensor(
[0, 2, 0, 0, 0, 0, 0, 1, 2, 0, 1, 3, 0, 0, 0, 0, 0, 1], dtype=index_type
)
new_indices_ref = torch.tensor(
[1, 2, 0, 0, 1, 1, 2, 3, 4, 0], dtype=index_type
)
else:
lengths = torch.tensor([0, 3, 2, 0, 1, 4], dtype=index_type)
            # Test long and negative indices: -8 wraps around to 18446744073709551608 as uint64
indices = torch.tensor(
[1, 2, 3, 100061827127359, 5, 6, 7, -8, 100058153792324, 10],
dtype=index_type,
)
new_lengths_ref = torch.tensor(
[0, 2, 0, 0, 0, 0, 0, 1, 2, 0, 1, 1, 0, 0, 0, 0, 0, 3], dtype=index_type
)
new_indices_ref = torch.tensor(
[
1,
2,
0,
33353942375786, # 100061827127359/3 = 33353942375786
1,
1,
2,
                    6148914691236517202,  # -8 wraps to 18446744073709551608; 18446744073709551608 // 3 = 6148914691236517202
33352717930774, # 100058153792324/3 = 33352717930774
0,
],
dtype=index_type,
)
(
new_lengths_cpu,
new_indices_cpu,
new_weights_cpu,
new_pos_cpu,
unbucketize_permute_cpu,
) = torch.ops.fbgemm.block_bucketize_sparse_features(
lengths,
indices,
bucketize_pos,
sequence,
block_sizes,
my_size,
None,
)
torch.testing.assert_close(new_lengths_cpu, new_lengths_ref)
torch.testing.assert_close(new_indices_cpu, new_indices_ref)
if not use_cpu:
(
new_lengths_gpu,
new_indices_gpu,
new_weights_gpu,
new_pos_gpu,
unbucketize_permute_gpu,
) = torch.ops.fbgemm.block_bucketize_sparse_features(
lengths.cuda(),
indices.cuda(),
bucketize_pos,
sequence,
block_sizes.cuda(),
my_size,
None,
)
torch.testing.assert_close(new_lengths_gpu.cpu(), new_lengths_ref)
torch.testing.assert_close(new_indices_gpu.cpu(), new_indices_ref)
torch.testing.assert_close(new_lengths_gpu.cpu(), new_lengths_cpu)
torch.testing.assert_close(new_indices_gpu.cpu(), new_indices_cpu)
@given(
n=st.integers(min_value=1, max_value=100),
long_index=st.booleans(),
)
@settings(verbosity=Verbosity.verbose, max_examples=20, deadline=None)
def test_cumsum(self, n: int, long_index: bool) -> None:
index_dtype = torch.int64 if long_index else torch.int32
np_index_dtype = np.int64 if long_index else np.int32
# cpu tests
x = torch.randint(low=0, high=100, size=(n,)).type(index_dtype)
ze = torch.ops.fbgemm.asynchronous_exclusive_cumsum(x)
zi = torch.ops.fbgemm.asynchronous_inclusive_cumsum(x)
zc = torch.ops.fbgemm.asynchronous_complete_cumsum(x)
torch.testing.assert_close(
torch.from_numpy(np.cumsum(x.cpu().numpy()).astype(np_index_dtype)),
zi.cpu(),
)
torch.testing.assert_close(
torch.from_numpy(
(np.cumsum([0] + x.cpu().numpy().tolist())[:-1]).astype(np_index_dtype)
),
ze.cpu(),
)
torch.testing.assert_close(
torch.from_numpy(
(np.cumsum([0] + x.cpu().numpy().tolist())).astype(np_index_dtype)
),
zc.cpu(),
)
# meta tests
mx = torch.randint(low=0, high=100, size=(n,)).type(index_dtype).to("meta")
# mze = torch.ops.fbgemm.asynchronous_exclusive_cumsum(mx)
# mzi = torch.ops.fbgemm.asynchronous_inclusive_cumsum(mx)
mzc = torch.ops.fbgemm.asynchronous_complete_cumsum(mx)
# self.assertEqual(ze.size(), mze.size())
# self.assertEqual(zi.size(), mzi.size())
self.assertEqual(zc.size(), mzc.size())
if gpu_available:
x = x.cuda()
ze = torch.ops.fbgemm.asynchronous_exclusive_cumsum(x)
zi = torch.ops.fbgemm.asynchronous_inclusive_cumsum(x)
zc = torch.ops.fbgemm.asynchronous_complete_cumsum(x)
torch.testing.assert_close(
torch.from_numpy(np.cumsum(x.cpu().numpy()).astype(np_index_dtype)),
zi.cpu(),
)
torch.testing.assert_close(
torch.from_numpy(
(np.cumsum([0] + x.cpu().numpy().tolist())[:-1]).astype(
np_index_dtype
)
),
ze.cpu(),
)
torch.testing.assert_close(
torch.from_numpy(
(np.cumsum([0] + x.cpu().numpy().tolist())).astype(np_index_dtype)
),
zc.cpu(),
)
@given(
n=st.integers(min_value=1, max_value=600),
b=st.integers(min_value=1, max_value=10),
long_index=st.booleans(),
)
@settings(verbosity=Verbosity.verbose, max_examples=20, deadline=None)
def test_asynchronous_complete_cumsum_2d(
self, n: int, b: int, long_index: bool
) -> None:
index_dtype = torch.int64 if long_index else torch.int32
def test_asynchronous_complete_cumsum_2d_helper(x: torch.Tensor) -> None:
np_index_dtype = np.int64 if long_index else np.int32
zc = torch.ops.fbgemm.asynchronous_complete_cumsum(x)
zeros = torch.zeros(b, 1)
torch.testing.assert_close(
torch.from_numpy(
np.cumsum(
torch.concat([zeros, x.cpu()], dim=1).numpy(), axis=1
).astype(np_index_dtype)
),
zc.cpu(),
)
x = torch.randint(low=0, high=100, size=(b, n)).type(index_dtype)
# cpu test
test_asynchronous_complete_cumsum_2d_helper(x)
if gpu_available:
# gpu test
test_asynchronous_complete_cumsum_2d_helper(x.cuda())
@given(
N=st.integers(min_value=1, max_value=20),
offsets_type=st.sampled_from([torch.int32, torch.int64]),
)
@settings(verbosity=Verbosity.verbose, max_examples=20, deadline=None)
def test_offsets_range(
self,
N: int,
# pyre-fixme[11]: Annotation `int32` is not defined as a type.
# pyre-fixme[11]: Annotation `int64` is not defined as a type.
offsets_type: "Union[Type[torch.int32], Type[torch.int64]]",
) -> None:
lengths = np.array([np.random.randint(low=0, high=20) for _ in range(N)])
offsets = np.cumsum(np.concatenate(([0], lengths)))[:-1]
range_ref = torch.from_numpy(
np.concatenate([np.arange(size) for size in lengths])
)
output_size = np.sum(lengths)
offsets_cpu = torch.tensor(offsets, dtype=offsets_type)
range_cpu = torch.ops.fbgemm.offsets_range(offsets_cpu, output_size)
range_ref = range_ref.to(range_cpu.dtype)
torch.testing.assert_close(range_cpu, range_ref, rtol=0, atol=0)
if gpu_available:
range_gpu = torch.ops.fbgemm.offsets_range(offsets_cpu.cuda(), output_size)
range_ref = range_ref.to(range_gpu.dtype)
torch.testing.assert_close(range_gpu.cpu(), range_ref, rtol=0, atol=0)
@given(
index_type=st.sampled_from([torch.int, torch.long]),
has_weight=st.booleans(),
bucketize_pos=st.booleans(),
sequence=st.booleans(),
)
@settings(verbosity=Verbosity.verbose, max_examples=16, deadline=None)
def test_block_bucketize_sparse_features(
self,
index_type: Type[torch.dtype],
has_weight: bool,
bucketize_pos: bool,
sequence: bool,
) -> None:
B = 2
# pyre-ignore [6]
lengths = torch.tensor([0, 2, 1, 3, 2, 3, 3, 1], dtype=index_type)
indices = torch.tensor(
[3, 4, 15, 11, 28, 29, 1, 10, 11, 12, 13, 11, 22, 20, 20],
# pyre-ignore [6]
dtype=index_type,
)
weights = (
torch.tensor(
[
1.0,
2.0,
3.0,
4.0,
5.0,
6.0,
7.0,
8.0,
9.0,
10.0,
11.0,
12.0,
13.0,
14.0,
15.0,
],
dtype=torch.float,
)
if has_weight
else None
)
# pyre-ignore [6]
block_sizes = torch.tensor([5, 15, 10, 20], dtype=index_type)
my_size = 2
new_lengths_ref = torch.tensor(
[0, 2, 0, 1, 1, 0, 1, 0, 0, 0, 1, 2, 1, 3, 2, 1],
# pyre-ignore [6]
dtype=index_type,
)
new_indices_ref = torch.tensor(
[3, 4, 11, 1, 11, 0, 13, 14, 0, 1, 2, 3, 2, 0, 0],
# pyre-ignore [6]
dtype=index_type,
)
new_weights_ref = torch.tensor(
[
1.0,
2.0,
4.0,
7.0,
12.0,
3.0,
5.0,
6.0,
8.0,
9.0,
10.0,
11.0,
13.0,
14.0,
15.0,
],
dtype=torch.float,
)
new_pos_ref = torch.tensor(
[0, 1, 0, 0, 0, 0, 1, 2, 1, 0, 1, 2, 1, 2, 0],
# pyre-ignore [6]
dtype=index_type,
)
(
new_lengths_cpu,
new_indices_cpu,
new_weights_cpu,
new_pos_cpu,
unbucketize_permute,
) = torch.ops.fbgemm.block_bucketize_sparse_features(
lengths, indices, bucketize_pos, sequence, block_sizes, my_size, weights
)
torch.testing.assert_close(new_lengths_cpu, new_lengths_ref, rtol=0, atol=0)
torch.testing.assert_close(new_indices_cpu, new_indices_ref, rtol=0, atol=0)
if has_weight:
torch.testing.assert_close(new_weights_cpu, new_weights_ref)
if bucketize_pos:
torch.testing.assert_close(new_pos_cpu, new_pos_ref)
if sequence:
value_unbucketized_indices = unbucketize_indices_value(
new_indices_cpu, new_lengths_cpu, block_sizes, my_size, B
)
unbucketized_indices = torch.index_select(
value_unbucketized_indices, 0, unbucketize_permute
)
torch.testing.assert_close(unbucketized_indices, indices, rtol=0, atol=0)
if gpu_available:
(
new_lengths_gpu,
new_indices_gpu,
new_weights_gpu,
new_pos_gpu,
unbucketize_permute_gpu,
) = torch.ops.fbgemm.block_bucketize_sparse_features(
lengths.cuda(),
indices.cuda(),
bucketize_pos,
sequence,
block_sizes.cuda(),
my_size,
# pyre-fixme[16]: `Optional` has no attribute `cuda`.
weights.cuda() if has_weight else None,
)
torch.testing.assert_close(
new_lengths_gpu.cpu(), new_lengths_ref, rtol=0, atol=0
)
torch.testing.assert_close(
new_indices_gpu.cpu(), new_indices_ref, rtol=0, atol=0
)
if has_weight:
torch.testing.assert_close(new_weights_gpu.cpu(), new_weights_cpu)
if bucketize_pos:
torch.testing.assert_close(new_pos_gpu.cpu(), new_pos_cpu)
if sequence:
value_unbucketized_indices = unbucketize_indices_value(
new_indices_gpu.cpu(),
new_lengths_gpu.cpu(),
block_sizes,
my_size,
B,
)
unbucketized_indices = torch.index_select(
value_unbucketized_indices, 0, unbucketize_permute_gpu.cpu()
)
torch.testing.assert_close(
unbucketized_indices, indices, rtol=0, atol=0
)
@given(
index_type=st.sampled_from([torch.int, torch.long]),
has_weight=st.booleans(),
bucketize_pos=st.booleans(),
)
@settings(verbosity=Verbosity.verbose, max_examples=2, deadline=None)
def test_bucketize_sparse_features(
self,
index_type: Type[torch.dtype],
has_weight: bool,
bucketize_pos: bool,
) -> None:
# pyre-ignore [6]
lengths = torch.tensor([0, 2, 1, 3], dtype=index_type)
# pyre-ignore [6]
indices = torch.tensor([10, 10, 15, 20, 25, 30], dtype=index_type)
weights = (
torch.tensor([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], dtype=torch.float)
if has_weight
else None
)
# pyre-ignore [6]
new_lengths_ref = torch.tensor([0, 2, 0, 2, 0, 0, 1, 1], dtype=index_type)
# pyre-ignore [6]
new_indices_ref = torch.tensor([5, 5, 10, 15, 7, 12], dtype=index_type)
new_weights_ref = torch.tensor(
[1.0, 2.0, 4.0, 6.0, 3.0, 5.0], dtype=torch.float
)
# pyre-ignore [6]
new_pos_ref = torch.tensor([0, 1, 0, 2, 0, 1], dtype=index_type)
(
new_lengths_cpu,
new_indices_cpu,
new_weights_cpu,
new_pos_cpu,
) = torch.ops.fbgemm.bucketize_sparse_features(
lengths, indices, bucketize_pos, 2, weights
)
torch.testing.assert_close(new_lengths_cpu, new_lengths_ref, rtol=0, atol=0)
torch.testing.assert_close(new_indices_cpu, new_indices_ref, rtol=0, atol=0)
if has_weight:
torch.testing.assert_close(new_weights_cpu, new_weights_ref)
if bucketize_pos:
torch.testing.assert_close(new_pos_cpu, new_pos_ref)
if gpu_available:
(
new_lengths_gpu,
new_indices_gpu,
new_weights_gpu,
new_pos_gpu,
) = torch.ops.fbgemm.bucketize_sparse_features(
lengths.cuda(),
indices.cuda(),
bucketize_pos,
2,
# pyre-fixme[16]: `Optional` has no attribute `cuda`.
weights.cuda() if has_weight else None,
)
torch.testing.assert_close(
new_lengths_gpu.cpu(), new_lengths_ref, rtol=0, atol=0
)
torch.testing.assert_close(
new_indices_gpu.cpu(), new_indices_ref, rtol=0, atol=0
)
if has_weight:
torch.testing.assert_close(new_weights_gpu.cpu(), new_weights_cpu)
if bucketize_pos:
torch.testing.assert_close(new_pos_gpu.cpu(), new_pos_cpu)
@unittest.skipIf(*gpu_unavailable)
@given(
B=st.integers(min_value=1, max_value=20),
T=st.integers(min_value=1, max_value=20),
L=st.integers(min_value=2, max_value=20),
A=st.integers(min_value=1, max_value=20),
Dtype=st.sampled_from([torch.int32, torch.float, torch.int64]),
broadcast_lengths=st.booleans(),
)
@settings(verbosity=Verbosity.verbose, max_examples=20, deadline=None)
def test_reorder_batched_ad_lengths(
self,
B: int,
T: int,
L: int,
A: int,
Dtype: torch.dtype,
broadcast_lengths: bool,
) -> None:
if broadcast_lengths:
cat_ad_lengths = (
torch.cat([torch.tensor([L for _ in range(T)]) for _ in range(B)], 0)
.cuda()
.to(Dtype)
)
cat_ad_lengths_broadcasted = cat_ad_lengths.tile([A])
else:
cat_ad_lengths = (
torch.cat(
[torch.tensor([L for _ in range(T * A)]) for _ in range(B)], 0
)
.cuda()
.to(Dtype)
)
cat_ad_lengths_broadcasted = cat_ad_lengths
batch_offsets = torch.tensor([A * b for b in range(B + 1)]).int().cuda()
num_ads_in_batch = B * A
reordered_batched_ad_lengths = torch.ops.fbgemm.reorder_batched_ad_lengths(
cat_ad_lengths, batch_offsets, num_ads_in_batch, broadcast_lengths
)
torch.testing.assert_close(
cat_ad_lengths_broadcasted, reordered_batched_ad_lengths
)
cat_ad_lengths_cpu = cat_ad_lengths.cpu()
batch_offsets_cpu = batch_offsets.cpu()
reordered_batched_ad_lengths_cpu = torch.ops.fbgemm.reorder_batched_ad_lengths(
cat_ad_lengths_cpu, batch_offsets_cpu, num_ads_in_batch, broadcast_lengths
)
torch.testing.assert_close(
reordered_batched_ad_lengths_cpu, reordered_batched_ad_lengths.cpu()
)
@given(
B=st.integers(min_value=1, max_value=20),
T=st.integers(min_value=1, max_value=20),
L=st.integers(min_value=2, max_value=20),
A=st.integers(min_value=1, max_value=20),
Dtype=st.sampled_from([torch.int32, torch.float, torch.int64]),
broadcast_lengths=st.booleans(),
)
@settings(verbosity=Verbosity.verbose, max_examples=40, deadline=None)
def test_reorder_batched_ad_lengths_cpu(
self,
B: int,
T: int,
L: int,
A: int,
Dtype: torch.dtype,
broadcast_lengths: bool,
) -> None:
if broadcast_lengths:
cat_ad_lengths = (
torch.cat([torch.tensor([L for _ in range(T)]) for _ in range(B)], 0)
.int()
.to(Dtype)
)
cat_ad_lengths_broadcasted = cat_ad_lengths.tile([A])
else:
cat_ad_lengths = (
torch.cat(
[torch.tensor([L for _ in range(T * A)]) for _ in range(B)], 0
)
.int()
.to(Dtype)
)
cat_ad_lengths_broadcasted = cat_ad_lengths
batch_offsets = torch.tensor([A * b for b in range(B + 1)]).int()
num_ads_in_batch = B * A
reordered_batched_ad_lengths = torch.ops.fbgemm.reorder_batched_ad_lengths(
cat_ad_lengths, batch_offsets, num_ads_in_batch, broadcast_lengths
)
torch.testing.assert_close(
cat_ad_lengths_broadcasted, reordered_batched_ad_lengths
)
@unittest.skipIf(*gpu_unavailable)
@given(
B=st.integers(min_value=1, max_value=20),
T=st.integers(min_value=1, max_value=20),
L=st.integers(min_value=2, max_value=20),
A=st.integers(min_value=1, max_value=20),
Dtype=st.sampled_from([torch.int32, torch.float, torch.int64]),
Itype=st.sampled_from([torch.int32, torch.int64]),
broadcast_indices=st.booleans(),
)
@settings(verbosity=Verbosity.verbose, max_examples=20, deadline=None)
def test_reorder_batched_ad_indices(
self,
B: int,
T: int,
L: int,
A: int,
Dtype: torch.dtype,
Itype: torch.dtype,
broadcast_indices: bool,
) -> None:
if broadcast_indices:
cat_ad_indices = (
torch.randint(
low=0,
high=100,
size=(B * T * L,),
)
.int()
.cuda()
.to(Dtype)
)
cat_ad_lengths = (
torch.cat(
[torch.tensor([L for _ in range(T)]) for _ in range(B)],
0,
)
.int()
.cuda()
)
cat_ad_lengths_broadcasted = cat_ad_lengths.tile([A])
else:
cat_ad_indices = (
torch.randint(
low=0,
high=100,
size=(B * T * A * L,),
)
.int()
.cuda()
.to(Dtype)
)
cat_ad_lengths = (
torch.cat(
[torch.tensor([L for _ in range(T * A)]) for _ in range(B)],
0,
)
.int()
.cuda()
)
cat_ad_lengths_broadcasted = cat_ad_lengths
batch_offsets = torch.tensor([A * b for b in range(B + 1)]).int().cuda()
num_ads_in_batch = B * A
reordered_cat_ad_lengths = torch.ops.fbgemm.reorder_batched_ad_lengths(
cat_ad_lengths, batch_offsets, num_ads_in_batch, broadcast_indices
)
torch.testing.assert_close(cat_ad_lengths_broadcasted, reordered_cat_ad_lengths)
cat_ad_offsets = torch.ops.fbgemm.asynchronous_complete_cumsum(
cat_ad_lengths
).to(Itype)
reordered_cat_ad_offsets = torch.ops.fbgemm.asynchronous_complete_cumsum(
reordered_cat_ad_lengths
).to(Itype)
reordered_cat_ad_indices = torch.ops.fbgemm.reorder_batched_ad_indices(
cat_ad_offsets,
cat_ad_indices,
reordered_cat_ad_offsets,
batch_offsets,
num_ads_in_batch,
broadcast_indices,
B * T * A * L,
)
torch.testing.assert_close(
reordered_cat_ad_indices.view(T, B, A, L).permute(1, 0, 2, 3),
cat_ad_indices.view(B, T, 1, L).tile([1, 1, A, 1])
if broadcast_indices
else cat_ad_indices.view(B, T, A, L),
)
@given(
B=st.integers(min_value=1, max_value=20),
T=st.integers(min_value=1, max_value=20),
L=st.integers(min_value=2, max_value=20),
A=st.integers(min_value=1, max_value=20),
Dtype=st.sampled_from([torch.int32, torch.float, torch.int64]),
Itype=st.sampled_from([torch.int32, torch.int64]),
broadcast_indices=st.booleans(),
)
@settings(verbosity=Verbosity.verbose, max_examples=20, deadline=None)
def test_cat_reorder_batched_ad_indices_cpu(
self,
B: int,
T: int,
L: int,
A: int,
Dtype: torch.dtype,
Itype: torch.dtype,
broadcast_indices: bool,
) -> None:
if broadcast_indices:
ad_indices = [
(
torch.randint(
low=0,
high=100,
size=(T * L,),
)
.int()
.to(Dtype)
)
for _ in range(B)
]
cat_ad_lengths = torch.cat(
[torch.tensor([L for _ in range(T)]) for _ in range(B)],
0,
).int()
cat_ad_lengths_broadcasted = cat_ad_lengths.tile([A])
cat_ad_indices = torch.cat(ad_indices, 0)
else:
ad_indices = [
(
torch.randint(
low=0,
high=100,
size=(T * A * L,),
)
.int()
.to(Dtype)
)
for _ in range(B)
]
cat_ad_lengths = torch.cat(
[torch.tensor([L for _ in range(T * A)]) for _ in range(B)],
0,
).int()
cat_ad_lengths_broadcasted = cat_ad_lengths
cat_ad_indices = torch.cat(ad_indices, 0)
batch_offsets = torch.tensor([A * b for b in range(B + 1)]).int()
num_ads_in_batch = B * A
reordered_cat_ad_lengths = torch.ops.fbgemm.reorder_batched_ad_lengths(
cat_ad_lengths, batch_offsets, num_ads_in_batch, broadcast_indices
)
torch.testing.assert_close(cat_ad_lengths_broadcasted, reordered_cat_ad_lengths)
cat_ad_offsets = torch.ops.fbgemm.asynchronous_complete_cumsum(
cat_ad_lengths
).to(Itype)
reordered_cat_ad_offsets = torch.ops.fbgemm.asynchronous_complete_cumsum(
reordered_cat_ad_lengths
).to(Itype)
reordered_cat_ad_indices = torch.ops.fbgemm.cat_reorder_batched_ad_indices(
cat_ad_offsets,
ad_indices,
reordered_cat_ad_offsets,
batch_offsets,
num_ads_in_batch,
broadcast_indices,
B * T * A * L,
)
torch.testing.assert_close(
reordered_cat_ad_indices.view(T, B, A, L).permute(1, 0, 2, 3),
cat_ad_indices.view(B, T, 1, L).tile([1, 1, A, 1])
if broadcast_indices
else cat_ad_indices.view(B, T, A, L),
)
@given(
B=st.integers(min_value=1, max_value=20),
T=st.integers(min_value=1, max_value=20),
L=st.integers(min_value=2, max_value=20),
A=st.integers(min_value=1, max_value=20),
Dtype=st.sampled_from([torch.int32, torch.float, torch.int64]),
Itype=st.sampled_from([torch.int32, torch.int64]),
broadcast_indices=st.booleans(),
)
@settings(verbosity=Verbosity.verbose, max_examples=40, deadline=None)
def test_reorder_batched_ad_indices_cpu(
self,
B: int,
T: int,
L: int,
A: int,
Dtype: torch.dtype,
Itype: torch.dtype,
broadcast_indices: bool,
) -> None:
if broadcast_indices:
cat_ad_indices = (
torch.randint(
low=0,
high=100,
size=(B * T * L,),
)
.int()
.to(Dtype)
)
cat_ad_lengths = torch.cat(
[torch.tensor([L for _ in range(T)]) for _ in range(B)],
0,
).int()
cat_ad_lengths_broadcasted = cat_ad_lengths.tile([A])
else:
cat_ad_indices = (
torch.randint(
low=0,
high=100,
size=(B * T * A * L,),
)
.int()
.to(Dtype)
)
cat_ad_lengths = torch.cat(
[torch.tensor([L for _ in range(T * A)]) for _ in range(B)],
0,
).int()
cat_ad_lengths_broadcasted = cat_ad_lengths
batch_offsets = torch.tensor([A * b for b in range(B + 1)]).int()
num_ads_in_batch = B * A
reordered_cat_ad_lengths = torch.ops.fbgemm.reorder_batched_ad_lengths(
cat_ad_lengths, batch_offsets, num_ads_in_batch, broadcast_indices
)
torch.testing.assert_close(cat_ad_lengths_broadcasted, reordered_cat_ad_lengths)
cat_ad_offsets = torch.ops.fbgemm.asynchronous_complete_cumsum(
cat_ad_lengths
).to(Itype)
reordered_cat_ad_offsets = torch.ops.fbgemm.asynchronous_complete_cumsum(
reordered_cat_ad_lengths
).to(Itype)
reordered_cat_ad_indices = torch.ops.fbgemm.reorder_batched_ad_indices(
cat_ad_offsets,
cat_ad_indices,
reordered_cat_ad_offsets,
batch_offsets,
num_ads_in_batch,
broadcast_indices,
B * T * A * L,
)
torch.testing.assert_close(
reordered_cat_ad_indices.view(T, B, A, L).permute(1, 0, 2, 3),
cat_ad_indices.view(B, T, 1, L).tile([1, 1, A, 1])
if broadcast_indices
else cat_ad_indices.view(B, T, A, L),
)
@given(data_type=st.sampled_from([torch.bfloat16, torch.half, torch.float32]))
@settings(verbosity=Verbosity.verbose, deadline=None)
def test_histogram_binning_calibration(self, data_type: torch.dtype) -> None:
num_bins = 5000
logit = torch.tensor([[-0.0018], [0.0085], [0.0090], [0.0003], [0.0029]]).type(
data_type
)
bin_num_examples = torch.empty([num_bins], dtype=torch.float64).fill_(0.0)
bin_num_positives = torch.empty([num_bins], dtype=torch.float64).fill_(0.0)
calibrated_prediction, bin_ids = torch.ops.fbgemm.histogram_binning_calibration(
logit=logit,
bin_num_examples=bin_num_examples,
bin_num_positives=bin_num_positives,
positive_weight=0.4,
lower_bound=0.0,
upper_bound=1.0,
bin_ctr_in_use_after=10000,
bin_ctr_weight_value=0.9995,
)
expected_calibrated_prediction = torch.tensor(
[[0.2853], [0.2875], [0.2876], [0.2858], [0.2863]]
).type(data_type)
expected_bin_ids = torch.tensor(
[1426, 1437, 1437, 1428, 1431], dtype=torch.long
)
error_tolerance = 1e-03
if data_type == torch.bfloat16:
            # Use a larger tolerance: bfloat16 has fewer significand bits.
error_tolerance = 1e-02
expected_bin_ids = torch.tensor(
[1426, 1438, 1438, 1430, 1430], dtype=torch.long
)
torch.testing.assert_close(
calibrated_prediction,
expected_calibrated_prediction,
rtol=error_tolerance,
atol=error_tolerance,
)
self.assertTrue(
torch.equal(
bin_ids.long(),
expected_bin_ids,
)
)
if torch.cuda.is_available():
(
calibrated_prediction_gpu,
bin_ids_gpu,
) = torch.ops.fbgemm.histogram_binning_calibration(
logit=logit.cuda(),
bin_num_examples=bin_num_examples.cuda(),
bin_num_positives=bin_num_positives.cuda(),
positive_weight=0.4,
lower_bound=0.0,
upper_bound=1.0,
bin_ctr_in_use_after=10000,
bin_ctr_weight_value=0.9995,
)
torch.testing.assert_close(
calibrated_prediction_gpu,
expected_calibrated_prediction.cuda(),
rtol=error_tolerance,
atol=error_tolerance,
)
self.assertTrue(
torch.equal(
bin_ids_gpu.long(),
expected_bin_ids.cuda(),
)
)
@given(
data_type=st.sampled_from([torch.bfloat16, torch.half, torch.float32]),
segment_value_type=st.sampled_from([torch.int, torch.long]),
segment_length_type=st.sampled_from([torch.int, torch.long]),
)
@settings(verbosity=Verbosity.verbose, deadline=None)
def test_histogram_binning_calibration_by_feature(
self,
data_type: torch.dtype,
segment_value_type: torch.dtype,
segment_length_type: torch.dtype,
) -> None:
num_bins = 5000
num_segments = 42
logit = torch.tensor([-0.0018, 0.0085, 0.0090, 0.0003, 0.0029]).type(data_type)
segment_value = torch.tensor([40, 31, 32, 13, 31]).type(segment_value_type)
lengths = torch.tensor([[1], [1], [1], [1], [1]]).type(segment_length_type)
num_interval = num_bins * (num_segments + 1)
bin_num_examples = torch.empty([num_interval], dtype=torch.float64).fill_(0.0)
bin_num_positives = torch.empty([num_interval], dtype=torch.float64).fill_(0.0)
(
calibrated_prediction,
bin_ids,
) = torch.ops.fbgemm.histogram_binning_calibration_by_feature(
logit=logit,
segment_value=segment_value,
segment_lengths=lengths,
num_segments=num_segments,
bin_num_examples=bin_num_examples,
bin_num_positives=bin_num_positives,
num_bins=num_bins,
positive_weight=0.4,
lower_bound=0.0,
upper_bound=1.0,
bin_ctr_in_use_after=10000,
bin_ctr_weight_value=0.9995,
)
expected_calibrated_prediction = torch.tensor(
[0.2853, 0.2875, 0.2876, 0.2858, 0.2863]
).type(data_type)
expected_bin_ids = torch.tensor(
[206426, 161437, 166437, 71428, 161431], dtype=torch.long
)
error_tolerance = 1e-03
if data_type == torch.bfloat16:
            # Use a larger tolerance: bfloat16 has fewer significand bits.
error_tolerance = 1e-02
expected_bin_ids = torch.tensor(
[206426, 161438, 166438, 71430, 161430], dtype=torch.long
)
torch.testing.assert_close(
calibrated_prediction,
expected_calibrated_prediction,
rtol=error_tolerance,
atol=error_tolerance,
)
self.assertTrue(
torch.equal(
bin_ids.long(),
expected_bin_ids,
)
)
if torch.cuda.is_available():
(
calibrated_prediction_gpu,
bin_ids_gpu,
) = torch.ops.fbgemm.histogram_binning_calibration_by_feature(
logit=logit.cuda(),
segment_value=segment_value.cuda(),
segment_lengths=lengths.cuda(),
num_segments=num_segments,
bin_num_examples=bin_num_examples.cuda(),
bin_num_positives=bin_num_positives.cuda(),
num_bins=num_bins,
positive_weight=0.4,
lower_bound=0.0,
upper_bound=1.0,
bin_ctr_in_use_after=10000,
bin_ctr_weight_value=0.9995,
)
torch.testing.assert_close(
calibrated_prediction_gpu,
expected_calibrated_prediction.cuda(),
rtol=error_tolerance,
atol=error_tolerance,
)
self.assertTrue(
torch.equal(
bin_ids_gpu.long(),
expected_bin_ids.cuda(),
)
)
@given(
data_type=st.sampled_from([torch.bfloat16, torch.half, torch.float32]),
segment_value_type=st.sampled_from([torch.int, torch.long]),
segment_length_type=st.sampled_from([torch.int, torch.long]),
)
@settings(verbosity=Verbosity.verbose, deadline=None)
def test_generic_histogram_binning_calibration_by_feature(
self,
data_type: torch.dtype,
segment_value_type: torch.dtype,
segment_length_type: torch.dtype,
) -> None:
num_bins = 5000
num_segments = 42
logit = torch.tensor([-0.0018, 0.0085, 0.0090, 0.0003, 0.0029]).type(data_type)
segment_value = torch.tensor([40, 31, 32, 13, 31]).type(segment_value_type)
lengths = torch.tensor([[1], [1], [1], [1], [1]]).type(segment_length_type)
num_interval = num_bins * (num_segments + 1)
bin_num_examples = torch.empty([num_interval], dtype=torch.float64).fill_(0.0)
bin_num_positives = torch.empty([num_interval], dtype=torch.float64).fill_(0.0)
lower_bound = 0.0
upper_bound = 1.0
w = (upper_bound - lower_bound) / num_bins
bin_boundaries = torch.arange(
lower_bound + w, upper_bound - w / 2, w, dtype=torch.float64
)
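        # Note (editorial): bin_boundaries holds the num_bins - 1 interior
        # boundaries w, 2w, ..., (num_bins - 1) * w of a uniform binning of
        # [lower_bound, upper_bound].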
(
calibrated_prediction,
bin_ids,
) = torch.ops.fbgemm.generic_histogram_binning_calibration_by_feature(
logit=logit,
segment_value=segment_value,
segment_lengths=lengths,
num_segments=num_segments,
bin_num_examples=bin_num_examples,
bin_num_positives=bin_num_positives,
bin_boundaries=bin_boundaries,
positive_weight=0.4,
bin_ctr_in_use_after=10000,
bin_ctr_weight_value=0.9995,
)
expected_calibrated_prediction = torch.tensor(
[0.2853, 0.2875, 0.2876, 0.2858, 0.2863]
).type(data_type)
expected_bin_ids = torch.tensor(
[206426, 161437, 166437, 71428, 161431], dtype=torch.long
)
error_tolerance = 1e-03
if data_type == torch.bfloat16:
            # Use a larger tolerance: bfloat16 has fewer significand bits.
error_tolerance = 1e-02
expected_bin_ids = torch.tensor(
[206426, 161438, 166438, 71430, 161430], dtype=torch.long
)
torch.testing.assert_close(
calibrated_prediction,
expected_calibrated_prediction,
rtol=error_tolerance,
atol=error_tolerance,
)
self.assertTrue(
torch.equal(
bin_ids.long(),
expected_bin_ids,
)
)
if torch.cuda.is_available():
(
calibrated_prediction_gpu,
bin_ids_gpu,
) = torch.ops.fbgemm.generic_histogram_binning_calibration_by_feature(
logit=logit.cuda(),
segment_value=segment_value.cuda(),
segment_lengths=lengths.cuda(),
num_segments=num_segments,
bin_num_examples=bin_num_examples.cuda(),
bin_num_positives=bin_num_positives.cuda(),
bin_boundaries=bin_boundaries.cuda(),
positive_weight=0.4,
bin_ctr_in_use_after=10000,
bin_ctr_weight_value=0.9995,
)
torch.testing.assert_close(
calibrated_prediction_gpu,
expected_calibrated_prediction.cuda(),
rtol=error_tolerance,
atol=error_tolerance,
)
self.assertTrue(
torch.equal(
bin_ids_gpu.long(),
expected_bin_ids.cuda(),
)
)
@unittest.skipIf(*gpu_unavailable)
@given(
data_type=st.sampled_from([torch.bfloat16, torch.half, torch.float32]),
)
@settings(verbosity=Verbosity.verbose, deadline=None)
def test_generic_histogram_binning_calibration_by_feature_cpu_gpu(
self,
data_type: torch.dtype,
) -> None:
num_logits = random.randint(8, 16)
num_bins = random.randint(3, 8)
num_segments = random.randint(3, 8)
positive_weight = random.uniform(0.1, 1.0)
bin_ctr_in_use_after = random.randint(0, 10)
bin_ctr_weight_value = random.random()
logit = torch.randn(num_logits).type(data_type)
lengths = torch.randint(0, 2, (num_logits,))
segment_value = torch.randint(-3, num_segments + 3, (sum(lengths),))
num_interval = num_bins * (num_segments + 1)
bin_num_positives = torch.randint(0, 10, (num_interval,)).double()
bin_num_examples = (
bin_num_positives + torch.randint(0, 10, (num_interval,)).double()
)
lower_bound = 0.0
upper_bound = 1.0
w = (upper_bound - lower_bound) / num_bins
bin_boundaries = torch.arange(
lower_bound + w, upper_bound - w / 2, w, dtype=torch.float64
)
(
calibrated_prediction_cpu,
bin_ids_cpu,
) = torch.ops.fbgemm.generic_histogram_binning_calibration_by_feature(
logit=logit,
segment_value=segment_value,
segment_lengths=lengths,
num_segments=num_segments,
bin_num_examples=bin_num_examples,
bin_num_positives=bin_num_positives,
bin_boundaries=bin_boundaries,
positive_weight=positive_weight,
bin_ctr_in_use_after=bin_ctr_in_use_after,
bin_ctr_weight_value=bin_ctr_weight_value,
)
(
calibrated_prediction_gpu,
bin_ids_gpu,
) = torch.ops.fbgemm.generic_histogram_binning_calibration_by_feature(
logit=logit.cuda(),
segment_value=segment_value.cuda(),
segment_lengths=lengths.cuda(),
num_segments=num_segments,
bin_num_examples=bin_num_examples.cuda(),
bin_num_positives=bin_num_positives.cuda(),
bin_boundaries=bin_boundaries.cuda(),
positive_weight=positive_weight,
bin_ctr_in_use_after=bin_ctr_in_use_after,
bin_ctr_weight_value=bin_ctr_weight_value,
)
torch.testing.assert_close(
calibrated_prediction_cpu,
calibrated_prediction_gpu.cpu(),
rtol=1e-03,
atol=1e-03,
)
self.assertTrue(
torch.equal(
bin_ids_cpu,
bin_ids_gpu.cpu(),
)
)
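    # Note (editorial): segment_sum_csr sums `values` over CSR segments; with
    # the batch-size argument 2 and offsets [0, 2, 3, 5] below, the segments
    # cover value rows [0, 4), [4, 6), and [6, 10), which sum to 10, 11, and 34.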
def test_segment_sum_csr(self) -> None:
segment_sum_cpu = torch.ops.fbgemm.segment_sum_csr(
2,
torch.IntTensor([0, 2, 3, 5]),
torch.Tensor([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0]),
)
torch.testing.assert_close(
segment_sum_cpu, torch.Tensor([10.0, 11.0, 34.0]), rtol=0, atol=0
)
if torch.cuda.is_available():
segment_sum_cuda = torch.ops.fbgemm.segment_sum_csr(
2,
torch.IntTensor([0, 2, 3, 5]).cuda(),
torch.Tensor(
[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0]
).cuda(),
)
torch.testing.assert_close(
segment_sum_cuda.cpu(), torch.Tensor([10.0, 11.0, 34.0]), rtol=0, atol=0
)
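    # Note (editorial): permute102_baddbmm_permute102 is checked against a
    # reference that permutes A from (m, batch_size, k) to (batch_size, m, k),
    # applies torch.baddbmm with the bias, and permutes the result back.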
@given(
batch_size=st.just(2),
m=st.just(3),
k=st.just(4),
n=st.just(5),
use_cpu=st.booleans() if gpu_available else st.just(True),
)
@settings(verbosity=Verbosity.verbose, max_examples=20, deadline=None)
def test_permute102_baddbmm_permute102(
self,
batch_size: int,
m: int,
k: int,
n: int,
use_cpu: bool,
) -> None:
        # torch.baddbmm doesn't support half on CPU, so use float there
dtype = torch.float if use_cpu else torch.half
device = torch.device("cpu" if use_cpu else "cuda")
A = torch.rand((m, batch_size, k), dtype=dtype, device=device)
B = torch.rand((batch_size, k, n), dtype=dtype, device=device)
# bias_permute102 = torch.rand(batch_size, 1, n).half().cuda()
# bias = bias_permute102.permute(1, 0, 2)
bias = torch.rand((batch_size, n), dtype=dtype, device=device)
bias_permute102 = bias.unsqueeze(1)
# bias = bias_short.unsqueeze(0)
A_permute102 = A.permute(1, 0, 2)
C_permute102 = torch.baddbmm(bias_permute102, A_permute102, B)
C_ref = C_permute102.permute(1, 0, 2) # (m, batch_size, n)
C = torch.ops.fbgemm.permute102_baddbmm_permute102(bias, A, B)
torch.testing.assert_close(C.cpu(), C_ref.cpu())
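    # NumPy reference for pack_segments: splits `tensor` into segments of the
    # given lengths along dim 0, truncates each segment to max_length, and
    # zero-pads it up to max_length before stacking.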
def _pack_segments_ref(
self,
lengths: torch.Tensor,
tensor: torch.Tensor,
max_length: Optional[int] = None,
) -> np.ndarray:
lengths = lengths.numpy()
sections = np.split(tensor, np.cumsum(lengths))
max_length = np.max(lengths, initial=0) if max_length is None else max_length
padded_arrs = []
for arr in sections[:-1]: # Last section is always a blank
arr = arr[: min(max_length, len(arr)), ...]
padded_arr = np.pad(
arr,
[(0, max(max_length - arr.shape[0], 0))]
+ ([(0, 0)] * (len(arr.shape) - 1)),
constant_values=0,
)
padded_arrs.append(padded_arr)
if len(padded_arrs) == 0:
padded_arrs = torch.empty((0, 0) + tuple(tensor.shape[1:]))
else:
padded_arrs = torch.Tensor(np.stack(padded_arrs))
# pyre-fixme[7]: Expected `ndarray` but got `Tensor`.
return padded_arrs
@given(
n=st.integers(2, 10),
k=st.integers(2, 10),
batch_size=st.integers(1, 30),
divisions=st.integers(1, 10),
dtype=st.sampled_from(
[
torch.float,
torch.half,
]
),
)
@settings(deadline=None)
def test_pack_segments(
self,
n: int,
k: int,
batch_size: int,
divisions: int,
dtype: torch.dtype,
) -> None:
input_raw = np.random.rand(batch_size, n, k)
input_data = torch.tensor(input_raw, dtype=dtype, requires_grad=True)
lengths = torch.tensor(
get_n_rand_num_summing_to_k(divisions, batch_size), dtype=torch.int
)
max_length = lengths.max().item()
packed_tensor = torch.ops.fbgemm.pack_segments(
t_in=input_data, lengths=lengths, max_length=max_length
)
packed_ref = self._pack_segments_ref(lengths, input_raw)
packed_ref = torch.Tensor(packed_ref).to(dtype)
self.assertTrue(torch.equal(packed_tensor, packed_ref))
grad_cpu = torch.tensor(
np.random.uniform(low=0.01, high=0.5, size=packed_ref.shape).astype(
np.float32
)
).to(dtype)
# CPU backward
packed_tensor.backward(grad_cpu)
if gpu_available:
packed_cuda = torch.ops.fbgemm.pack_segments(
t_in=input_data.cuda(),
lengths=lengths.cuda(),
max_length=max_length,
)
self.assertTrue(torch.equal(packed_tensor, packed_cuda.cpu()))
# GPU backward
packed_cuda.backward(grad_cpu.cuda())
@given(
n=st.integers(2, 10),
k=st.integers(2, 10),
batch_size=st.integers(1, 30),
divisions=st.integers(1, 10),
max_length=st.integers(1, 20),
dtype=st.sampled_from(
[
torch.float,
torch.half,
]
),
)
@settings(deadline=None)
def test_pack_segments_smaller_max_len(
self,
n: int,
k: int,
batch_size: int,
divisions: int,
max_length: int,
dtype: torch.dtype,
) -> None:
input_data = torch.tensor(np.random.rand(batch_size, n, k), dtype=dtype)
lengths = torch.tensor(
get_n_rand_num_summing_to_k(divisions, batch_size), dtype=torch.int
)
packed_tensor = torch.ops.fbgemm.pack_segments(
t_in=input_data,
lengths=lengths,
max_length=max_length,
)
self.assertEqual(packed_tensor.shape, (divisions, max_length, n, k))
packed_ref = self._pack_segments_ref(
lengths,
input_data,
max_length=max_length,
)
# pyre-fixme[6]: For 2nd param expected `Tensor` but got `ndarray`.
self.assertTrue(torch.equal(packed_tensor, packed_ref))
if gpu_available:
packed_cuda = torch.ops.fbgemm.pack_segments(
t_in=input_data.cuda(),
lengths=lengths.cuda(),
max_length=max_length,
)
self.assertTrue(torch.equal(packed_tensor, packed_cuda.cpu()))
@skipIfRocm()
@given(
n=st.integers(2, 10),
k=st.integers(2, 10),
batch_size=st.integers(1, 30),
divisions=st.integers(1, 10),
dtype=st.sampled_from(
[
torch.float,
torch.half,
]
),
)
@settings(deadline=None)
def test_pack_segments_meta_backend(
self,
n: int,
k: int,
batch_size: int,
divisions: int,
dtype: torch.dtype,
) -> None:
input_raw = np.random.rand(batch_size, n, k)
input_data = torch.tensor(
input_raw, dtype=torch.float32, requires_grad=True
).to("meta")
lengths = torch.tensor(
get_n_rand_num_summing_to_k(divisions, batch_size), dtype=torch.int
)
max_length = lengths.max().item()
packed_tensor = torch.ops.fbgemm.pack_segments(
t_in=input_data, lengths=lengths, max_length=max_length
)
packed_ref = self._pack_segments_ref(lengths, input_raw)
# verify forward
assert packed_tensor.size() == torch.Tensor(packed_ref).size()
@given(
N=st.integers(1, 32),
shape=st.one_of(
st.lists(st.integers(1, 128), max_size=1),
st.lists(st.integers(1, 16), min_size=2, max_size=2),
),
dtype=st.sampled_from([torch.float, torch.half, torch.double]),
use_cpu=st.booleans() if gpu_available else st.just(True),
consecutive_indices=st.booleans(),
skip_indices_sorting_fwd=st.booleans(),
use_inference_mode=st.booleans(),
)
@settings(max_examples=20, deadline=None)
def test_index_select_dim0(
self,
N: int,
shape: List[int],
dtype: torch.dtype,
use_cpu: bool,
consecutive_indices: bool,
skip_indices_sorting_fwd: bool,
use_inference_mode: bool,
) -> None:
device = torch.device("cpu" if use_cpu else "cuda")
U = random.randint(0, N + 1)
kwargs = {}
if consecutive_indices:
start = np.random.randint(0, U)
length = np.random.randint(1, U - start + 1)
indices = list(range(start, start + length))
for _ in range(N - U):
indices.append(np.random.randint(start, start + length))
np_arr = np.array(indices)
np.random.shuffle(np_arr)
indices = torch.from_numpy(np_arr).to(torch.int).to(device)
kwargs["consecutive_range_start"] = start
kwargs["consecutive_range_length"] = length
else:
indices = torch.randint(U, (N,), device=device)
kwargs["skip_indices_sorting_fwd"] = skip_indices_sorting_fwd
input = torch.rand((U,) + tuple(shape), dtype=dtype, device=device)
with torch.inference_mode() if use_inference_mode else contextlib.nullcontext():
output_ref = torch.ops.fbgemm.index_select_dim0(input, indices, **kwargs)
output = torch.index_select(input, 0, indices)
torch.testing.assert_close(output, output_ref)
if not use_inference_mode:
gradcheck_args = [
input.clone().detach().double().requires_grad_(True),
indices,
]
for k in kwargs:
gradcheck_args.append(kwargs[k])
torch.autograd.gradcheck(torch.ops.fbgemm.index_select_dim0, gradcheck_args)
@given(
num_indices=st.integers(1, 32),
max_num_input_rows=st.integers(1, 32),
shape=st.lists(st.integers(1, 32), min_size=1, max_size=2),
dtype=st.sampled_from([torch.float, torch.half, torch.double]),
use_cpu=st.booleans() if gpu_available else st.just(True),
num_groups=st.integers(1, 32),
use_var_cols=st.booleans(),
use_var_num_input_rows=st.booleans(),
check_non_contiguous=st.booleans(),
)
@settings(
verbosity=Verbosity.verbose,
max_examples=20,
deadline=None,
)
def test_group_index_select_dim0(
self,
num_indices: int,
max_num_input_rows: int,
shape: List[int],
dtype: torch.dtype,
use_cpu: bool,
num_groups: int,
use_var_cols: bool,
use_var_num_input_rows: bool,
check_non_contiguous: bool,
) -> None:
device = torch.device("cpu" if use_cpu else "cuda")
input_group: List[torch.Tensor] = []
input_ref_group: List[torch.Tensor] = []
indices_group: List[torch.Tensor] = []
grad_group: List[torch.Tensor] = []
for _ in range(num_groups):
if use_var_num_input_rows:
num_input_rows = (
random.randint(1, max_num_input_rows)
if max_num_input_rows > 1
else 1
)
else:
num_input_rows = max_num_input_rows
indices = torch.randint(num_input_rows, (num_indices,), device=device)
assert indices.max() < num_input_rows
if use_var_cols:
var_dim = random.randint(0, len(shape) - 1)
new_shape = random.randint(1, 32)
shape[var_dim] = new_shape
indices_group.append(indices)
input = torch.rand(
(num_input_rows,) + tuple(shape), dtype=dtype, device=device
)
input_ref = input.clone().detach()
input.requires_grad = True
input_ref.requires_grad = True
input_group.append(input)
input_ref_group.append(input_ref)
grad = torch.rand((num_indices,) + tuple(shape), dtype=dtype, device=device)
grad_group.append(grad)
# Test forward
output_ref_group = []
for input, indices in zip(input_ref_group, indices_group):
output_ref_group.append(torch.index_select(input, 0, indices))
output_group = torch.ops.fbgemm.group_index_select_dim0(
input_group, indices_group
)
# Test backward
for out, grad in zip(output_ref_group, grad_group):
out.backward(grad)
cat_output = torch.concat(
[
(
# Transpose is likely going to make the tensor
# noncontiguous
output.transpose(1, 0).flatten()
if check_non_contiguous
else output.flatten()
)
for output in output_group
]
)
cat_grad = torch.concat(
[
(
# Transpose is likely going to make the tensor
# noncontiguous
grad.transpose(1, 0).flatten()
if check_non_contiguous
else grad.flatten()
)
for grad in grad_group
]
)
cat_output.backward(cat_grad)
def compare_tensor_groups(
test_group: List[torch.Tensor],
ref_group: List[torch.Tensor],
tensor_type: str,
tols: Dict["str", float],
) -> None:
passed = True
failure_count = 0
for i, (test, ref) in enumerate(zip(test_group, ref_group)):
# pyre-ignore [6]
if not torch.allclose(test, ref, **tols):
passed = False
failure_count += 1
print(
f"FAILED: group {i} {tensor_type} ({dtype}), "
f"input shape {input_group[i].shape}, indices "
f"{indices_group[i]}, test {test}, ref {ref}"
)
assert (
passed
), f"{failure_count}/{num_groups} groups of {tensor_type} failed"
compare_tensor_groups(
output_group, output_ref_group, "activation", {"rtol": 0, "atol": 0}
)
compare_tensor_groups(
# pyre-ignore [6]
[i.grad for i in input_group],
# pyre-ignore [6]
[i.grad for i in input_ref_group],
"gradient",
{"rtol": 1e-02, "atol": 1e-02} if dtype == torch.half else {},
)
@given(
T=st.integers(1, 5),
B=st.integers(1, 5),
L=st.integers(1, 5),
)
@settings(max_examples=20, deadline=None)
def test_bottom_unique_k_per_row(
self,
T: int,
B: int,
L: int,
) -> None:
E = 1000000
all_indices = (np.random.zipf(a=1.15, size=(T, B, 3 * L)) - 1) % E
all_indices_deduped = torch.ops.fbgemm.bottom_k_per_row(
torch.as_tensor(all_indices), torch.tensor([0, L], dtype=torch.long), True
)
for index_tuple in itertools.product(range(T), range(B)):
# sample without replacement from
# https://stats.stackexchange.com/questions/20590/how-do-i-sample-without-replacement-using-a-sampling-with-replacement-function
r = set()
for x in all_indices[index_tuple]:
if x not in r:
r.add(x)
if len(r) == L:
break
assert (len(r)) == L, "too skewed distribution (alpha too big)"
all_indices[index_tuple][:L] = sorted(r)
all_indices_deduped_ref = torch.as_tensor(all_indices[:, :, :L])
torch.testing.assert_close(all_indices_deduped, all_indices_deduped_ref)
@given(
num_inputs=st.integers(0, 100),
max_input_rows=st.integers(2, 32),
max_cols_factor=st.integers(2, 256),
max_output_rows=st.integers(2, 32),
permute_output_dim_0_1=st.booleans(),
dtype=st.sampled_from([torch.float, torch.half]),
use_cpu=st.booleans() if gpu_available else st.just(True),
)
@settings(verbosity=Verbosity.verbose, max_examples=20, deadline=None)
def test_batch_index_select_dim0(
self,
num_inputs: int,
max_input_rows: int,
max_cols_factor: int,
max_output_rows: int,
permute_output_dim_0_1: bool,
dtype: torch.dtype,
use_cpu: bool,
) -> None:
device = "cpu" if use_cpu else "cuda"
input_rows = torch.randint(
low=1, high=max_input_rows, size=(num_inputs,)
).tolist()
input_columns = (
torch.randint(low=1, high=max_cols_factor, size=(num_inputs,)) * 4
).tolist()
if permute_output_dim_0_1:
# All num_indices must be the same if permute_output_dim_0_1 is
# True
num_indices = torch.randint(low=1, high=max_output_rows, size=(1,)).item()
input_num_indices = [num_indices] * num_inputs
else:
input_num_indices = torch.randint(
low=1, high=max_output_rows, size=(num_inputs,)
).tolist()
def validate(
test_list: List[torch.Tensor],
ref_list: List[torch.Tensor],
rows: List[int],
val_fn: Callable[[torch.Tensor, torch.Tensor], bool],
name: str,
) -> None:
test_passed_all = True
error_msg = ""
for i, (test, ref) in enumerate(zip(test_list, ref_list)):
test = test.float()
ref = ref.float()
test_passed = val_fn(test, ref)
test_passed_all = test_passed & test_passed_all
if not test_passed:
test = test.reshape(rows[i], -1)
ref = ref.reshape(rows[i], -1)
for r in range(rows[i]):
test_row = test[r]
ref_row = ref[r]
if not val_fn(test_row, ref_row):
error_msg += f"ERROR: {name} {i} row {r} are different, test {test_row}, ref {ref_row}\n"
assert test_passed_all, error_msg
logging.info(f"{name} test passed")
if num_inputs == 0:
inputs = [torch.empty(0, dtype=dtype, device=device)]
indices = [torch.empty(0, dtype=torch.long, device=device)]
else:
inputs = [
torch.rand(rows, cols, dtype=dtype, device=device)
for rows, cols in zip(input_rows, input_columns)
]
indices = [
torch.randint(
low=0, high=rows, size=(num,), dtype=torch.long, device=device
)
for num, rows in zip(input_num_indices, input_rows)
]
for i in range(len(inputs)):
inputs[i].requires_grad = True
output_ref = [
input.index_select(dim=0, index=index).flatten()
for input, index in zip(inputs, indices)
]
concat_inputs = torch.concat(
[input.flatten().clone().detach() for input in inputs]
)
concat_indices = torch.concat(indices)
concat_inputs.requires_grad = True
output_test = torch.ops.fbgemm.batch_index_select_dim0(
concat_inputs,
concat_indices,
input_num_indices,
input_rows,
input_columns,
permute_output_dim_0_1,
)
if permute_output_dim_0_1 and num_inputs > 0:
output_list = output_test.view(input_num_indices[0], -1).split(
input_columns,
dim=1,
)
output_list = [out.flatten() for out in output_list]
else:
output_list = output_test.split(
[rows * cols for rows, cols in zip(input_num_indices, input_columns)]
)
validate(output_list, output_ref, input_num_indices, torch.equal, "output")
if num_inputs == 0:
grads = [torch.empty(0, dtype=dtype, device=device)]
else:
grads = [torch.rand_like(output) for output in output_ref]
for out_ref, grad in zip(output_ref, grads):
out_ref.backward(grad)
if permute_output_dim_0_1 and num_inputs > 0:
concat_grads = torch.concat(
[grad.view(input_num_indices[0], -1) for grad in grads], dim=1
).flatten()
else:
concat_grads = torch.concat(grads)
assert concat_grads.shape == output_test.shape
output_test.backward(concat_grads)
assert concat_inputs.grad is not None
grad_list = concat_inputs.grad.split(
[rows * cols for rows, cols in zip(input_rows, input_columns)]
)
grad_ref = []
for input in inputs:
assert input.grad is not None
grad_ref.append(input.grad.flatten())
tol = 1.0e-4 if dtype == torch.float else 1.0e-2
validate(
grad_list,
grad_ref,
input_rows,
functools.partial(torch.allclose, atol=tol, rtol=tol),
"grad",
)
if __name__ == "__main__":
unittest.main()
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import inspect
import sys
import unittest
from itertools import accumulate
from typing import List, Tuple
import fbgemm_gpu
import torch
import torch._dynamo
from fbgemm_gpu.permute_pooled_embedding_modules import PermutePooledEmbeddings
from hypothesis import given, HealthCheck, settings
from torch import nn, Tensor
# pyre-fixme[16]: Module `fbgemm_gpu` has no attribute `open_source`.
if getattr(fbgemm_gpu, "open_source", False):
# pyre-ignore[21]
from test_utils import cpu_and_maybe_gpu, gpu_unavailable
else:
from fbgemm_gpu.test.test_utils import cpu_and_maybe_gpu, gpu_unavailable
typed_gpu_unavailable: Tuple[bool, str] = gpu_unavailable
if getattr(HealthCheck, "not_a_test_method", False):
suppressed_list: List[HealthCheck] = [HealthCheck.not_a_test_method]
else:
suppressed_list = []
INTERN_MODULE = "fbgemm_gpu.permute_pooled_embedding_modules"
FIXED_EXTERN_API = {
"PermutePooledEmbeddings": {
"__init__": ["self", "embs_dims", "permute", "device"],
"forward": ["self", "pooled_embs"],
},
}
FWD_COMPAT_MSG = (
"WARNING: If this test is failing, you are probably trying "
"to make changes to a module that has been marked external to PyPer torch packages. "
"This can break forward compatibility of torch packages on training_platform "
"(see https://fb.workplace.com/groups/pyper/permalink/808155810065803/). "
"You need to split up your changes as follows:\n"
"\t1. Edit your diff so it only contains the changes as optional, and not any usage of the"
" new optional changes.\n"
"\t2. Edit FIXED_EXTERN_API in this test so your diff passes the test.\n"
"\t3. Land your diff and wait for the diff to be picked up by the production version of"
" fbpkg training_platform.\n"
"\t4. Once step 3. is complete, you can push the rest of your changes that use the new"
" changes."
)
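# Toy network that sandwiches PermutePooledEmbeddings (dims [2, 3, 1, 4],
# permute [3, 0, 2, 1]) between two bias-free linear layers.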
class Net(torch.nn.Module):
def __init__(self) -> None:
super(Net, self).__init__()
self.fc1 = torch.nn.Linear(1, 10, bias=False)
self.permute_pooled_embeddings = PermutePooledEmbeddings(
[2, 3, 1, 4], [3, 0, 2, 1]
)
self.fc2 = torch.nn.Linear(10, 1, bias=False)
def forward(self, x: Tensor) -> Tensor:
x = self.fc1(x)
x = self.permute_pooled_embeddings(x)
x = self.fc2(x)
return x
# @parameterized_class([{"device_type": "cpu"}, {"device_type": "cuda"}])
class PooledEmbeddingModulesTest(unittest.TestCase):
@settings(deadline=10000, suppress_health_check=suppressed_list)
# pyre-fixme[56]: Pyre was not able to infer the type of argument
@given(device_type=cpu_and_maybe_gpu())
def setUp(self, device_type: torch.device) -> None:
self.device = device_type
@unittest.skipIf(True, "Skip until FailedHealthCheck is fixed")
def test_permutation(self) -> None:
net = Net().to(self.device)
input = torch.Tensor([range(10)]).to(self.device)
self.assertEqual(
net.permute_pooled_embeddings(input).view(10).tolist(),
[6, 7, 8, 9, 0, 1, 5, 2, 3, 4],
)
@unittest.skipIf(True, "Skip until FailedHealthCheck is fixed")
def test_permutation_autograd(self) -> None:
net = Net().to(self.device)
input = torch.randn(2, 1).to(self.device)
input_sum = input.sum().item()
output = net(input)
output.sum().backward()
        # Check that fc1's permuted gradient equals fc2's weights times input_sum
# pyre-fixme[16]: Optional type has no attribute `view`.
permute_res = net.permute_pooled_embeddings(net.fc1.weight.grad.view(1, 10))
permute_ref = input_sum * net.fc2.weight
torch.testing.assert_close(permute_res, permute_ref, rtol=1e-03, atol=1e-03)
def test_compatibility(self) -> None:
members = inspect.getmembers(sys.modules[INTERN_MODULE])
for name, clazz in members:
if getattr(clazz, "__module__", None) != INTERN_MODULE:
continue
self.assertIn(name, FIXED_EXTERN_API.keys(), FWD_COMPAT_MSG)
for fn, fixed_params in FIXED_EXTERN_API[name].items():
current_params = inspect.getfullargspec(getattr(clazz, fn)).args
self.assertEqual(
fixed_params,
current_params,
msg=f"\nForward incompatible change in {name} : {fn}\n\n"
f"{FWD_COMPAT_MSG}",
)
@unittest.skipIf(True, "Skip until FailedHealthCheck is fixed")
def test_pooled_table_batched_embedding(self) -> None:
num_emb_bags = 5
num_embeddings = 10
embedding_dims = [1, 2, 3, 4, 5]
emb_weight_range = 1
embedding_bags = [
nn.EmbeddingBag(
num_embeddings=num_embeddings,
embedding_dim=embedding_dims[i],
mode="sum",
sparse=True,
)
for i in range(num_emb_bags)
]
for emb_bag in embedding_bags:
torch.nn.init.uniform_(
emb_bag.weight,
-emb_weight_range,
emb_weight_range,
)
indices = [[0], [1, 2], [0, 1, 2], [3, 6], [8]]
indices = [torch.tensor(i).view(-1, len(i)) for i in indices]
pooled_embs = [emb_bag(indices[i]) for i, emb_bag in enumerate(embedding_bags)]
cat_pooled_embs = torch.cat(pooled_embs, dim=1)
permute_order = [2, 1, 3, 0, 4]
permute_pooled_embeddings = PermutePooledEmbeddings(
embedding_dims,
permute_order,
device=self.device,
)
permuted_pooled_emb = permute_pooled_embeddings(cat_pooled_embs.to(self.device))
ref_permuted_pooled_emb = [pooled_embs[i] for i in permute_order]
ref_permuted_pooled_emb = torch.cat(ref_permuted_pooled_emb, dim=1)
assert torch.allclose(
ref_permuted_pooled_emb.to(self.device), permuted_pooled_emb
)
@unittest.skipIf(True, "Skip until FailedHealthCheck is fixed")
def test_permutation_autograd_meta(self) -> None:
"""
Test that permute_pooled_embeddings_autograd works with meta tensor and
dynamo export mode
"""
input = torch.randn(2, 1)
net = Net()
output_cpu = net(input)
output_meta = net.to("meta")(input.to("meta"))
assert output_meta.shape == output_cpu.shape
assert input.shape == output_meta.shape
@unittest.skipIf(True, "Skip until FailedHealthCheck is fixed")
def test_duplicate_permutations(self) -> None:
embs_dims = [2, 3, 1, 4]
permute = [3, 0, 2, 0, 1, 3]
expected_result = [6, 7, 8, 9, 0, 1, 5, 0, 1, 2, 3, 4, 6, 7, 8, 9]
input = torch.Tensor([range(10)]).to(device="cuda")
_permute = torch.tensor(permute, device=self.device, dtype=torch.int64)
_offset_dim_list = torch.tensor(
[0] + list(accumulate(embs_dims)), device=self.device, dtype=torch.int64
)
inv_permute: List[int] = [0] * len(permute)
for i, p in enumerate(permute):
inv_permute[p] = i
_inv_permute = torch.tensor(inv_permute, device=self.device, dtype=torch.int64)
inv_embs_dims = [embs_dims[i] for i in permute]
_inv_offset_dim_list = torch.tensor(
[0] + list(accumulate(inv_embs_dims)),
device=self.device,
dtype=torch.int64,
)
result = torch.ops.fbgemm.permute_duplicate_pooled_embs_auto_grad(
input,
_offset_dim_list.to(device=input.device),
_permute.to(device=input.device),
_inv_offset_dim_list.to(device=input.device),
_inv_permute.to(device=input.device),
)
self.assertEqual(
result.view(16).tolist(),
expected_result,
)
input = input.to(device="cpu")
result = torch.ops.fbgemm.permute_duplicate_pooled_embs_auto_grad(
input,
_offset_dim_list.to(device=input.device),
_permute.to(device=input.device),
_inv_offset_dim_list.to(device=input.device),
_inv_permute.to(device=input.device),
)
self.assertEqual(
result.view(16).tolist(),
expected_result,
)
if __name__ == "__main__":
unittest.main()
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
# pyre-unsafe
import random
import unittest
from math import sqrt
from typing import List, Tuple
import fbgemm_gpu.batched_unary_embeddings_ops as batched_unary_embeddings_ops
import numpy as np
import torch
try:
# pyre-ignore[21]
from fbgemm_gpu import open_source # noqa: F401
# pyre-ignore[21]
from test_utils import gpu_unavailable
except Exception:
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops")
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops_cpu")
from fbgemm_gpu.test.test_utils import gpu_unavailable
# Relative tolerances
TOLERANCE_REL = {
torch.float32: 1e-4,
torch.float16: 1e-2,
torch.bfloat16: 0.1,
}
# Absolute tolerances
TOLERANCE_ABS = {
torch.float32: 1e-4,
torch.float16: 1e-2,
torch.bfloat16: 1e-2,
}
class TableBatchedEmbeddingsTest(unittest.TestCase):
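    # Reference implementation: one 1-dimensional EmbeddingBag per
    # (task, feature) pair, with outputs concatenated into a
    # (num_tasks, batch_size, num_features) tensor.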
class RefEmb(torch.nn.Module):
def __init__(self, num_tasks: int, hash_sizes: List[int]) -> None:
super().__init__()
self.num_tasks = num_tasks
self.hash_sizes = hash_sizes
self.emb_modules = torch.nn.ModuleList()
for _ in range(num_tasks):
for h in self.hash_sizes:
emb = torch.nn.EmbeddingBag(
num_embeddings=h,
embedding_dim=1,
mode="sum",
sparse=False,
include_last_offset=True,
)
emb.weight = torch.nn.Parameter(
torch.empty([h, 1]).uniform_(-sqrt(1 / h), sqrt(1 / h))
)
self.emb_modules.append(emb)
def forward(
self, offsets: List[torch.Tensor], indices: List[torch.Tensor]
) -> torch.Tensor:
tt_list = []
for n in range(self.num_tasks):
t_list = []
for i in range(len(self.hash_sizes)):
t = self.emb_modules[n * len(self.hash_sizes) + i](
offsets=offsets[i].long(), input=indices[i].long()
)
t_list.append(t)
tt = torch.cat(t_list, dim=1)
tt_list.append(tt)
return torch.cat(tt_list).view(self.num_tasks, -1, len(self.hash_sizes))
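    # Generates lengths/offsets/indices in CSR-style format with exactly one
    # random index per sample for a feature with `num_embeddings` rows.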
def _generate_unary_features(
self, batch_size: int, num_embeddings: int
) -> Tuple[List, List, List]:
lengths = []
offsets = []
indices = []
offset = 0
for _ in range(batch_size):
n_indices = 1
indices += np.round(
np.random.random(n_indices) * (num_embeddings - 1)
).tolist()
offsets.append(offset)
offset += 1
lengths.append(n_indices)
offsets.append(offset)
return (lengths, offsets, indices)
def _test_main(
self,
gpu_infer: bool,
torch_compile: bool = False,
) -> None:
if gpu_infer:
device = torch.device("cuda:0")
torch.cuda.set_device(device)
else:
device = torch.device("cpu")
batch_size = 128
hash_sizes = [100, 200]
num_tasks = 3
emb_dtype = random.choice([torch.float, torch.half, torch.bfloat16])
# generate unary features
lengths = []
offsets = []
indices = []
for h in hash_sizes:
l, o, i = self._generate_unary_features(batch_size, h)
lengths.append(torch.IntTensor(l).to(device))
offsets.append(torch.IntTensor(o).to(device))
indices.append(torch.IntTensor(i).to(device))
lengths_tensor = torch.cat(lengths)
indices_tensor = torch.cat(indices)
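        # Build complete offsets [0, cumsum(lengths)] over the concatenated
        # per-feature lengths for the batched unary embedding op.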
offsets_tensor = torch.zeros(
lengths_tensor.numel() + 1,
dtype=lengths_tensor.dtype,
device=lengths_tensor.device,
)
offsets_tensor[1:] = torch.ops.fbgemm.asynchronous_inclusive_cumsum(
lengths_tensor.view(-1)
)
        # forward with int32 offsets/indices
ref_emb = self.RefEmb(num_tasks, hash_sizes).to(device).to(emb_dtype)
unary_emb = (
batched_unary_embeddings_ops.BatchedUnaryEmbeddingBag(num_tasks, hash_sizes)
.to(device)
.to(emb_dtype)
)
for i, param in enumerate(unary_emb.split_embedding_weights()):
param.detach().copy_(ref_emb.emb_modules[i].weight)
output_ref = ref_emb(offsets, indices)
if torch_compile:
unary_emb = torch.compile(unary_emb, dynamic=True, fullgraph=True)
output = unary_emb(offsets_tensor, indices_tensor)
torch.testing.assert_close(
output_ref,
output,
atol=TOLERANCE_ABS[emb_dtype],
rtol=TOLERANCE_REL[emb_dtype],
)
        # forward with int64 offsets/indices
ref_emb = self.RefEmb(num_tasks, hash_sizes).to(device).to(emb_dtype)
unary_emb = (
batched_unary_embeddings_ops.BatchedUnaryEmbeddingBag(
num_tasks=num_tasks, hash_sizes=hash_sizes, long_index=True
)
.to(device)
.to(emb_dtype)
)
for i, param in enumerate(unary_emb.split_embedding_weights()):
param.detach().copy_(ref_emb.emb_modules[i].weight)
output_ref = ref_emb(offsets, indices)
if torch_compile:
unary_emb = torch.compile(unary_emb, dynamic=True, fullgraph=True)
output = unary_emb(offsets_tensor.long(), indices_tensor.long())
torch.testing.assert_close(
output_ref,
output,
atol=TOLERANCE_ABS[emb_dtype],
rtol=TOLERANCE_REL[emb_dtype],
)
# No implementation for CPU backprop yet
if not gpu_infer:
return
# FIXME: the following doesn't work
# with torch.compile-d unary_emb
if torch_compile:
return
d_output = (
torch.randn([num_tasks, batch_size, len(hash_sizes)]).to(device) * 0.1
)
output_ref.backward(d_output)
output.backward(d_output)
d_weight_ref = []
for emb in ref_emb.emb_modules:
d_weight_ref.append(emb.weight.grad)
d_weight_ref = torch.cat(d_weight_ref).view(num_tasks, sum(hash_sizes), -1)
d_weight = unary_emb.weight.grad # pyre-ignore[16]
torch.testing.assert_close(
d_weight_ref,
d_weight,
atol=TOLERANCE_ABS[emb_dtype],
rtol=TOLERANCE_REL[emb_dtype],
)
        # Test the case where a permute operation produces a non-contiguous
        # grad tensor; backward should handle this as well
unary_embedding_module = batched_unary_embeddings_ops.BatchedUnaryEmbeddingBag(
num_tasks=3,
hash_sizes=[71, 107],
long_index=True,
).to(device)
offsets = torch.tensor([0, 1, 2, 3, 4, 5, 6, 7], dtype=torch.long).to(device)
values = torch.tensor([1, 2, 3, 4, 5, 6, 7, 8], dtype=torch.long).to(device)
for _ in range(10):
output = unary_embedding_module(offsets, values).transpose(1, 0)
output = output[1:]
output.sum().backward()
@unittest.skipIf(*gpu_unavailable)
def test_gpu(self) -> None:
self._test_main(gpu_infer=True)
    # the test below fails with a CUDA error in the OSS CI,
    # likely due to the CUDA IMA issues in test_gpu above
# commenting out for now
# @unittest.skipIf(*gpu_unavailable)
# def test_gpu_torch_compile(self) -> None:
# self._test_main(gpu_infer=True, torch_compile=True)
def test_cpu(self) -> None:
self._test_main(gpu_infer=False)
if __name__ == "__main__":
unittest.main()
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from typing import Optional, Tuple
import hypothesis.strategies as st
import torch
from fbgemm_gpu.quantize_comm import none_throws, QuantizedCommCodec
from fbgemm_gpu.split_embedding_configs import SparseType
from hypothesis import assume, given, settings
class QuantizedCommCodecTest(unittest.TestCase):
@settings(deadline=4000)
# pyre-ignore
@given(
comm_precisions_loss_scale=st.sampled_from(
[
(SparseType.FP32, None),
(SparseType.FP16, None),
(SparseType.FP16, 4.0),
(SparseType.BF16, None),
(SparseType.BF16, 2.0),
(SparseType.FP8, None),
(SparseType.FP8, 3.0),
(SparseType.INT8, None),
]
),
row_size=st.integers(4, 256),
col_size=st.integers(4, 256),
rand_seed=st.integers(0, 65534),
row_dim=st.sampled_from([-1, 4, 8, 16, 32]),
)
def test_quantized_comm_codec(
self,
comm_precisions_loss_scale: Tuple[SparseType, Optional[float]],
row_size: int,
col_size: int,
rand_seed: int,
row_dim: int,
) -> None:
(comm_precision, loss_scale) = comm_precisions_loss_scale
if comm_precision == SparseType.FP8:
if row_dim > 0:
assume((col_size * row_size) % row_dim == 0)
assume(col_size % 4 == 0)
torch.manual_seed(rand_seed)
shape = (row_size, col_size)
input_tensor = torch.rand(shape, requires_grad=True)
cur_row_dim = None
if (
comm_precision == SparseType.FP8
and torch.cuda.device_count() != 0
and row_dim > 0
):
cur_row_dim = row_dim
input_tensor = input_tensor.view(-1).cuda()
quant_codec = QuantizedCommCodec(
comm_precision, loss_scale, row_dim=cur_row_dim
)
ctx = quant_codec.create_context()
if comm_precision == SparseType.INT8:
ctx = none_throws(ctx)
assume(row_size * col_size % ctx.row_dim == 0)
input_tensor = input_tensor.view(-1)
quant_tensor = quant_codec.encode(input_tensor, ctx)
self.assertEqual(
quant_tensor.numel(),
quant_codec.calc_quantized_size(input_tensor.numel(), ctx),
)
output_tensor = quant_codec.decode(quant_tensor, ctx)
self.assertEqual(output_tensor.shape, input_tensor.shape)
rtol = 0.005
atol = 0.005
if comm_precision == SparseType.FP8:
rtol = 0.05
atol = 0.05
torch.testing.assert_close(
input_tensor.detach().cpu(),
output_tensor.detach().cpu(),
rtol=rtol,
atol=atol,
)
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import fbgemm_gpu.metrics
import hypothesis.strategies as st
import torch
from hypothesis import given, settings
try:
# pyre-ignore[21]
from fbgemm_gpu import open_source # noqa: F401
except Exception:
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:metric_ops")
class MetricOpsTest(unittest.TestCase):
@unittest.skipIf(
True,
"Test is sometimes failed due to issues with Flaky. Skipping until the issues are resolved. ",
)
# pyre-ignore [56]
@given(
n_tasks=st.integers(1, 5),
batch_size=st.integers(1, 1024),
dtype=st.sampled_from([torch.half, torch.float, torch.double]),
)
@settings(max_examples=20, deadline=None)
def test_auc(self, n_tasks: int, batch_size: int, dtype: torch.dtype) -> None:
predictions = torch.randint(0, 1000, (n_tasks, batch_size)).to(dtype).cuda()
labels = torch.randint(0, 1000, (n_tasks, batch_size)).to(dtype).cuda() / 1000.0
weights = torch.rand(n_tasks, batch_size).to(dtype).cuda()
compute_auc = fbgemm_gpu.metrics.Auc()
output_ref = compute_auc(n_tasks, predictions, labels, weights)
output = fbgemm_gpu.metrics.auc(n_tasks, predictions, labels, weights)
# Explicitly convert type based on output_ref's dtype
output = output.to(output_ref.dtype)
        # Test correctness only if output_ref does not produce nan or inf
if not (torch.isnan(output_ref).any() or torch.isinf(output_ref).any()):
torch.testing.assert_close(
output_ref,
output,
rtol=1e-2 if dtype == torch.half else None,
atol=1e-2 if dtype == torch.half else None,
)
if __name__ == "__main__":
unittest.main()
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
# pyre-ignore-all-errors[56]
import itertools
import random
import unittest
from typing import List, Tuple
import hypothesis.strategies as st
import numpy as np
import torch
import torch._dynamo
from hypothesis import assume, given, settings, Verbosity
try:
# pyre-ignore[21]
from fbgemm_gpu import open_source # noqa: F401
# pyre-ignore[21]
from test_utils import (
gpu_available,
gpu_unavailable,
on_arm_platform,
running_on_github,
symint_vector_unsupported,
TEST_WITH_ROCM,
)
except Exception:
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops")
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops_cpu")
from fbgemm_gpu.test.test_utils import (
gpu_available,
gpu_unavailable,
on_arm_platform,
running_on_github,
symint_vector_unsupported,
TEST_WITH_ROCM,
)
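# Expands a lengths tensor into per-element segment (row) ids,
# e.g. lengths = [2, 0, 3] -> [0, 0, 2, 2, 2].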
def lengths_to_segment_ids(lengths: torch.Tensor) -> torch.Tensor:
return torch.repeat_interleave(
torch._dim_arange(lengths, 0).long(),
lengths.long(),
)
# Converts lengths + values format to COO format
# [B], [N] -> [B, N'].
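# e.g. lengths = [1, 2], values = [10, 20, 30], N = 3 gives a sparse tensor
# whose dense form is [[10, 0, 0], [20, 30, 0]].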
# pyre-ignore Missing return annotation [3]
def var_list_to_coo_1d(
lengths: torch.Tensor,
values: torch.Tensor,
N: int,
):
rows = lengths_to_segment_ids(lengths)
num_rows = lengths.size()[0]
# This does D&H sync
offsets = torch.ops.fbgemm.asynchronous_complete_cumsum(lengths)
output_size = lengths.sum()
# This does D&H sync
cols = torch.ops.fbgemm.offsets_range(offsets, output_size)
indices = torch.stack([rows, cols])
dims = [num_rows, N]
# torch.sparse_coo_tensor is not supported by torch.fx, wrap it.
return torch.sparse_coo_tensor(
indices=indices,
values=values,
size=dims,
)
# Converts lengths + values format to COO format
# [B], [N, D] -> [B, N', D].
# pyre-ignore Missing return annotation [3]
def var_list_to_coo(lengths: torch.Tensor, values: torch.Tensor, N: int, D: int):
rows = lengths_to_segment_ids(lengths)
num_rows = lengths.size()[0]
offsets = torch.ops.fbgemm.asynchronous_complete_cumsum(lengths)
output_size = lengths.sum()
# This does D&H sync
cols = torch.ops.fbgemm.offsets_range(offsets, output_size)
indices = torch.stack([rows, cols])
dims = [num_rows, N, D]
# torch.sparse_coo_tensor is not supported by torch.fx, wrap it.
return torch.sparse_coo_tensor(
indices=indices,
values=values,
size=dims,
)
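# Collapses a per-feature hash-size cumsum list into offsets: repeated
# consecutive cumsum values (empty tables) reuse the previous offset,
# otherwise the current position is emitted,
# e.g. [0, 0, 5, 5, 10] -> [0, 0, 2, 2, 4].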
def hash_size_cumsum_to_offsets(hash_size_cum_sum_list: List[int]) -> List[int]:
hash_size_offsets_list = [0]
count = 0
for f in range(1, len(hash_size_cum_sum_list)):
count = count + 1
if hash_size_cum_sum_list[f] == hash_size_cum_sum_list[f - 1]:
curr_offsets = hash_size_offsets_list[-1]
hash_size_offsets_list.append(curr_offsets)
else:
hash_size_offsets_list.append(count)
hash_size_offsets_list[-1] = count
return hash_size_offsets_list
class JaggedTensorOpsTest(unittest.TestCase):
def setUp(self) -> None:
if symint_vector_unsupported()[0]:
return
assert hasattr(
torch._dynamo.config, "assume_static_by_default"
), "Need to update the config as the dynamic/auto-dynamic setting has changed"
# Turn off static assumption for auto-dynamic
torch._dynamo.config.assume_static_by_default = False
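    # Reference for expand_into_jagged_permute: expands a segment-level
    # permute into an element-level permute using the segment lengths,
    # e.g. permute = [1, 0], length = [2, 3] -> [2, 3, 4, 0, 1].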
@staticmethod
def expand_into_jagged_permute_ref_(
permute: List[int],
length: List[int],
) -> List[int]:
offsets = [0] + list(itertools.accumulate(length))
output_permute = []
for r in permute:
output_permute.extend(
range(
offsets[r],
offsets[r + 1],
)
)
return output_permute
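# Worked sketch for the reference above (comment only, hypothetical values):
# expand_into_jagged_permute_ref_([2, 0, 1], [1, 2, 3]) builds
# offsets = [0, 1, 3, 6] and returns [3, 4, 5, 0, 1, 2] -- each feature-level
# permutation index is expanded into the element indices that feature owns.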
@given(
T=st.integers(min_value=10, max_value=20),
W=st.integers(min_value=8, max_value=64),
)
@settings(verbosity=Verbosity.verbose, max_examples=10, deadline=None)
def test_expand_into_jagged_permute(
self,
T: int,
W: int,
) -> None:
length_per_w = [random.randint(5000, 10000) for _ in range(W)]
length_1d = list(
itertools.chain.from_iterable(itertools.repeat(x, T) for x in length_per_w)
)
permute_list = list(range(T * W))
random.shuffle(permute_list)
permuted_length_1d = [length_1d[r] for r in permute_list]
permute_tensor = torch.tensor(permute_list)
# compute offsets
offsets_1d = [0] + list(itertools.accumulate(length_1d))
permuted_offsets_1d = [0] + list(itertools.accumulate(permuted_length_1d))
offsets_1d_tensor = torch.tensor(offsets_1d)
permuted_offsets_1d_tensor = torch.tensor(permuted_offsets_1d)
# cpu op
output_permute_cpu = torch.ops.fbgemm.expand_into_jagged_permute(
permute_tensor,
offsets_1d_tensor,
permuted_offsets_1d_tensor,
offsets_1d[-1],
)
# reference solution
output_permute_ref = self.expand_into_jagged_permute_ref_(
permute_list,
length_1d,
)
output_permute_ref_tensor = torch.tensor(output_permute_ref)
# compare the CPU op (and the GPU op below) against the reference
torch.testing.assert_close(output_permute_cpu, output_permute_ref_tensor)
if gpu_available:
# gpu op
output_permute_gpu = torch.ops.fbgemm.expand_into_jagged_permute(
permute_tensor.cuda(),
offsets_1d_tensor.cuda(),
permuted_offsets_1d_tensor.cuda(),
offsets_1d[-1],
)
torch.testing.assert_close(
output_permute_gpu.cpu(), output_permute_ref_tensor
)
@settings(
verbosity=Verbosity.verbose,
max_examples=20,
deadline=None,
)
@given(
B=st.integers(min_value=1, max_value=128),
D=st.integers(min_value=1, max_value=128),
max_sequence_length=st.integers(min_value=1, max_value=200),
dtype=st.sampled_from([torch.float, torch.half, torch.bfloat16]),
)
def test_jagged_2d_to_dense(
self,
B: int,
D: int,
max_sequence_length: int,
dtype: torch.dtype,
) -> None:
D = D * 4
lengths_ = np.random.randint(low=0, high=max_sequence_length, size=B)
total_lengths = lengths_.sum()
lengths = torch.from_numpy(lengths_)
offsets = torch.ops.fbgemm.asynchronous_complete_cumsum(lengths)
ref_values = torch.rand(total_lengths, D)
ref_output_values = var_list_to_coo(
lengths,
ref_values,
max_sequence_length,
D,
).to_dense()
ref_output_values = ref_output_values.to(dtype)
# test cpu forward
values = ref_values.clone().to(dtype).detach().requires_grad_(True)
output_values = torch.ops.fbgemm.jagged_2d_to_dense(
values=values,
offsets=offsets,
max_sequence_length=max_sequence_length,
)
torch.testing.assert_close(ref_output_values, output_values)
if torch.cuda.is_available():
# test gpu forward
ref_values = ref_values.cuda()
values = ref_values.clone().to(dtype).detach().requires_grad_(True)
offsets = offsets.cuda()
ref_output_values = ref_output_values.cuda()
output_values = torch.ops.fbgemm.jagged_2d_to_dense(
values=values,
offsets=offsets,
max_sequence_length=max_sequence_length,
)
torch.testing.assert_close(ref_output_values, output_values)
# test gpu backward
output_values.backward(ref_output_values)
ref_values = ref_values.to(dtype)
torch.testing.assert_close(ref_values, values.grad)
def test_jagged_2d_to_dense_truncation(self) -> None:
# Test the case where max_sequence_length < max(lengths[i])
lengths_ = np.array([2, 3, 0, 1])
lengths = torch.from_numpy(lengths_)
total_lengths = lengths_.sum()
offsets = torch.ops.fbgemm.asynchronous_complete_cumsum(lengths)
embedding_dim = 16
max_sequence_length = 2
ref_values = torch.rand(total_lengths, embedding_dim)
ref_output_values = var_list_to_coo(
lengths,
ref_values,
3,
embedding_dim,
).to_dense()[:, :max_sequence_length, :]
# test cpu forward
values = ref_values.clone().detach().requires_grad_(True)
output_values = torch.ops.fbgemm.jagged_2d_to_dense(
values=values,
offsets=offsets,
max_sequence_length=max_sequence_length,
)
torch.testing.assert_close(ref_output_values, output_values)
if torch.cuda.is_available():
# test gpu forward
ref_values = ref_values.cuda()
values = ref_values.clone().detach().requires_grad_(True)
offsets = offsets.cuda()
ref_output_values = ref_output_values.cuda()
output_values = torch.ops.fbgemm.jagged_2d_to_dense(
values=values,
offsets=offsets,
max_sequence_length=max_sequence_length,
)
torch.testing.assert_close(ref_output_values, output_values)
# test gpu backward
expected_grad = ref_values
expected_grad[4, :] = 0 # due to truncation
expected_grad = expected_grad.cuda()
output_values.backward(ref_output_values)
torch.testing.assert_close(expected_grad, values.grad)
@unittest.skipIf(*symint_vector_unsupported())
@settings(
verbosity=Verbosity.verbose,
max_examples=20,
deadline=None,
)
@given(
B=st.integers(min_value=2, max_value=128),
D=st.integers(min_value=2, max_value=128),
max_sequence_length=st.integers(min_value=1, max_value=200),
dtype=st.sampled_from([torch.float, torch.half, torch.bfloat16]),
device_type=st.sampled_from(["cpu", "cuda"])
if gpu_available
else st.just("cpu"),
)
def test_jagged_2d_to_dense_dynamic_shape(
self,
B: int,
D: int,
max_sequence_length: int,
dtype: torch.dtype,
device_type: str,
) -> None:
# Start a fresh compile for each parameter of the test case
torch._dynamo.reset()
D = D * 4
lengths_ = np.random.randint(low=0, high=max_sequence_length, size=B)
total_lengths = lengths_.sum()
lengths = torch.from_numpy(lengths_)
offsets = torch.ops.fbgemm.asynchronous_complete_cumsum(lengths)
ref_values = torch.rand(total_lengths, D)
ref_output_values = var_list_to_coo(
lengths,
ref_values,
max_sequence_length,
D,
).to_dense()
ref_output_values = ref_output_values.to(dtype)
ref_values = ref_values.to(device_type)
values = ref_values.clone().to(dtype).detach().requires_grad_(True)
offsets = offsets.to(device_type)
ref_output_values = ref_output_values.to(device_type)
output_values = torch.compile(
torch.ops.fbgemm.jagged_2d_to_dense, dynamic=True, fullgraph=True
)(
values=values,
offsets=offsets,
max_sequence_length=max_sequence_length,
)
torch.testing.assert_close(ref_output_values, output_values)
output_values.backward(ref_output_values)
ref_values = ref_values.to(dtype)
torch.testing.assert_close(ref_values, values.grad)
@unittest.skipIf(*gpu_unavailable)
@settings(
verbosity=Verbosity.verbose,
max_examples=20,
deadline=None,
)
@given(
T=st.integers(min_value=1, max_value=5),
B=st.integers(min_value=1, max_value=64),
D=st.integers(min_value=1, max_value=128),
max_sequence_length=st.integers(min_value=1, max_value=300),
use_cpu=st.booleans() if gpu_available else st.just(True),
)
def test_stacked_jagged_2d_to_dense(
self,
T: int,
B: int,
D: int,
max_sequence_length: int,
use_cpu: bool,
) -> None:
device = torch.device("cpu" if use_cpu else "cuda")
D = D * 4
lengths_ = np.random.randint(low=0, high=max_sequence_length, size=B * T)
total_lengths = lengths_.sum()
lengths = torch.from_numpy(lengths_).to(device)
offsets = torch.ops.fbgemm.asynchronous_complete_cumsum(lengths)
ref_values = torch.rand(total_lengths, D, device=device)
ref_output_values = var_list_to_coo(
lengths,
ref_values,
max_sequence_length,
D,
).to_dense()
lengths = lengths.view(T, B)
values = ref_values.clone().detach().requires_grad_(True)
output_values_per_table = torch.ops.fbgemm.stacked_jagged_2d_to_dense(
values=values,
lengths=lengths,
offset_per_key=[0]
+ np.cumsum([lengths[t].sum().item() for t in range(T)]).tolist(),
max_lengths_per_key=[max_sequence_length] * T,
)
ref_output_values = torch.ops.fbgemm.jagged_2d_to_dense(
values=ref_values,
offsets=offsets,
max_sequence_length=max_sequence_length,
)
torch.testing.assert_close(
ref_output_values, torch.cat(output_values_per_table)
)
# test backward
output_values = torch.cat(output_values_per_table)
output_values.backward(ref_output_values)
torch.testing.assert_close(ref_values, values.grad)
@settings(
verbosity=Verbosity.verbose,
max_examples=20,
deadline=None,
)
@given(
B=st.integers(min_value=1, max_value=128),
max_sequence_length=st.integers(min_value=1, max_value=500),
padding_value=st.integers(min_value=-100000, max_value=100000),
)
def test_jagged_1d_to_dense(
self,
B: int,
max_sequence_length: int,
padding_value: int,
) -> None:
def lengths_to_segment_ids(lengths: torch.Tensor) -> torch.Tensor:
return torch.repeat_interleave(
torch._dim_arange(lengths, 0).long(),
lengths.long(),
)
lengths_ = np.random.randint(low=0, high=max_sequence_length, size=B)
total_lengths = lengths_.sum()
lengths = torch.from_numpy(lengths_)
offsets = torch.ops.fbgemm.asynchronous_complete_cumsum(lengths)
ref_values = torch.randint(low=0, high=1000000000, size=(total_lengths,))
ref_values_mask = var_list_to_coo_1d(
lengths, torch.ones_like(ref_values), max_sequence_length
).to_dense()
ref_output_values = (
var_list_to_coo_1d(
lengths,
ref_values,
max_sequence_length,
).to_dense()
+ (1 - ref_values_mask) * torch.ones_like(ref_values_mask) * padding_value
)
# test cpu forward
values = ref_values.clone().detach().requires_grad_(False)
output_values = torch.ops.fbgemm.jagged_1d_to_dense(
values=values,
offsets=offsets,
max_sequence_length=max_sequence_length,
padding_value=padding_value,
)
torch.testing.assert_close(ref_output_values, output_values)
if torch.cuda.is_available():
# test gpu forward
ref_values = ref_values.cuda()
values = ref_values.clone().detach().requires_grad_(False)
offsets = offsets.cuda()
ref_output_values = ref_output_values.cuda()
output_values = torch.ops.fbgemm.jagged_1d_to_dense(
values=values,
offsets=offsets,
max_sequence_length=max_sequence_length,
padding_value=padding_value,
)
torch.testing.assert_close(ref_output_values, output_values)
def test_jagged_1d_to_dense_truncation(self) -> None:
lengths_ = np.array([1, 3, 0, 1])
lengths = torch.from_numpy(lengths_)
offsets = torch.ops.fbgemm.asynchronous_complete_cumsum(lengths)
ref_values = torch.from_numpy(np.array([100, 3, 4, 5, 6]))
ref_output = torch.from_numpy(np.array([100, 3, -1, 6])).reshape(-1, 1)
# test cpu forward
values = ref_values.clone().detach().requires_grad_(False)
output = torch.ops.fbgemm.jagged_1d_to_dense(
values=values,
offsets=offsets,
max_sequence_length=1,
padding_value=-1,
)
torch.testing.assert_close(ref_output, output)
if torch.cuda.is_available():
# test gpu forward
ref_values = ref_values.cuda()
values = ref_values.clone().detach().requires_grad_(False)
offsets = offsets.cuda()
ref_output = ref_output.cuda()
output = torch.ops.fbgemm.jagged_1d_to_dense(
values=values,
offsets=offsets,
max_sequence_length=1,
padding_value=-1,
)
torch.testing.assert_close(ref_output, output)
@unittest.skipIf(*symint_vector_unsupported())
@settings(
verbosity=Verbosity.verbose,
max_examples=20,
deadline=None,
)
@given(
B=st.integers(min_value=1, max_value=128),
max_sequence_length=st.integers(min_value=1, max_value=500),
padding_value=st.integers(min_value=-100000, max_value=100000),
device_type=st.sampled_from(["cpu", "cuda"])
if gpu_available
else st.just("cpu"),
)
def test_jagged_1d_to_dense_dynamic_shape(
self, B: int, max_sequence_length: int, padding_value: int, device_type: str
) -> None:
# Start a fresh compile for each parameter of the test case
torch._dynamo.reset()
def lengths_to_segment_ids(lengths: torch.Tensor) -> torch.Tensor:
return torch.repeat_interleave(
torch._dim_arange(lengths, 0).long(),
lengths.long(),
)
lengths_ = np.random.randint(low=0, high=max_sequence_length, size=B)
total_lengths = lengths_.sum()
lengths = torch.from_numpy(lengths_)
offsets = torch.ops.fbgemm.asynchronous_complete_cumsum(lengths)
ref_values = torch.randint(low=0, high=1000000000, size=(total_lengths,))
ref_values_mask = var_list_to_coo_1d(
lengths, torch.ones_like(ref_values), max_sequence_length
).to_dense()
ref_output_values = (
var_list_to_coo_1d(
lengths,
ref_values,
max_sequence_length,
).to_dense()
+ (1 - ref_values_mask) * torch.ones_like(ref_values_mask) * padding_value
)
ref_values = ref_values.to(device_type)
values = ref_values.clone().detach().requires_grad_(False)
offsets = offsets.to(device_type)
ref_output_values = ref_output_values.to(device_type)
output_values = torch.compile(
torch.ops.fbgemm.jagged_1d_to_dense, dynamic=True, fullgraph=True
)(
values=values,
offsets=offsets,
max_sequence_length=max_sequence_length,
padding_value=padding_value,
)
torch.testing.assert_close(ref_output_values, output_values)
@unittest.skipIf(*gpu_unavailable)
@settings(
verbosity=Verbosity.verbose,
max_examples=20,
deadline=None,
)
@given(
T=st.integers(min_value=1, max_value=20),
B=st.integers(min_value=1, max_value=128),
max_sequence_length=st.integers(min_value=1, max_value=500),
padding_value=st.integers(min_value=-100000, max_value=100000),
use_cpu=st.booleans() if gpu_available else st.just(True),
)
def test_stacked_jagged_1d_to_dense(
self,
T: int,
B: int,
max_sequence_length: int,
padding_value: int,
use_cpu: bool,
) -> None:
device = torch.device("cpu" if use_cpu else "cuda")
def lengths_to_segment_ids(lengths: torch.Tensor) -> torch.Tensor:
return torch.repeat_interleave(
torch._dim_arange(lengths, 0).long(),
lengths.long(),
)
lengths_ = np.random.randint(low=0, high=max_sequence_length, size=B * T)
total_lengths = lengths_.sum()
lengths = torch.from_numpy(lengths_).to(device)
offsets = torch.ops.fbgemm.asynchronous_complete_cumsum(lengths)
lengths = lengths.view(T, B)
ref_values = torch.randint(
low=0, high=1000000000, size=(total_lengths,), device=device
)
values = ref_values.clone().detach().requires_grad_(False)
output_values_per_table = torch.ops.fbgemm.stacked_jagged_1d_to_dense(
values=values,
lengths=lengths,
offset_per_key=[0]
+ np.cumsum([lengths[t].sum().item() for t in range(T)]).tolist(),
max_lengths_per_key=[max_sequence_length] * T,
padding_value=padding_value,
)
ref_output_values = torch.ops.fbgemm.jagged_1d_to_dense(
values=ref_values,
offsets=offsets,
max_sequence_length=max_sequence_length,
padding_value=padding_value,
)
torch.testing.assert_close(
ref_output_values, torch.cat(output_values_per_table)
)
def _to_padded_dense(
self,
values: torch.Tensor,
offsets: List[torch.LongTensor],
max_lengths: np.ndarray,
padding_value: float = 0,
) -> torch.Tensor:
outer_dense_size = len(offsets[0]) - 1
# canonicalize by unsqueezing the last dim if the inner dense dimension
# is 1 and folded.
inner_dense_size = 1 if values.ndim == 1 else values.size(-1)
dense = torch.empty(
(outer_dense_size,) + tuple(max_lengths) + (inner_dense_size,),
dtype=values.dtype,
device=values.device,
)
for i in range(outer_dense_size):
for jagged_coord in itertools.product(
*(list(range(max_l)) for max_l in max_lengths)
):
cur_offset = i
is_zero = False
for d in range(len(max_lengths)):
begin = offsets[d][cur_offset].item()
end = offsets[d][cur_offset + 1].item()
# pyre-fixme[6]: For 1st param expected `int` but got
# `Union[bool, float, int]`.
if jagged_coord[d] >= end - begin:
is_zero = True
break
cur_offset = begin + jagged_coord[d]
dense[(i,) + jagged_coord] = (
padding_value if is_zero else values[cur_offset]
)
return dense.squeeze(-1) if values.ndim == 1 else dense
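# Worked sketch for the helper above with a single jagged dim (comment only,
# hypothetical values): values = [1., 2., 3.], offsets = [[0, 2, 2, 3]],
# max_lengths = [2] yields [[1., 2.], [0., 0.], [3., 0.]] -- row 1 is empty
# so it is all padding, and entries beyond max_lengths would be truncated.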
# TODO: reuse this code in test_(stacked)_jagged_1/2d
def _generate_jagged_tensor(
self,
num_jagged_dim: int,
outer_dense_size: int,
inner_dense_size: int,
dtype: torch.dtype,
device: torch.device,
fold_inner_dense: bool = False,
# use dynamo to mark the input as having a dynamic shape so that a
# symbolic shape is generated
mark_dynamic: bool = False,
) -> Tuple[torch.Tensor, List[torch.LongTensor], np.ndarray]:
max_lengths = np.random.randint(low=1, high=10, size=(num_jagged_dim,))
x_offsets: List[torch.LongTensor] = []
num_lengths = outer_dense_size
for d in range(num_jagged_dim):
# Sometimes lengths[i] exceeds max_L, meaning jagged->dense will
# truncate rather than pad
lengths = torch.randint(
# PT2 specializes 0/1 dims as non-symbolic shapes, so we need to make
# them non-0/1 for testing. In real cases they'll likely not be 0/1
# anyway (and if they are, a recompile is triggered)
low=0 if not mark_dynamic else 1,
high=max_lengths[d] * 2,
# pyre-fixme[6]: For 3rd param expected `Union[List[int], Size,
# typing.Tuple[int, ...]]` but got `Tuple[Union[bool, float, int]]`.
size=(num_lengths,),
device=device,
)
offset = torch.ops.fbgemm.asynchronous_complete_cumsum(lengths)
if mark_dynamic:
torch._dynamo.mark_dynamic(offset, 0)
x_offsets.append(offset)
num_lengths = x_offsets[-1][-1].item()
x_values = torch.rand(
# pyre-fixme[6]: For 1st param expected `Union[List[int], Size,
# typing.Tuple[int, ...]]` but got `Tensor`.
x_offsets[-1][-1] * inner_dense_size,
dtype=dtype,
device=device,
)
if inner_dense_size != 1 or not fold_inner_dense:
# pyre-fixme[6]: For 1st param expected `int` but got `Union[bool, float, int]`.
x_values = x_values.reshape(x_offsets[-1][-1].item(), inner_dense_size)
if mark_dynamic:
for i in range(inner_dense_size):
torch._dynamo.mark_dynamic(x_values, i)
return x_values, x_offsets, max_lengths
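# Note on _generate_jagged_tensor above: it returns (values, offsets,
# max_lengths), where offsets holds one offsets tensor per jagged dim,
# values has offsets[-1][-1] rows (times inner_dense_size columns unless
# folded), and the random lengths may exceed max_lengths so that downstream
# jagged->dense conversions also exercise truncation.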
def _test_dense_to_jagged(
self,
num_jagged_dim: int,
outer_dense_size: int,
inner_dense_size: int,
dtype: torch.dtype,
device_type: str,
precompute_total_L: bool,
) -> None:
# Generate multi-dim jagged tensor
device = torch.device(device_type)
values_2d, offsets, max_lengths = self._generate_jagged_tensor(
num_jagged_dim, outer_dense_size, inner_dense_size, dtype, device
)
values_2d = values_2d.clone().detach().requires_grad_(True)
# jagged -> dense
dense = torch.ops.fbgemm.jagged_to_padded_dense(values_2d, offsets, max_lengths)
# dense -> jagged (op which is being tested)
if precompute_total_L:
total_L = values_2d.size(0)
jagged_values, jagged_offsets = torch.ops.fbgemm.dense_to_jagged(
dense, offsets, total_L
)
else:
jagged_values, jagged_offsets = torch.ops.fbgemm.dense_to_jagged(
dense, offsets
)
# jagged -> dense
dense2 = torch.ops.fbgemm.jagged_to_padded_dense(
jagged_values, jagged_offsets, max_lengths
)
# verify forward
torch.testing.assert_close(dense, dense2)
# verify backward
dense.retain_grad()
ref_output_values = jagged_values.clone().detach().requires_grad_(True)
ref_values = dense.clone().detach().requires_grad_(True)
jagged_values.backward(ref_output_values)
torch.testing.assert_close(dense.grad, ref_values)
@given(
num_jagged_dim=st.integers(1, 5),
outer_dense_size=st.integers(0, 5),
inner_dense_size=st.integers(0, 5),
dtype=st.sampled_from([torch.float, torch.half, torch.bfloat16]),
device_type=st.sampled_from(["cpu", "cuda"])
if gpu_available
else st.just("cpu"),
precompute_total_L=st.booleans(),
)
@settings(verbosity=Verbosity.verbose, max_examples=20, deadline=None)
def test_dense_to_jagged(
self,
num_jagged_dim: int,
outer_dense_size: int,
inner_dense_size: int,
dtype: torch.dtype,
device_type: str,
precompute_total_L: bool,
) -> None:
self._test_dense_to_jagged(
num_jagged_dim,
outer_dense_size,
inner_dense_size,
dtype,
device_type,
precompute_total_L,
)
@unittest.skipIf(*gpu_unavailable)
@given(
num_jagged_dim=st.just(1),
outer_dense_size=st.integers(0, 6000),
inner_dense_size=st.sampled_from([8, 16, 23, 24, 48, 50, 64, 72, 96, 192]),
dtype=st.just(torch.half),
device_type=st.sampled_from(["cpu", "cuda"])
if gpu_available
else st.just("cpu"),
precompute_total_L=st.booleans(),
)
@settings(verbosity=Verbosity.verbose, max_examples=20, deadline=None)
def test_dense_to_jagged_opt(
self,
num_jagged_dim: int,
outer_dense_size: int,
inner_dense_size: int,
dtype: torch.dtype,
device_type: str,
precompute_total_L: bool,
) -> None:
self._test_dense_to_jagged(
num_jagged_dim,
outer_dense_size,
inner_dense_size,
dtype,
device_type,
precompute_total_L,
)
# (8000 + 1) * 8 bytes (element size of the LongTensor/int64_t offsets)
# = ~62.5 KB > the 48 KB default shared memory on V100/A100.
@unittest.skipIf(*gpu_unavailable)
@given(
num_jagged_dim=st.just(1),
outer_dense_size=st.just(8000),
inner_dense_size=st.just(16),
dtype=st.just(torch.half),
device_type=st.sampled_from(["cpu", "cuda"])
if gpu_available
else st.just("cpu"),
precompute_total_L=st.booleans(),
)
@settings(verbosity=Verbosity.verbose, max_examples=1, deadline=None)
def test_dense_to_jagged_opt_large_batch(
self,
num_jagged_dim: int,
outer_dense_size: int,
inner_dense_size: int,
dtype: torch.dtype,
device_type: str,
precompute_total_L: bool,
) -> None:
self._test_dense_to_jagged(
num_jagged_dim,
outer_dense_size,
inner_dense_size,
dtype,
device_type,
precompute_total_L,
)
@given(
num_jagged_dim=st.integers(1, 5),
outer_dense_size=st.integers(0, 5),
inner_dense_size=st.integers(0, 5),
dtype=st.sampled_from([torch.float, torch.half, torch.bfloat16]),
device_type=st.sampled_from(["meta"]),
precompute_total_L=st.booleans(),
)
@settings(verbosity=Verbosity.verbose, max_examples=20, deadline=None)
def test_dense_to_jagged_meta_backend(
self,
num_jagged_dim: int,
outer_dense_size: int,
inner_dense_size: int,
dtype: torch.dtype,
device_type: str,
precompute_total_L: bool,
) -> None:
device = torch.device("cpu")
values_2d, offsets, max_lengths = self._generate_jagged_tensor(
num_jagged_dim, outer_dense_size, inner_dense_size, dtype, device
)
values_2d = values_2d.clone().detach().requires_grad_(True)
# jagged -> dense
dense = torch.ops.fbgemm.jagged_to_padded_dense(values_2d, offsets, max_lengths)
# dense -> jagged (op which is being tested)
if precompute_total_L:
total_L = values_2d.size(0)
dense.to(device_type)
jagged_values, jagged_offsets = torch.ops.fbgemm.dense_to_jagged(
dense, offsets, total_L
)
else:
dense.to(device_type)
jagged_values, jagged_offsets = torch.ops.fbgemm.dense_to_jagged(
dense, offsets
)
jagged_values.to(device_type)
# jagged -> dense
dense2 = torch.ops.fbgemm.jagged_to_padded_dense(
jagged_values, jagged_offsets, max_lengths
)
# verify forward
assert dense.size() == dense2.size()
@unittest.skipIf(*symint_vector_unsupported())
@given(
num_jagged_dim=st.integers(1, 5),
# TODO: size = 0/1 will be incorrectly specialized
outer_dense_size=st.integers(2, 5),
inner_dense_size=st.integers(2, 5),
dtype=st.sampled_from([torch.float, torch.half, torch.bfloat16]),
device_type=st.sampled_from(["cpu", "cuda"])
if gpu_available
else st.just("cpu"),
)
@settings(verbosity=Verbosity.verbose, max_examples=20, deadline=None)
def test_dense_to_jagged_dynamic_shape(
self,
num_jagged_dim: int,
outer_dense_size: int,
inner_dense_size: int,
dtype: torch.dtype,
device_type: str,
) -> None:
# Start a fresh compile for each parameter of the test case
torch._dynamo.reset()
values_2d, offsets, max_lengths = self._generate_jagged_tensor(
num_jagged_dim,
outer_dense_size,
inner_dense_size,
dtype,
torch.device(device_type),
mark_dynamic=True,
)
values_2d = values_2d.clone().detach().requires_grad_(True)
@torch.compile(fullgraph=True, dynamic=True)
def jagged_to_dense(
values: torch.Tensor, offsets: torch.Tensor, max_lengths: List[int]
) -> torch.Tensor:
return torch.ops.fbgemm.jagged_to_padded_dense(values, offsets, max_lengths)
# jagged -> dense
dense = jagged_to_dense(values_2d, offsets, max_lengths.tolist())
# dense -> jagged; total_L must be pre-computed for this path
total_L = values_2d.size(0)
dense = dense.clone().detach().to(device_type)
torch._dynamo.mark_dynamic(dense, 0)
torch._dynamo.mark_dynamic(dense, -1)
@torch.compile(fullgraph=True, dynamic=True)
def dense_to_jagged_withL(
dense: torch.Tensor, offsets: torch.Tensor, total_L: List[int]
) -> Tuple[torch.Tensor, torch.Tensor]:
return torch.ops.fbgemm.dense_to_jagged(dense, offsets, total_L)
@torch.compile(fullgraph=False, dynamic=True)
def dense_to_jagged_noL(
dense: torch.Tensor, offsets: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor]:
return torch.ops.fbgemm.dense_to_jagged(dense, offsets)
jagged_values, jagged_offsets = dense_to_jagged_noL(dense, offsets)
jagged_values, jagged_offsets = dense_to_jagged_withL(dense, offsets, total_L)
jagged_values.to(device_type)
# jagged -> dense
dense2 = torch.ops.fbgemm.jagged_to_padded_dense(
jagged_values, jagged_offsets, max_lengths
)
# verify forward
assert dense.size() == dense2.size()
@given(
num_jagged_dim=st.integers(1, 5),
outer_dense_size=st.integers(0, 5),
inner_dense_size=st.integers(0, 5),
fold_inner_dense=st.booleans(),
padding_value=st.sampled_from([0, -1e-8]),
dtype=st.sampled_from([torch.float, torch.half, torch.bfloat16, torch.double]),
device_type=st.sampled_from(["cpu", "cuda"])
if gpu_available
else st.just("cpu"),
)
@settings(verbosity=Verbosity.verbose, max_examples=20, deadline=None)
def test_jagged_to_padded_dense(
self,
num_jagged_dim: int,
outer_dense_size: int,
inner_dense_size: int,
fold_inner_dense: bool,
padding_value: float,
dtype: torch.dtype,
device_type: str,
) -> None:
# CPU doesn't support bfloat16
assume(device_type != "cpu" or dtype != torch.bfloat16)
assume(not fold_inner_dense or inner_dense_size == 1)
# An illustrative hand-crafted example (the test below uses randomly
# generated tensors of the same structure). The dense representation is
# [[[[0, 1], [ 0, 0], [0, 0]],
# [[2, 3], [ 4, 5], [6, 7]],
# [[0, 0], [ 0, 0], [0, 0]],
# [[0, 0], [ 0, 0], [0, 0]]],
# [[[0, 0], [ 0, 0], [0, 0]],
# [[0, 0], [ 0, 0], [0, 0]],
# [[0, 0], [ 0, 0], [0, 0]],
# [[0, 0], [ 0, 0], [0, 0]]],
# [[[8, 9], [10, 11], [0, 0]],
# [[0, 0], [ 0, 0], [0, 0]],
# [[0, 0], [ 0, 0], [0, 0]],
# [[0, 0], [ 0, 0], [0, 0]]]],
# inner_dense_size = 2
# x_offsets = [
# torch.LongTensor([0, 2, 2, 3]), # lengths torch.Tensor([2, 0, 1]),
# torch.LongTensor([0, 1, 4, 6]), # lengths torch.Tensor([1, 3, 2]),
# ]
# outer_dense_size = len(x_offsets[0]) - 1
# max_lengths = [4, 3]
device = torch.device(device_type)
x_values, x_offsets, max_lengths = self._generate_jagged_tensor(
num_jagged_dim,
outer_dense_size,
inner_dense_size,
torch.float,
device,
fold_inner_dense,
)
output_ref = self._to_padded_dense(
x_values, x_offsets, max_lengths, padding_value=padding_value
)
output = torch.ops.fbgemm.jagged_to_padded_dense(
x_values,
x_offsets,
max_lengths,
padding_value=padding_value,
)
torch.testing.assert_close(output, output_ref)
torch.autograd.gradcheck(
torch.ops.fbgemm.jagged_to_padded_dense,
(
x_values.double().requires_grad_(True),
x_offsets,
max_lengths,
padding_value,
),
)
@given(
num_jagged_dim=st.integers(1, 5),
outer_dense_size=st.integers(0, 5),
inner_dense_size=st.integers(0, 5),
padding_value=st.just(0),
dtype=st.sampled_from([torch.float, torch.half, torch.bfloat16, torch.double]),
device_type=st.just("meta"),
)
@settings(verbosity=Verbosity.verbose, max_examples=20, deadline=None)
def test_jagged_to_padded_dense_meta_backend(
self,
num_jagged_dim: int,
outer_dense_size: int,
inner_dense_size: int,
padding_value: float,
dtype: torch.dtype,
device_type: str,
) -> None:
assume(device_type != "cpu" or dtype != torch.bfloat16)
device = torch.device("cpu")
x_values, x_offsets, max_lengths = self._generate_jagged_tensor(
num_jagged_dim, outer_dense_size, inner_dense_size, torch.float, device
)
output_ref = self._to_padded_dense(
x_values, x_offsets, max_lengths, padding_value=padding_value
)
x_values.to(device_type)
output = torch.ops.fbgemm.jagged_to_padded_dense(
x_values,
x_offsets,
max_lengths,
padding_value=padding_value,
)
assert output.size() == output_ref.size()
def _test_jagged_elementwise_binary(
self,
num_jagged_dim: int,
outer_dense_size: int,
inner_dense_size: int,
operation: str,
dtype: torch.dtype,
device_type: str,
) -> None:
device = torch.device(device_type)
x_values, x_offsets, max_lengths = self._generate_jagged_tensor(
num_jagged_dim, outer_dense_size, inner_dense_size, dtype, device
)
y = torch.rand(
outer_dense_size * np.prod(max_lengths) * inner_dense_size,
dtype=dtype,
device=device,
).reshape((outer_dense_size,) + tuple(max_lengths) + (inner_dense_size,))
x_padded = self._to_padded_dense(x_values, x_offsets, max_lengths)
if operation == "add":
output_ref = x_padded + y
output = torch.ops.fbgemm.jagged_dense_elementwise_add(
x_values, x_offsets, y
)
elif operation == "add_jagged_output":
# create a jagged tensor and then densify
y = self._to_padded_dense(
torch.rand(
(
max(outer_dense_size * np.prod(max_lengths), x_values.size(0)),
inner_dense_size,
),
dtype=dtype,
device=device,
),
x_offsets,
max_lengths,
)
output_ref = x_padded + y
(
output,
output_offsets,
) = torch.ops.fbgemm.jagged_dense_elementwise_add_jagged_output(
x_values, x_offsets, y
)
output = self._to_padded_dense(output, output_offsets, max_lengths)
elif operation == "mul":
output_ref = x_padded * y
output, output_offsets = torch.ops.fbgemm.jagged_dense_elementwise_mul(
x_values, x_offsets, y
)
output = self._to_padded_dense(output, output_offsets, max_lengths)
else:
raise AssertionError(f"Unknown operation {operation}")
torch.testing.assert_close(output, output_ref)
if operation == "add":
f = torch.ops.fbgemm.jagged_dense_elementwise_add
elif operation == "add_jagged_output":
# pyre-fixme[2]: Parameter must be annotated.
def add_jagged_output_func(*args) -> torch.Tensor:
return torch.ops.fbgemm.jagged_dense_elementwise_add_jagged_output(
*args
)[0]
f = add_jagged_output_func
else:
assert operation == "mul"
# pyre-fixme[2]: Parameter must be annotated.
def mul_func(*args) -> torch.Tensor:
return torch.ops.fbgemm.jagged_dense_elementwise_mul(*args)[0]
f = mul_func
torch.autograd.gradcheck(
f,
(
x_values.double().requires_grad_(True),
x_offsets,
y.double().requires_grad_(True),
),
)
@given(
num_jagged_dim=st.integers(1, 4),
outer_dense_size=st.integers(0, 4),
inner_dense_size=st.integers(0, 4),
operation=st.sampled_from(["add", "add_jagged_output", "mul"]),
dtype=st.sampled_from([torch.float, torch.half, torch.double, torch.bfloat16]),
device_type=st.sampled_from(["cpu", "cuda"])
if gpu_available
else st.just("cpu"),
)
@settings(verbosity=Verbosity.verbose, max_examples=20, deadline=None)
def test_jagged_elementwise_binary(
self,
num_jagged_dim: int,
outer_dense_size: int,
inner_dense_size: int,
operation: str,
dtype: torch.dtype,
device_type: str,
) -> None:
self._test_jagged_elementwise_binary(
num_jagged_dim,
outer_dense_size,
inner_dense_size,
operation,
dtype,
device_type,
)
@unittest.skipIf(*gpu_unavailable)
@given(
num_jagged_dim=st.just(1),
outer_dense_size=st.integers(0, 8),
inner_dense_size=st.sampled_from([16, 64, 96, 192]),
operation=st.sampled_from(["add_jagged_output", "mul"]),
dtype=st.just(torch.half),
device_type=st.sampled_from(["cpu", "cuda"])
if gpu_available
else st.just("cpu"),
)
@settings(verbosity=Verbosity.verbose, max_examples=4, deadline=None)
def test_jagged_elementwise_binary_opt(
self,
num_jagged_dim: int,
outer_dense_size: int,
inner_dense_size: int,
operation: str,
dtype: torch.dtype,
device_type: str,
) -> None:
self._test_jagged_elementwise_binary(
num_jagged_dim,
outer_dense_size,
inner_dense_size,
operation,
dtype,
device_type,
)
@unittest.skipIf(*symint_vector_unsupported())
@given(
num_jagged_dim=st.integers(1, 5),
outer_dense_size=st.integers(2, 5),
inner_dense_size=st.integers(2, 5),
operation=st.sampled_from(["add", "add_jagged_output", "mul"]),
dtype=st.sampled_from([torch.float, torch.half, torch.double, torch.bfloat16]),
device_type=st.sampled_from(["cpu", "cuda"])
if gpu_available
else st.just("cpu"),
)
@settings(verbosity=Verbosity.verbose, max_examples=20, deadline=None)
def test_jagged_elementwise_binary_dynamic_shape(
self,
num_jagged_dim: int,
outer_dense_size: int,
inner_dense_size: int,
operation: str,
dtype: torch.dtype,
device_type: str,
) -> None:
# Start a fresh compile for each parameter of the test case
torch._dynamo.reset()
device = torch.device(device_type)
x_values, x_offsets, max_lengths = self._generate_jagged_tensor(
num_jagged_dim,
outer_dense_size,
inner_dense_size,
dtype,
device,
mark_dynamic=True,
)
y = torch.rand(
outer_dense_size * np.prod(max_lengths) * inner_dense_size,
dtype=dtype,
device=device,
).reshape((outer_dense_size,) + tuple(max_lengths) + (inner_dense_size,))
x_padded = self._to_padded_dense(x_values, x_offsets, max_lengths)
@torch.compile(fullgraph=True, dynamic=True)
def jagged_dense_elementwise_add(
x_values: torch.Tensor, x_offsets: torch.Tensor, y: torch.Tensor
) -> torch.Tensor:
return torch.ops.fbgemm.jagged_dense_elementwise_add(x_values, x_offsets, y)
@torch.compile(fullgraph=True, dynamic=True)
def jagged_dense_elementwise_add_jagged_output(
x_values: torch.Tensor, x_offsets: torch.Tensor, y: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor]:
return torch.ops.fbgemm.jagged_dense_elementwise_add_jagged_output(
x_values, x_offsets, y
)
@torch.compile(fullgraph=True, dynamic=True)
def jagged_dense_elementwise_mul(
x_values: torch.Tensor, x_offsets: torch.Tensor, y: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor]:
return torch.ops.fbgemm.jagged_dense_elementwise_mul(x_values, x_offsets, y)
if operation == "add":
output_ref = x_padded + y
output = jagged_dense_elementwise_add(x_values, x_offsets, y)
elif operation == "add_jagged_output":
# create a jagged tensor and then densify
y = self._to_padded_dense(
torch.rand(
(
max(outer_dense_size * np.prod(max_lengths), x_values.size(0)),
inner_dense_size,
),
dtype=dtype,
device=device,
),
x_offsets,
max_lengths,
)
output_ref = x_padded + y
(
output,
output_offsets,
) = jagged_dense_elementwise_add_jagged_output(x_values, x_offsets, y)
output = self._to_padded_dense(output, output_offsets, max_lengths)
elif operation == "mul":
output_ref = x_padded * y
output, output_offsets = jagged_dense_elementwise_mul(
x_values, x_offsets, y
)
output = self._to_padded_dense(output, output_offsets, max_lengths)
else:
raise AssertionError(f"Unknown operation {operation}")
assert output.size() == output_ref.size()
def _test_jagged_dense_dense_elementwise_add_jagged_output(
self,
num_jagged_dim: int,
outer_dense_size: int,
inner_dense_size: int,
dtype: torch.dtype,
device_type: str,
) -> None:
device = torch.device(device_type)
x_values, x_offsets, max_lengths = self._generate_jagged_tensor(
num_jagged_dim, outer_dense_size, inner_dense_size, dtype, device
)
x_padded = self._to_padded_dense(x_values, x_offsets, max_lengths)
# create a jagged tensor and then densify
y_0 = self._to_padded_dense(
torch.rand(
(
max(outer_dense_size * np.prod(max_lengths), x_values.size(0)),
inner_dense_size,
),
dtype=dtype,
device=device,
),
x_offsets,
max_lengths,
)
y_1 = self._to_padded_dense(
torch.rand(
(
max(outer_dense_size * np.prod(max_lengths), x_values.size(0)),
inner_dense_size,
),
dtype=dtype,
device=device,
),
x_offsets,
max_lengths,
)
output_ref = x_padded + y_0 + y_1
(
output,
output_offsets,
) = torch.ops.fbgemm.jagged_dense_dense_elementwise_add_jagged_output(
x_values, x_offsets, y_0, y_1
)
output = self._to_padded_dense(output, output_offsets, max_lengths)
torch.testing.assert_close(output, output_ref)
# pyre-fixme[2]: Parameter must be annotated.
def add_jagged_output_func(*args) -> torch.Tensor:
return torch.ops.fbgemm.jagged_dense_dense_elementwise_add_jagged_output(
*args
)[0]
f = add_jagged_output_func
torch.autograd.gradcheck(
f,
(
x_values.double().requires_grad_(True),
x_offsets,
y_0.double().requires_grad_(True),
y_1.double().requires_grad_(True),
),
)
@given(
num_jagged_dim=st.integers(1, 4),
outer_dense_size=st.integers(0, 4),
inner_dense_size=st.integers(0, 4),
dtype=st.sampled_from([torch.float, torch.half, torch.double, torch.bfloat16]),
device_type=st.sampled_from(["cpu", "cuda"])
if gpu_available
else st.just("cpu"),
)
@settings(verbosity=Verbosity.verbose, max_examples=20, deadline=None)
def test_jagged_dense_dense_elementwise_add_jagged_output(
self,
num_jagged_dim: int,
outer_dense_size: int,
inner_dense_size: int,
dtype: torch.dtype,
device_type: str,
) -> None:
self._test_jagged_dense_dense_elementwise_add_jagged_output(
num_jagged_dim, outer_dense_size, inner_dense_size, dtype, device_type
)
@unittest.skipIf(*gpu_unavailable)
@given(
num_jagged_dim=st.just(1),
outer_dense_size=st.integers(0, 8),
inner_dense_size=st.sampled_from([16, 64, 96, 192]),
dtype=st.just(torch.half),
device_type=st.sampled_from(["cpu", "cuda"])
if gpu_available
else st.just("cpu"),
)
@settings(verbosity=Verbosity.verbose, max_examples=4, deadline=None)
def test_jagged_dense_dense_elementwise_add_jagged_output_opt(
self,
num_jagged_dim: int,
outer_dense_size: int,
inner_dense_size: int,
dtype: torch.dtype,
device_type: str,
) -> None:
self._test_jagged_dense_dense_elementwise_add_jagged_output(
num_jagged_dim, outer_dense_size, inner_dense_size, dtype, device_type
)
@given(
num_jagged_dim=st.integers(1, 4),
outer_dense_size=st.integers(0, 4),
inner_dense_size=st.integers(0, 4),
dtype=st.sampled_from([torch.float, torch.half, torch.double, torch.bfloat16]),
device_type=st.just("meta"),
)
@settings(verbosity=Verbosity.verbose, max_examples=20, deadline=None)
def test_jagged_dense_dense_elementwise_add_jagged_output_meta_backend(
self,
num_jagged_dim: int,
outer_dense_size: int,
inner_dense_size: int,
dtype: torch.dtype,
device_type: str,
) -> None:
device = torch.device("cpu")
x_values, x_offsets, max_lengths = self._generate_jagged_tensor(
num_jagged_dim, outer_dense_size, inner_dense_size, dtype, device
)
x_padded = self._to_padded_dense(x_values, x_offsets, max_lengths)
# create a jagged tensor and then densify
y_0 = self._to_padded_dense(
torch.rand(
(
max(outer_dense_size * np.prod(max_lengths), x_values.size(0)),
inner_dense_size,
),
dtype=dtype,
device=device,
),
x_offsets,
max_lengths,
)
y_1 = self._to_padded_dense(
torch.rand(
(
max(outer_dense_size * np.prod(max_lengths), x_values.size(0)),
inner_dense_size,
),
dtype=dtype,
device=device,
),
x_offsets,
max_lengths,
)
output_ref = x_padded + y_0 + y_1
x_values.to(device_type)
(
output,
output_offsets,
) = torch.ops.fbgemm.jagged_dense_dense_elementwise_add_jagged_output(
x_values, x_offsets, y_0, y_1
)
output.to("cpu")
output = self._to_padded_dense(output, output_offsets, max_lengths)
assert output.size() == output_ref.size()
@unittest.skipIf(*symint_vector_unsupported())
@given(
num_jagged_dim=st.integers(1, 4),
outer_dense_size=st.integers(2, 4),
inner_dense_size=st.integers(2, 4),
dtype=st.sampled_from([torch.float, torch.half, torch.double, torch.bfloat16]),
device_type=st.just("cpu"),
)
@settings(verbosity=Verbosity.verbose, max_examples=20, deadline=None)
def test_jagged_dense_dense_elementwise_add_jagged_output_dynamic_shape(
self,
num_jagged_dim: int,
outer_dense_size: int,
inner_dense_size: int,
dtype: torch.dtype,
device_type: str,
) -> None:
# Start a fresh compile for each parameter of the test case
torch._dynamo.reset()
x_values, x_offsets, max_lengths = self._generate_jagged_tensor(
num_jagged_dim,
outer_dense_size,
inner_dense_size,
dtype,
torch.device(device_type),
mark_dynamic=True,
)
x_padded = self._to_padded_dense(x_values, x_offsets, max_lengths)
# create a jagged tensor and then densify
y_0 = self._to_padded_dense(
torch.rand(
(
max(outer_dense_size * np.prod(max_lengths), x_values.size(0)),
inner_dense_size,
),
dtype=dtype,
device=torch.device(device_type),
),
x_offsets,
max_lengths,
)
y_1 = self._to_padded_dense(
torch.rand(
(
max(outer_dense_size * np.prod(max_lengths), x_values.size(0)),
inner_dense_size,
),
dtype=dtype,
device=torch.device(device_type),
),
x_offsets,
max_lengths,
)
output_ref = x_padded + y_0 + y_1
x_values.to(device_type)
(output, output_offsets) = torch.compile(
torch.ops.fbgemm.jagged_dense_dense_elementwise_add_jagged_output,
fullgraph=True,
dynamic=True,
)(x_values, x_offsets, y_0, y_1)
output.to("cpu")
output = self._to_padded_dense(output, output_offsets, max_lengths)
assert output.size() == output_ref.size()
@settings(
verbosity=Verbosity.verbose,
max_examples=20,
deadline=None,
)
@given(
B=st.integers(0, 32),
H=st.integers(1, 3),
max_L=st.integers(1, 32),
D=st.integers(0, 32),
dtype=st.sampled_from([torch.float, torch.half, torch.bfloat16, torch.double]),
device_type=st.sampled_from(["cpu", "cuda"])
if gpu_available
else st.just("cpu"),
)
def test_batched_dense_vec_jagged_2d_mul(
self,
B: int,
H: int,
max_L: int,
D: int,
dtype: torch.dtype,
device_type: str,
) -> None:
assume(H == 1 or B != 0)
# CPU doesn't support bfloat16
assume(device_type != "cpu" or dtype != torch.bfloat16)
device = torch.device(device_type)
torch.backends.cuda.matmul.allow_tf32 = False
# Sometimes lengths[i] exceeds max_L, meaning jagged->dense will
# truncate rather than pad
lengths = torch.randint(max_L * 2, size=(B,), device=device)
offsets = torch.ops.fbgemm.asynchronous_complete_cumsum(lengths)
values = torch.rand((offsets[-1], H * D), dtype=dtype, device=device)
dense = torch.rand((B * H, max_L), dtype=dtype, device=device)
padded_values = torch.ops.fbgemm.jagged_to_padded_dense(
values,
[offsets],
[max_L],
) # [B, N, H * D]
bmm_arg1 = dense.unsqueeze(1)
bmm_arg2 = (
padded_values.reshape(B, max_L, H, D)
.transpose(1, 2)
.reshape(B * H, max_L, D)
)
# torch.bmm not implemented for Half on CPU
if dtype in [torch.half, torch.bfloat16] and device_type == "cpu":
bmm_arg1 = bmm_arg1.float()
bmm_arg2 = bmm_arg2.float()
output_ref = torch.bmm(bmm_arg1, bmm_arg2).squeeze(
1
) # [B H, 1, N] x [B H, N, D] = [B H, 1, D]
if dtype in [torch.half, torch.bfloat16] and device_type == "cpu":
output_ref = output_ref.to(dtype)
output = torch.ops.fbgemm.batched_dense_vec_jagged_2d_mul(
dense, values, offsets
)
torch.testing.assert_close(
output,
output_ref,
rtol=1e-2 if dtype in [torch.half, torch.bfloat16] else None,
atol=1e-2 if dtype in [torch.half, torch.bfloat16] else None,
)
torch.autograd.gradcheck(
torch.ops.fbgemm.batched_dense_vec_jagged_2d_mul,
(
dense.clone().detach().double().requires_grad_(True),
values.clone().detach().double().requires_grad_(True),
offsets,
),
)
@settings(
verbosity=Verbosity.verbose,
max_examples=20,
deadline=None,
)
@given(
B=st.integers(0, 32),
H=st.integers(1, 3),
max_L=st.integers(1, 32),
D=st.integers(0, 32),
dtype=st.sampled_from([torch.float, torch.half, torch.bfloat16, torch.double]),
device_type=st.sampled_from(["meta"]),
)
def test_batched_dense_vec_jagged_2d_mul_meta_backend(
self,
B: int,
H: int,
max_L: int,
D: int,
dtype: torch.dtype,
device_type: str,
) -> None:
assume(H == 1 or B != 0)
device = torch.device("cpu")
torch.backends.cuda.matmul.allow_tf32 = False
# Sometimes lengths[i] exceeds max_L, meaning jagged->dense will
# truncate rather than pad
lengths = torch.randint(max_L * 2, size=(B,), device=device)
offsets = torch.ops.fbgemm.asynchronous_complete_cumsum(lengths)
values = torch.rand((offsets[-1], H * D), dtype=dtype, device=device)
dense = torch.rand((B * H, max_L), dtype=dtype, device=device)
padded_values = torch.ops.fbgemm.jagged_to_padded_dense(
values,
[offsets],
[max_L],
) # [B, N, H * D]
bmm_arg1 = dense.unsqueeze(1)
bmm_arg2 = (
padded_values.reshape(B, max_L, H, D)
.transpose(1, 2)
.reshape(B * H, max_L, D)
)
# torch.bmm not implemented for Half on CPU
if dtype in [torch.half, torch.bfloat16]:
bmm_arg1 = bmm_arg1.float()
bmm_arg2 = bmm_arg2.float()
output_ref = torch.bmm(bmm_arg1, bmm_arg2).squeeze(
1
) # [B H, 1, N] x [B H, N, D] = [B H, 1, D]
dense.to(device_type)
values.to(device_type)
output = torch.ops.fbgemm.batched_dense_vec_jagged_2d_mul(
dense, values, offsets
)
assert output.size() == output_ref.size()
@unittest.skipIf(*symint_vector_unsupported())
@settings(
verbosity=Verbosity.verbose,
max_examples=20,
deadline=None,
)
@given(
B=st.integers(2, 32),
H=st.integers(1, 3),
max_L=st.integers(1, 32),
D=st.integers(2, 32),
dtype=st.sampled_from([torch.float, torch.half, torch.bfloat16, torch.double]),
device_type=st.just("cpu"),
)
def test_batched_dense_vec_jagged_2d_mul_dynamic_shape(
self,
B: int,
H: int,
max_L: int,
D: int,
dtype: torch.dtype,
device_type: str,
) -> None:
# Start a fresh compile for each parameter of the test case
torch._dynamo.reset()
assume(H == 1 or B != 0)
device = torch.device(device_type)
torch.backends.cuda.matmul.allow_tf32 = False
# Sometimes lengths[i] exceeds max_L, meaning jagged->dense will
# truncate rather than pad
lengths = torch.randint(low=1, high=max_L * 2, size=(B,), device=device)
offsets = torch.ops.fbgemm.asynchronous_complete_cumsum(lengths)
values = torch.rand((offsets[-1], H * D), dtype=dtype, device=device)
dense = torch.rand((B * H, max_L), dtype=dtype, device=device)
padded_values = torch.ops.fbgemm.jagged_to_padded_dense(
values,
[offsets],
[max_L],
) # [B, N, H * D]
bmm_arg1 = dense.unsqueeze(1)
bmm_arg2 = (
padded_values.reshape(B, max_L, H, D)
.transpose(1, 2)
.reshape(B * H, max_L, D)
)
# torch.bmm not implemented for Half on CPU
if dtype in [torch.half, torch.bfloat16]:
bmm_arg1 = bmm_arg1.float()
bmm_arg2 = bmm_arg2.float()
output_ref = torch.bmm(bmm_arg1, bmm_arg2).squeeze(
1
) # [B H, 1, N] x [B H, N, D] = [B H, 1, D]
dense.to(device_type)
values.to(device_type)
torch._dynamo.mark_dynamic(dense, 0)
torch._dynamo.mark_dynamic(values, 0)
torch._dynamo.mark_dynamic(values, 1)
torch._dynamo.mark_dynamic(offsets, 0)
output = torch.compile(
torch.ops.fbgemm.batched_dense_vec_jagged_2d_mul,
fullgraph=True,
dynamic=True,
)(dense, values, offsets)
assert output.size() == output_ref.size()
@staticmethod
def jagged_index_select_2d_ref(
values: torch.Tensor,
lengths: torch.Tensor,
inverse_lookup: torch.Tensor,
device: torch.device,
) -> torch.Tensor:
offsets = torch.ops.fbgemm.asynchronous_exclusive_cumsum(lengths)
end_offsets = offsets + lengths
full_start_offset = torch.index_select(offsets, 0, inverse_lookup)
full_end_offset = torch.index_select(end_offsets, 0, inverse_lookup)
index_ranges = torch.stack(
(full_start_offset, full_end_offset), dim=0
).transpose(0, 1)
to_be_merged_tensors = []
for row in index_ranges:
to_be_merged_tensors.append(torch.arange(row[0], row[1], device=device))
all_indices = torch.cat(to_be_merged_tensors, dim=0)
new_embeddings = torch.index_select(values, 0, all_indices)
return new_embeddings
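# Worked sketch for the reference above (comment only, hypothetical values):
# with per-row lengths = [2, 1, 3], the exclusive-cumsum offsets are
# [0, 2, 3] and the end offsets are [2, 3, 6]; inverse_lookup = [0, 2] then
# gathers value rows 0..1 followed by rows 3..5, i.e. jagged rows 0 and 2
# in their entirety and in order.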
@unittest.skipIf(*running_on_github)
@given(
max_seq_length=st.integers(5, 10),
batch_size=st.integers(1, 128),
num_cols=st.integers(1, 128),
num_jagged_tensor_rows=st.integers(1, 128),
index_dtype=st.sampled_from([torch.int, torch.long]),
jagged_tensor_dtype=st.sampled_from(
[
torch.float,
torch.half,
torch.int,
torch.long,
] # Disable torch.bfloat16 due to large error bound
),
use_cpu=st.booleans()
if (gpu_available and not TEST_WITH_ROCM)
else st.just(False)
if (gpu_available and TEST_WITH_ROCM)
else st.just(True),
)
@settings(max_examples=20, deadline=None)
def test_jagged_index_select_2d(
self,
max_seq_length: int,
batch_size: int,
num_cols: int,
num_jagged_tensor_rows: int,
index_dtype: torch.dtype,
jagged_tensor_dtype: torch.dtype,
use_cpu: bool,
) -> None:
device = torch.device("cpu" if use_cpu else "cuda")
is_float = jagged_tensor_dtype in [torch.float, torch.half, torch.bfloat16]
lengths = torch.randint(
low=0,
high=max_seq_length,
size=(num_jagged_tensor_rows,),
dtype=index_dtype,
device=device,
)
indices, _ = torch.sort(
torch.randint(
low=0,
high=num_jagged_tensor_rows,
size=(batch_size,),
dtype=index_dtype,
device=device,
)
)
if is_float:
values = torch.rand(
int(lengths.sum().item()),
num_cols,
dtype=jagged_tensor_dtype,
device=device,
)
else:
values = torch.randint(
2**16,
(int(lengths.sum().item()), num_cols),
dtype=jagged_tensor_dtype,
device=device,
)
values_ref = values.detach().clone()
# Only float tensors can require grad
if is_float:
values.requires_grad = True
values_ref.requires_grad = True
output, _ = torch.ops.fbgemm.jagged_index_select(values, lengths, indices)
output_ref = self.jagged_index_select_2d_ref(
values_ref, lengths, indices, device
)
assert torch.equal(output, output_ref)
if not is_float:
return
grad = torch.rand_like(output)
grad_ref = grad.detach().clone()
output.backward(grad)
output_ref.backward(grad_ref)
torch.testing.assert_close(
values.grad,
values_ref.grad,
rtol=1e-2 if jagged_tensor_dtype in [torch.half, torch.bfloat16] else None,
atol=1e-2 if jagged_tensor_dtype in [torch.half, torch.bfloat16] else None,
)
@unittest.skipIf(*running_on_github)
@given(
max_seq_length=st.integers(5, 10),
batch_size=st.integers(1, 128),
num_cols=st.integers(1, 128),
num_jagged_tensor_rows=st.integers(1, 128),
index_dtype=st.sampled_from([torch.int, torch.long]),
jagged_tensor_dtype=st.sampled_from(
[
torch.float,
torch.half,
torch.int,
torch.long,
] # Disable torch.bfloat16 due to large error bound
),
use_cpu=st.booleans()
if (gpu_available and not TEST_WITH_ROCM)
else st.just(False)
if (gpu_available and TEST_WITH_ROCM)
else st.just(True),
)
@settings(max_examples=20, deadline=None)
def test_jagged_index_select_2d_in_inference(
self,
max_seq_length: int,
batch_size: int,
num_cols: int,
num_jagged_tensor_rows: int,
index_dtype: torch.dtype,
jagged_tensor_dtype: torch.dtype,
use_cpu: bool,
) -> None:
device = torch.device("cpu" if use_cpu else "cuda")
is_float = jagged_tensor_dtype in [torch.float, torch.half, torch.bfloat16]
lengths = torch.randint(
low=0,
high=max_seq_length,
size=(num_jagged_tensor_rows,),
dtype=index_dtype,
device=device,
)
indices, _ = torch.sort(
torch.randint(
low=0,
high=num_jagged_tensor_rows,
size=(batch_size,),
dtype=index_dtype,
device=device,
)
)
if is_float:
values = torch.rand(
int(lengths.sum().item()),
num_cols,
dtype=jagged_tensor_dtype,
device=device,
)
else:
values = torch.randint(
2**16,
(int(lengths.sum().item()), num_cols),
dtype=jagged_tensor_dtype,
device=device,
)
values_ref = values.detach().clone()
with torch.inference_mode():
output, _ = torch.ops.fbgemm.jagged_index_select(values, lengths, indices)
output_ref = self.jagged_index_select_2d_ref(
values_ref, lengths, indices, device
)
assert torch.equal(output, output_ref)
@given(
batch_size=st.integers(1, 128),
max_length=st.integers(0, 128),
max_truncated_length=st.integers(1, 32),
index_dtype=st.sampled_from([torch.int, torch.long]),
jagged_tensor_dtype=st.sampled_from(
[torch.float, torch.half, torch.bfloat16, torch.int, torch.long]
),
use_cpu=st.just(True),
)
@settings(max_examples=20, deadline=None)
def test_jagged_1d_to_truncated_values(
self,
max_length: int,
batch_size: int,
max_truncated_length: int,
index_dtype: torch.dtype,
jagged_tensor_dtype: torch.dtype,
use_cpu: bool,
) -> None:
device = "cpu" if use_cpu else "cuda"
is_float = jagged_tensor_dtype in [torch.float, torch.half, torch.bfloat16]
lengths = torch.randint(
low=0,
high=max_length + 1,
size=(batch_size,),
dtype=index_dtype,
device=device,
)
n = int(lengths.sum().item())
if is_float:
values = torch.rand(
(n,),
dtype=jagged_tensor_dtype,
device=device,
)
else:
values = torch.randint(
2**16,
(n,),
dtype=jagged_tensor_dtype,
device=device,
)
truncated_values = torch.ops.fbgemm.jagged_1d_to_truncated_values(
values,
lengths,
max_truncated_length,
)
dense_values = torch.ops.fbgemm.jagged_1d_to_dense(
values=values,
offsets=torch.ops.fbgemm.asynchronous_complete_cumsum(lengths),
max_sequence_length=max_truncated_length,
padding_value=0,
) # [B, N]
truncated_lengths_ref = torch.clamp(lengths, max=max_truncated_length)
mask2d = torch.arange(max_truncated_length, device=device).expand(
batch_size, -1
) < truncated_lengths_ref.unsqueeze(-1)
truncated_values_ref = dense_values[mask2d].view(-1)
torch.testing.assert_close(truncated_values, truncated_values_ref)
@given(
batch_size=st.integers(1, 128),
max_length=st.integers(0, 128),
index_dtype=st.sampled_from([torch.int, torch.long]),
jagged_tensor_dtype=st.sampled_from([torch.int, torch.long]),
empty_lengths=st.booleans(),
use_cpu=st.just(True),
)
@settings(max_examples=20, deadline=None)
def test_masked_select_jagged_1d(
self,
max_length: int,
batch_size: int,
index_dtype: torch.dtype,
jagged_tensor_dtype: torch.dtype,
empty_lengths: bool,
use_cpu: bool,
) -> None:
device = "cpu" if use_cpu else "cuda"
if empty_lengths:
lengths = torch.zeros(batch_size, dtype=index_dtype, device=device)
else:
lengths = torch.randint(
low=0,
high=max_length + 1,
size=(batch_size,),
dtype=index_dtype,
device=device,
)
lengths[batch_size // 2] = 0 # test a corner case
n = int(lengths.sum().item())
values = torch.randint(
2**16,
(n,),
dtype=jagged_tensor_dtype,
device=device,
)
mask = torch.randint(2, (n,)) > 0
masked_values, masked_lengths = torch.ops.fbgemm.masked_select_jagged_1d(
values,
lengths,
mask,
)
masked_values_ref = values[mask]
cum_count = torch.cumsum(mask, 0)
cum_count = torch.cat((cum_count, torch.tensor([0])))
cum_length = cum_count[torch.cumsum(lengths, 0) - 1]
cum_length_shift_right = torch.roll(cum_length, 1)
cum_length_shift_right[0] = 0
masked_lengths_ref = (cum_length - cum_length_shift_right).to(lengths.dtype)
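# Trace of the reference-length computation above with hypothetical values
# (comment only): lengths = [2, 0, 1], mask = [True, False, True] gives
# cum_count = [1, 1, 2] (plus an appended 0 that a leading zero-length row
# would pick up via index -1), cum_length = [1, 1, 2], and the right-shifted
# difference [1, 0, 1] -- the number of kept elements per row.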
torch.testing.assert_close(masked_values, masked_values_ref)
torch.testing.assert_close(masked_lengths, masked_lengths_ref)
@unittest.skipIf(*gpu_unavailable)
@given(
max_seq_length=st.integers(5, 10),
input_batch_size=st.integers(1, 128),
output_batch_size=st.integers(1, 128),
num_batches=st.integers(1, 3),
index_dtype=st.sampled_from([torch.int, torch.long]),
jagged_tensor_dtype=st.sampled_from(
[
torch.float,
torch.half,
torch.int,
torch.long,
] # Disable torch.bfloat16 due to large error bound
),
has_weights=st.booleans(),
)
@settings(max_examples=20, deadline=None)
def test_keyed_jagged_index_select_dim1(
self,
max_seq_length: int,
input_batch_size: int,
output_batch_size: int,
num_batches: int,
index_dtype: torch.dtype,
jagged_tensor_dtype: torch.dtype,
has_weights: bool,
) -> None:
is_float = jagged_tensor_dtype in [torch.float, torch.half, torch.bfloat16]
lengths = torch.randint(
low=0,
high=max_seq_length,
size=(input_batch_size * num_batches,),
dtype=index_dtype,
device="cuda",
)
offsets = torch.concat(
[torch.zeros(1, dtype=torch.long, device="cuda"), lengths.cumsum(0)]
)
indices = torch.randint(
low=0,
high=1,
size=(output_batch_size,),
dtype=index_dtype,
device="cuda",
)
if is_float:
values = torch.rand(
int(offsets[-1].item()),
dtype=jagged_tensor_dtype,
device="cuda",
)
else:
values = torch.randint(
2**16,
(int(offsets[-1].item()),),
dtype=jagged_tensor_dtype,
device="cuda",
)
values_ref = values.detach().clone()
if has_weights:
weights = torch.rand(
int(offsets[-1].item()),
dtype=random.choice([torch.float, torch.half]),
device="cuda",
)
else:
weights = None
# Only float tensors can require grad
if is_float:
values.requires_grad = True
values_ref.requires_grad = True
index_select_output = torch.ops.fbgemm.keyed_jagged_index_select_dim1(
values, lengths, offsets, indices, input_batch_size, weights
)
output = index_select_output[0]
if has_weights:
output_weights = index_select_output[2]
output_ref = []
output_weight_ref = []
for k in range(num_batches):
key_lengths = lengths[k * input_batch_size : (k + 1) * input_batch_size]
start_offset = offsets[k * input_batch_size]
end_offset = offsets[(k + 1) * input_batch_size]
key_values = values_ref[start_offset:end_offset].view(-1, 1)
output_ref.append(
torch.ops.fbgemm.jagged_index_select(key_values, key_lengths, indices)[
0
].view(-1)
)
if has_weights:
# pyre-ignore[16]
key_weights = weights[start_offset:end_offset].view(-1, 1)
output_weight_ref.append(
torch.ops.fbgemm.jagged_index_select(
key_weights, key_lengths, indices
)[0].view(-1)
)
output_ref = torch.concat(output_ref)
assert torch.equal(output, output_ref)
if has_weights:
output_weight_ref = torch.concat(output_weight_ref)
# pyre-ignore[61]
assert torch.equal(output_weights, output_weight_ref)
if not is_float:
return
grad = torch.rand_like(output)
grad_ref = grad.detach().clone()
output.backward(grad)
output_ref.backward(grad_ref)
torch.testing.assert_close(
values.grad,
values_ref.grad,
rtol=1e-2 if jagged_tensor_dtype in [torch.half, torch.bfloat16] else None,
atol=1e-2 if jagged_tensor_dtype in [torch.half, torch.bfloat16] else None,
)
@given(
B=st.integers(1, 512),
max_L=st.integers(1, 1000),
D=st.integers(1, 32),
dtype=st.sampled_from([torch.float, torch.double]),
device_type=st.sampled_from(["cpu", "cuda"])
if gpu_available
else st.just("cpu"),
)
@settings(verbosity=Verbosity.verbose, max_examples=20, deadline=None)
def test_jagged_softmax(
self,
B: int,
max_L: int,
D: int,
dtype: torch.dtype,
device_type: str,
) -> None:
device = torch.device(device_type)
torch.backends.cuda.matmul.allow_tf32 = False
lengths = torch.randint(max_L + 1, size=(B,), device=device)
total_length = int(lengths.sum().item())
offsets = torch.ops.fbgemm.asynchronous_complete_cumsum(lengths)
values = torch.rand(
(total_length, D), requires_grad=True, dtype=dtype, device=device
)
output, _ = torch.ops.fbgemm.jagged_softmax(
values,
offsets,
max_L,
)
values_ref = values.detach().clone().requires_grad_(True)
output_ref, _ = torch.ops.fbgemm.dense_to_jagged(
torch.nn.functional.softmax(
torch.ops.fbgemm.jagged_to_padded_dense(
values_ref,
[offsets],
max_lengths=[max_L],
padding_value=-5e7,
).transpose(1, 2),
dim=-1,
).permute(0, 2, 1),
[offsets],
total_length,
)
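# The large negative padding_value above gives padded positions ~zero softmax
# weight, so the dense reference matches the jagged softmax once the padded
# entries are dropped again by dense_to_jagged.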
# verify forward
torch.testing.assert_close(output, output_ref)
# verify backward
grad_output = output.detach().clone().requires_grad_(True)
output.backward(grad_output)
output_ref.backward(grad_output)
torch.testing.assert_close(values.grad, values_ref.grad)
@given(
B=st.integers(10, 512),
M=st.integers(1, 32),
N=st.integers(1, 32),
max_L=st.integers(1, 32),
dtype=st.sampled_from([torch.float, torch.double]),
device_type=st.sampled_from(["cpu", "cuda"])
if gpu_available
else st.just("cpu"),
)
@unittest.skipIf(*on_arm_platform)
@settings(verbosity=Verbosity.verbose, max_examples=20, deadline=None)
def test_jagged_jagged_bmm(
self,
B: int,
M: int,
N: int,
max_L: int,
dtype: torch.dtype,
device_type: str,
) -> None:
assume(B != 0)
device = torch.device(device_type)
torch.backends.cuda.matmul.allow_tf32 = False
lengths = torch.randint(max_L + 1, size=(B,), device=device)
offsets = torch.ops.fbgemm.asynchronous_complete_cumsum(lengths)
total_length = int(lengths.sum().item())
x_values = torch.rand(
(total_length, M), requires_grad=True, dtype=dtype, device=device
)
y_values = torch.rand(
(total_length, N), requires_grad=True, dtype=dtype, device=device
)
output = torch.ops.fbgemm.jagged_jagged_bmm(
x_values,
y_values,
offsets,
max_L,
)
x_values_ref = x_values.detach().clone().requires_grad_(True)
y_values_ref = y_values.detach().clone().requires_grad_(True)
x_dense_ref = torch.ops.fbgemm.jagged_to_padded_dense(
x_values_ref,
[offsets],
max_lengths=[max_L],
)
y_dense_ref = torch.ops.fbgemm.jagged_to_padded_dense(
y_values_ref,
[offsets],
max_lengths=[max_L],
)
output_ref = torch.bmm(x_dense_ref.transpose(2, 1), y_dense_ref)
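        # Shape check for the dense reference: x_dense_ref and y_dense_ref are (B, max_L, M)
        # and (B, max_L, N); transposing x to (B, M, max_L) before bmm yields (B, M, N),
        # which is the layout jagged_jagged_bmm is expected to produce.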
# verify forward
torch.testing.assert_close(output, output_ref)
# verify backward
grad_output = output.detach().clone().requires_grad_(True)
output.backward(grad_output)
output_ref.backward(grad_output)
torch.testing.assert_close(x_values.grad, x_values_ref.grad)
torch.testing.assert_close(y_values.grad, y_values_ref.grad)
@given(
B=st.integers(10, 512),
M=st.integers(1, 32),
N=st.integers(1, 32),
max_L=st.integers(1, 32),
dtype=st.sampled_from([torch.float, torch.double]),
device_type=st.sampled_from(["cpu", "cuda"])
if gpu_available
else st.just("cpu"),
)
@unittest.skipIf(*on_arm_platform)
@settings(verbosity=Verbosity.verbose, max_examples=2, deadline=None)
def test_jagged_dense_bmm(
self,
B: int,
M: int,
N: int,
max_L: int,
dtype: torch.dtype,
device_type: str,
) -> None:
assume(B != 0)
device = torch.device(device_type)
torch.backends.cuda.matmul.allow_tf32 = False
lengths = torch.randint(max_L + 1, size=(B,), device=device)
total_length = int(lengths.sum().item())
offsets = torch.ops.fbgemm.asynchronous_complete_cumsum(lengths)
x_values = torch.rand(
(total_length, M), requires_grad=True, dtype=dtype, device=device
)
y = torch.rand((B, M, N), requires_grad=True, dtype=dtype, device=device)
output, _ = torch.ops.fbgemm.jagged_dense_bmm(
x_values,
offsets,
y,
max_L,
)
x_values_ref = x_values.detach().clone().requires_grad_(True)
x_dense_ref = torch.ops.fbgemm.jagged_to_padded_dense(
x_values_ref,
[offsets],
max_lengths=[max_L],
)
y_ref = y.detach().clone().requires_grad_(True)
output_dense = torch.bmm(x_dense_ref, y_ref)
output_ref, _ = torch.ops.fbgemm.dense_to_jagged(
output_dense, [offsets], total_length
)
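        # Dense reference: x_dense_ref is (B, max_L, M) and y is (B, M, N), so bmm gives
        # (B, max_L, N); dense_to_jagged then drops the padded rows, recovering a jagged
        # (total_length, N) tensor comparable with the op's output.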
# verify forward
torch.testing.assert_close(output, output_ref)
# verify backward
grad_output = output.detach().clone().requires_grad_(True)
output.backward(grad_output)
output_ref.backward(grad_output)
torch.testing.assert_close(x_values.grad, x_values_ref.grad)
torch.testing.assert_close(y.grad, y_ref.grad)
@unittest.skipIf(*symint_vector_unsupported())
@given(
B=st.integers(10, 512),
M=st.integers(2, 32),
N=st.integers(2, 32),
max_L=st.integers(2, 32),
dtype=st.sampled_from([torch.float, torch.double]),
device_type=st.just("cpu"),
)
@unittest.skipIf(*on_arm_platform)
@settings(verbosity=Verbosity.verbose, max_examples=20, deadline=None)
def test_jagged_dense_bmm_dynamic_shape(
self,
B: int,
M: int,
N: int,
max_L: int,
dtype: torch.dtype,
device_type: str,
) -> None:
# Start a fresh compile for each parameter of the test case
torch._dynamo.reset()
assume(B != 0)
device = torch.device(device_type)
torch.backends.cuda.matmul.allow_tf32 = False
lengths = torch.randint(low=1, high=max_L + 1, size=(B,), device=device)
total_length = int(lengths.sum().item())
offsets = torch.ops.fbgemm.asynchronous_complete_cumsum(lengths)
x_values = torch.rand(
(total_length, M), requires_grad=True, dtype=dtype, device=device
)
y = torch.rand((B, M, N), requires_grad=True, dtype=dtype, device=device)
torch._dynamo.mark_dynamic(x_values, 0)
torch._dynamo.mark_dynamic(x_values, 1)
        torch._dynamo.mark_dynamic(lengths, 0)  # offsets has one more element than lengths
output, _ = torch.compile(
torch.ops.fbgemm.jagged_dense_bmm, fullgraph=True, dynamic=True
)(
x_values,
offsets,
y,
max_L,
)
x_values_ref = x_values.detach().clone().requires_grad_(True)
x_dense_ref = torch.ops.fbgemm.jagged_to_padded_dense(
x_values_ref,
[offsets],
max_lengths=[max_L],
)
y_ref = y.detach().clone().requires_grad_(True)
output_dense = torch.bmm(x_dense_ref, y_ref)
output_ref, _ = torch.ops.fbgemm.dense_to_jagged(
output_dense, [offsets], total_length
)
# verify forward
torch.testing.assert_close(output, output_ref)
# verify backward
grad_output = output.detach().clone().requires_grad_(True)
output.backward(grad_output)
output_ref.backward(grad_output)
torch.testing.assert_close(x_values.grad, x_values_ref.grad)
torch.testing.assert_close(y.grad, y_ref.grad)
@given(
B=st.integers(10, 512),
N=st.integers(10, 64),
slice_length=st.integers(0, 64),
dtype=st.sampled_from([torch.float, torch.double]),
)
@settings(verbosity=Verbosity.verbose, max_examples=20, deadline=None)
def test_jagged_slice(
self,
B: int,
N: int,
slice_length: int,
dtype: torch.dtype,
) -> None:
assume(B != 0)
device = torch.device("cpu")
torch.backends.cuda.matmul.allow_tf32 = False
lengths = torch.randint(N + 1, size=(B,), device=device)
start_list = [random.randint(0, max(len_ - 1, 0)) for len_ in lengths.tolist()]
start = torch.tensor(start_list, device=device)
total_length = int(lengths.sum().item())
x_values = torch.rand(
(total_length), requires_grad=True, dtype=dtype, device=device
)
output, output_lengths = torch.ops.fbgemm.jagged_slice(
x_values,
lengths,
start,
slice_length,
)
output_offsets = torch.ops.fbgemm.asynchronous_complete_cumsum(output_lengths)
offsets = torch.ops.fbgemm.asynchronous_complete_cumsum(lengths)
x_values_ref = x_values.detach().clone().requires_grad_(True)
def jagged_slice_ref(
x_values: torch.Tensor,
offsets: torch.Tensor,
start: torch.Tensor,
slice_length: int,
) -> Tuple[torch.Tensor, torch.Tensor]:
end_offsets_ = slice_length + start + offsets[:-1]
end_offsets = torch.where(
end_offsets_ > offsets[1:], offsets[1:], end_offsets_
)
start_offsets = start + offsets[:-1]
indices_to_select: List[torch.Tensor] = []
for i in range(end_offsets.size(0)):
indices_to_select.append(
torch.arange(start_offsets[i].item(), end_offsets[i].item())
)
output_ref = torch.index_select(x_values, 0, torch.cat(indices_to_select))
new_lengths = end_offsets - start_offsets
new_offsets = torch.ops.fbgemm.asynchronous_complete_cumsum(new_lengths)
return output_ref, new_offsets
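        # Illustrative worked example of the reference (hypothetical values):
        #   lengths = [3, 2] -> offsets = [0, 3, 5]; start = [1, 0]; slice_length = 2
        #   start_offsets = [1, 3]; end_offsets = min([1 + 2, 3 + 2], [3, 5]) = [3, 5]
        #   so elements 1..2 of sequence 0 and 3..4 of sequence 1 are selected and the
        #   sliced lengths are [2, 2] (offsets [0, 2, 4]).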
output_ref, output_offsets_ref = jagged_slice_ref(
x_values_ref, offsets, start, slice_length
)
# verify forward
torch.testing.assert_close(
output, output_ref, msg=f"output={output} output_ref={output_ref}"
)
torch.testing.assert_close(
output_offsets,
output_offsets_ref,
msg=f"output_off={output_offsets} output_off_ref={output_offsets_ref}",
)
# verify backward
grad_output = output.detach().clone().requires_grad_(True)
output.backward(grad_output)
output_ref.backward(grad_output)
torch.testing.assert_close(
x_values.grad,
x_values_ref.grad,
msg=f"grad={x_values.grad} x_values_ref.grad={x_values_ref.grad}",
)
def test_jagged_slice_errors(
self,
) -> None:
lengths = torch.tensor([1, 2, 3, 4, 5, 6])
values = torch.tensor([x + y for x in range(6) for y in range(x)])
with self.assertRaises(RuntimeError):
torch.ops.fbgemm.jagged_slice(
values, lengths, torch.tensor([2, 1, 2, 3, 4, 2]), 7
)
with self.assertRaises(RuntimeError):
torch.ops.fbgemm.jagged_slice(
values, lengths, torch.tensor([-2, 1, 1, 0, 1, 2]), 7
)
@unittest.skipIf(*gpu_unavailable)
@given(
B=st.integers(min_value=100, max_value=200),
F=st.integers(min_value=50, max_value=100),
max_length=st.integers(min_value=5, max_value=10),
)
@settings(verbosity=Verbosity.verbose, max_examples=10, deadline=None)
def test_jagged_unique_indices(
self,
B: int, # Batch size
F: int, # The number of features
max_length: int, # The maximum value of pooling factor
) -> None:
hash_size_list = []
lengths_list = []
indices_list = []
linearized_indices_list = []
hash_size_offsets_list = [0]
for _ in range(F):
# We generate a small hash size to increase index duplication
hash_size = random.randint(3, 5)
hash_size_list.append(hash_size)
hash_size_offset = hash_size_offsets_list[-1] + 1
hash_size_offsets_list.append(hash_size_offset)
for _ in range(B):
length = random.randint(0, max_length)
lengths_list.append(length)
if length > 0:
indices = np.random.randint(0, hash_size, size=length)
linearized_indices = indices + sum(hash_size_list[:-1])
indices_list.extend(indices)
linearized_indices_list.extend(linearized_indices)
device = torch.device("cuda")
dtype = torch.int64
hash_size = torch.as_tensor(hash_size_list, dtype=dtype, device=device)
hash_size_offsets = torch.as_tensor(
hash_size_offsets_list, dtype=dtype, device=device
)
lengths = torch.as_tensor(lengths_list, dtype=dtype, device=device)
indices = torch.as_tensor(indices_list, dtype=dtype, device=device)
linearized_indices = torch.as_tensor(
linearized_indices_list, dtype=dtype, device=device
)
hash_size_cum_sum = torch.zeros(F + 1, dtype=dtype, device=device)
hash_size_cum_sum[1:] = torch.cumsum(hash_size, dim=0)
offsets = torch.zeros(F * B + 1, dtype=dtype, device=device)
offsets[1:] = torch.cumsum(lengths, dim=0)
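        # e.g. lengths = [2, 0, 3] gives offsets = [0, 2, 2, 5]; entry f * B + b of
        # offsets marks where batch b of feature f starts within `indices`.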
(
output_lengths,
output_offsets,
unique_indices,
reverse_index,
) = torch.ops.fbgemm.jagged_unique_indices(
hash_size_cum_sum, hash_size_offsets, offsets, indices
)
# Check hash size cumsum to offsets function
output_hash_size_offsets_list = hash_size_cumsum_to_offsets(
hash_size_cum_sum.tolist()
)
self.assertEqual(output_hash_size_offsets_list, hash_size_offsets_list)
# Compute hash size cumsum and offsets based on KJT offsets and indices
(
inferred_hash_size_cum_sum,
inferred_hash_size_offsets,
) = torch.ops.fbgemm.jagged_hash_size_cumsum(offsets, indices, B)
(
output_lengths_inf,
output_offsets_inf,
unique_indices_inf,
reverse_index_inf,
) = torch.ops.fbgemm.jagged_unique_indices(
inferred_hash_size_cum_sum, inferred_hash_size_offsets, offsets, indices
)
self.assertTrue(torch.equal(output_lengths, output_lengths_inf))
self.assertTrue(torch.equal(output_offsets, output_offsets_inf))
self.assertTrue(torch.equal(unique_indices, unique_indices_inf))
self.assertTrue(torch.equal(reverse_index, reverse_index_inf))
unique_linearized_indices = torch.unique(linearized_indices, sorted=True)
self.assertTrue(unique_linearized_indices.numel() == unique_indices.numel())
unique_indices_list = unique_indices.tolist()
reverse_index_list = reverse_index.tolist()
for i in range(len(reverse_index_list)):
pos = reverse_index_list[i]
self.assertTrue(unique_indices_list[pos] == indices_list[i])
input_offsets_list = offsets.tolist()
output_offsets_list = output_offsets.tolist()
for i in range(F):
input_start = input_offsets_list[i * B]
input_end = input_offsets_list[(i + 1) * B]
output_start = output_offsets_list[i * B]
output_end = output_offsets_list[(i + 1) * B]
for each_offset in range(input_start, input_end):
pos = reverse_index_list[each_offset]
self.assertTrue((output_start <= pos) and (pos < output_end))
@unittest.skipIf(*gpu_unavailable)
@given(
B=st.integers(min_value=100, max_value=200),
F=st.integers(min_value=50, max_value=100),
max_length=st.integers(min_value=5, max_value=10),
)
@settings(verbosity=Verbosity.verbose, max_examples=10, deadline=None)
def test_jagged_unique_indices_multi_keys(
self,
B: int, # Batch size
F: int, # The number of features
max_length: int, # The maximum value of pooling factor
) -> None:
hash_size_list = []
lengths_list = []
indices_list = []
linearized_indices_list = []
MAX_HASH_SIZE = 10
for _ in range(F):
# We generate a small hash size to increase index duplication
hash_size = random.randint(3, 6)
self.assertTrue(hash_size <= MAX_HASH_SIZE)
masked_hash_size = MAX_HASH_SIZE if random.randint(1, 3) == 3 else 0
hash_size_list.append(masked_hash_size)
for _ in range(B):
length = random.randint(0, max_length)
lengths_list.append(length)
if length > 0:
indices = np.random.randint(0, hash_size, size=length)
linearized_indices = indices + sum(hash_size_list[:-1])
indices_list.extend(indices)
linearized_indices_list.extend(linearized_indices)
device = torch.device("cuda")
dtype = torch.int64
hash_size = torch.as_tensor(hash_size_list, dtype=dtype, device=device)
lengths = torch.as_tensor(lengths_list, dtype=dtype, device=device)
indices = torch.as_tensor(indices_list, dtype=dtype, device=device)
linearized_indices = torch.as_tensor(
linearized_indices_list, dtype=dtype, device=device
)
hash_size_cum_sum = torch.zeros(F + 1, dtype=dtype, device=device)
hash_size_cum_sum[1:] = torch.cumsum(hash_size, dim=0)
offsets = torch.zeros(F * B + 1, dtype=dtype, device=device)
offsets[1:] = torch.cumsum(lengths, dim=0)
# Compute hash size offsets based on hash size cumsum to dedup
# indices from multiple keys
hash_size_cum_sum_list = hash_size_cum_sum.tolist()
hash_size_offsets_list = hash_size_cumsum_to_offsets(hash_size_cum_sum_list)
hash_size_offsets = torch.as_tensor(
hash_size_offsets_list, dtype=dtype, device=device
)
(
_, # output lengths
_, # output offsets
unique_indices,
reverse_index,
) = torch.ops.fbgemm.jagged_unique_indices(
hash_size_cum_sum, hash_size_offsets, offsets, indices
)
unique_linearized_indices = torch.unique(linearized_indices, sorted=True)
self.assertTrue(unique_linearized_indices.numel() == unique_indices.numel())
unique_indices_list = unique_indices.tolist()
reverse_index_list = reverse_index.tolist()
for i in range(len(reverse_index_list)):
pos = reverse_index_list[i]
self.assertTrue(unique_indices_list[pos] == indices_list[i])
@unittest.skipIf(*gpu_unavailable)
@given(
B=st.integers(min_value=100, max_value=200),
F=st.integers(min_value=50, max_value=100),
)
@settings(verbosity=Verbosity.verbose, max_examples=2, deadline=None)
def test_jagged_unique_indices_empty(
self,
B: int, # Batch size
F: int, # The number of features
) -> None:
hash_size_cumsum_list = [0] + list(itertools.accumulate([10] * F))
hash_size_offsets_list = [0] + list(itertools.accumulate([1] * F))
offsets_list = [0] * (B * F + 1)
indices_list = []
device = torch.device("cuda")
dtype = torch.int64
hash_size_cumsum = torch.as_tensor(
hash_size_cumsum_list, device=device, dtype=dtype
)
hash_size_offsets = torch.as_tensor(
hash_size_offsets_list, device=device, dtype=dtype
)
offsets = torch.as_tensor(offsets_list, device=device, dtype=dtype)
indices = torch.as_tensor(indices_list, device=device, dtype=dtype)
(
output_lengths,
output_offsets,
unique_indices,
reverse_index,
) = torch.ops.fbgemm.jagged_unique_indices(
hash_size_cumsum, hash_size_offsets, offsets, indices
)
# The output should be empty since there are no input indices
self.assertEqual(unique_indices.numel(), 0)
self.assertEqual(reverse_index.numel(), 0)
self.assertEqual(torch.sum(output_lengths).item(), 0)
self.assertEqual(torch.sum(output_offsets).item(), 0)
if __name__ == "__main__":
unittest.main()
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
# pyre-unsafe
import unittest
import hypothesis.strategies as st
import torch
from hypothesis import given, settings, Verbosity
try:
# pyre-ignore[21]
from fbgemm_gpu import open_source # noqa: F401
# pyre-ignore[21]
from test_utils import gpu_unavailable
except Exception:
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:merge_pooled_embeddings")
torch.ops.load_library(
"//deeplearning/fbgemm/fbgemm_gpu:merge_pooled_embeddings_cpu"
)
from fbgemm_gpu.test.test_utils import gpu_unavailable
open_source = False
@unittest.skipIf(*gpu_unavailable)
@unittest.skipIf(open_source, "Not supported in open source yet")
class MergePooledEmbeddingsTest(unittest.TestCase):
@given(
num_ads=st.integers(min_value=1, max_value=10),
embedding_dimension=st.integers(min_value=1, max_value=32),
ads_tables=st.integers(min_value=1, max_value=32),
num_gpus=st.integers(min_value=1, max_value=torch.cuda.device_count()),
non_default_stream=st.booleans(),
r=st.randoms(use_true_random=False),
dim=st.integers(min_value=0, max_value=1),
)
    # Instantiating up to 8 CUDA contexts can take a long time.
@settings(verbosity=Verbosity.verbose, max_examples=40, deadline=None)
def test_merge(
self,
num_ads,
embedding_dimension,
ads_tables,
num_gpus,
non_default_stream,
r,
dim: int,
) -> None:
dst_device = r.randint(0, num_gpus - 1)
torch.cuda.set_device(dst_device)
ad_ds = [embedding_dimension * ads_tables for _ in range(num_gpus)]
batch_indices = torch.zeros(num_ads).long().cuda()
pooled_ad_embeddings = [
torch.randn(
num_ads, ad_d, dtype=torch.float16, device=torch.device(f"cuda:{i}")
)
for i, ad_d in enumerate(ad_ds)
]
r.shuffle(pooled_ad_embeddings)
streams = [torch.cuda.Stream(device=i) for i in range(num_gpus)]
import contextlib
uncat_size = batch_indices.size(0) if dim == 1 else ad_ds[0]
with contextlib.ExitStack() as stack:
if non_default_stream:
for stream in streams:
stack.enter_context(torch.cuda.stream(stream))
output = torch.ops.fbgemm.merge_pooled_embeddings(
pooled_ad_embeddings, uncat_size, batch_indices.device, dim
)
def ref(pooled_ad_embeddings, batch_indices):
return torch.cat([p.cpu() for p in pooled_ad_embeddings], dim=dim)
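        # The CPU reference simply concatenates the per-GPU pooled embeddings along `dim`;
        # merge_pooled_embeddings should match it value-for-value while additionally
        # placing the result on the destination device.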
output_ref = ref(pooled_ad_embeddings, batch_indices)
output_cpu = torch.ops.fbgemm.merge_pooled_embeddings(
[pe.cpu() for pe in pooled_ad_embeddings],
uncat_size,
batch_indices.cpu().device,
dim,
)
self.assertEqual(output.device, torch.device(f"cuda:{dst_device}"))
torch.testing.assert_close(output_ref, output.cpu())
torch.testing.assert_close(output_ref, output_cpu)
@given(
num_inputs=st.integers(min_value=1, max_value=10),
num_gpus=st.integers(min_value=1, max_value=torch.cuda.device_count()),
r=st.randoms(use_true_random=False),
)
    # Instantiating up to 8 CUDA contexts can take a long time.
@settings(verbosity=Verbosity.verbose, max_examples=40, deadline=None)
def test_all_to_one_device(
self,
num_inputs,
num_gpus,
r,
) -> None:
dst_device = torch.device(f"cuda:{r.randint(0, num_gpus - 1)}")
with torch.cuda.device(dst_device):
inputs = [torch.randn(10, 20) for _ in range(num_inputs)]
cuda_inputs = [
input.to(f"cuda:{i % num_gpus}") for i, input in enumerate(inputs)
]
cuda_outputs = torch.ops.fbgemm.all_to_one_device(cuda_inputs, dst_device)
for i, o in zip(inputs, cuda_outputs):
self.assertEqual(o.device, dst_device)
torch.testing.assert_close(o.cpu(), i)
def test_merge_pooled_embeddings_cpu_with_different_target_device(self) -> None:
uncat_size = 2
pooled_embeddings = [torch.ones(uncat_size, 4), torch.ones(uncat_size, 8)]
output_meta = torch.ops.fbgemm.merge_pooled_embeddings(
pooled_embeddings,
uncat_size,
torch.device("meta"),
1,
)
self.assertFalse(output_meta.is_cpu)
self.assertTrue(output_meta.is_meta)
@given(
num_inputs=st.integers(min_value=1, max_value=10),
num_gpus=st.integers(min_value=1, max_value=torch.cuda.device_count()),
r=st.randoms(use_true_random=False),
)
    # Instantiating up to 8 CUDA contexts can take a long time.
@settings(verbosity=Verbosity.verbose, max_examples=10, deadline=None)
def test_sum_reduce_to_one(
self,
num_inputs,
num_gpus,
r,
) -> None:
dst_device = torch.device(f"cuda:{r.randint(0, num_gpus - 1)}")
with torch.cuda.device(dst_device):
inputs = [torch.randn(10, 20) for _ in range(num_inputs)]
cuda_inputs = [
input.to(f"cuda:{i % num_gpus}") for i, input in enumerate(inputs)
]
cuda_output = torch.ops.fbgemm.sum_reduce_to_one(cuda_inputs, dst_device)
self.assertEqual(cuda_output.device, dst_device)
torch.testing.assert_close(
cuda_output.cpu(), torch.stack(inputs).sum(dim=0)
)
if __name__ == "__main__":
unittest.main()
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
# pyre-ignore-all-errors[56]
import copy
import math
import pickle
import random
import unittest
from itertools import accumulate
from typing import Any, List, Optional, Tuple, Union
import fbgemm_gpu
import hypothesis.strategies as st
import numpy as np
import torch
from fbgemm_gpu.split_embedding_configs import (
EmbOptimType as OptimType,
FP8QuantizationConfig,
SparseType,
)
from fbgemm_gpu.split_embedding_optimizer_ops import (
SplitEmbeddingArgs,
SplitEmbeddingOptimizerParams,
SplitEmbeddingRowwiseAdagrad,
)
from fbgemm_gpu.split_embedding_utils import (
b_indices,
fake_quantize_embs,
generate_requests,
get_table_batched_offsets_from_dense,
quantize_embs,
round_up,
to_device,
)
from fbgemm_gpu.split_table_batched_embeddings_ops_common import (
BoundsCheckMode,
CacheAlgorithm,
EmbeddingLocation,
PoolingMode,
RecordCacheMetrics,
)
from fbgemm_gpu.split_table_batched_embeddings_ops_inference import (
IntNBitTableBatchedEmbeddingBagsCodegen,
rounded_row_size_in_bytes,
)
from fbgemm_gpu.split_table_batched_embeddings_ops_training import (
ComputeDevice,
CounterBasedRegularizationDefinition,
CounterWeightDecayMode,
DEFAULT_ASSOC,
DenseTableBatchedEmbeddingBagsCodegen,
GradSumDecay,
INT8_EMB_ROW_DIM_OFFSET,
LearningRateMode,
SplitTableBatchedEmbeddingBagsCodegen,
TailIdThreshold,
WeightDecayMode,
)
from hypothesis import assume, given, HealthCheck, settings, Verbosity
from hypothesis.strategies import composite
from torch import Tensor
# pyre-fixme[16]: Module `fbgemm_gpu` has no attribute `open_source`.
open_source: bool = getattr(fbgemm_gpu, "open_source", False)
if open_source:
# pyre-ignore[21]
from test_utils import gpu_available, gpu_unavailable, TEST_WITH_ROCM
else:
from fbgemm_gpu.test.test_utils import (
gpu_available,
gpu_unavailable,
TEST_WITH_ROCM,
)
MAX_EXAMPLES = 40
# For long-running tests, reduce the number of examples to avoid timeouts.
MAX_EXAMPLES_LONG_RUNNING = 15
@composite
# pyre-ignore
def get_nbit_weights_ty(draw) -> Optional[SparseType]:
"""
Returns None if mixed weights ty should be used, otherwise, returns specific SparseType.
"""
mixed_weights_ty = draw(st.booleans())
if mixed_weights_ty:
return None
return draw(
st.sampled_from(
[
SparseType.FP32,
SparseType.FP16,
SparseType.FP8,
SparseType.INT8,
SparseType.INT4,
SparseType.INT2,
]
)
)
def gen_mixed_B_batch_sizes(B: int, T: int) -> Tuple[List[List[int]], List[int]]:
num_ranks = np.random.randint(low=1, high=4)
low = max(int(0.25 * B), 1)
high = int(B)
if low == high:
Bs_rank_feature = [[B] * num_ranks for _ in range(T)]
else:
Bs_rank_feature = [
np.random.randint(low=low, high=high, size=num_ranks).tolist()
for _ in range(T)
]
Bs = [sum(Bs_feature) for Bs_feature in Bs_rank_feature]
return Bs_rank_feature, Bs
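# Illustrative example (values are random): with B=8, T=2 and num_ranks=2 this might
# return Bs_rank_feature = [[3, 5], [2, 7]] and Bs = [8, 9], i.e. per-rank batch sizes
# for each feature plus their per-feature totals.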
def format_ref_tensors_in_mixed_B_layout(
ref_tensors: List[torch.Tensor], Bs_rank_feature: List[List[int]]
) -> torch.Tensor:
# Relayout the reference tensor
# Jagged dimension: (rank, table, local batch)
num_ranks = len(Bs_rank_feature[0])
split_tensors = [[] for _ in range(num_ranks)] # shape (rank, table)
for t, ref_tensor in enumerate(ref_tensors):
assert ref_tensor.shape[0] == sum(Bs_rank_feature[t])
tensors = ref_tensor.split(Bs_rank_feature[t])
for r, tensor in enumerate(tensors):
split_tensors[r].append(tensor.flatten())
concat_list = []
for r in range(num_ranks):
concat_list += split_tensors[r]
return torch.cat(concat_list, dim=0)
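# For example, with two ranks and two tables the reference rows are flattened in the
# order [rank0.table0, rank0.table1, rank1.table0, rank1.table1], mirroring the
# variable-batch-size (mixed_B) output layout produced by TBE.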
class SplitTableBatchedEmbeddingsTest(unittest.TestCase):
def execute_forward_( # noqa C901
self,
T: int,
D: int,
B: int,
log_E: int,
L: int,
weights_precision: SparseType,
weighted: bool,
mixed: bool,
mixed_B: bool,
use_cache: bool,
cache_algorithm: CacheAlgorithm,
pooling_mode: PoolingMode,
use_cpu: bool,
output_dtype: SparseType,
use_experimental_tbe: bool,
) -> None:
# NOTE: cache is not applicable to CPU version.
assume(not use_cpu or not use_cache)
# NOTE: limit (T * B * L * D) to avoid timeout for CPU version!
assume(not use_cpu or T * B * L * D <= 2048)
# NOTE: CPU does not support FP16.
assume(not (use_cpu and weights_precision == SparseType.FP16))
        # NOTE: weighted lookups are only supported with SUM pooling.
assume(pooling_mode == PoolingMode.SUM or not weighted)
        # NOTE: no-bag (PoolingMode.NONE) only works on GPUs and does not support mixed dims.
assume(not use_cpu or pooling_mode != PoolingMode.NONE)
assume(not mixed or pooling_mode != PoolingMode.NONE)
# TODO: Support these cases
assume(
not mixed_B
or (
weights_precision != SparseType.INT8
and output_dtype != SparseType.INT8
and not use_cpu
and not use_cache
and pooling_mode != PoolingMode.NONE
)
)
emb_op = SplitTableBatchedEmbeddingBagsCodegen
if pooling_mode == PoolingMode.SUM:
mode = "sum"
do_pooling = True
elif pooling_mode == PoolingMode.MEAN:
mode = "mean"
do_pooling = True
elif pooling_mode == PoolingMode.NONE:
mode = "sum"
do_pooling = False
else:
            # Unreachable unless a new PoolingMode is added without being handled above
raise RuntimeError("Unknown PoolingMode!")
E = int(10**log_E)
if use_cpu:
D = (D + 15) // 16 * 4
else:
D = D * 4
if not mixed:
Ds = [D] * T
Es = [E] * T
else:
Ds = [
round_up(np.random.randint(low=int(0.25 * D), high=int(1.0 * D)), 4)
for _ in range(T)
]
Es = [
np.random.randint(low=int(0.5 * E), high=int(2.0 * E)) for _ in range(T)
]
if not mixed_B:
Bs = [B] * T
Bs_rank_feature = [[0]]
else:
Bs_rank_feature, Bs = gen_mixed_B_batch_sizes(B, T)
compute_device = ComputeDevice.CUDA
if use_cpu:
managed = [EmbeddingLocation.HOST] * T
compute_device = ComputeDevice.CPU
elif TEST_WITH_ROCM:
# ROCm managed memory allocation is under development
managed = [EmbeddingLocation.DEVICE] * T
elif use_cache:
managed = [EmbeddingLocation.MANAGED_CACHING] * T
if mixed:
average_D = sum(Ds) // T
for t, d in enumerate(Ds):
managed[t] = (
EmbeddingLocation.DEVICE if d < average_D else managed[t]
)
else:
managed = [
np.random.choice(
[
EmbeddingLocation.DEVICE,
EmbeddingLocation.MANAGED,
]
)
for _ in range(T)
]
if do_pooling:
bs = [
to_device(torch.nn.EmbeddingBag(E, D, mode=mode, sparse=True), use_cpu)
for (E, D) in zip(Es, Ds)
]
else:
bs = [
to_device(torch.nn.Embedding(E, D, sparse=True), use_cpu)
for (E, D) in zip(Es, Ds)
]
if weights_precision == SparseType.INT8:
for t in range(T):
bs[t].weight.data.copy_(
torch.ops.fbgemm.Fused8BitRowwiseQuantizedToFloat(
torch.ops.fbgemm.FloatToFused8BitRowwiseQuantized(
bs[t].weight.data
)
)
)
if weights_precision == SparseType.FP16:
bs = [b.half() for b in bs]
# Generate indices
xs = [
to_device(torch.randint(low=0, high=e, size=(b, L)), use_cpu)
for e, b in zip(Es, Bs)
]
# Generate positional weights
xws = [to_device(torch.randn(size=(b, L)), use_cpu) for b in Bs]
xws_acc_type = copy.deepcopy(xws)
if weights_precision == SparseType.FP16:
xws = [xw.half() for xw in xws]
# Run baseline
fs = (
[
b_indices(b, x, use_cpu=use_cpu, do_pooling=do_pooling)
for (b, x) in zip(bs, xs)
]
if not weighted
else [
b_indices(
b,
x,
per_sample_weights=xw.view(-1),
use_cpu=use_cpu,
do_pooling=do_pooling,
)
for (b, x, xw) in zip(bs, xs, xws)
]
)
if do_pooling:
if mixed_B:
f = format_ref_tensors_in_mixed_B_layout(fs, Bs_rank_feature)
else:
f = torch.cat([f.view(B, -1) for f in fs], dim=1)
else:
f = torch.cat(fs, dim=0).view(-1, D)
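        # Reference layout: with pooling, per-table outputs are concatenated along dim 1
        # (shape (B, sum(Ds)) for uniform batch sizes, rank-major when mixed_B); without
        # pooling, all looked-up rows are stacked into a single (num_lookups, D) tensor.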
# Create a TBE op
cc = emb_op(
embedding_specs=[
(
E,
D,
EmbeddingLocation(M),
compute_device,
)
for (E, D, M) in zip(Es, Ds, managed)
],
weights_precision=weights_precision,
optimizer=OptimType.EXACT_ROWWISE_ADAGRAD
if mixed_B
else OptimType.EXACT_SGD,
learning_rate=0.05,
cache_algorithm=cache_algorithm,
pooling_mode=pooling_mode,
output_dtype=output_dtype,
use_experimental_tbe=use_experimental_tbe,
)
        # NOTE: check that the module is TorchScript-compatible.
cc = torch.jit.script(cc)
for t in range(T):
cc.split_embedding_weights()[t].data.copy_(
bs[t].weight
if weights_precision != SparseType.INT8
else torch.ops.fbgemm.FloatToFused8BitRowwiseQuantized(bs[t].weight)
)
x = torch.cat([x.contiguous().flatten() for x in xs], dim=0)
xw = torch.cat([xw.contiguous().flatten() for xw in xws_acc_type], dim=0)
(indices, offsets) = get_table_batched_offsets_from_dense(
x, L, sum(Bs), use_cpu
)
batch_size_per_feature_per_rank = Bs_rank_feature if mixed_B else None
# Run TBE
fc2 = (
cc(
indices,
offsets,
batch_size_per_feature_per_rank=batch_size_per_feature_per_rank,
)
if not weighted
else cc(
indices,
offsets,
to_device(xw.contiguous().view(-1), use_cpu),
batch_size_per_feature_per_rank=batch_size_per_feature_per_rank,
)
)
# Compare results: f = baseline, fc2 = TBE
tolerance = (
1.0e-5
if weights_precision == SparseType.FP32 and output_dtype == SparseType.FP32
else 8.0e-3
)
torch.testing.assert_close(
fc2.float(), f.float(), atol=tolerance, rtol=tolerance
)
def test_forward_cpu_int8(
self,
) -> None:
weights_precision = SparseType.INT8
use_cpu = True
T = random.randint(1, 10)
D = random.randint(2, min(256, int(2048 / T)))
B = random.randint(1, min(128, int(2048 / T / D)))
L = random.randint(0, min(20, int(2048 / T / D / B)))
log_E = random.randint(3, 5)
use_cache = False
        # cache_algorithm is irrelevant since the cache is not used.
cache_algorithm = CacheAlgorithm.LRU
pooling_mode = random.choice(
[
PoolingMode.SUM,
PoolingMode.MEAN,
]
)
mixed = False
mixed_B = False
if pooling_mode == PoolingMode.SUM:
weighted = random.choice([True, False])
else:
weighted = False
self.execute_forward_(
T,
D,
B,
log_E,
L,
weights_precision,
weighted,
mixed,
mixed_B,
use_cache,
cache_algorithm,
pooling_mode,
use_cpu,
SparseType.FP32,
False, # use_experimental_tbe
)
def test_forward_cpu_fp32(
self,
) -> None:
weights_precision = SparseType.FP32
use_cpu = True
T = random.randint(1, 10)
D = random.randint(2, min(256, int(2048 / T)))
B = random.randint(1, min(128, int(2048 / T / D)))
L = random.randint(0, min(20, int(2048 / T / D / B)))
log_E = random.randint(3, 5)
use_cache = False
        # cache_algorithm is irrelevant since the cache is not used.
cache_algorithm = CacheAlgorithm.LRU
pooling_mode = random.choice(
[
PoolingMode.SUM,
PoolingMode.MEAN,
]
)
mixed = False
mixed_B = False
if pooling_mode == PoolingMode.SUM:
weighted = random.choice([True, False])
else:
weighted = False
self.execute_forward_(
T,
D,
B,
log_E,
L,
weights_precision,
weighted,
mixed,
mixed_B,
use_cache,
cache_algorithm,
pooling_mode,
use_cpu,
SparseType.FP32,
False, # use_experimental_tbe
)
@unittest.skipIf(*gpu_unavailable)
def test_forward_gpu_no_cache_int8(
self,
) -> None:
weights_precision = SparseType.INT8
use_cpu = False
T = random.randint(1, 10)
D = random.randint(2, 256)
B = random.randint(1, 128)
L = random.randint(0, 20)
log_E = random.randint(3, 5)
use_cache = False
        # cache_algorithm is irrelevant since the cache is not used.
cache_algorithm = CacheAlgorithm.LRU
pooling_mode = random.choice(
[
PoolingMode.SUM,
PoolingMode.MEAN,
PoolingMode.NONE,
]
)
if pooling_mode == PoolingMode.NONE:
mixed = False
else:
mixed = random.choice([True, False])
mixed_B = False
if pooling_mode == PoolingMode.SUM:
weighted = random.choice([True, False])
else:
weighted = False
self.execute_forward_(
T,
D,
B,
log_E,
L,
weights_precision,
weighted,
mixed,
mixed_B,
use_cache,
cache_algorithm,
pooling_mode,
use_cpu,
SparseType.FP32,
False, # use_experimental_tbe
)
@unittest.skipIf(*gpu_unavailable)
@given(
use_experimental_tbe=st.booleans() if not TEST_WITH_ROCM else st.just(False),
)
@settings(
verbosity=Verbosity.verbose,
max_examples=MAX_EXAMPLES_LONG_RUNNING,
deadline=None,
suppress_health_check=[HealthCheck.filter_too_much, HealthCheck.data_too_large],
)
def test_forward_gpu_no_cache_fp16(
self,
use_experimental_tbe: bool,
) -> None:
weights_precision = SparseType.FP16
use_cpu = False
T = random.randint(1, 10)
D = random.randint(2, 256)
B = random.randint(1, 128)
L = random.randint(0, 20)
log_E = random.randint(3, 5)
use_cache = False
        # cache_algorithm is irrelevant since the cache is not used.
cache_algorithm = CacheAlgorithm.LRU
pooling_mode = random.choice(
[
PoolingMode.SUM,
PoolingMode.MEAN,
]
+ ([PoolingMode.NONE] if not use_experimental_tbe else [])
)
if pooling_mode == PoolingMode.NONE:
mixed = False
mixed_B = False
else:
mixed = random.choice([True, False])
mixed_B = (
random.choice([True, False]) if not use_experimental_tbe else False
)
if pooling_mode == PoolingMode.SUM:
weighted = random.choice([True, False])
else:
weighted = False
self.execute_forward_(
T,
D,
B,
log_E,
L,
weights_precision,
weighted,
mixed,
mixed_B,
use_cache,
cache_algorithm,
pooling_mode,
use_cpu,
SparseType.FP32,
use_experimental_tbe,
)
@unittest.skipIf(*gpu_unavailable)
@given(
use_experimental_tbe=st.booleans() if not TEST_WITH_ROCM else st.just(False),
)
@settings(
verbosity=Verbosity.verbose,
max_examples=MAX_EXAMPLES_LONG_RUNNING,
deadline=None,
suppress_health_check=[HealthCheck.filter_too_much, HealthCheck.data_too_large],
)
def test_forward_gpu_no_cache_fp32(
self,
use_experimental_tbe: bool,
) -> None:
weights_precision = SparseType.FP32
use_cpu = False
T = random.randint(1, 10)
D = random.randint(2, 256)
B = random.randint(1, 128)
L = random.randint(0, 20)
log_E = random.randint(3, 5)
use_cache = False
        # cache_algorithm is irrelevant since the cache is not used.
cache_algorithm = CacheAlgorithm.LRU
pooling_mode = random.choice(
[
PoolingMode.SUM,
PoolingMode.MEAN,
]
+ ([PoolingMode.NONE] if not use_experimental_tbe else [])
)
if pooling_mode == PoolingMode.NONE:
mixed = False
mixed_B = False
else:
mixed = random.choice([True, False])
mixed_B = (
random.choice([True, False]) if not use_experimental_tbe else False
)
if pooling_mode == PoolingMode.SUM:
weighted = random.choice([True, False])
else:
weighted = False
self.execute_forward_(
T,
D,
B,
log_E,
L,
weights_precision,
weighted,
mixed,
mixed_B,
use_cache,
cache_algorithm,
pooling_mode,
use_cpu,
SparseType.FP32,
use_experimental_tbe,
)
@unittest.skipIf(*gpu_unavailable)
@given(
cache_algorithm=st.sampled_from(CacheAlgorithm),
)
@settings(
verbosity=Verbosity.verbose,
max_examples=MAX_EXAMPLES_LONG_RUNNING,
deadline=None,
suppress_health_check=[HealthCheck.filter_too_much, HealthCheck.data_too_large],
)
def test_forward_gpu_uvm_cache_int8(
self,
cache_algorithm: CacheAlgorithm,
) -> None:
weights_precision = SparseType.INT8
use_cpu = False
T = random.randint(1, 10)
D = random.randint(2, 256)
B = random.randint(1, 128)
L = random.randint(0, 20)
log_E = random.randint(3, 5)
use_cache = True
pooling_mode = random.choice(
[
PoolingMode.SUM,
PoolingMode.MEAN,
PoolingMode.NONE,
]
)
output_dtype = random.choice(
[
SparseType.FP32,
SparseType.FP16,
]
)
if pooling_mode == PoolingMode.NONE:
mixed = False
else:
mixed = random.choice([True, False])
mixed_B = False
if pooling_mode == PoolingMode.SUM:
weighted = random.choice([True, False])
else:
weighted = False
self.execute_forward_(
T,
D,
B,
log_E,
L,
weights_precision,
weighted,
mixed,
mixed_B,
use_cache,
cache_algorithm,
pooling_mode,
use_cpu,
output_dtype,
False, # use_experimental_tbe
)
@unittest.skipIf(*gpu_unavailable)
@given(
cache_algorithm=st.sampled_from(CacheAlgorithm),
use_experimental_tbe=st.booleans() if not TEST_WITH_ROCM else st.just(False),
)
@settings(
verbosity=Verbosity.verbose,
max_examples=MAX_EXAMPLES_LONG_RUNNING,
deadline=None,
suppress_health_check=[HealthCheck.filter_too_much, HealthCheck.data_too_large],
)
def test_forward_gpu_uvm_cache_fp16(
self,
cache_algorithm: CacheAlgorithm,
use_experimental_tbe: bool,
) -> None:
weights_precision = SparseType.FP16
use_cpu = False
T = random.randint(1, 10)
D = random.randint(2, 256)
B = random.randint(1, 128)
L = random.randint(0, 20)
log_E = random.randint(3, 5)
use_cache = True
pooling_mode = random.choice(
[
PoolingMode.SUM,
PoolingMode.MEAN,
]
+ ([PoolingMode.NONE] if not use_experimental_tbe else [])
)
output_dtype = random.choice(
[
SparseType.FP32,
SparseType.FP16,
]
)
if pooling_mode == PoolingMode.NONE:
mixed = False
else:
mixed = random.choice([True, False])
mixed_B = False
if pooling_mode == PoolingMode.SUM:
weighted = random.choice([True, False])
else:
weighted = False
self.execute_forward_(
T,
D,
B,
log_E,
L,
weights_precision,
weighted,
mixed,
mixed_B,
use_cache,
cache_algorithm,
pooling_mode,
use_cpu,
output_dtype,
use_experimental_tbe,
)
@unittest.skipIf(*gpu_unavailable)
@given(
cache_algorithm=st.sampled_from(CacheAlgorithm),
use_experimental_tbe=st.booleans() if not TEST_WITH_ROCM else st.just(False),
)
@settings(
verbosity=Verbosity.verbose,
max_examples=MAX_EXAMPLES_LONG_RUNNING,
deadline=None,
suppress_health_check=[HealthCheck.filter_too_much, HealthCheck.data_too_large],
)
def test_forward_gpu_uvm_cache_fp32(
self,
cache_algorithm: CacheAlgorithm,
use_experimental_tbe: bool,
) -> None:
weights_precision = SparseType.FP32
use_cpu = False
T = random.randint(1, 10)
D = random.randint(2, 256)
B = random.randint(1, 128)
L = random.randint(0, 20)
log_E = random.randint(3, 5)
use_cache = True
pooling_mode = random.choice(
[
PoolingMode.SUM,
PoolingMode.MEAN,
]
+ ([PoolingMode.NONE] if not use_experimental_tbe else [])
)
output_dtype = random.choice(
[
SparseType.FP32,
SparseType.FP16,
]
)
if pooling_mode == PoolingMode.NONE:
mixed = False
else:
mixed = random.choice([True, False])
mixed_B = False
if pooling_mode == PoolingMode.SUM:
weighted = random.choice([True, False])
else:
weighted = False
self.execute_forward_(
T,
D,
B,
log_E,
L,
weights_precision,
weighted,
mixed,
mixed_B,
use_cache,
cache_algorithm,
pooling_mode,
use_cpu,
output_dtype,
use_experimental_tbe,
)
@unittest.skipIf(*gpu_unavailable)
@given(
T=st.integers(min_value=1, max_value=10),
D=st.integers(min_value=2, max_value=128),
B=st.integers(min_value=1, max_value=128),
log_E=st.integers(min_value=3, max_value=5),
L=st.integers(min_value=0, max_value=20),
output_dtype=st.sampled_from([SparseType.FP16, SparseType.INT8]),
)
@settings(
verbosity=Verbosity.verbose,
max_examples=MAX_EXAMPLES_LONG_RUNNING,
deadline=None,
suppress_health_check=[HealthCheck.filter_too_much],
)
def test_forward_fused_pooled_emb_quant(
self,
T: int,
D: int,
B: int,
log_E: int,
L: int,
output_dtype: SparseType,
) -> None:
Ds = [
round_up(np.random.randint(low=int(max(0.25 * D, 1)), high=int(1.0 * D)), 4)
for _ in range(T)
]
E = int(10**log_E)
Es = [np.random.randint(low=int(0.5 * E), high=int(2.0 * E)) for _ in range(T)]
op = SplitTableBatchedEmbeddingBagsCodegen(
embedding_specs=[
(
E,
D,
EmbeddingLocation.DEVICE,
ComputeDevice.CUDA,
)
for (E, D) in zip(Es, Ds)
],
output_dtype=output_dtype,
device=torch.cuda.current_device(),
)
op_ref = SplitTableBatchedEmbeddingBagsCodegen(
embedding_specs=[
(
E,
D,
EmbeddingLocation.DEVICE,
ComputeDevice.CUDA,
)
for (E, D) in zip(Es, Ds)
],
output_dtype=SparseType.FP32,
device=torch.cuda.current_device(),
)
# sync weights between two ops
split_weights = op.split_embedding_weights()
ref_split_weights = op_ref.split_embedding_weights()
for t in range(T):
split_weights[t].data.copy_(ref_split_weights[t])
requests = generate_requests(2, B, T, L, min(Es), reuse=0.1)
for indices, offsets, _ in requests:
lowp_pooled_output = op(
indices=indices,
offsets=offsets,
)
fp32_pooled_output = op_ref(
indices=indices,
offsets=offsets,
)
lowp_pooled_emb_split = [
d + 8 if output_dtype == SparseType.INT8 else d for d in op.dims
]
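            # Fused 8-bit rowwise rows carry an extra fp32 scale and fp32 bias (8 bytes)
            # per row, hence the `d + 8` per-table column split for INT8 output.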
lowp_pooled_output_per_table = torch.split(
lowp_pooled_output, lowp_pooled_emb_split, dim=1
)
deq_lowp_pooled_output_per_table = [
torch.ops.fbgemm.Fused8BitRowwiseQuantizedToFloat(t.contiguous())
if output_dtype == SparseType.INT8
else t.float()
for t in lowp_pooled_output_per_table
]
fp32_pooled_output_per_table = torch.split(
fp32_pooled_output, op.dims, dim=1
)
dq_fp32_pooled_output_per_table = [
torch.ops.fbgemm.Fused8BitRowwiseQuantizedToFloat(
torch.ops.fbgemm.FloatToFused8BitRowwiseQuantized(
t.contiguous()
).contiguous()
)
if output_dtype == SparseType.INT8
else t.half().float()
for t in fp32_pooled_output_per_table
]
cat_deq_lowp_pooled_output = torch.cat(
deq_lowp_pooled_output_per_table, dim=1
)
cat_dq_fp32_pooled_output = torch.cat(
dq_fp32_pooled_output_per_table, dim=1
)
torch.testing.assert_close(
cat_deq_lowp_pooled_output, cat_dq_fp32_pooled_output
)
@unittest.skipIf(*gpu_unavailable)
@given(
T=st.integers(min_value=1, max_value=10),
D=st.integers(min_value=2, max_value=128),
B=st.integers(min_value=1, max_value=128),
log_E=st.integers(min_value=3, max_value=5),
L=st.integers(min_value=0, max_value=20),
weights_ty=st.sampled_from(
[
SparseType.FP32,
SparseType.FP16,
SparseType.INT8,
SparseType.INT4,
                # FIXME: INT2 causes large numerical errors in this test
# SparseType.INT2,
]
),
output_dtype=st.sampled_from(
[
SparseType.FP16,
SparseType.BF16,
SparseType.INT8,
# SparseType.INT4,
]
)
if not TEST_WITH_ROCM
else st.sampled_from(
[
SparseType.FP16,
# The counterparts of __nv_bfloat16 and __nv_bfloat162 are not supported on ROCm
SparseType.INT8,
# SparseType.INT4,
]
),
)
@settings(
verbosity=Verbosity.verbose,
max_examples=MAX_EXAMPLES_LONG_RUNNING,
deadline=None,
suppress_health_check=[HealthCheck.filter_too_much],
)
def test_nbit_forward_fused_pooled_emb_quant(
self,
T: int,
D: int,
B: int,
log_E: int,
L: int,
weights_ty: SparseType,
output_dtype: SparseType,
) -> None:
        D_alignment = max(weights_ty.align_size() for _ in range(T))
D_alignment = max(D_alignment, output_dtype.align_size())
D = round_up(D, D_alignment)
# BF16 output only works for CUDA device sm80+ (e.g., A100)
assume(
torch.cuda.is_available()
and torch.cuda.get_device_capability() >= (8, 0)
or not output_dtype == SparseType.BF16
)
Ds = [
round_up(
np.random.randint(low=int(max(0.25 * D, 1)), high=int(1.0 * D)),
D_alignment,
)
for _ in range(T)
]
        Ds = [D] * T  # Override with a uniform D; the randomized Ds above are currently unused
E = int(10**log_E)
Es = [np.random.randint(low=int(0.5 * E), high=int(2.0 * E)) for _ in range(T)]
weights_ty_list = [weights_ty] * T
managed = [EmbeddingLocation.DEVICE] * T
op = IntNBitTableBatchedEmbeddingBagsCodegen(
embedding_specs=[
(
"",
E,
D,
W_TY,
EmbeddingLocation(M),
)
for (E, D, M, W_TY) in zip(Es, Ds, managed, weights_ty_list)
],
output_dtype=output_dtype,
device=torch.cuda.current_device(),
)
# Initialize the random weights for int nbit table split embedding bag
op.fill_random_weights()
op_ref = IntNBitTableBatchedEmbeddingBagsCodegen(
embedding_specs=[
(
"",
E,
D,
W_TY,
EmbeddingLocation(M),
)
for (E, D, M, W_TY) in zip(Es, Ds, managed, weights_ty_list)
],
output_dtype=SparseType.FP32,
device=torch.cuda.current_device(),
)
# Initialize the random weights for int nbit table split embedding bag
op_ref.fill_random_weights()
# sync weights between two ops
split_weights = op.split_embedding_weights()
ref_split_weights = op_ref.split_embedding_weights()
for t in range(T):
(weights, scale_shift) = split_weights[t]
(ref_weights, ref_scale_shift) = ref_split_weights[t]
self.assertEqual(weights.size(), ref_weights.size())
element_size = weights_ty_list[t].bit_rate() / 8.0
rand_tensor = torch.rand(
ref_weights.shape[0], int(ref_weights.shape[1] / element_size)
)
rand_weights, rand_scale_shift = quantize_embs(
rand_tensor, weights_ty_list[t]
)
ref_weights.copy_(rand_weights)
weights.copy_(ref_weights)
if rand_scale_shift is not None:
self.assertIsNotNone(scale_shift)
self.assertIsNotNone(ref_scale_shift)
ref_scale_shift.copy_(rand_scale_shift)
scale_shift.copy_(ref_scale_shift)
requests = generate_requests(1, B, T, L, min(Es), reuse=0.1)
for indices, offsets, _ in requests:
lowp_pooled_output = op(
indices=indices.int(),
offsets=offsets.int(),
)
fp32_pooled_output = op_ref(
indices=indices.int(),
offsets=offsets.int(),
)
lowp_pooled_emb_split = [
d + 8 if output_dtype == SparseType.INT8 else d for d in Ds
]
lowp_pooled_output_per_table = torch.split(
lowp_pooled_output, lowp_pooled_emb_split, dim=1
)
deq_lowp_pooled_output_per_table = [
torch.ops.fbgemm.Fused8BitRowwiseQuantizedToFloat(t.contiguous())
if output_dtype == SparseType.INT8
else t.float()
for t in lowp_pooled_output_per_table
]
fp32_pooled_output_per_table = torch.split(fp32_pooled_output, Ds, dim=1)
dq_fp32_pooled_output_per_table = [
torch.ops.fbgemm.Fused8BitRowwiseQuantizedToFloat(
torch.ops.fbgemm.FloatToFused8BitRowwiseQuantized(
t.contiguous()
).contiguous()
).contiguous()
if output_dtype == SparseType.INT8
else t.half().float()
for t in fp32_pooled_output_per_table
]
cat_deq_lowp_pooled_output = torch.cat(
deq_lowp_pooled_output_per_table, dim=1
)
cat_dq_fp32_pooled_output = torch.cat(
dq_fp32_pooled_output_per_table, dim=1
)
torch.testing.assert_close(
cat_deq_lowp_pooled_output,
cat_dq_fp32_pooled_output,
rtol=1e-2,
atol=1e-2,
equal_nan=True,
)
@unittest.skipIf(*gpu_unavailable)
@given(
T=st.integers(min_value=1, max_value=10),
D=st.integers(min_value=2, max_value=128),
B=st.integers(min_value=1, max_value=128),
log_E=st.integers(min_value=3, max_value=5),
L=st.integers(min_value=0, max_value=20),
weights_ty=st.sampled_from(
[
SparseType.FP32,
SparseType.FP16,
SparseType.INT8,
SparseType.INT4,
SparseType.INT2,
]
),
output_dtype=st.sampled_from(
[
SparseType.FP16,
SparseType.BF16,
SparseType.INT8,
]
)
if not TEST_WITH_ROCM
else st.sampled_from(
[
SparseType.FP16,
# The counterparts of __nv_bfloat16 and __nv_bfloat162 are not supported on ROCm
SparseType.INT8,
]
),
)
@settings(
verbosity=Verbosity.verbose,
max_examples=MAX_EXAMPLES_LONG_RUNNING,
deadline=None,
suppress_health_check=[HealthCheck.filter_too_much],
)
def test_nbit_split_embedding_weights_with_scale_and_bias(
self,
T: int,
D: int,
B: int,
log_E: int,
L: int,
weights_ty: SparseType,
output_dtype: SparseType,
) -> None:
        D_alignment = max(weights_ty.align_size() for _ in range(T))
D_alignment = max(D_alignment, output_dtype.align_size())
D = round_up(D, D_alignment)
# BF16 output only works for CUDA device sm80+ (e.g., A100)
assume(
torch.cuda.is_available()
and torch.cuda.get_device_capability() >= (8, 0)
or not output_dtype == SparseType.BF16
)
Ds = [
round_up(
np.random.randint(low=int(max(0.25 * D, 1)), high=int(1.0 * D)),
D_alignment,
)
for _ in range(T)
]
        Ds = [D] * T  # Override with a uniform D; the randomized Ds above are currently unused
E = int(10**log_E)
Es = [np.random.randint(low=int(0.5 * E), high=int(2.0 * E)) for _ in range(T)]
weights_ty_list = [weights_ty] * T
managed = [EmbeddingLocation.DEVICE] * T
op = IntNBitTableBatchedEmbeddingBagsCodegen(
embedding_specs=[
(
"",
E,
D,
W_TY,
EmbeddingLocation(M),
)
for (E, D, M, W_TY) in zip(Es, Ds, managed, weights_ty_list)
],
output_dtype=output_dtype,
device=torch.cuda.current_device(),
)
# Initialize the random weights for int nbit table split embedding bag
op.fill_random_weights()
        # Fetch the weights in both the plain and the scale/bias-split views
split_weights = op.split_embedding_weights()
split_weights_with_scale_bias = op.split_embedding_weights_with_scale_bias(
split_scale_bias_mode=2
)
for t in range(T):
(weights, scale_bias) = split_weights[t]
(weights2, scale, bias) = split_weights_with_scale_bias[t]
torch.testing.assert_close(weights2, weights)
if scale is None:
self.assertIsNone(scale_bias)
self.assertIsNone(bias)
else:
torch.testing.assert_close(
scale.cpu(),
torch.tensor(
scale_bias[:, : scale_bias.size(1) // 2]
.contiguous()
.cpu()
.numpy()
.view(np.float16)
),
)
torch.testing.assert_close(
bias.cpu(),
torch.tensor(
scale_bias[:, scale_bias.size(1) // 2 :]
.contiguous()
.cpu()
.numpy()
.view(np.float16)
),
)
@given(
T=st.integers(min_value=1, max_value=3),
D=st.integers(min_value=2, max_value=128),
B=st.integers(min_value=1, max_value=32),
log_E=st.integers(min_value=3, max_value=5),
L=st.integers(min_value=0, max_value=10),
weights_precision=st.sampled_from([SparseType.FP16, SparseType.FP32]),
weighted=st.booleans(),
mixed=st.booleans(),
long_segments=st.booleans(),
pooling_mode=st.sampled_from(
[
PoolingMode.SUM,
PoolingMode.MEAN,
PoolingMode.NONE,
]
),
use_cpu=st.booleans()
if (gpu_available and not TEST_WITH_ROCM)
else st.just(False)
if (gpu_available and TEST_WITH_ROCM)
else st.just(True),
output_dtype=st.sampled_from([SparseType.FP32, SparseType.FP16]),
)
@settings(
verbosity=Verbosity.verbose,
max_examples=10,
deadline=None,
suppress_health_check=[HealthCheck.filter_too_much, HealthCheck.data_too_large],
)
def test_backward_dense( # noqa C901
self,
T: int,
D: int,
B: int,
log_E: int,
L: int,
weights_precision: SparseType,
weighted: bool,
mixed: bool,
long_segments: bool,
pooling_mode: PoolingMode,
use_cpu: bool,
output_dtype: SparseType,
) -> None:
# NOTE: torch.autograd.gradcheck() is too time-consuming for CPU version
# so we have to limit (T * B * L * D)!
assume(not use_cpu or T * B * L * D <= 2048)
assume(pooling_mode == PoolingMode.SUM or not weighted)
assume(not (use_cpu and weights_precision == SparseType.FP16))
        # No-bag (PoolingMode.NONE) only works on GPUs and does not support mixed dims or per-sample weights
assume(not use_cpu or pooling_mode != PoolingMode.NONE)
assume(not mixed or pooling_mode != PoolingMode.NONE)
assume(not weighted or pooling_mode != PoolingMode.NONE)
emb_op = DenseTableBatchedEmbeddingBagsCodegen
if pooling_mode == PoolingMode.SUM:
mode = "sum"
do_pooling = True
elif pooling_mode == PoolingMode.MEAN:
mode = "mean"
do_pooling = True
elif pooling_mode == PoolingMode.NONE:
mode = "sum"
do_pooling = False
else:
            # Unreachable unless a new PoolingMode is added without being handled above
raise RuntimeError("Unknown PoolingMode!")
E = int(10**log_E)
if use_cpu:
D = (D + 15) // 16 * 4
else:
D = D * 4
if not mixed:
Ds = [D] * T
Es = [E] * T
else:
Ds = [
round_up(np.random.randint(low=int(0.25 * D), high=int(1.0 * D)), 4)
for _ in range(T)
]
Es = [
np.random.randint(low=int(0.5 * E), high=int(2 * E)) for _ in range(T)
]
if do_pooling:
bs = [
to_device(torch.nn.EmbeddingBag(E, D, mode=mode, sparse=False), use_cpu)
for (E, D) in zip(Es, Ds)
]
else:
bs = [
to_device(torch.nn.Embedding(E, D, sparse=False), use_cpu)
for (E, D) in zip(Es, Ds)
]
if weights_precision == SparseType.FP16:
bs = [b.half() for b in bs]
xs = [
to_device(
torch.from_numpy(
np.random.choice(range(e), size=(B, L), replace=True).astype(
np.int64
)
),
use_cpu,
)
for e in Es
]
if long_segments and L > 0 and weights_precision != SparseType.FP16:
for x in xs:
x[:, 0] = 0
xws = [to_device(torch.randn(size=(B, L)), use_cpu) for _ in range(T)]
xws_acc_type = copy.deepcopy(xws)
if weights_precision == SparseType.FP16:
xws = [xw.half() for xw in xws]
fs = (
[
b_indices(b, x, use_cpu=use_cpu, do_pooling=do_pooling)
for (b, x) in zip(bs, xs)
]
if not weighted
else [
b_indices(
b,
x,
per_sample_weights=xw.view(-1),
use_cpu=use_cpu,
do_pooling=do_pooling,
)
for (b, x, xw) in zip(bs, xs, xws)
]
)
gos = [torch.randn_like(f) for f in fs]
[f.backward(go) for (f, go) in zip(fs, gos)]
# pyre-fixme[16]: `Optional` has no attribute `view`.
grad_weights = torch.cat([b.weight.grad.view(-1) for b in bs])
if weights_precision == SparseType.FP16 and not use_cpu:
grad_weights = grad_weights.half()
cc = emb_op(
embedding_specs=[(E, D) for (E, D) in zip(Es, Ds)],
pooling_mode=pooling_mode,
use_cpu=use_cpu,
weights_precision=weights_precision,
output_dtype=output_dtype,
)
if do_pooling:
            # NOTE: check that the module is TorchScript-compatible.
cc = torch.jit.script(cc)
for t in range(T):
cc.split_embedding_weights()[t].data.copy_(bs[t].weight)
x = torch.cat([x.view(1, B, L) for x in xs], dim=0)
xw = torch.cat([xw.view(1, B, L) for xw in xws_acc_type], dim=0)
(indices, offsets) = get_table_batched_offsets_from_dense(x, use_cpu=use_cpu)
fc2 = (
cc(indices, offsets)
if not weighted
else cc(indices, offsets, to_device(xw.contiguous().view(-1), use_cpu))
)
if do_pooling:
f = torch.cat([f.view(B, -1) for f in fs], dim=1)
else:
f = torch.cat(fs, dim=0).view(-1, D)
torch.testing.assert_close(
fc2.float(),
f.float(),
atol=5.0e-3
if weights_precision == SparseType.FP16 or output_dtype == SparseType.FP16
else 1.0e-5,
rtol=5.0e-3
if weights_precision == SparseType.FP16 or output_dtype == SparseType.FP16
else 1.0e-5,
)
if do_pooling:
goc = torch.cat([go.view(B, -1) for go in gos], dim=1)
else:
goc = torch.cat(gos, dim=0)
fc2.backward(goc)
torch.testing.assert_close(
cc.weights.grad,
grad_weights,
atol=5.0e-3
if weights_precision == SparseType.FP16 or output_dtype == SparseType.FP16
else 1.0e-4,
rtol=5.0e-3
if weights_precision == SparseType.FP16 or output_dtype == SparseType.FP16
else 1.0e-4,
)
cc = DenseTableBatchedEmbeddingBagsCodegen(
[(E, D) for (E, D) in zip(Es, Ds)],
# NOTE: only SUM pooling can work with per_sample_weights!
pooling_mode=PoolingMode.SUM,
use_cpu=use_cpu,
)
per_sample_weights = to_device(xw.contiguous().view(-1), use_cpu)
if use_cpu:
# NOTE: GPU version of DenseTableBatchedEmbeddingBagsCodegen doesn't support double.
cc = cc.double()
per_sample_weights = per_sample_weights.double()
per_sample_weights.requires_grad = True
indices.requires_grad = False
offsets.requires_grad = False
for param in cc.parameters():
param.requires_grad = False
y = cc(indices, offsets, per_sample_weights)
y.sum().backward()
# pyre-fixme[16]: `Optional` has no attribute `clone`.
indice_weight_grad_all = per_sample_weights.grad.clone().cpu()
T_ = len(xws)
feature_requires_grad = to_device(
torch.tensor(np.random.choice([0, 1], replace=True, size=(T_,))).int(),
use_cpu,
)
per_sample_weights = per_sample_weights.detach().clone()
per_sample_weights.requires_grad = True
y = cc(
indices,
offsets,
per_sample_weights,
feature_requires_grad=feature_requires_grad,
)
y.sum().backward()
indice_weight_grad_mask = per_sample_weights.grad.clone().cpu()
for t in range(T_):
if feature_requires_grad[t]:
torch.testing.assert_close(
indice_weight_grad_mask.view(T_, B, L)[t],
indice_weight_grad_all.view(T_, B, L)[t],
)
else:
torch.testing.assert_close(
indice_weight_grad_mask.view(T_, B, L)[t],
torch.zeros_like(indice_weight_grad_mask.view(T_, B, L)[t]),
)
per_sample_weights = to_device(xw.contiguous().view(-1), use_cpu)
if use_cpu:
# NOTE: GPU version of DenseTableBatchedEmbeddingBagsCodegen doesn't support double.
cc = cc.double()
per_sample_weights = per_sample_weights.double()
else:
cc = cc.float()
per_sample_weights = per_sample_weights.float()
per_sample_weights.requires_grad = True
indices.requires_grad = False
offsets.requires_grad = False
for param in cc.parameters():
param.requires_grad = False
torch.autograd.gradcheck(
cc, (indices, offsets, per_sample_weights), eps=1e-2, atol=1e-3, rtol=1e-3
)
@given(
T=st.integers(min_value=1, max_value=5),
D=st.integers(min_value=2, max_value=256),
B=st.integers(min_value=1, max_value=128),
log_E=st.integers(min_value=3, max_value=5),
L=st.integers(min_value=0, max_value=20),
weights_precision=st.sampled_from([SparseType.FP16, SparseType.FP32]),
weighted=st.booleans(),
long_segments=st.booleans(),
pooling_mode=st.sampled_from(
[
PoolingMode.SUM,
PoolingMode.MEAN,
PoolingMode.NONE,
]
),
output_dtype=st.sampled_from([SparseType.FP16, SparseType.FP32]),
)
@settings(
verbosity=Verbosity.verbose,
max_examples=MAX_EXAMPLES,
deadline=None,
suppress_health_check=[HealthCheck.filter_too_much, HealthCheck.data_too_large],
)
def test_backward_none(self, **kwargs: Any) -> None:
self.execute_backward_none_(**kwargs)
@given(
T=st.integers(min_value=1, max_value=5),
D=st.integers(min_value=2, max_value=256),
B=st.integers(min_value=1, max_value=128),
log_E=st.integers(min_value=3, max_value=5),
L=st.integers(min_value=0, max_value=20),
weights_precision=st.sampled_from([SparseType.FP16, SparseType.FP32]),
weighted=st.booleans(),
long_segments=st.booleans(),
pooling_mode=st.sampled_from(
[
PoolingMode.SUM,
PoolingMode.MEAN,
PoolingMode.NONE,
]
),
output_dtype=st.sampled_from([SparseType.FP16, SparseType.FP32]),
)
@settings(
verbosity=Verbosity.verbose,
max_examples=MAX_EXAMPLES,
deadline=None,
suppress_health_check=[HealthCheck.filter_too_much, HealthCheck.data_too_large],
)
def test_backward_none_with_rowwise_adagrad(self, **kwargs: Any) -> None:
self.execute_backward_none_(optimizer=OptimType.EXACT_ROWWISE_ADAGRAD, **kwargs)
def execute_backward_none_( # noqa C901
self,
T: int,
D: int,
B: int,
log_E: int,
L: int,
weights_precision: SparseType,
weighted: bool,
long_segments: bool,
pooling_mode: PoolingMode,
output_dtype: SparseType,
optimizer: Optional[OptimType] = None,
) -> None:
use_cpu = False
mixed = False
use_cache = False
# NOTE: cache is not applicable to CPU version.
assume(not use_cpu or not use_cache)
# NOTE: limit (T * B * L * D) to avoid timeout for CPU version!
assume(not use_cpu or T * B * L * D <= 2048)
assume(not (use_cpu and weights_precision == SparseType.FP16))
        # No-bag (PoolingMode.NONE) only works on GPUs and does not support mixed dims or per-sample weights
assume(not use_cpu or pooling_mode != PoolingMode.NONE)
assume(not mixed or pooling_mode != PoolingMode.NONE)
assume(not weighted or pooling_mode != PoolingMode.NONE)
assume(pooling_mode == PoolingMode.SUM or not weighted)
if pooling_mode == PoolingMode.SUM:
mode = "sum"
do_pooling = True
elif pooling_mode == PoolingMode.MEAN:
mode = "mean"
do_pooling = True
elif pooling_mode == PoolingMode.NONE:
mode = "sum"
do_pooling = False
else:
            # Unreachable unless a new PoolingMode is added without being handled above
raise RuntimeError("Unknown PoolingMode!")
E = int(10**log_E)
if use_cpu:
D = (D + 15) // 16 * 4
else:
D = D * 4
if not mixed:
Ds = [D] * T
Es = [E] * T
else:
Ds = [
round_up(np.random.randint(low=int(0.25 * D), high=int(1.0 * D)), 4)
for _ in range(T)
]
Es = [
np.random.randint(low=int(0.5 * E), high=int(2.0 * E)) for _ in range(T)
]
compute_device = ComputeDevice.CUDA
if use_cpu:
managed = [EmbeddingLocation.HOST] * T
compute_device = ComputeDevice.CPU
elif TEST_WITH_ROCM:
# ROCm managed memory allocation is under development
managed = [EmbeddingLocation.DEVICE] * T
elif use_cache:
managed = [EmbeddingLocation.MANAGED_CACHING] * T
if mixed:
average_D = sum(Ds) // T
for t, d in enumerate(Ds):
managed[t] = (
EmbeddingLocation.DEVICE if d < average_D else managed[t]
)
else:
managed = [
np.random.choice(
[
EmbeddingLocation.DEVICE,
]
)
for _ in range(T)
]
if do_pooling:
bs = [
to_device(torch.nn.EmbeddingBag(E, D, mode=mode, sparse=True), use_cpu)
for (E, D) in zip(Es, Ds)
]
else:
bs = [
to_device(torch.nn.Embedding(E, D, sparse=True), use_cpu)
for (E, D) in zip(Es, Ds)
]
if weights_precision == SparseType.FP16:
bs = [b.half() for b in bs]
feature_table_map = list(range(T))
xs = [
to_device(
torch.from_numpy(
np.random.choice(range(Es[t]), size=(B, L)).astype(np.int64)
),
use_cpu,
)
for t in feature_table_map
]
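# With long_segments, pin the first position of every sample to row 0 so that
# one row accumulates a very long gradient segment in the backward pass.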
if long_segments and L > 0:
for x in xs:
x[:, 0] = 0
xws = [to_device(torch.randn(size=(B, L)), use_cpu) for _ in range(len(xs))]
xws_acc_type = copy.deepcopy(xws)
if weights_precision == SparseType.FP16:
xws = [xw.half() for xw in xws]
x = torch.cat([x.view(1, B, L) for x in xs], dim=0)
xw = torch.cat([xw.view(1, B, L) for xw in xws_acc_type], dim=0)
(indices, offsets) = get_table_batched_offsets_from_dense(x, use_cpu=use_cpu)
embedding_specs = [
(E, D, M, compute_device) for (E, D, M) in zip(Es, Ds, managed)
]
# Hyperparameters in case optimizer is not None
lr = 0.5
eps = 0.2
stochastic_rounding = random.choice([True, False])
if optimizer is None:
fs = (
[
b_indices(b, x, use_cpu=use_cpu, do_pooling=do_pooling)
for (b, x) in zip(bs, xs)
]
if not weighted
else [
b_indices(
b,
x,
per_sample_weights=xw.view(-1),
use_cpu=use_cpu,
do_pooling=do_pooling,
)
for (b, x, xw) in zip(bs, xs, xws)
]
)
gos: Union[List[Tensor], Tensor] = [torch.randn_like(f) for f in fs]
[f.backward(go) for (f, go) in zip(fs, gos)]
else:
bs_ = SplitTableBatchedEmbeddingBagsCodegen(
embedding_specs=embedding_specs,
optimizer=optimizer,
feature_table_map=feature_table_map,
weights_precision=weights_precision,
pooling_mode=pooling_mode,
output_dtype=output_dtype,
learning_rate=lr,
eps=eps,
stochastic_rounding=stochastic_rounding,
)
for t in range(T):
bs_.split_embedding_weights()[t].data.copy_(bs[t].weight)
fs = (
bs_(indices, offsets)
if not weighted
else bs_(
indices,
offsets,
to_device(xw.contiguous().view(-1), use_cpu),
)
)
gos: Union[List[Tensor], Tensor] = torch.rand_like(fs)
fs.backward(gos)
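# cc is the TBE under test, built with OptimType.NONE: its backward applies no
# optimizer update and instead leaves the weight gradient on cc.weights_dev.grad
# (a sparse tensor), which is either checked directly below or consumed by the
# external SplitEmbeddingRowwiseAdagrad step.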
cc = SplitTableBatchedEmbeddingBagsCodegen(
embedding_specs=embedding_specs,
optimizer=OptimType.NONE,
feature_table_map=feature_table_map,
weights_precision=weights_precision,
pooling_mode=pooling_mode,
output_dtype=output_dtype,
)
for t in range(T):
cc.split_embedding_weights()[t].data.copy_(bs[t].weight)
total_unique_indices = 0
# Compute number of unique indices
for t in range(len(feature_table_map)):
start = offsets[t * B]
end = offsets[(t + 1) * B]
uniq_indices = indices[start:end].unique()
total_unique_indices += uniq_indices.numel()
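# total_unique_indices is passed to the forward so the OptimType.NONE path can
# size the sparse weight gradient produced in backward.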
fc2 = (
cc(indices, offsets, total_unique_indices=total_unique_indices)
if not weighted
else cc(
indices,
offsets,
to_device(xw.contiguous().view(-1), use_cpu),
total_unique_indices=total_unique_indices,
)
)
if optimizer is None:
assert type(gos) is list
if do_pooling:
goc = torch.cat([go.view(B, -1) for go in gos], dim=1)
else:
goc = torch.cat(gos, dim=0)
else:
assert type(gos) is Tensor
goc = gos.clone()
fc2.backward(goc)
if optimizer is not None:
params = SplitEmbeddingOptimizerParams(weights_dev=cc.weights_dev)
embedding_args = SplitEmbeddingArgs(
weights_placements=cc.weights_placements,
weights_offsets=cc.weights_offsets,
max_D=cc.max_D,
)
optim = SplitEmbeddingRowwiseAdagrad(
params,
embedding_args,
embedding_specs,
feature_table_map,
learning_rate=lr,
eps=eps,
stochastic_rounding=stochastic_rounding,
)
optim.step()
if use_cache:
cc.flush()
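# Compare against the reference: with no optimizer, the sparse weights_dev.grad
# must match the concatenated dense-table gradients; with the external optimizer
# step above, the rows touched by this batch must match the fused
# rowwise-adagrad TBE (bs_).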
if optimizer is None:
test_tensor = cc.weights_dev.grad
weight_grads = []
for t in range(T):
grad = bs[t].weight.grad
# Check grad to suppress pyre error
assert grad is not None
weight_grads.append(grad)
ref_grad = torch.concat(weight_grads, dim=0).to_sparse().coalesce()
ref_tensor = (
ref_grad.half() if weights_precision == SparseType.FP16 else ref_grad
)
else:
indices = cc.weights_dev.grad._indices().flatten()
# Select only the part in the table that is updated
test_tensor = torch.index_select(cc.weights_dev.view(-1, D), 0, indices)
ref_tensor = torch.index_select(bs_.weights_dev.view(-1, D), 0, indices)
tolerance = (
1.0e-2
if long_segments
else (
1.0e-4
if weights_precision == SparseType.FP32
and output_dtype == SparseType.FP32
else 1.0e-2
)
)
torch.testing.assert_close(
test_tensor,
ref_tensor,
atol=tolerance,
rtol=tolerance,
)
def execute_backward_sgd_( # noqa C901
self,
T: int,
D: int,
B: int,
log_E: int,
L: int,
weights_precision: SparseType,
weighted: bool,
mixed: bool,
mixed_B: bool,
use_cache: bool,
cache_algorithm: CacheAlgorithm,
long_segments: bool,
pooling_mode: PoolingMode,
use_cpu: bool,
output_dtype: SparseType,
) -> None:
# NOTE: cache is not applicable to CPU version.
assume(not use_cpu or not use_cache)
# NOTE: limit (T * B * L * D) to avoid timeout for CPU version!
assume(not use_cpu or T * B * L * D <= 2048)
assume(not (use_cpu and weights_precision == SparseType.FP16))
# No-bag (PoolingMode.NONE) ops only work on GPUs and support neither mixed dims nor per-sample weights
assume(not use_cpu or pooling_mode != PoolingMode.NONE)
assume(not mixed or pooling_mode != PoolingMode.NONE)
assume(not weighted or pooling_mode != PoolingMode.NONE)
assume(pooling_mode == PoolingMode.SUM or not weighted)
# TODO: Support these cases
assume(
not mixed_B
or (
weights_precision != SparseType.INT8
and output_dtype != SparseType.INT8
and not use_cpu
and not use_cache
and pooling_mode != PoolingMode.NONE
)
)
emb_op = SplitTableBatchedEmbeddingBagsCodegen
if pooling_mode == PoolingMode.SUM:
mode = "sum"
do_pooling = True
elif pooling_mode == PoolingMode.MEAN:
mode = "mean"
do_pooling = True
elif pooling_mode == PoolingMode.NONE:
mode = "sum"
do_pooling = False
else:
# This proves that we have exhaustively checked all PoolingModes
raise RuntimeError("Unknown PoolingMode!")
E = int(10**log_E)
if use_cpu:
D = (D + 15) // 16 * 4
else:
D = D * 4
if not mixed:
Ds = [D] * T
Es = [E] * T
else:
Ds = [
round_up(np.random.randint(low=int(0.25 * D), high=int(1.0 * D)), 4)
for _ in range(T)
]
Es = [
np.random.randint(low=int(0.5 * E), high=int(2.0 * E)) for _ in range(T)
]
if not mixed_B:
Bs = [B] * T
else:
low = max(int(0.25 * B), 1)
high = int(B)
if low == high:
Bs = [B] * T
else:
Bs = [np.random.randint(low=low, high=high) for _ in range(T)]
compute_device = ComputeDevice.CUDA
if use_cpu:
managed = [EmbeddingLocation.HOST] * T
compute_device = ComputeDevice.CPU
elif TEST_WITH_ROCM:
# ROCm managed memory allocation is under development
managed = [EmbeddingLocation.DEVICE] * T
elif use_cache:
managed = [EmbeddingLocation.MANAGED_CACHING] * T
if mixed:
average_D = sum(Ds) // T
for t, d in enumerate(Ds):
managed[t] = (
EmbeddingLocation.DEVICE if d < average_D else managed[t]
)
else:
managed = [
np.random.choice(
[
EmbeddingLocation.DEVICE,
EmbeddingLocation.MANAGED,
]
)
for _ in range(T)
]
if do_pooling:
bs = [
to_device(torch.nn.EmbeddingBag(E, D, mode=mode, sparse=True), use_cpu)
for (E, D) in zip(Es, Ds)
]
else:
bs = [
to_device(torch.nn.Embedding(E, D, sparse=True), use_cpu)
for (E, D) in zip(Es, Ds)
]
if weights_precision == SparseType.FP16:
bs = [b.half() for b in bs]
feature_table_map = list(range(T))
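# Replicate one table so that two features map to the same physical table,
# exercising the shared-table path of feature_table_map.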
table_to_replicate = T // 2
# pyre-fixme[6]: For 2nd param expected `Embedding` but got
# `Union[Embedding, EmbeddingBag]`.
bs.insert(table_to_replicate, bs[table_to_replicate])
feature_table_map.insert(table_to_replicate, table_to_replicate)
num_features = len(feature_table_map)
if not mixed_B:
Bs = [B] * num_features
Bs_rank_feature = [[0]]
else:
Bs_rank_feature, Bs = gen_mixed_B_batch_sizes(B, num_features)
# Generate indices
xs = [
to_device(
torch.from_numpy(
np.random.choice(range(Es[t]), size=(b, L), replace=True).astype(
np.int64
)
),
use_cpu,
)
for t, b in zip(feature_table_map, Bs)
]
if long_segments and L > 0:
for x in xs:
x[:, 0] = 0
# Generate positional weights
xws = [to_device(torch.randn(size=(b, L)), use_cpu) for b in Bs]
xws_acc_type = copy.deepcopy(xws)
if weights_precision == SparseType.FP16:
xws = [xw.half() for xw in xws]
# Run baseline's forward
fs = (
[
b_indices(b, x, use_cpu=use_cpu, do_pooling=do_pooling)
for (b, x) in zip(bs, xs)
]
if not weighted
else [
b_indices(
b,
x,
per_sample_weights=xw.view(-1),
use_cpu=use_cpu,
do_pooling=do_pooling,
)
for (b, x, xw) in zip(bs, xs, xws)
]
)
# Generate gradients
gos = [torch.randn_like(f) for f in fs]
# Run baseline's backward
[f.backward(go) for (f, go) in zip(fs, gos)]
# do SGD update
lr = 0.05
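# Drop the duplicated reference module so bs has one entry per physical table
# again; the reference update below is plain SGD: w_new = w - lr * grad.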
del bs[table_to_replicate]
# pyre-fixme[58]: `*` is not supported for operand types
# `Optional[torch._tensor.Tensor]` and `float`.
new_weights = [(b.weight - b.weight.grad * lr) for b in bs]
# Create a TBE op
cc = emb_op(
embedding_specs=[
(E, D, M, compute_device) for (E, D, M) in zip(Es, Ds, managed)
],
optimizer=OptimType.EXACT_SGD,
feature_table_map=feature_table_map,
learning_rate=lr,
weights_precision=weights_precision,
cache_algorithm=cache_algorithm,
pooling_mode=pooling_mode,
output_dtype=output_dtype,
)
for t in range(T):
cc.split_embedding_weights()[t].data.copy_(bs[t].weight)
x = torch.cat([x.contiguous().flatten() for x in xs], dim=0)
xw = torch.cat([xw.contiguous().flatten() for xw in xws_acc_type], dim=0)
(indices, offsets) = get_table_batched_offsets_from_dense(
x, L, sum(Bs), use_cpu=use_cpu
)
batch_size_per_feature_per_rank = Bs_rank_feature if mixed_B else None
# Run TBE's forward
fc2 = (
cc(
indices,
offsets,
batch_size_per_feature_per_rank=batch_size_per_feature_per_rank,
)
if not weighted
else cc(
indices,
offsets,
to_device(xw.contiguous().view(-1), use_cpu),
batch_size_per_feature_per_rank=batch_size_per_feature_per_rank,
)
)
# Generate gradients
if do_pooling:
if mixed_B:
goc = format_ref_tensors_in_mixed_B_layout(gos, Bs_rank_feature)
else:
goc = torch.cat([go.view(B, -1) for go in gos], dim=1)
else:
goc = torch.cat(gos, dim=0)
# Run TBE's backward
fc2.backward(goc)
if use_cache:
cc.flush()
for t in range(T):
torch.testing.assert_close(
cc.split_embedding_weights()[t],
new_weights[t].half()
if weights_precision == SparseType.FP16 and not use_cpu
else new_weights[t],
atol=1.0e-2
if long_segments
else (5.0e-3 if weights_precision == SparseType.FP16 else 1.0e-5),
rtol=1.0e-1
if long_segments
else (2.0e-2 if weights_precision == SparseType.FP16 else 1.0e-5),
)
@given(
T=st.integers(min_value=1, max_value=5),
D=st.integers(min_value=2, max_value=256),
B=st.integers(min_value=1, max_value=128),
log_E=st.integers(min_value=3, max_value=5),
L=st.integers(min_value=0, max_value=20),
weights_precision=st.sampled_from([SparseType.FP16, SparseType.FP32]),
weighted=st.booleans(),
mixed=st.booleans(),
mixed_B=st.booleans(),
use_cache=st.booleans(),
cache_algorithm=st.sampled_from(CacheAlgorithm),
long_segments=st.booleans(),
pooling_mode=st.sampled_from(
[
PoolingMode.SUM,
PoolingMode.MEAN,
PoolingMode.NONE,
]
),
use_cpu=st.booleans()
if (gpu_available and not TEST_WITH_ROCM)
else st.just(False)
if (gpu_available and TEST_WITH_ROCM)
else st.just(True),
)
@settings(
verbosity=Verbosity.verbose,
max_examples=MAX_EXAMPLES,
deadline=None,
suppress_health_check=[HealthCheck.filter_too_much, HealthCheck.data_too_large],
)
def test_backward_sgd( # noqa C901
self,
T: int,
D: int,
B: int,
log_E: int,
L: int,
weights_precision: SparseType,
weighted: bool,
mixed: bool,
mixed_B: bool,
use_cache: bool,
cache_algorithm: CacheAlgorithm,
long_segments: bool,
pooling_mode: PoolingMode,
use_cpu: bool,
) -> None:
self.execute_backward_sgd_(
T,
D,
B,
log_E,
L,
weights_precision,
weighted,
mixed,
mixed_B if not use_cpu else False,
use_cache,
cache_algorithm,
long_segments,
pooling_mode,
use_cpu,
SparseType.FP32, # output_dtype
)
@given(
D=st.integers(min_value=2, max_value=10),
# The 256 * 1024 batch exercises the case where num_ctas_for_run needs to be capped
# at the number of SMs (H100 SXM5 has 132 SMs and the default seglen
# per CTA is 1024)
B=st.sampled_from([1152, 256 * 1024]),
L=st.integers(min_value=1, max_value=4),
weighted=st.booleans(),
mixed=st.booleans(),
mixed_B=st.booleans(),
use_cache=st.booleans(),
cache_algorithm=st.sampled_from(CacheAlgorithm),
)
@settings(
verbosity=Verbosity.verbose,
max_examples=MAX_EXAMPLES_LONG_RUNNING,
deadline=None,
suppress_health_check=[HealthCheck.filter_too_much, HealthCheck.data_too_large],
)
@unittest.skipIf(*gpu_unavailable)
def test_backward_sgd_really_long_segments( # noqa C901
self,
D: int,
B: int,
L: int,
weighted: bool,
mixed: bool,
mixed_B: bool,
use_cache: bool,
cache_algorithm: CacheAlgorithm,
) -> None:
self.execute_backward_sgd_(
2, # T
D,
B,
1, # log_E,
L,
SparseType.FP32, # weights_precision
weighted,
mixed,
mixed_B,
use_cache,
cache_algorithm,
True, # long_segments
PoolingMode.SUM, # pooling_mode
False, # use_cpu
SparseType.FP32, # output_dtype
)
def execute_backward_adagrad_( # noqa C901
self,
T: int,
D: int,
B: int,
log_E: int,
L: int,
D_gradcheck: int,
weights_precision: SparseType,
stochastic_rounding: bool,
weighted: bool,
row_wise: bool,
mixed: bool,
mixed_B: bool,
use_cache: bool,
cache_algorithm: CacheAlgorithm,
pooling_mode: PoolingMode,
use_cpu: bool,
output_dtype: SparseType,
weight_decay_mode: WeightDecayMode = WeightDecayMode.NONE,
) -> None:
# NOTE: cache is not applicable to CPU version.
assume(not use_cpu or not use_cache)
# NOTE: torch.autograd.gradcheck() is too time-consuming for CPU version
# so we have to limit (T * B * L * D)!
assume(not use_cpu or T * B * L * D <= 1024)
assume(not (use_cpu and weights_precision == SparseType.FP16))
assume(
pooling_mode == PoolingMode.SUM or not weighted
)
# No-bag (PoolingMode.NONE) ops only work on GPUs and support neither mixed dims nor per-sample weights
assume(not use_cpu or pooling_mode != PoolingMode.NONE)
assume(not mixed or pooling_mode != PoolingMode.NONE)
assume(not weighted or pooling_mode != PoolingMode.NONE)
# TODO: Support these cases
assume(
not mixed_B
or (
weights_precision != SparseType.INT8
and output_dtype != SparseType.INT8
and not use_cpu
and not use_cache
and pooling_mode != PoolingMode.NONE
)
)
emb_op = SplitTableBatchedEmbeddingBagsCodegen
if pooling_mode == PoolingMode.SUM:
mode = "sum"
do_pooling = True
elif pooling_mode == PoolingMode.MEAN:
mode = "mean"
do_pooling = True
elif pooling_mode == PoolingMode.NONE:
mode = "sum"
do_pooling = False
else:
# This proves that we have exhaustively checked all PoolingModes
raise RuntimeError("Unknown PoolingMode!")
# stochastic rounding only implemented for rowwise
assume(not stochastic_rounding or row_wise)
# only row-wise supports caching
assume(row_wise or not use_cache)
E = int(10**log_E)
if use_cpu:
D = (D + 15) // 16 * 4
else:
D = D * 4
if not mixed:
Ds = [D] * T
Es = [E] * T
else:
Ds = [
round_up(np.random.randint(low=int(0.25 * D), high=int(1.0 * D)), 4)
for _ in range(T)
]
Es = [
np.random.randint(low=int(0.5 * E), high=int(2.0 * E)) for _ in range(T)
]
if not mixed_B:
Bs = [B] * T
else:
low = max(int(0.25 * B), 1)
high = int(B)
if low == high:
Bs = [B] * T
else:
Bs = [np.random.randint(low=low, high=high) for _ in range(T)]
compute_device = ComputeDevice.CUDA
if use_cpu:
managed = [EmbeddingLocation.HOST] * T
compute_device = ComputeDevice.CPU
elif TEST_WITH_ROCM:
# ROCm managed memory allocation is under development
managed = [EmbeddingLocation.DEVICE] * T
elif use_cache:
managed = [EmbeddingLocation.MANAGED_CACHING] * T
if mixed:
average_D = sum(Ds) // T
for t, d in enumerate(Ds):
managed[t] = (
EmbeddingLocation.DEVICE if d < average_D else managed[t]
)
else:
managed = [
np.random.choice(
[
EmbeddingLocation.DEVICE,
EmbeddingLocation.MANAGED,
]
)
for _ in range(T)
]
if do_pooling:
bs = [
to_device(torch.nn.EmbeddingBag(E, D, mode=mode, sparse=True), use_cpu)
for (E, D) in zip(Es, Ds)
]
else:
bs = [
to_device(torch.nn.Embedding(E, D, sparse=True), use_cpu)
for (E, D) in zip(Es, Ds)
]
if weights_precision == SparseType.FP16:
bs = [b.half() for b in bs]
feature_table_map = list(range(T))
# Autograd with a shared (replicated) embedding table only works with the exact optimizers
table_to_replicate = T // 2
# pyre-fixme[6]: For 2nd param expected `Embedding` but got
# `Union[Embedding, EmbeddingBag]`.
bs.insert(table_to_replicate, bs[table_to_replicate])
feature_table_map.insert(table_to_replicate, table_to_replicate)
num_features = len(feature_table_map)
if not mixed_B:
Bs = [B] * num_features
Bs_rank_feature = [[0]]
else:
Bs_rank_feature, Bs = gen_mixed_B_batch_sizes(B, num_features)
xs = [
to_device(
torch.from_numpy(
np.random.choice(range(Es[t]), size=(b, L), replace=True).astype(
np.int64
)
),
use_cpu,
)
for t, b in zip(feature_table_map, Bs)
]
xws = [to_device(torch.randn(size=(b, L)), use_cpu) for b in Bs]
xws_acc_type = copy.deepcopy(xws)
if weights_precision == SparseType.FP16 and not use_cpu:
xws = [xw.half() for xw in xws]
fs = (
[
b_indices(b, x, use_cpu=use_cpu, do_pooling=do_pooling)
for (b, x) in zip(bs, xs)
]
if not weighted
else [
b_indices(
b,
x,
per_sample_weights=xw.view(-1),
use_cpu=use_cpu,
do_pooling=do_pooling,
)
for (b, x, xw) in zip(bs, xs, xws)
]
)
gos = [torch.randn_like(f) for f in fs]
[f.backward(go) for (f, go) in zip(fs, gos)]
# Adagrad hyperparameters and optimizer selection
lr = 0.5
eps = 0.2
optimizer = (
OptimType.EXACT_ROWWISE_ADAGRAD if row_wise else OptimType.EXACT_ADAGRAD
)
cc = emb_op(
embedding_specs=[
(E, D, M, compute_device) for (E, D, M) in zip(Es, Ds, managed)
],
feature_table_map=feature_table_map,
optimizer=optimizer,
learning_rate=lr,
eps=eps,
weights_precision=weights_precision,
stochastic_rounding=stochastic_rounding,
pooling_mode=pooling_mode,
output_dtype=output_dtype,
)
del bs[table_to_replicate]
for t in range(T):
cc.split_embedding_weights()[t].data.copy_(bs[t].weight)
x = torch.cat([x.contiguous().flatten() for x in xs], dim=0)
xw = torch.cat([xw.contiguous().flatten() for xw in xws_acc_type], dim=0)
(indices, offsets) = get_table_batched_offsets_from_dense(
x, L, sum(Bs), use_cpu=use_cpu
)
batch_size_per_feature_per_rank = Bs_rank_feature if mixed_B else None
fc2 = (
cc(
indices,
offsets,
batch_size_per_feature_per_rank=batch_size_per_feature_per_rank,
)
if not weighted
else cc(
indices,
offsets,
to_device(xw.contiguous().view(-1), use_cpu),
batch_size_per_feature_per_rank=batch_size_per_feature_per_rank,
)
)
if do_pooling:
if mixed_B:
goc = format_ref_tensors_in_mixed_B_layout(gos, Bs_rank_feature)
else:
goc = torch.cat([go.view(B, -1) for go in gos], dim=1)
else:
goc = torch.cat(gos, dim=0)
fc2.backward(goc)
cc.flush()
split_optimizer_states = cc.split_optimizer_states()
assert len(split_optimizer_states) == T
get_optimizer_states = None
if row_wise:
# get_optimizer_state() must be implemented for the row-wise optimizer
get_optimizer_states = cc.get_optimizer_state()
assert len(get_optimizer_states) == T
tolerance = (
1.0e-4
if weights_precision == SparseType.FP32 and output_dtype == SparseType.FP32
else 1.0e-2
)
for t in range(T):
expected_keys = {"sum"}
if row_wise and weight_decay_mode == WeightDecayMode.COUNTER:
(m1, c1, c2) = split_optimizer_states[t]
expected_keys.update(
[
"prev_iter",
"row_counter",
]
)
else:
(m1,) = split_optimizer_states[t]
if get_optimizer_states is not None:
optimizer_states_dict = get_optimizer_states[t]
assert set(optimizer_states_dict.keys()) == expected_keys
# pyre-fixme[16]: `Optional` has no attribute `float`.
ref_optimizer_state = bs[t].weight.grad.float().cpu().to_dense().pow(2)
torch.testing.assert_close(
m1.float().cpu(),
ref_optimizer_state.mean(dim=1) if row_wise else ref_optimizer_state,
atol=tolerance,
rtol=tolerance,
)
for t in range(T):
# optimizer_state = squares (no row-wise) or sum squares (row-wise)
if row_wise and weight_decay_mode == WeightDecayMode.COUNTER:
(m1, c1, c2) = split_optimizer_states[t]
else:
(m1,) = split_optimizer_states[t]
torch.testing.assert_close(
cc.split_embedding_weights()[t].float().cpu(),
torch.addcdiv(
bs[t].weight.float().cpu(),
value=-lr,
tensor1=bs[t].weight.grad.float().cpu().to_dense(),
tensor2=m1.float()
.sqrt_()
.add_(eps)
.view(Es[t], 1 if row_wise else Ds[t])
.cpu(),
),
atol=tolerance,
rtol=tolerance,
)
if use_cpu:
D_gradcheck = (D_gradcheck + 15) // 16 * 4
else:
D_gradcheck = D_gradcheck * 4
cc = emb_op(
embedding_specs=[
(E, D_gradcheck, M, compute_device) for (E, M) in zip(Es, managed)
],
feature_table_map=feature_table_map,
optimizer=optimizer,
learning_rate=0.0,
eps=eps,
weights_precision=weights_precision,
stochastic_rounding=stochastic_rounding,
# NOTE: only SUM pooling can work with per_sample_weights!
pooling_mode=PoolingMode.SUM,
output_dtype=output_dtype,
)
per_sample_weights = to_device(xw.contiguous().view(-1), use_cpu)
if use_cpu:
# NOTE: GPU version of SplitTableBatchedEmbeddingBagsCodegen doesn't support double.
cc = cc.double()
per_sample_weights = per_sample_weights.double()
per_sample_weights.requires_grad = True
indices.requires_grad = False
offsets.requires_grad = False
for param in cc.parameters():
param.requires_grad = False
torch.autograd.gradcheck(
cc,
(
indices,
offsets,
per_sample_weights,
None,
batch_size_per_feature_per_rank,
),
)
per_sample_weights = to_device(xw.contiguous().view(-1), use_cpu)
if use_cpu:
per_sample_weights = per_sample_weights.double()
per_sample_weights.requires_grad = True
indices.requires_grad = False
offsets.requires_grad = False
for param in cc.parameters():
param.requires_grad = False
y = cc(
indices,
offsets,
per_sample_weights,
batch_size_per_feature_per_rank=batch_size_per_feature_per_rank,
)
y.sum().backward()
# pyre-fixme[16]: `Optional` has no attribute `clone`.
indice_weight_grad_all = per_sample_weights.grad.clone().cpu()
T_ = len(xws)
feature_requires_grad = to_device(
torch.tensor(np.random.choice([0, 1], replace=True, size=(T_,))).int(),
use_cpu,
)
per_sample_weights = per_sample_weights.detach().clone()
per_sample_weights.requires_grad = True
y = cc(
indices,
offsets,
per_sample_weights,
feature_requires_grad=feature_requires_grad,
batch_size_per_feature_per_rank=batch_size_per_feature_per_rank,
)
y.sum().backward()
indice_weight_grad_mask = per_sample_weights.grad.clone().cpu()
torch.cuda.synchronize()
acc_B = 0
for t in range(T_):
B = Bs[t]
table_indice_weight_grad_mask = indice_weight_grad_mask[
acc_B : acc_B + B * L
]
table_indice_weight_grad_all = indice_weight_grad_all[acc_B : acc_B + B * L]
acc_B += B * L
if feature_requires_grad[t]:
torch.testing.assert_close(
table_indice_weight_grad_mask,
table_indice_weight_grad_all,
)
else:
torch.testing.assert_close(
table_indice_weight_grad_mask,
torch.zeros_like(table_indice_weight_grad_mask),
)
@given(
T=st.integers(min_value=1, max_value=5),
D=st.integers(min_value=2, max_value=128),
B=st.integers(min_value=1, max_value=128),
log_E=st.integers(min_value=3, max_value=5),
L=st.integers(min_value=0, max_value=20),
D_gradcheck=st.integers(min_value=1, max_value=2),
weights_precision=st.just(SparseType.FP16),
stochastic_rounding=st.booleans(),
weighted=st.booleans(),
row_wise=st.booleans(),
mixed=st.booleans(),
mixed_B=st.booleans(),
use_cache=st.booleans(),
cache_algorithm=st.sampled_from(CacheAlgorithm),
use_cpu=st.booleans()
if (gpu_available and not TEST_WITH_ROCM)
else st.just(False)
if (gpu_available and TEST_WITH_ROCM)
else st.just(True),
output_dtype=st.sampled_from([SparseType.FP32, SparseType.FP16]),
)
@settings(
verbosity=Verbosity.verbose,
max_examples=MAX_EXAMPLES_LONG_RUNNING,
deadline=None,
suppress_health_check=[HealthCheck.filter_too_much, HealthCheck.data_too_large],
)
def test_backward_adagrad_fp16_pmSUM( # noqa C901
self,
T: int,
D: int,
B: int,
log_E: int,
L: int,
D_gradcheck: int,
weights_precision: SparseType,
stochastic_rounding: bool,
weighted: bool,
row_wise: bool,
mixed: bool,
mixed_B: bool,
use_cache: bool,
cache_algorithm: CacheAlgorithm,
use_cpu: bool,
output_dtype: SparseType,
) -> None:
# VBE is supported in rowwise_adagrad only
if not row_wise:
mixed_B = False
self.execute_backward_adagrad_(
T,
D,
B,
log_E,
L,
D_gradcheck,
weights_precision,
stochastic_rounding,
weighted,
row_wise,
mixed,
mixed_B,
use_cache,
cache_algorithm,
PoolingMode.SUM,
use_cpu,
output_dtype,
)
@given(
T=st.integers(min_value=1, max_value=5),
D=st.integers(min_value=2, max_value=128),
B=st.integers(min_value=1, max_value=128),
log_E=st.integers(min_value=3, max_value=5),
L=st.integers(min_value=0, max_value=20),
D_gradcheck=st.integers(min_value=1, max_value=2),
weights_precision=st.just(SparseType.FP16),
stochastic_rounding=st.booleans(),
weighted=st.booleans(),
row_wise=st.booleans(),
mixed=st.booleans(),
mixed_B=st.booleans(),
use_cache=st.booleans(),
cache_algorithm=st.sampled_from(CacheAlgorithm),
use_cpu=st.booleans()
if (gpu_available and not TEST_WITH_ROCM)
else st.just(False)
if (gpu_available and TEST_WITH_ROCM)
else st.just(True),
output_dtype=st.sampled_from([SparseType.FP32, SparseType.FP16]),
)
@settings(
verbosity=Verbosity.verbose,
max_examples=MAX_EXAMPLES_LONG_RUNNING,
deadline=None,
suppress_health_check=[HealthCheck.filter_too_much, HealthCheck.data_too_large],
)
def test_backward_adagrad_fp16_pmMEAN( # noqa C901
self,
T: int,
D: int,
B: int,
log_E: int,
L: int,
D_gradcheck: int,
weights_precision: SparseType,
stochastic_rounding: bool,
weighted: bool,
row_wise: bool,
mixed: bool,
mixed_B: bool,
use_cache: bool,
cache_algorithm: CacheAlgorithm,
use_cpu: bool,
output_dtype: SparseType,
) -> None:
# VBE is supported in rowwise_adagrad only
if not row_wise:
mixed_B = False
self.execute_backward_adagrad_(
T,
D,
B,
log_E,
L,
D_gradcheck,
weights_precision,
stochastic_rounding,
weighted,
row_wise,
mixed,
mixed_B,
use_cache,
cache_algorithm,
PoolingMode.MEAN,
use_cpu,
output_dtype,
)
@given(
T=st.integers(min_value=1, max_value=5),
D=st.integers(min_value=2, max_value=128),
B=st.integers(min_value=1, max_value=128),
log_E=st.integers(min_value=3, max_value=5),
L=st.integers(min_value=0, max_value=20),
D_gradcheck=st.integers(min_value=1, max_value=2),
weights_precision=st.just(SparseType.FP16),
stochastic_rounding=st.booleans(),
weighted=st.booleans(),
row_wise=st.booleans(),
mixed=st.booleans(),
use_cache=st.booleans(),
cache_algorithm=st.sampled_from(CacheAlgorithm),
use_cpu=st.booleans()
if (gpu_available and not TEST_WITH_ROCM)
else st.just(False)
if (gpu_available and TEST_WITH_ROCM)
else st.just(True),
output_dtype=st.sampled_from([SparseType.FP32, SparseType.FP16]),
)
@settings(
verbosity=Verbosity.verbose,
max_examples=MAX_EXAMPLES_LONG_RUNNING,
deadline=None,
suppress_health_check=[HealthCheck.filter_too_much, HealthCheck.data_too_large],
)
def test_backward_adagrad_fp16_pmNONE( # noqa C901
self,
T: int,
D: int,
B: int,
log_E: int,
L: int,
D_gradcheck: int,
weights_precision: SparseType,
stochastic_rounding: bool,
weighted: bool,
row_wise: bool,
mixed: bool,
use_cache: bool,
cache_algorithm: CacheAlgorithm,
use_cpu: bool,
output_dtype: SparseType,
) -> None:
self.execute_backward_adagrad_(
T,
D,
B,
log_E,
L,
D_gradcheck,
weights_precision,
stochastic_rounding,
weighted,
row_wise,
mixed,
False, # mixed_B
use_cache,
cache_algorithm,
PoolingMode.NONE,
use_cpu,
output_dtype,
)
@given(
T=st.integers(min_value=1, max_value=5),
D=st.integers(min_value=2, max_value=128),
B=st.integers(min_value=1, max_value=128),
log_E=st.integers(min_value=3, max_value=5),
L=st.integers(min_value=0, max_value=20),
D_gradcheck=st.integers(min_value=1, max_value=2),
weights_precision=st.just(SparseType.FP32),
stochastic_rounding=st.booleans(),
weighted=st.booleans(),
row_wise=st.booleans(),
mixed=st.booleans(),
mixed_B=st.booleans(),
use_cache=st.booleans(),
cache_algorithm=st.sampled_from(CacheAlgorithm),
use_cpu=st.booleans()
if (gpu_available and not TEST_WITH_ROCM)
else st.just(False)
if (gpu_available and TEST_WITH_ROCM)
else st.just(True),
output_dtype=st.sampled_from([SparseType.FP32, SparseType.FP16]),
)
@settings(
verbosity=Verbosity.verbose,
max_examples=MAX_EXAMPLES_LONG_RUNNING,
deadline=None,
suppress_health_check=[HealthCheck.filter_too_much, HealthCheck.data_too_large],
)
def test_backward_adagrad_fp32_pmSUM( # noqa C901
self,
T: int,
D: int,
B: int,
log_E: int,
L: int,
D_gradcheck: int,
weights_precision: SparseType,
stochastic_rounding: bool,
weighted: bool,
row_wise: bool,
mixed: bool,
mixed_B: bool,
use_cache: bool,
cache_algorithm: CacheAlgorithm,
use_cpu: bool,
output_dtype: SparseType,
) -> None:
# VBE is supported in rowwise_adagrad only
if not row_wise:
mixed_B = False
self.execute_backward_adagrad_(
T,
D,
B,
log_E,
L,
D_gradcheck,
weights_precision,
stochastic_rounding,
weighted,
row_wise,
mixed,
mixed_B,
use_cache,
cache_algorithm,
PoolingMode.SUM,
use_cpu,
output_dtype,
)
@given(
T=st.integers(min_value=1, max_value=5),
D=st.integers(min_value=2, max_value=128),
B=st.integers(min_value=1, max_value=128),
log_E=st.integers(min_value=3, max_value=5),
L=st.integers(min_value=0, max_value=20),
D_gradcheck=st.integers(min_value=1, max_value=2),
weights_precision=st.just(SparseType.FP32),
stochastic_rounding=st.booleans(),
weighted=st.booleans(),
row_wise=st.booleans(),
mixed=st.booleans(),
mixed_B=st.booleans(),
use_cache=st.booleans(),
cache_algorithm=st.sampled_from(CacheAlgorithm),
use_cpu=st.booleans()
if (gpu_available and not TEST_WITH_ROCM)
else st.just(False)
if (gpu_available and TEST_WITH_ROCM)
else st.just(True),
output_dtype=st.sampled_from([SparseType.FP32, SparseType.FP16]),
)
@settings(
verbosity=Verbosity.verbose,
max_examples=MAX_EXAMPLES_LONG_RUNNING,
deadline=None,
suppress_health_check=[HealthCheck.filter_too_much, HealthCheck.data_too_large],
)
def test_backward_adagrad_fp32_pmMEAN( # noqa C901
self,
T: int,
D: int,
B: int,
log_E: int,
L: int,
D_gradcheck: int,
weights_precision: SparseType,
stochastic_rounding: bool,
weighted: bool,
row_wise: bool,
mixed: bool,
mixed_B: bool,
use_cache: bool,
cache_algorithm: CacheAlgorithm,
use_cpu: bool,
output_dtype: SparseType,
) -> None:
# VBE is supported in rowwise_adagrad only
if not row_wise:
mixed_B = False
self.execute_backward_adagrad_(
T,
D,
B,
log_E,
L,
D_gradcheck,
weights_precision,
stochastic_rounding,
weighted,
row_wise,
mixed,
mixed_B,
use_cache,
cache_algorithm,
PoolingMode.MEAN,
use_cpu,
output_dtype,
)
@given(
T=st.integers(min_value=1, max_value=5),
D=st.integers(min_value=2, max_value=128),
B=st.integers(min_value=1, max_value=128),
log_E=st.integers(min_value=3, max_value=5),
L=st.integers(min_value=0, max_value=20),
D_gradcheck=st.integers(min_value=1, max_value=2),
weights_precision=st.just(SparseType.FP32),
stochastic_rounding=st.booleans(),
weighted=st.booleans(),
row_wise=st.booleans(),
mixed=st.booleans(),
use_cache=st.booleans(),
cache_algorithm=st.sampled_from(CacheAlgorithm),
use_cpu=st.booleans()
if (gpu_available and not TEST_WITH_ROCM)
else st.just(False)
if (gpu_available and TEST_WITH_ROCM)
else st.just(True),
output_dtype=st.sampled_from([SparseType.FP32, SparseType.FP16]),
)
@settings(
verbosity=Verbosity.verbose,
max_examples=MAX_EXAMPLES_LONG_RUNNING,
deadline=None,
suppress_health_check=[HealthCheck.filter_too_much, HealthCheck.data_too_large],
)
def test_backward_adagrad_fp32_pmNONE( # noqa C901
self,
T: int,
D: int,
B: int,
log_E: int,
L: int,
D_gradcheck: int,
weights_precision: SparseType,
stochastic_rounding: bool,
weighted: bool,
row_wise: bool,
mixed: bool,
use_cache: bool,
cache_algorithm: CacheAlgorithm,
use_cpu: bool,
output_dtype: SparseType,
) -> None:
self.execute_backward_adagrad_(
T,
D,
B,
log_E,
L,
D_gradcheck,
weights_precision,
stochastic_rounding,
weighted,
row_wise,
mixed,
False, # mixed_B
use_cache,
cache_algorithm,
PoolingMode.NONE,
use_cpu,
output_dtype,
)
def _generate_cache_tbes(
self,
T: int,
D: int,
B: int,
log_E: int,
L: int,
mixed: bool,
cache_algorithm: CacheAlgorithm = CacheAlgorithm.LRU,
prefetch_pipeline: bool = False,
use_int_weight: bool = False,
) -> Tuple[
SplitTableBatchedEmbeddingBagsCodegen,
SplitTableBatchedEmbeddingBagsCodegen,
int,
int,
]:
lr = 1.0 if use_int_weight else 0.02
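# With integer weights (see _test_cache_prefetch_pipeline) an integer learning
# rate keeps the updates exactly representable, which makes the comparison
# against the reference TBE stable.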
E = int(10**log_E)
D = D * 4
if not mixed:
Ds = [D] * T
Es = [E] * T
else:
Ds = [
round_up(np.random.randint(low=int(0.25 * D), high=int(1.0 * D)), 4)
for _ in range(T)
]
Es = [
np.random.randint(low=int(0.5 * E), high=int(2.0 * E)) for _ in range(T)
]
managed = [EmbeddingLocation.MANAGED_CACHING] * T
if mixed:
average_D = sum(Ds) // T
for t, d in enumerate(Ds):
managed[t] = EmbeddingLocation.DEVICE if d < average_D else managed[t]
cc_ref = SplitTableBatchedEmbeddingBagsCodegen(
[
(
E,
D,
EmbeddingLocation.DEVICE,
ComputeDevice.CUDA,
)
for (E, D) in zip(Es, Ds)
],
stochastic_rounding=False,
prefetch_pipeline=False,
learning_rate=lr,
)
cc = SplitTableBatchedEmbeddingBagsCodegen(
[(E, D, M, ComputeDevice.CUDA) for (E, D, M) in zip(Es, Ds, managed)],
cache_algorithm=cache_algorithm,
stochastic_rounding=False,
prefetch_pipeline=prefetch_pipeline,
learning_rate=lr,
)
if use_int_weight:
min_val = -20
max_val = +20
for param in cc_ref.split_embedding_weights():
p = torch.randint(
int(min_val),
int(max_val) + 1,
size=param.shape,
device=param.device,
)
param.data.copy_(p)
for t in range(T):
self.assertEqual(
cc.split_embedding_weights()[t].size(),
cc_ref.split_embedding_weights()[t].size(),
)
cc.split_embedding_weights()[t].data.copy_(
cc_ref.split_embedding_weights()[t]
)
return (cc, cc_ref, min(Es), sum(Ds))
@unittest.skipIf(*gpu_unavailable)
@given(
T=st.integers(min_value=1, max_value=5),
D=st.integers(min_value=2, max_value=256),
B=st.integers(min_value=1, max_value=128),
log_E=st.integers(min_value=3, max_value=5),
L=st.integers(min_value=1, max_value=20),
mixed=st.booleans(),
cache_algorithm=st.sampled_from(CacheAlgorithm),
)
@settings(verbosity=Verbosity.verbose, max_examples=MAX_EXAMPLES, deadline=None)
def test_cache_pipeline(
self,
T: int,
D: int,
B: int,
log_E: int,
L: int,
mixed: bool,
cache_algorithm: CacheAlgorithm,
) -> None:
cc, cc_ref, min_Es, sum_Ds = self._generate_cache_tbes(
T, D, B, log_E, L, mixed, cache_algorithm
)
iters = 3
requests = generate_requests(iters, B, T, L, min_Es, reuse=0.1)
grad_output = torch.randn(B, sum_Ds).cuda()
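# Feed identical requests to the cached TBE (cc) and the all-device reference
# (cc_ref): forward outputs must match every iteration and the flushed weights
# must match at the end.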
for indices, offsets, _ in requests:
output = cc(indices, offsets)
output_ref = cc_ref(indices, offsets)
torch.testing.assert_close(output, output_ref)
output.backward(grad_output)
output_ref.backward(grad_output)
cc.flush()
for t in range(T):
torch.testing.assert_close(
cc.split_embedding_weights()[t], cc_ref.split_embedding_weights()[t]
)
def _test_cache_prefetch_pipeline( # noqa C901
self,
T: int,
D: int,
B: int,
log_E: int,
L: int,
mixed: bool,
prefetch_location: str,
prefetch_stream: Optional[torch.cuda.Stream],
) -> None:
"""
Test the cache prefetch pipeline with prefetch_pipeline=True.
prefetch_location can be "before_fwd" or "between_fwd_bwd":
TBE prefetch(batch_{i+1}) is called either before forward(batch_i) or
between forward(batch_i) and backward(batch_i), respectively.
If prefetch_stream is not None, the TBE prefetch function uses that stream.
In addition, the TBE weights, the learning rate, and the gradients are all
initialized as integer values so that the test is numerically stable.
"""
assert prefetch_location in ["before_fwd", "between_fwd_bwd"]
cc, cc_ref, min_Es, sum_Ds = self._generate_cache_tbes(
T, D, B, log_E, L, mixed, CacheAlgorithm.LRU, True, True
)
iters = 5
requests = generate_requests(iters, B, T, L, min_Es, reuse=0.1)
grad_output = (
torch.randint(
low=-10,
high=10,
size=(B, sum_Ds),
)
.float()
.cuda()
)
torch.cuda.synchronize() # make sure TBEs and inputs are ready
self.assertTrue(torch.all(cc.lxu_cache_locking_counter == 0))
cur_stream: torch.cuda.Stream = torch.cuda.current_stream()
req_iter = iter(requests)
batch_i = next(req_iter)
batch_ip1 = None
output, output_ref = None, None
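# Helper: issue cc.prefetch for a batch. With a dedicated prefetch_stream, the
# prefetch runs on that stream and is handed cur_stream so it can synchronize
# with the compute stream; otherwise it runs inline on the current stream.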
def _prefetch(
cc: SplitTableBatchedEmbeddingBagsCodegen,
batch: Optional[Tuple[torch.Tensor, torch.Tensor, Optional[torch.Tensor]]],
) -> None:
if not batch:
return
context_stream = prefetch_stream if prefetch_stream else cur_stream
stream = cur_stream if prefetch_stream else None
indices, offsets, _ = batch
with torch.cuda.stream(context_stream):
cc.prefetch(indices, offsets, stream)
_prefetch(cc, batch_i)
while batch_i:
indices, offsets, _ = batch_i
batch_ip1 = next(req_iter, None)
if prefetch_stream:
cur_stream.wait_stream(prefetch_stream)
if prefetch_location == "before_fwd":
_prefetch(cc, batch_ip1)
output = cc(indices, offsets)
if prefetch_location == "between_fwd_bwd":
_prefetch(cc, batch_ip1)
output.backward(grad_output)
batch_i = batch_ip1
batch_ip1 = None
cc.flush()
for indices, offsets, _ in requests:
output_ref = cc_ref(indices, offsets)
output_ref.backward(grad_output)
for t in range(T):
torch.testing.assert_close(
cc.split_embedding_weights()[t], cc_ref.split_embedding_weights()[t]
)
torch.testing.assert_close(output, output_ref)
self.assertTrue(torch.all(cc.lxu_cache_locking_counter == 0))
@unittest.skipIf(*gpu_unavailable)
@given(
T=st.integers(min_value=1, max_value=5),
D=st.integers(min_value=2, max_value=256),
B=st.integers(min_value=1, max_value=128),
log_E=st.integers(min_value=3, max_value=5),
L=st.integers(min_value=1, max_value=20),
mixed=st.booleans(),
prefetch_location=st.sampled_from(["before_fwd", "between_fwd_bwd"]),
)
@settings(verbosity=Verbosity.verbose, max_examples=MAX_EXAMPLES, deadline=None)
def test_cache_prefetch_pipeline(
self,
T: int,
D: int,
B: int,
log_E: int,
L: int,
mixed: bool,
prefetch_location: str,
) -> None:
self._test_cache_prefetch_pipeline(
T,
D,
B,
log_E,
L,
mixed,
prefetch_location,
prefetch_stream=None,
)
@unittest.skipIf(*gpu_unavailable)
@given(
T=st.integers(min_value=1, max_value=5),
D=st.integers(min_value=2, max_value=256),
B=st.integers(min_value=1, max_value=128),
log_E=st.integers(min_value=3, max_value=5),
L=st.integers(min_value=1, max_value=20),
mixed=st.booleans(),
)
@settings(verbosity=Verbosity.verbose, max_examples=MAX_EXAMPLES, deadline=None)
def test_cache_prefetch_pipeline_stream_1(
self,
T: int,
D: int,
B: int,
log_E: int,
L: int,
mixed: bool,
) -> None:
self._test_cache_prefetch_pipeline(
T,
D,
B,
log_E,
L,
mixed,
prefetch_location="before_fwd",
prefetch_stream=torch.cuda.Stream(),
)
@unittest.skipIf(*gpu_unavailable)
@given(
T=st.integers(min_value=1, max_value=5),
D=st.integers(min_value=2, max_value=256),
B=st.integers(min_value=1, max_value=128),
log_E=st.integers(min_value=3, max_value=5),
L=st.integers(min_value=1, max_value=20),
mixed=st.booleans(),
)
@settings(verbosity=Verbosity.verbose, max_examples=MAX_EXAMPLES, deadline=None)
def test_cache_prefetch_pipeline_stream_2(
self,
T: int,
D: int,
B: int,
log_E: int,
L: int,
mixed: bool,
) -> None:
self._test_cache_prefetch_pipeline(
T,
D,
B,
log_E,
L,
mixed,
prefetch_location="between_fwd_bwd",
prefetch_stream=torch.cuda.Stream(),
)
def execute_backward_optimizers_( # noqa C901
self,
T: int,
D: int,
B: int,
log_E: int,
L: int,
weighted: bool,
mixed: bool,
mixed_B: bool,
optimizer: OptimType,
long_segments: bool,
pooling_mode: PoolingMode,
use_cpu: bool,
weight_decay_mode: WeightDecayMode = WeightDecayMode.L2,
uvm_non_rowwise_momentum: bool = False,
) -> None:
# NOTE: limit (T * B * L * D) to avoid timeout for CPU version!
assume(not use_cpu or T * B * L * D <= 2048)
assume(
not use_cpu
or optimizer
in [
OptimType.EXACT_ADAGRAD,
OptimType.EXACT_ROWWISE_ADAGRAD,
OptimType.EXACT_SGD,
]
)
assume(pooling_mode == PoolingMode.SUM or not weighted)
# No-bag (PoolingMode.NONE) ops only work on GPUs and support neither mixed dims nor per-sample weights
assume(not use_cpu or pooling_mode != PoolingMode.NONE)
assume(not mixed or pooling_mode != PoolingMode.NONE)
assume(not weighted or pooling_mode != PoolingMode.NONE)
assume(not mixed_B or (not use_cpu and pooling_mode != PoolingMode.NONE))
emb_op = SplitTableBatchedEmbeddingBagsCodegen
if pooling_mode == PoolingMode.SUM:
mode = "sum"
do_pooling = True
elif pooling_mode == PoolingMode.MEAN:
mode = "mean"
do_pooling = True
elif pooling_mode == PoolingMode.NONE:
mode = "sum"
do_pooling = False
else:
# This proves that we have exhaustively checked all PoolingModes
raise RuntimeError("Unknown PoolingMode!")
E = int(10**log_E)
if use_cpu:
D = (D + 15) // 16 * 4
else:
D = D * 4
if not mixed:
Ds = [D] * T
Es = [E] * T
else:
Ds = [
round_up(np.random.randint(low=int(0.25 * D), high=int(1.0 * D)), 4)
for _ in range(T)
]
Es = [
np.random.randint(low=int(0.5 * E), high=int(2.0 * E)) for _ in range(T)
]
if not mixed_B:
Bs = [B] * T
Bs_rank_feature = [[0]]
else:
Bs_rank_feature, Bs = gen_mixed_B_batch_sizes(B, T)
compute_device = ComputeDevice.CUDA
if use_cpu:
managed = [EmbeddingLocation.HOST] * T
compute_device = ComputeDevice.CPU
elif TEST_WITH_ROCM:
# ROCm managed memory allocation is under development
managed = [EmbeddingLocation.DEVICE] * T
else:
managed = [
np.random.choice(
[
EmbeddingLocation.DEVICE,
EmbeddingLocation.MANAGED,
]
)
for _ in range(T)
]
if do_pooling:
bs = [
to_device(torch.nn.EmbeddingBag(E, D, mode=mode, sparse=True), use_cpu)
for (E, D) in zip(Es, Ds)
]
else:
bs = [
to_device(torch.nn.Embedding(E, D, sparse=True), use_cpu)
for (E, D) in zip(Es, Ds)
]
xs = [
to_device(
torch.from_numpy(
np.random.choice(range(e), size=(b, L), replace=True).astype(
np.int64
)
),
use_cpu,
)
for (e, b) in zip(Es, Bs)
]
if long_segments and L > 0:
for x, e in zip(xs, Es):
x[:, 0] = np.random.randint(low=0, high=e)
xws = [to_device(torch.randn(size=(b, L)), use_cpu) for b in Bs]
xws_acc_type = copy.deepcopy(xws)
fs = (
[
b_indices(b, x, use_cpu=use_cpu, do_pooling=do_pooling)
for (b, x) in zip(bs, xs)
]
if not weighted
else [
b_indices(
b,
x,
per_sample_weights=xw.view(-1),
use_cpu=use_cpu,
do_pooling=do_pooling,
)
for (b, x, xw) in zip(bs, xs, xws)
]
)
gos = [torch.randn_like(f) for f in fs]
[f.backward(go) for (f, go) in zip(fs, gos)]
# Hyperparameters for the optimizer under test
optimizer_kwargs = {"learning_rate": 0.5}
(lr, eps, beta1, beta2, weight_decay, momentum, eta) = (
0.5,
1e-4,
0.9,
0.99,
0.01,
0.9,
0.01,
)
counter_based_regularization: CounterBasedRegularizationDefinition
if optimizer == OptimType.EXACT_ADAGRAD:
optimizer_kwargs["eps"] = eps
if optimizer == OptimType.EXACT_ROWWISE_ADAGRAD:
optimizer_kwargs["eps"] = eps
optimizer_kwargs["weight_decay"] = weight_decay
optimizer_kwargs["weight_decay_mode"] = weight_decay_mode
if weight_decay_mode == WeightDecayMode.COUNTER:
counter_based_regularization = CounterBasedRegularizationDefinition(
counter_weight_decay_mode=CounterWeightDecayMode.DECOUPLE,
counter_halflife=20000,
adjustment_iter=24000,
adjustment_ub=0.1,
learning_rate_mode=LearningRateMode.TAIL_ID_LR_DECREASE,
grad_sum_decay=GradSumDecay.NO_DECAY,
tail_id_threshold=TailIdThreshold(val=1000, is_ratio=False),
)
optimizer_kwargs[
"counter_based_regularization"
# pyre-fixme[6]: Expected `float` for 2nd param but got `CounterBasedRegularizationDefinition`.
] = counter_based_regularization
if optimizer == OptimType.EXACT_ROWWISE_WEIGHTED_ADAGRAD:
optimizer_kwargs["eps"] = eps
optimizer_kwargs["weight_decay"] = weight_decay
if optimizer in (OptimType.PARTIAL_ROWWISE_ADAM, OptimType.ADAM):
optimizer_kwargs["eps"] = eps
optimizer_kwargs["beta1"] = beta1
optimizer_kwargs["beta2"] = beta2
optimizer_kwargs["weight_decay"] = weight_decay
if optimizer in (OptimType.PARTIAL_ROWWISE_LAMB, OptimType.LAMB):
optimizer_kwargs["eps"] = eps
optimizer_kwargs["beta1"] = beta1
optimizer_kwargs["beta2"] = beta2
optimizer_kwargs["weight_decay"] = weight_decay
if optimizer == OptimType.LARS_SGD:
optimizer_kwargs["weight_decay"] = weight_decay
optimizer_kwargs["momentum"] = momentum
optimizer_kwargs["eta"] = eta
cc = emb_op(
embedding_specs=[
(E, D, M, compute_device) for (E, D, M) in zip(Es, Ds, managed)
],
optimizer=optimizer,
pooling_mode=pooling_mode,
uvm_non_rowwise_momentum=uvm_non_rowwise_momentum,
# pyre-fixme[6]: Expected `CacheAlgorithm` for 5th param but got `float`.
**optimizer_kwargs,
)
for t in range(T):
cc.split_embedding_weights()[t].data.copy_(bs[t].weight)
x = torch.cat([x.contiguous().flatten() for x in xs], dim=0)
xw = torch.cat([xw.contiguous().flatten() for xw in xws_acc_type], dim=0)
batch_size_per_feature_per_rank = Bs_rank_feature if mixed_B else None
(indices, offsets) = get_table_batched_offsets_from_dense(
x, L, sum(Bs), use_cpu=use_cpu
)
fc2 = (
cc(
indices,
offsets,
batch_size_per_feature_per_rank=batch_size_per_feature_per_rank,
)
if not weighted
else cc(
indices,
offsets,
to_device(xw.contiguous().view(-1), use_cpu),
batch_size_per_feature_per_rank=batch_size_per_feature_per_rank,
)
)
if do_pooling:
if mixed_B:
goc = format_ref_tensors_in_mixed_B_layout(gos, Bs_rank_feature)
else:
goc = torch.cat([go.view(B, -1) for go in gos], dim=1)
else:
goc = torch.cat(gos, dim=0)
fc2.backward(goc)
cc.flush()
split_optimizer_states = cc.split_optimizer_states()
self.assertEqual(len(split_optimizer_states), T)
split_weights = cc.split_embedding_weights()
get_optimizer_states = None
try:
get_optimizer_states = cc.get_optimizer_state()
assert len(get_optimizer_states) == T
except NotImplementedError:
assert optimizer not in (
OptimType.ADAM,
OptimType.PARTIAL_ROWWISE_ADAM,
OptimType.LAMB,
OptimType.PARTIAL_ROWWISE_LAMB,
OptimType.EXACT_SGD,
OptimType.EXACT_ROWWISE_ADAGRAD,
OptimType.EXACT_ROWWISE_WEIGHTED_ADAGRAD,
OptimType.EXACT_ADAGRAD,
)
if optimizer in (OptimType.EXACT_ROWWISE_ADAGRAD, OptimType.EXACT_ADAGRAD):
rowwise = optimizer == OptimType.EXACT_ROWWISE_ADAGRAD
for t in range(T):
row_counter: Optional[torch.Tensor] = None
freq: Optional[torch.Tensor] = None
iter_: int = -1
if rowwise and weight_decay_mode == WeightDecayMode.COUNTER:
(m1, prev_iter, row_counter) = split_optimizer_states[t]
else:
(m1,) = split_optimizer_states[t]
# to_dense on GPU is non-deterministic due to the atomics used in
# coalescing and floating-point non-associativity.
# pyre-fixme[16]: `Optional` has no attribute `cpu`.
dense_cpu_grad = bs[t].weight.grad.cpu().to_dense()
if rowwise and not use_cpu:
# We need to skip when using cpu because use_fbgemm (https://fburl.com/code/12131iub)
# is true and the template code (https://fburl.com/code/1kctlup3) is not executed.
if weight_decay_mode == WeightDecayMode.L2:
dense_cpu_grad += weight_decay * bs[t].weight.cpu()
elif weight_decay_mode == WeightDecayMode.COUNTER:
iter_ = int(cc.iter.item())
(
dense_cpu_grad,
row_counter,
freq,
) = self.get_grad_from_counter_adagrad(
dense_cpu_grad,
bs[t].weight.cpu(),
counter_based_regularization,
row_counter.cpu(),
prev_iter.cpu(),
iter_,
weight_decay,
)
m1_ref = (
dense_cpu_grad.pow(2)
if not rowwise
else dense_cpu_grad.pow(2).mean(dim=1)
)
torch.testing.assert_close(
m1.float().index_select(dim=0, index=xs[t].view(-1)).cpu(),
m1_ref.float().index_select(dim=0, index=xs[t].view(-1).cpu()),
atol=1.0e-4,
rtol=1.0e-4,
)
weights_new = split_weights[t]
denom = (
torch.sqrt(
m1_ref if not rowwise else m1_ref.view(m1_ref.numel(), 1)
)
+ eps
)
if rowwise and not use_cpu:
if weight_decay_mode == WeightDecayMode.DECOUPLE:
weights_ref = bs[t].weight.cpu() - lr * (
dense_cpu_grad / denom + weight_decay * bs[t].weight.cpu()
)
elif weight_decay_mode == WeightDecayMode.L2:
# pyre-fixme[58]: `/` is not supported for operand types `float`
# and `Tensor`.
weights_ref = bs[t].weight.cpu() - lr * dense_cpu_grad / denom
elif weight_decay_mode == WeightDecayMode.COUNTER:
max_counter = cc.max_counter.item()
weights_ref = self.get_wts_from_counter_adagrad(
dense_cpu_grad,
bs[t].weight.cpu(),
denom,
counter_based_regularization,
row_counter,
# pyre-fixme[6]: Expected `Tensor` for 6th param but got `Optional[Tensor]`
freq,
max_counter,
iter_,
eps,
lr,
weight_decay,
)
else:
# pyre-fixme[58]: `/` is not supported for operand types `float`
# and `Tensor`.
weights_ref = bs[t].weight.cpu() - lr * dense_cpu_grad / denom
# TODO: why is tolerance off here?
torch.testing.assert_close(
weights_new.index_select(dim=0, index=xs[t].view(-1)).cpu(),
weights_ref.index_select(dim=0, index=xs[t].view(-1).cpu()),
atol=1.0e-2,
rtol=1.0e-2,
)
optimizer_states_dict = get_optimizer_states[t]
expected_keys = {"sum"}
if rowwise and weight_decay_mode == WeightDecayMode.COUNTER:
expected_keys.update(["prev_iter", "row_counter"])
assert set(optimizer_states_dict.keys()) == expected_keys
if optimizer == OptimType.EXACT_ROWWISE_WEIGHTED_ADAGRAD:
for t in range(T):
(m1,) = split_optimizer_states[t]
# to_dense on GPU is non-deterministic due to the atomics used in
# coalescing and floating-point non-associativity.
dense_cpu_grad = bs[t].weight.grad.cpu().to_dense()
dense_cpu_grad += weight_decay * bs[t].weight.cpu()
iter_ = cc.iter.item()
lambda_ = (iter_ + 1) ** 0.5
m1_ref = dense_cpu_grad.pow(2).mean(dim=1)
m1_ref *= lambda_
torch.testing.assert_close(
m1.float().index_select(dim=0, index=xs[t].view(-1)).cpu(),
m1_ref.float().index_select(dim=0, index=xs[t].view(-1).cpu()),
atol=1.0e-4,
rtol=1.0e-4,
)
weights_new = split_weights[t]
weights_ref = bs[t].weight.cpu() - lr * lambda_ * dense_cpu_grad / (
# pyre-fixme[58]: `/` is not supported for operand types `float`
# and `Tensor`.
torch.pow(m1_ref.view(m1_ref.numel(), 1), 1.0 / 3)
+ eps
)
torch.testing.assert_close(
weights_new.index_select(dim=0, index=xs[t].view(-1)).cpu(),
weights_ref.index_select(dim=0, index=xs[t].view(-1).cpu()),
atol=1.0e-4,
rtol=1.0e-4,
)
if get_optimizer_states is not None:
optimizer_states_dict = get_optimizer_states[t]
assert set(optimizer_states_dict.keys()) == {"sum"}
if optimizer in (OptimType.PARTIAL_ROWWISE_ADAM, OptimType.ADAM):
rowwise = optimizer == OptimType.PARTIAL_ROWWISE_ADAM
for t in range(T):
(m1, m2) = split_optimizer_states[t]
dense_cpu_grad = bs[t].weight.grad.cpu().to_dense()
m2_ref = (
dense_cpu_grad.pow(2)
if not rowwise
else dense_cpu_grad.pow(2).mean(dim=1)
) * (1.0 - beta2)
torch.testing.assert_close(m2.cpu(), m2_ref, atol=1.0e-4, rtol=1.0e-4)
m1_ref = dense_cpu_grad * (1.0 - beta1)
torch.testing.assert_close(m1.cpu(), m1_ref, atol=1.0e-4, rtol=1.0e-4)
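# Reference Adam step with bias correction: m_hat = m1 / (1 - beta1^t) and
# v_hat = m2 / (1 - beta2^t); PARTIAL_ROWWISE_ADAM keeps the second moment
# per row, hence the row-wise view below.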
iter_ = cc.iter.item()
v_hat_t = m2_ref / (1 - beta2**iter_)
v_hat_t = v_hat_t if not rowwise else v_hat_t.view(v_hat_t.numel(), 1)
m_hat_t = m1_ref / (1 - beta1**iter_)
weights_new = split_weights[t]
weights_ref = (
torch.addcdiv(
bs[t].weight.cpu(),
value=-lr,
tensor1=m_hat_t,
tensor2=v_hat_t.sqrt_().add_(eps),
)
- lr * weight_decay * bs[t].weight.cpu()
)
torch.testing.assert_close(
weights_new.index_select(dim=0, index=xs[t].view(-1)).cpu(),
weights_ref.index_select(dim=0, index=xs[t].view(-1).cpu()),
atol=1.0e-3,
rtol=1.0e-3,
)
if get_optimizer_states is not None:
optimizer_states_dict = get_optimizer_states[t]
assert set(optimizer_states_dict.keys()) == {
"exp_avg",
"exp_avg_sq",
}
if optimizer in (OptimType.PARTIAL_ROWWISE_LAMB, OptimType.LAMB):
rowwise = optimizer == OptimType.PARTIAL_ROWWISE_LAMB
for t in range(T):
(m1, m2) = split_optimizer_states[t]
dense_cpu_grad = bs[t].weight.grad.cpu().to_dense()
m2_ref = (
dense_cpu_grad.pow(2)
if not rowwise
else dense_cpu_grad.pow(2).mean(dim=1)
) * (1.0 - beta2)
torch.testing.assert_close(m2.cpu(), m2_ref, atol=1.0e-4, rtol=1.0e-4)
m1_ref = dense_cpu_grad * (1.0 - beta1)
torch.testing.assert_close(m1.cpu(), m1_ref, atol=1.0e-4, rtol=1.0e-4)
iter_ = cc.iter.item()
v_hat_t = m2_ref / (1 - beta2**iter_)
v_hat_t = v_hat_t if not rowwise else v_hat_t.view(v_hat_t.numel(), 1)
m_hat_t = m1_ref / (1 - beta1**iter_)
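# LAMB trust ratio: the update r = m_hat / (sqrt(v_hat) + eps) + weight_decay * w
# is scaled per row by ||w|| / ||r|| before being applied.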
rtw = (m_hat_t / (torch.sqrt(v_hat_t) + eps)) + weight_decay * bs[
t
].weight.cpu()
true_ratio = torch.linalg.norm(bs[t].weight, dim=1, ord=2).view(
m1.shape[0], 1
).cpu() / torch.linalg.norm(rtw, dim=1, ord=2).view(m1.shape[0], 1)
weights_new = split_weights[t]
weights_ref = bs[t].weight.cpu() - lr * true_ratio * rtw
torch.testing.assert_close(
weights_new.index_select(dim=0, index=xs[t].view(-1)).cpu(),
weights_ref.index_select(dim=0, index=xs[t].view(-1).cpu()),
atol=1.0e-3,
rtol=1.0e-3,
)
if get_optimizer_states is not None:
optimizer_states_dict = get_optimizer_states[t]
assert set(optimizer_states_dict.keys()) == {
"exp_avg",
"exp_avg_sq",
}
if optimizer == OptimType.LARS_SGD:
for t in range(T):
(m1,) = split_optimizer_states[t]
weight_norm = (
torch.linalg.norm(bs[t].weight, dim=1, ord=2)
.view(m1.shape[0], 1)
.cpu()
)
dense_cpu_grad = bs[t].weight.grad.cpu().to_dense()
grad_norm = torch.linalg.norm(dense_cpu_grad, dim=1, ord=2).view(
m1.shape[0], 1
)
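# LARS: per-row adaptive rate lr * eta * ||w|| / (||g|| + weight_decay * ||w||);
# m1 holds the resulting scaled update and the weights subtract it directly.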
adjusted_lr = (
lr * eta * weight_norm / (grad_norm + weight_decay * weight_norm)
)
m1_ref = adjusted_lr * (
dense_cpu_grad + weight_decay * bs[t].weight.cpu()
)
torch.testing.assert_close(
m1.index_select(dim=0, index=xs[t].view(-1)).cpu(),
# pyre-fixme[16]: `float` has no attribute `index_select`.
m1_ref.index_select(dim=0, index=xs[t].view(-1).cpu()),
atol=1.0e-4,
rtol=1.0e-4,
)
weights_new = split_weights[t]
weights_ref = bs[t].weight.cpu() - m1_ref
torch.testing.assert_close(
weights_new.index_select(dim=0, index=xs[t].view(-1)).cpu(),
weights_ref.index_select(dim=0, index=xs[t].view(-1).cpu()),
atol=1.0e-4,
rtol=1.0e-4,
)
def get_grad_from_counter_adagrad(
self,
dense_cpu_grad: torch.Tensor,
weights: torch.Tensor,
counter_based_regularization: CounterBasedRegularizationDefinition,
row_counter: torch.Tensor,
prev_iter: torch.Tensor,
iter_: int,
weight_decay: float,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
row_counter = row_counter.view(row_counter.numel(), 1)
prev_iter = prev_iter.view(prev_iter.numel(), 1)
freq = torch.ones_like(row_counter)
counter_weight_decay_mode = (
counter_based_regularization.counter_weight_decay_mode
)
counter_halflife = counter_based_regularization.counter_halflife
l2_wd = 1.0 if counter_weight_decay_mode == CounterWeightDecayMode.L2 else 0.0
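# Decay the per-row counter with the configured halflife:
# row_counter <- 1 + exp(-iter_delta * ln(2) / halflife) * row_counter, and
# freq = halflife / row_counter; L2-style weight decay is then scaled by freq.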
if counter_halflife > 0:
counter_log_rho = math.log(2.0) / counter_halflife
# if id occurs multiple times in a batch, iter_delta=1
iter_delta = torch.where(prev_iter == 0.0, 1.0, iter_ * 1.0 - prev_iter)
prev_iter = iter_ * torch.ones_like(prev_iter)
row_counter = 1.0 + torch.exp(-iter_delta * counter_log_rho) * row_counter
freq = torch.tensor([counter_halflife]) / row_counter
dense_cpu_grad += l2_wd * freq * weight_decay * weights
return dense_cpu_grad, row_counter, freq
def get_wts_from_counter_adagrad(
self,
dense_cpu_grad: torch.Tensor,
weights: torch.Tensor,
denom: torch.Tensor,
counter_based_regularization: CounterBasedRegularizationDefinition,
row_counter: torch.Tensor,
freq: torch.Tensor,
max_counter: float,
iter_: int,
eps: float,
learning_rate: float,
weight_decay: float,
) -> torch.Tensor:
counter_weight_decay_mode = (
counter_based_regularization.counter_weight_decay_mode
)
counter_halflife = counter_based_regularization.counter_halflife
tail_id_threshold_val = counter_based_regularization.tail_id_threshold.val
if counter_based_regularization.tail_id_threshold.is_ratio:
tail_id_threshold_val = math.floor(tail_id_threshold_val * max_counter)
learning_rate_mode = counter_based_regularization.learning_rate_mode
adjustment_iter = counter_based_regularization.adjustment_iter
adjustment_ub = counter_based_regularization.adjustment_ub
multiplier = torch.tensor([learning_rate]) / denom
adjusted_multiplier = multiplier
exp_reg_correction = torch.ones_like(row_counter)
if counter_halflife > 0:
if adjustment_iter <= 0 or (
adjustment_iter > 0 and iter_ > adjustment_iter
):
if learning_rate_mode == LearningRateMode.TAIL_ID_LR_INCREASE:
adjusted_multiplier = torch.where(
row_counter > tail_id_threshold_val,
multiplier
* torch.maximum(
torch.minimum(
torch.pow(
torch.tensor([max_counter]) / (row_counter + 1.0),
adjustment_ub,
),
torch.Tensor([10.0]),
),
torch.Tensor([1.0]),
),
multiplier,
)
elif learning_rate_mode == LearningRateMode.TAIL_ID_LR_DECREASE:
adjusted_multiplier = torch.where(
row_counter > tail_id_threshold_val,
multiplier
* torch.minimum(
torch.maximum(
torch.pow(
(row_counter + 1.0) / max_counter,
adjustment_ub,
),
torch.Tensor([0.1]),
),
torch.Tensor([1.0]),
),
multiplier,
)
elif learning_rate_mode == LearningRateMode.COUNTER_SGD:
adjusted_multiplier = torch.where(
row_counter > tail_id_threshold_val,
torch.Tensor([learning_rate])
/ (torch.sqrt(adjustment_ub * row_counter) + eps),
multiplier,
)
if counter_weight_decay_mode == CounterWeightDecayMode.DECOUPLE:
exp_reg_correction = 1.0 - freq * weight_decay * learning_rate
elif counter_weight_decay_mode == CounterWeightDecayMode.L2:
exp_reg_correction = 1.0 - freq * weight_decay * multiplier
weights = exp_reg_correction * weights - adjusted_multiplier * dense_cpu_grad
return weights
@given(
T=st.integers(min_value=1, max_value=5),
D=st.integers(min_value=2, max_value=256),
B=st.integers(min_value=1, max_value=128),
log_E=st.integers(min_value=3, max_value=5),
L=st.integers(min_value=0, max_value=20),
weighted=st.booleans(),
mixed=st.booleans(),
optimizer=st.sampled_from(
[
OptimType.ADAM,
OptimType.PARTIAL_ROWWISE_ADAM,
]
),
long_segments=st.booleans(),
pooling_mode=st.sampled_from(
[
PoolingMode.SUM,
PoolingMode.MEAN,
PoolingMode.NONE,
]
),
use_cpu=st.booleans()
if (gpu_available and not TEST_WITH_ROCM)
else st.just(False)
if (gpu_available and TEST_WITH_ROCM)
else st.just(True),
uvm_non_rowwise_momentum=st.booleans(),
)
@settings(
verbosity=Verbosity.verbose,
max_examples=MAX_EXAMPLES_LONG_RUNNING,
deadline=None,
suppress_health_check=[HealthCheck.filter_too_much, HealthCheck.data_too_large],
)
@unittest.skipIf(*gpu_unavailable)
def test_backward_optimizers_adam( # noqa C901
self,
T: int,
D: int,
B: int,
log_E: int,
L: int,
weighted: bool,
mixed: bool,
optimizer: OptimType,
long_segments: bool,
pooling_mode: PoolingMode,
use_cpu: bool,
uvm_non_rowwise_momentum: bool,
) -> None:
self.execute_backward_optimizers_(
T,
D,
B,
log_E,
L,
weighted,
mixed,
False, # mixed_B
optimizer,
long_segments,
pooling_mode,
use_cpu,
uvm_non_rowwise_momentum=uvm_non_rowwise_momentum,
)
@given(
T=st.integers(min_value=1, max_value=5),
D=st.integers(min_value=2, max_value=256),
B=st.integers(min_value=1, max_value=128),
log_E=st.integers(min_value=3, max_value=5),
L=st.integers(min_value=2, max_value=20),
weighted=st.booleans(),
mixed=st.booleans(),
mixed_B=st.booleans(),
optimizer=st.sampled_from(
[
OptimType.EXACT_ADAGRAD,
OptimType.EXACT_ROWWISE_ADAGRAD,
OptimType.EXACT_ROWWISE_WEIGHTED_ADAGRAD,
]
),
long_segments=st.booleans(),
pooling_mode=st.sampled_from(
[
PoolingMode.SUM,
PoolingMode.MEAN,
PoolingMode.NONE,
]
),
use_cpu=st.booleans()
if (gpu_available and not TEST_WITH_ROCM)
else st.just(False)
if (gpu_available and TEST_WITH_ROCM)
else st.just(True),
weight_decay_mode=st.sampled_from(
[
WeightDecayMode.L2,
WeightDecayMode.DECOUPLE,
# temporarily disabled due to a test error to unblock release
# will fix in a follow-up diff
# WeightDecayMode.COUNTER,
]
),
)
@settings(
verbosity=Verbosity.verbose,
max_examples=MAX_EXAMPLES_LONG_RUNNING,
deadline=None,
suppress_health_check=[HealthCheck.filter_too_much, HealthCheck.data_too_large],
)
@unittest.skipIf(*gpu_unavailable)
def test_backward_optimizers_adagrad( # noqa C901
self,
T: int,
D: int,
B: int,
log_E: int,
L: int,
weighted: bool,
mixed: bool,
mixed_B: bool,
optimizer: OptimType,
long_segments: bool,
pooling_mode: PoolingMode,
use_cpu: bool,
weight_decay_mode: WeightDecayMode,
) -> None:
if (
pooling_mode == PoolingMode.NONE
or optimizer != OptimType.EXACT_ROWWISE_ADAGRAD
):
mixed_B = False
self.execute_backward_optimizers_(
T,
D,
B,
log_E,
L,
weighted,
mixed,
mixed_B,
optimizer,
long_segments,
pooling_mode,
use_cpu,
weight_decay_mode,
)
@given(
T=st.integers(min_value=1, max_value=5),
D=st.integers(min_value=2, max_value=256),
B=st.integers(min_value=1, max_value=128),
log_E=st.integers(min_value=3, max_value=5),
L=st.integers(min_value=0, max_value=20),
weighted=st.booleans(),
mixed=st.booleans(),
optimizer=st.sampled_from(
[
OptimType.LAMB,
OptimType.PARTIAL_ROWWISE_LAMB,
]
),
long_segments=st.booleans(),
pooling_mode=st.sampled_from(
[
PoolingMode.SUM,
PoolingMode.MEAN,
PoolingMode.NONE,
]
),
use_cpu=st.booleans()
if (gpu_available and not TEST_WITH_ROCM)
else st.just(False)
if (gpu_available and TEST_WITH_ROCM)
else st.just(True),
)
@settings(
verbosity=Verbosity.verbose,
max_examples=MAX_EXAMPLES_LONG_RUNNING,
deadline=None,
suppress_health_check=[HealthCheck.filter_too_much, HealthCheck.data_too_large],
)
@unittest.skipIf(*gpu_unavailable)
def test_backward_optimizers_lamb( # noqa C901
self,
T: int,
D: int,
B: int,
log_E: int,
L: int,
weighted: bool,
mixed: bool,
optimizer: OptimType,
long_segments: bool,
pooling_mode: PoolingMode,
use_cpu: bool,
) -> None:
self.execute_backward_optimizers_(
T,
D,
B,
log_E,
L,
weighted,
mixed,
False, # mixed_B
optimizer,
long_segments,
pooling_mode,
use_cpu,
)
@given(
T=st.integers(min_value=1, max_value=5),
D=st.integers(min_value=2, max_value=256),
B=st.integers(min_value=1, max_value=128),
log_E=st.integers(min_value=3, max_value=5),
L=st.integers(min_value=0, max_value=20),
weighted=st.booleans(),
mixed=st.booleans(),
optimizer=st.just(OptimType.LARS_SGD),
long_segments=st.booleans(),
pooling_mode=st.sampled_from(
[
PoolingMode.SUM,
PoolingMode.MEAN,
PoolingMode.NONE,
]
),
use_cpu=st.booleans()
if (gpu_available and not TEST_WITH_ROCM)
else st.just(False)
if (gpu_available and TEST_WITH_ROCM)
else st.just(True),
)
@settings(
verbosity=Verbosity.verbose,
max_examples=MAX_EXAMPLES_LONG_RUNNING,
deadline=None,
suppress_health_check=[HealthCheck.filter_too_much, HealthCheck.data_too_large],
)
@unittest.skipIf(*gpu_unavailable)
def test_backward_optimizers_lars( # noqa C901
self,
T: int,
D: int,
B: int,
log_E: int,
L: int,
weighted: bool,
mixed: bool,
optimizer: OptimType,
long_segments: bool,
pooling_mode: PoolingMode,
use_cpu: bool,
) -> None:
self.execute_backward_optimizers_(
T,
D,
B,
log_E,
L,
weighted,
mixed,
False, # mixed_B
optimizer,
long_segments,
pooling_mode,
use_cpu,
)
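# Forward-pass reference check for the quantized (nbit) table-batched embedding op:
# builds per-table torch.nn.EmbeddingBag/Embedding modules as the reference, keeps their
# weights consistent with the packed nbit weights via fake_quantize_embs, and compares
# the IntNBitTableBatchedEmbeddingBagsCodegen output within atol/rtol of 1e-2.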
def execute_nbit_forward_( # noqa C901
self,
T: int,
D: int,
B: int,
log_E: int,
L: int,
weighted: bool,
mixed: bool,
pooling_mode: PoolingMode,
weights_ty: SparseType,
use_cache: bool,
cache_algorithm: CacheAlgorithm,
use_cpu: bool,
use_array_for_index_remapping: bool,
do_pruning: bool,
mixed_weights_ty: bool,
output_dtype: SparseType,
) -> None:
# NOTE: the weighted operation is only supported for SUM pooling.
assume(pooling_mode == PoolingMode.SUM or not weighted)
assume(not mixed or pooling_mode != PoolingMode.NONE)
mode = "sum"
do_pooling = True
if pooling_mode == PoolingMode.SUM:
mode = "sum"
elif pooling_mode == PoolingMode.MEAN:
mode = "mean"
else:
mode = "sum"
do_pooling = False
E = int(10**log_E)
if not mixed_weights_ty:
weights_ty_list = [weights_ty] * T
else:
weights_ty_list = [
np.random.choice(
[
SparseType.FP32,
SparseType.FP16,
SparseType.FP8,
SparseType.INT8,
SparseType.INT4,
SparseType.INT2,
]
)
for _ in range(T)
]
D_alignment = max(
1 if ty.bit_rate() % 8 == 0 else int(8 / ty.bit_rate())
for ty in weights_ty_list
)
D = round_up(D, D_alignment)
if not mixed:
Ds = [D] * T
Es = [E] * T
else:
Ds = [
round_up(
np.random.randint(low=int(max(0.25 * D, 1)), high=int(1.0 * D)),
D_alignment,
)
for _ in range(T)
]
Ds = [min(D, 128) for D in Ds]
Es = [
np.random.randint(low=int(0.5 * E), high=int(2.0 * E)) for _ in range(T)
]
if do_pooling:
bs = [
to_device(torch.nn.EmbeddingBag(E, D, mode=mode, sparse=True), use_cpu)
for (E, D) in zip(Es, Ds)
]
else:
bs = [
to_device(torch.nn.Embedding(E, D, sparse=True), use_cpu)
for (E, D) in zip(Es, Ds)
]
if use_cpu:
managed = [EmbeddingLocation.HOST] * T
elif use_cache:
managed = [
EmbeddingLocation.MANAGED_CACHING,
] * T
if mixed:
average_D = sum(Ds) // T
for t, d in enumerate(Ds):
managed[t] = (
EmbeddingLocation.DEVICE if d < average_D else managed[t]
)
else:
managed = [
np.random.choice(
[
EmbeddingLocation.DEVICE,
EmbeddingLocation.MANAGED,
]
)
for _ in range(T)
]
# Fix exponent bias to 7 for now (TODO: Randomize it from a range of integers)
if SparseType.FP8 in weights_ty_list:
fp8_config = FP8QuantizationConfig(random.choice([4, 5]), 7)
has_fp8_weight = True
else:
has_fp8_weight = False
xs = [to_device(torch.randint(low=0, high=e, size=(B, L)), use_cpu) for e in Es]
xws = [to_device(torch.randn(size=(B, L)), use_cpu) for _ in range(T)]
xws_acc_type = copy.deepcopy(xws)
if do_pruning:
x = torch.cat([x.view(1, B, L) for x in xs], dim=0)
xw = torch.cat([xw.view(1, B, L) for xw in xws_acc_type], dim=0)
(indices, offsets) = get_table_batched_offsets_from_dense(
x, use_cpu=use_cpu
)
# generate index_remapping
dense_indices = torch.randint(low=0, high=E, size=(T, B, L)).view(-1).int()
original_E = E
current_device = "cpu" if use_cpu else torch.cuda.current_device()
indices = indices.view(-1).int()
offsets = offsets.view(-1).int()
# done generating index_remapping
# Initialize the array-based index remapping data structure and insert the mappings
index_remappings_array = []
for t in range(T):
# pyre-fixme[6]: For 1st param expected `dtype` but got `Union[int,
# str]`.
indice_t = (indices.view(T, B, L))[t].long().view(-1).to(current_device)
dense_indice_t = (
(dense_indices.view(T, B, L))[t].view(-1)
# pyre-fixme[6]: For 1st param expected `dtype` but got `Union[int,
# str]`.
.to(current_device)
)
index_remappings_array_t = torch.tensor(
[-1] * original_E,
dtype=torch.int32,
device=current_device,
)
index_remappings_array_t[indice_t] = dense_indice_t
index_remappings_array.append(index_remappings_array_t.cpu())
else:
index_remappings_array = [torch.arange(E, dtype=torch.int32) for E in Es]
x = torch.cat([x.view(1, B, L) for x in xs], dim=0)
xw = torch.cat([xw.view(1, B, L) for xw in xws_acc_type], dim=0)
(indices, offsets) = get_table_batched_offsets_from_dense(
x, use_cpu=use_cpu
)
cc = IntNBitTableBatchedEmbeddingBagsCodegen(
embedding_specs=[
(
"",
E,
D,
W_TY,
EmbeddingLocation(M),
)
for (E, D, M, W_TY) in zip(Es, Ds, managed, weights_ty_list)
],
pooling_mode=pooling_mode,
index_remapping=index_remappings_array if B != 0 else None,
device="cpu" if use_cpu else torch.cuda.current_device(),
cache_algorithm=cache_algorithm,
use_array_for_index_remapping=use_array_for_index_remapping,
output_dtype=output_dtype,
fp8_exponent_bits=fp8_config.get("exponent_bits")
if has_fp8_weight
else None,
fp8_exponent_bias=fp8_config.get("exponent_bias")
if has_fp8_weight
else None,
)
# Initialize the random weights for int nbit table split embedding bag
cc.fill_random_weights()
# NOTE: check that the module is TorchScript-compatible.
cc = torch.jit.script(cc)
for t in range(T):
(weights, scale_shift) = cc.split_embedding_weights()[t]
if scale_shift is not None:
(E, R) = scale_shift.shape
self.assertEqual(R, 4)
if weights_ty_list[t] == SparseType.INT2:
scales = np.random.uniform(0.1, 1, size=(E,)).astype(np.float16)
shifts = np.random.uniform(-2, 2, size=(E,)).astype(np.float16)
if weights_ty_list[t] == SparseType.INT4:
scales = np.random.uniform(0.01, 0.1, size=(E,)).astype(np.float16)
shifts = np.random.uniform(-2, 2, size=(E,)).astype(np.float16)
if weights_ty_list[t] == SparseType.INT8:
scales = np.random.uniform(0.001, 0.01, size=(E,)).astype(
np.float16
)
shifts = np.random.uniform(-2, 2, size=(E,)).astype(np.float16)
scale_shift[:, :] = torch.tensor(
np.stack([scales, shifts], axis=1).astype(np.float16).view(np.uint8)
)
fake_quantize_embs(
weights,
scale_shift,
bs[t].weight.detach(),
weights_ty_list[t],
use_cpu=False,
# pyre-fixme[61]: `fp8_config` is undefined, or not always defined.
fp8_config=fp8_config if has_fp8_weight else None,
)
if not use_cpu:
fc2 = (
cc(indices.int(), offsets.int())
if not weighted
else cc(indices.int(), offsets.int(), xw.contiguous().view(-1))
)
else:
cc = cc.cpu()
indices, offsets = indices.cpu(), offsets.cpu()
fc2 = (
cc(indices.int(), offsets.int())
if not weighted
else cc(indices.int(), offsets.int(), xw.contiguous().view(-1).cpu())
)
if do_pooling and B == 0:
self.assertEqual(fc2.size(), (0, cc.total_D))
return
new_indices = []
for t in range(T):
new_indices_t = torch.zeros([B, L], dtype=torch.int32)
for i in range(B):
for j in range(L):
old_index = xs[t][i, j]
new_index = index_remappings_array[t][old_index]
new_indices_t[i][j] = new_index
new_indices.append(new_indices_t)
fs = (
[
b_indices(b, x, use_cpu=use_cpu, do_pooling=do_pooling)
for (b, x) in zip(bs, new_indices)
]
if not weighted
else [
b_indices(
b,
x,
per_sample_weights=xw.view(-1),
use_cpu=use_cpu,
do_pooling=do_pooling,
)
for (b, x, xw) in zip(bs, new_indices, xws)
]
)
if do_pooling:
f = torch.cat([f.view(B, -1) for f in fs], dim=1)
else:
f = torch.cat(fs, dim=0).view(-1, D)
torch.testing.assert_close(
fc2.float().cpu(),
f.float().cpu(),
atol=1.0e-2,
rtol=1.0e-2,
)
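# CPU variant of the nbit forward test: shapes are drawn with random.* here rather than
# Hypothesis, and weighted lookups are only enabled for SUM pooling.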
@given(
nbit_weights_ty=get_nbit_weights_ty(),
use_array_for_index_remapping=st.booleans(),
do_pruning=st.booleans(),
)
@settings(
verbosity=Verbosity.verbose,
max_examples=MAX_EXAMPLES_LONG_RUNNING,
deadline=None,
)
def test_nbit_forward_cpu(
self,
nbit_weights_ty: Optional[SparseType],
use_array_for_index_remapping: bool,
do_pruning: bool,
) -> None:
use_cpu = True
T = random.randint(1, 50)
B = random.randint(0, 128)
L = random.randint(0, 32)
D = random.randint(2, 2048)
log_E = random.randint(2, 4)
use_cache = False
# cache_algorithm does not matter here since the cache is not used.
cache_algorithm = CacheAlgorithm.LRU
pooling_mode = random.choice(
[
PoolingMode.SUM,
PoolingMode.MEAN,
PoolingMode.NONE,
]
)
mixed = random.choice([True, False])
if pooling_mode == PoolingMode.NONE:
nbit_weights_ty = random.choice(
[
SparseType.FP32,
SparseType.FP16,
# CPU sequence embedding does not support FP8/INT4/INT2 yet
# SparseType.FP8,
SparseType.INT8,
# SparseType.INT4,
# SparseType.INT2,
]
)
if pooling_mode == PoolingMode.SUM:
weighted = random.choice([True, False])
else:
weighted = False
if nbit_weights_ty is None:
# The value does not matter when mixed weight types are used.
weights_ty: SparseType = SparseType.INT8
mixed_weights_ty = True
else:
weights_ty: SparseType = nbit_weights_ty
mixed_weights_ty = False
output_dtype = random.choice(
(
[SparseType.BF16]
if weights_ty in [SparseType.INT4, SparseType.INT2]
else []
)
+ [SparseType.FP32, SparseType.FP16]
)
self.execute_nbit_forward_(
T,
D,
B,
log_E,
L,
weighted,
mixed,
pooling_mode,
weights_ty,
use_cache,
cache_algorithm,
use_cpu,
use_array_for_index_remapping,
do_pruning,
mixed_weights_ty,
output_dtype,
)
@given(
nbit_weights_ty=get_nbit_weights_ty(),
use_array_for_index_remapping=st.booleans(),
do_pruning=st.booleans(),
)
@settings(
verbosity=Verbosity.verbose,
max_examples=MAX_EXAMPLES_LONG_RUNNING,
deadline=None,
)
def test_nbit_forward_cpu_bf16_out(
self,
nbit_weights_ty: Optional[SparseType],
use_array_for_index_remapping: bool,
do_pruning: bool,
) -> None:
use_cpu = True
T = random.randint(1, 50)
B = random.randint(0, 128)
L = random.randint(0, 32)
D = random.randint(2, 2048)
log_E = random.randint(2, 4)
use_cache = False
# cache_algorithm does not matter here since the cache is not used.
cache_algorithm = CacheAlgorithm.LRU
pooling_mode = random.choice(
[
PoolingMode.SUM,
PoolingMode.MEAN,
]
)
mixed = random.choice([True, False])
if pooling_mode == PoolingMode.SUM:
weighted = random.choice([True, False])
else:
weighted = False
if nbit_weights_ty is None:
# The value does not matter when mixed weight types are used.
weights_ty: SparseType = SparseType.INT8
mixed_weights_ty = True
else:
weights_ty: SparseType = nbit_weights_ty
mixed_weights_ty = False
output_dtype = SparseType.BF16
self.execute_nbit_forward_(
T,
D,
B,
log_E,
L,
weighted,
mixed,
pooling_mode,
weights_ty,
use_cache,
cache_algorithm,
use_cpu,
use_array_for_index_remapping,
do_pruning,
mixed_weights_ty,
output_dtype,
)
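# GPU variant of the nbit forward test without the UVM cache; the output dtype is sampled
# from FP32/FP16/BF16, and mixed dimensions are disabled for PoolingMode.NONE.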
@unittest.skipIf(*gpu_unavailable)
@given(
nbit_weights_ty=get_nbit_weights_ty(),
use_array_for_index_remapping=st.booleans(),
do_pruning=st.booleans(),
)
@settings(
verbosity=Verbosity.verbose,
max_examples=MAX_EXAMPLES_LONG_RUNNING,
deadline=None,
)
def test_nbit_forward_gpu_no_cache(
self,
nbit_weights_ty: Optional[SparseType],
use_array_for_index_remapping: bool,
do_pruning: bool,
) -> None:
use_cpu = False
T = random.randint(1, 50)
B = random.randint(0, 128)
L = random.randint(0, 32)
D = random.randint(2, 2048)
log_E = random.randint(2, 4)
use_cache = False
# cache_algorithm does not matter here since the cache is not used.
cache_algorithm = CacheAlgorithm.LRU
pooling_mode = random.choice(
[
PoolingMode.SUM,
PoolingMode.MEAN,
PoolingMode.NONE,
]
)
if pooling_mode == PoolingMode.NONE:
mixed = False
else:
mixed = random.choice([True, False])
if pooling_mode == PoolingMode.SUM:
weighted = random.choice([True, False])
else:
weighted = False
if nbit_weights_ty is None:
# The value does not matter when mixed weight types are used.
weights_ty: SparseType = SparseType.INT8
mixed_weights_ty = True
else:
weights_ty: SparseType = nbit_weights_ty
mixed_weights_ty = False
output_dtype = random.choice(
[SparseType.FP32, SparseType.FP16, SparseType.BF16]
)
self.execute_nbit_forward_(
T,
D,
B,
log_E,
L,
weighted,
mixed,
pooling_mode,
weights_ty,
use_cache,
cache_algorithm,
use_cpu,
use_array_for_index_remapping,
do_pruning,
mixed_weights_ty,
output_dtype,
)
@unittest.skipIf(*gpu_unavailable)
@given(
weights_ty=st.sampled_from(
[
SparseType.FP32,
SparseType.FP16,
SparseType.INT8,
SparseType.INT4,
SparseType.INT2,
]
),
emulate_pruning=st.booleans(),
)
@settings(verbosity=Verbosity.verbose, max_examples=MAX_EXAMPLES, deadline=None)
def test_int_nbit_split_embedding_uvm_caching_codegen_lookup_function(
self,
weights_ty: SparseType,
emulate_pruning: bool,
) -> None:
# TODO: support direct-mapped in int_nbit_split_embedding_uvm_caching_codegen_lookup_function
# This test is for int_nbit_split_embedding_uvm_caching_codegen_lookup_function.
# We run IntNBitTableBatchedEmbeddingBagsCodegen with UVM_CACHING, and then
# run int_nbit_split_embedding_uvm_caching_codegen_lookup_function with the
# exact same cache configuration. As both use the same logic, the result
# as well as cache state should match.
# Currently, int_nbit_split_embedding_uvm_caching_codegen_lookup_function supports only LRU.
cache_algorithm = CacheAlgorithm.LRU
associativity = 32 # Currently, hard-coded 32-way set associative.
current_device: torch.device = torch.device(torch.cuda.current_device())
T = random.randint(1, 5)
B = random.randint(1, 128)
L = random.randint(1, 20)
D = random.randint(2, 256)
log_E = random.randint(3, 5)
iters = 3
E = int(10**log_E)
D_alignment = (
1 if weights_ty.bit_rate() % 8 == 0 else int(8 / weights_ty.bit_rate())
)
D = round_up(D, D_alignment)
# Currently, int_nbit_split_embedding_uvm_caching_codegen_lookup_function supports only all UVM or all UVM_CACHING.
Ds = [D] * T
Es = [E] * T
managed_caching = [EmbeddingLocation.MANAGED_CACHING] * T
# Note both cc_ref and cc use caching.
cc_ref = IntNBitTableBatchedEmbeddingBagsCodegen(
[("", E, D, weights_ty, M) for (E, D, M) in zip(Es, Ds, managed_caching)],
cache_algorithm=cache_algorithm,
)
cc_ref.fill_random_weights()
# cc is only for cache states; we test int_nbit_split_embedding_uvm_caching_codegen_lookup_function directly;
# hence, no need to synchronize cc's weights with cc_ref's.
cc = IntNBitTableBatchedEmbeddingBagsCodegen(
[("", E, D, weights_ty, M) for (E, D, M) in zip(Es, Ds, managed_caching)],
cache_algorithm=cache_algorithm,
)
cc.fill_random_weights()
# weights_placement for all UVM case.
managed_uvm = [EmbeddingLocation.MANAGED] * T
placement_uvm = torch.tensor(
managed_uvm, device=current_device, dtype=torch.int32
)
# zero size HBM cache for UVM case.
zero_size_cache_weights = torch.zeros(
0, 0, device=current_device, dtype=torch.uint8
)
requests = generate_requests(
iters, B, T, L, min(Es), reuse=0.1, emulate_pruning=emulate_pruning
)
for indices, offsets, _ in requests:
indices = indices.int()
offsets = offsets.int()
output_ref = cc_ref(indices, offsets)
# int_nbit_split_embedding_uvm_caching_codegen_lookup_function for UVM_CACHING.
# using weights and other params from cc_ref, but
# cache states from cc.
output_uvm_caching = torch.ops.fbgemm.int_nbit_split_embedding_uvm_caching_codegen_lookup_function(
dev_weights=cc_ref.weights_host
if cc_ref.host_size > 0
else cc_ref.weights_dev,
uvm_weights=cc_ref.weights_uvm,
weights_placements=cc_ref.weights_placements,
weights_offsets=cc_ref.weights_offsets,
weights_tys=cc_ref.weights_tys,
D_offsets=cc_ref.D_offsets,
total_D=cc_ref.total_D,
max_int2_D=cc_ref.max_int2_D,
max_int4_D=cc_ref.max_int4_D,
max_int8_D=cc_ref.max_int8_D,
max_float16_D=cc_ref.max_float16_D,
max_float32_D=cc_ref.max_float32_D,
indices=indices,
offsets=offsets,
pooling_mode=int(cc_ref.pooling_mode),
indice_weights=None,
output_dtype=cc_ref.output_dtype,
lxu_cache_weights=cc.lxu_cache_weights, # cc, not cc_ref.
lxu_cache_locations=torch.empty(0, dtype=torch.int32).fill_(-1),
row_alignment=cc_ref.row_alignment,
max_float8_D=cc_ref.max_float8_D,
fp8_exponent_bits=cc_ref.fp8_exponent_bits,
fp8_exponent_bias=cc_ref.fp8_exponent_bias,
# Additional args for UVM_CACHING: using cc, not cc_ref.
cache_hash_size_cumsum=cc.cache_hash_size_cumsum,
total_cache_hash_size=cc.total_cache_hash_size,
cache_index_table_map=cc.cache_index_table_map,
lxu_cache_state=cc.lxu_cache_state,
lxu_state=cc.lxu_state,
)
torch.testing.assert_close(output_uvm_caching, output_ref, equal_nan=True)
# Cache status: both paths use the exact same logic, but the assignment of ways within an
# associative cache set can be arbitrary. We therefore compare the sum along the ways in
# each set instead of expecting an exact tensor match.
cache_weights_ref = torch.reshape(
cc_ref.lxu_cache_weights,
[-1, associativity],
)
cache_weights = torch.reshape(cc.lxu_cache_weights, [-1, associativity])
torch.testing.assert_close(
torch.sum(cache_weights_ref, 1),
torch.sum(cache_weights, 1),
equal_nan=True,
)
torch.testing.assert_close(
torch.sum(cc.lxu_cache_state, 1),
torch.sum(cc_ref.lxu_cache_state, 1),
equal_nan=True,
)
# lxu_state can differ because the time_stamp values can differ;
# we only check the entries holding the max value.
max_timestamp_ref = torch.max(cc_ref.lxu_state)
max_timestamp_uvm_caching = torch.max(cc.lxu_state)
x = cc_ref.lxu_state == max_timestamp_ref
y = cc.lxu_state == max_timestamp_uvm_caching
torch.testing.assert_close(torch.sum(x, 1), torch.sum(y, 1))
# int_nbit_split_embedding_uvm_caching_codegen_lookup_function for UVM.
output_uvm = torch.ops.fbgemm.int_nbit_split_embedding_uvm_caching_codegen_lookup_function(
dev_weights=cc_ref.weights_host
if cc_ref.host_size > 0
else cc_ref.weights_dev,
uvm_weights=cc_ref.weights_uvm,
weights_placements=placement_uvm, # all UVM weights placement.
weights_offsets=cc_ref.weights_offsets,
weights_tys=cc_ref.weights_tys,
D_offsets=cc_ref.D_offsets,
total_D=cc_ref.total_D,
max_int2_D=cc_ref.max_int2_D,
max_int4_D=cc_ref.max_int4_D,
max_int8_D=cc_ref.max_int8_D,
max_float16_D=cc_ref.max_float16_D,
max_float32_D=cc_ref.max_float32_D,
indices=indices,
offsets=offsets,
pooling_mode=int(cc_ref.pooling_mode),
indice_weights=None,
output_dtype=cc_ref.output_dtype,
lxu_cache_weights=zero_size_cache_weights, # empty HBM cache.
lxu_cache_locations=torch.empty(0, dtype=torch.int32).fill_(-1),
row_alignment=cc_ref.row_alignment,
max_float8_D=cc_ref.max_float8_D,
fp8_exponent_bits=cc_ref.fp8_exponent_bits,
fp8_exponent_bias=cc_ref.fp8_exponent_bias,
# Additional args for UVM_CACHING; not needed for UVM.
cache_hash_size_cumsum=None,
total_cache_hash_size=None,
cache_index_table_map=None,
lxu_cache_state=None,
lxu_state=None,
)
torch.testing.assert_close(output_uvm, output_ref, equal_nan=True)
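# UVM-cache parity test: a MANAGED_CACHING table (optionally with pruning-based index
# remapping) should produce the same forward output as a DEVICE-only reference table that
# was initialized with the same weights.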
@unittest.skipIf(*gpu_unavailable)
@given(
weights_ty=st.sampled_from(
[
SparseType.FP32,
SparseType.FP16,
SparseType.INT8,
SparseType.INT4,
SparseType.INT2,
]
),
cache_algorithm=st.sampled_from(CacheAlgorithm),
associativity=st.sampled_from([1, DEFAULT_ASSOC]),
do_pruning=st.booleans(),
use_array_for_index_remapping=st.booleans(),
)
@settings(verbosity=Verbosity.verbose, max_examples=MAX_EXAMPLES, deadline=None)
def test_nbit_forward_uvm_cache(
self,
weights_ty: SparseType,
cache_algorithm: CacheAlgorithm,
associativity: int,
do_pruning: bool,
use_array_for_index_remapping: bool,
) -> None:
assume(cache_algorithm == CacheAlgorithm.LRU or associativity != 1)
T = random.randint(1, 5)
B = random.randint(1, 128)
L = random.randint(1, 20)
D = random.randint(2, 256)
log_E = random.randint(3, 5)
mixed = random.choice([True, False])
iters = 3
E = int(10**log_E)
D_alignment = (
1 if weights_ty.bit_rate() % 8 == 0 else int(8 / weights_ty.bit_rate())
)
D = round_up(D, D_alignment)
if not mixed:
Ds = [D] * T
Es = [E] * T
else:
Ds = [
round_up(
np.random.randint(low=int(max(0.25 * D, 1)), high=int(1.0 * D)),
D_alignment,
)
for _ in range(T)
]
Es = [
np.random.randint(low=int(0.5 * E), high=int(2.0 * E)) for _ in range(T)
]
managed = [EmbeddingLocation.MANAGED_CACHING] * T
if mixed:
average_D = sum(Ds) // T
for t, d in enumerate(Ds):
managed[t] = EmbeddingLocation.DEVICE if d < average_D else managed[t]
index_remapping = None
pruning_hash_load_factor = 0.5
if do_pruning:
current_device = torch.cuda.current_device()
index_remapping = []
for E in Es:
# For each table, keep the first half of the rows as is;
# the rest are treated as pruned (-1).
remapping = list(range(0, E // 2)) + [-1] * (E - E // 2)
remapping_t = torch.tensor(
remapping,
dtype=torch.int32,
device=current_device,
)
index_remapping.append(remapping_t)
cc_ref = IntNBitTableBatchedEmbeddingBagsCodegen(
[
(
"",
E,
D,
weights_ty,
EmbeddingLocation.DEVICE,
)
for (E, D) in zip(Es, Ds)
],
index_remapping=index_remapping,
use_array_for_index_remapping=use_array_for_index_remapping,
pruning_hash_load_factor=pruning_hash_load_factor,
)
cc_ref.fill_random_weights()
cc = IntNBitTableBatchedEmbeddingBagsCodegen(
[("", E, D, weights_ty, M) for (E, D, M) in zip(Es, Ds, managed)],
cache_algorithm=cache_algorithm,
cache_assoc=associativity,
index_remapping=index_remapping,
use_array_for_index_remapping=use_array_for_index_remapping,
pruning_hash_load_factor=pruning_hash_load_factor,
)
cc.fill_random_weights()
split_weights = cc.split_embedding_weights()
ref_split_weights = cc_ref.split_embedding_weights()
for t in range(T):
(weights, scale_shift) = split_weights[t]
(ref_weights, ref_scale_shift) = ref_split_weights[t]
self.assertEqual(weights.size(), ref_weights.size())
weights.copy_(ref_weights)
if ref_scale_shift is not None:
scale_shift.copy_(ref_scale_shift)
requests = generate_requests(iters, B, T, L, min(Es), reuse=0.1)
for indices, offsets, _ in requests:
indices = indices.int()
offsets = offsets.int()
output = cc(indices, offsets)
output_ref = cc_ref(indices, offsets)
torch.testing.assert_close(output, output_ref, equal_nan=True)
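# Pruning lookup test: builds both a hashmap-based remapping (pruned_hashmap_insert /
# pruned_hashmap_lookup, or PrunedMapCPU on CPU) and an array-based remapping
# (pruned_array_lookup), checks that existing indices map to their dense ids, and, for
# the hashmap path, that unknown indices map to -1.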
@given(
T=st.integers(min_value=1, max_value=5),
B=st.integers(min_value=1, max_value=8),
L=st.integers(min_value=0, max_value=8),
use_cpu=st.booleans()
if (gpu_available and not TEST_WITH_ROCM)
else st.just(False)
if (gpu_available and TEST_WITH_ROCM)
else st.just(True),
use_cpu_hashtable=st.booleans(),
use_array_for_index_remapping=st.booleans(),
)
@settings(verbosity=Verbosity.verbose, max_examples=MAX_EXAMPLES, deadline=None)
def test_pruning(
self,
T: int,
B: int,
L: int,
use_cpu: bool,
use_cpu_hashtable: bool,
use_array_for_index_remapping: bool,
) -> None:
E = int(1000)
LOAD_FACTOR = 0.8
pruning_ratio = 0.5
capacities = [int(B * L / LOAD_FACTOR) + 1 for _ in range(T)]
original_E = int(E / (1.0 - pruning_ratio))
# Enforce the size of original_E/B/L to get the unique indices
assume(original_E > B * L)
current_device = "cpu" if use_cpu else torch.cuda.current_device()
if use_cpu_hashtable:
assume(use_cpu)
indices = torch.randint(low=0, high=original_E, size=(T, B, L))
for t in range(T):
while (
torch.unique(
indices[t], return_counts=False, return_inverse=False
).numel()
!= indices[t].numel()
):
indices[t] = torch.randint(low=0, high=original_E, size=(B, L))
indices = indices.view(-1).int()
dense_indices = torch.randint(low=0, high=E, size=(T, B, L)).view(-1).int()
offsets = torch.tensor([L * b_t for b_t in range(B * T + 1)]).int()
# Initialize the hashmap-based index remapping data structure and insert the mappings
hash_table = torch.empty(
(sum(capacities), 2),
dtype=torch.int32,
)
hash_table[:, :] = -1
hash_table_offsets = torch.tensor([0] + np.cumsum(capacities).tolist()).long()
torch.ops.fbgemm.pruned_hashmap_insert(
indices, dense_indices, offsets, hash_table, hash_table_offsets
)
if use_cpu_hashtable:
ht = torch.classes.fbgemm.PrunedMapCPU()
ht.insert(indices, dense_indices, offsets, T)
# Initialize the array-based index remapping data structure and insert the mappings
index_remappings_array = torch.tensor(
[-1] * original_E * T,
dtype=torch.int32,
device=current_device,
)
index_remappings_array_offsets = torch.empty(
T + 1,
dtype=torch.int64,
# pyre-fixme[6]: For 3rd param expected `Union[None, str, device]` but
# got `Union[int, str]`.
device=current_device,
)
index_remappings_array_offsets[0] = 0
for t in range(T):
# pyre-fixme[6]: For 1st param expected `dtype` but got `Union[int, str]`.
indice_t = (indices.view(T, B, L))[t].long().view(-1).to(current_device)
dense_indice_t = (
(dense_indices.view(T, B, L))[t].view(-1)
# pyre-fixme[6]: For 1st param expected `dtype` but got `Union[int,
# str]`.
.to(current_device)
)
selected_indices = torch.add(indice_t, t * original_E)[:E]
index_remappings_array[selected_indices] = dense_indice_t
index_remappings_array_offsets[t + 1] = (
index_remappings_array_offsets[t] + original_E
)
# Move data when using device
if not use_cpu:
(
indices,
dense_indices,
offsets,
hash_table,
hash_table_offsets,
index_remappings_array,
index_remappings_array_offsets,
) = (
# pyre-fixme[6]: For 1st param expected `dtype` but got `Union[int,
# str]`.
indices.to(current_device),
# pyre-fixme[6]: For 1st param expected `dtype` but got `Union[int,
# str]`.
dense_indices.to(current_device),
# pyre-fixme[6]: For 1st param expected `dtype` but got `Union[int,
# str]`.
offsets.to(current_device),
# pyre-fixme[6]: For 1st param expected `dtype` but got `Union[int,
# str]`.
hash_table.to(current_device),
# pyre-fixme[6]: For 1st param expected `dtype` but got `Union[int,
# str]`.
hash_table_offsets.to(current_device),
# pyre-fixme[6]: For 1st param expected `dtype` but got `Union[int,
# str]`.
index_remappings_array.to(current_device),
# pyre-fixme[6]: For 1st param expected `dtype` but got `Union[int,
# str]`.
index_remappings_array_offsets.to(current_device),
)
# Lookup
if use_cpu_hashtable:
dense_indices_ = ht.lookup(indices, offsets)
elif not use_array_for_index_remapping: # hashmap based pruning
dense_indices_ = torch.ops.fbgemm.pruned_hashmap_lookup(
indices, offsets, hash_table, hash_table_offsets
)
else: # array based pruning
dense_indices_ = torch.ops.fbgemm.pruned_array_lookup(
indices,
offsets,
index_remappings_array,
index_remappings_array_offsets,
)
# Validate the lookup result
torch.testing.assert_close(dense_indices, dense_indices_)
# For array-based pruning, arbitrarily large indices would be out of bounds;
# we rely on the bounds checker to make sure indices stay within the boundary.
if not use_array_for_index_remapping:
# now, use a value that does not exist in the original set of indices
# and so should be pruned out.
indices[:] = np.iinfo(np.int32).max
if use_cpu_hashtable:
dense_indices_ = ht.lookup(indices, offsets)
elif not use_array_for_index_remapping: # hashmap based pruning
dense_indices_ = torch.ops.fbgemm.pruned_hashmap_lookup(
indices, offsets, hash_table, hash_table_offsets
)
else: # array based pruning
dense_indices_ = torch.ops.fbgemm.pruned_array_lookup(
indices,
offsets,
index_remappings_array,
index_remappings_array_offsets,
)
torch.testing.assert_close(dense_indices.clone().fill_(-1), dense_indices_)
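# Checks the _update_cache_miss_counter bookkeeping: the unique-miss count reported by
# get_cache_miss_counter() should equal the number of distinct linear cache indices whose
# lxu_cache_locations entry is -1 (i.e. a miss).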
@given(
L=st.integers(min_value=0, max_value=16),
H=st.integers(min_value=512, max_value=1024),
S=st.integers(min_value=0, max_value=128),
)
@settings(verbosity=Verbosity.verbose, max_examples=MAX_EXAMPLES, deadline=None)
def test_cache_update_function(self, L: int, H: int, S: int) -> None:
# Generate synthetic data
linear_cache_indices_cpu = torch.randint(L, H, (S,))
lxu_cache_locations_cpu = torch.clone(linear_cache_indices_cpu)
indices = [True if np.random.rand() < 0.5 else False for _ in range(S)]
lxu_cache_locations_cpu[indices] = -1
cache_miss_ids = torch.clone(linear_cache_indices_cpu)
cache_miss_ids[lxu_cache_locations_cpu != -1] = -2
# Calculate the correct output
unique_cache_miss_ids = torch.unique(cache_miss_ids)
expect_out = sum(unique_cache_miss_ids >= 0)
linear_cache_indices = to_device(
torch.tensor(linear_cache_indices_cpu, dtype=torch.int64), use_cpu=False
)
lxu_cache_locations = to_device(
torch.tensor(lxu_cache_locations_cpu, dtype=torch.int32), use_cpu=False
)
# Create an abstract split table
D = 8
T = 2
E = 10**3
Ds = [D] * T
Es = [E] * T
emb_op = SplitTableBatchedEmbeddingBagsCodegen
cc = emb_op(
embedding_specs=[
(
E,
D,
EmbeddingLocation.MANAGED_CACHING,
ComputeDevice.CUDA,
)
for (E, D) in zip(Es, Ds)
],
record_cache_metrics=RecordCacheMetrics(True, False),
)
cc._update_cache_miss_counter(lxu_cache_locations, linear_cache_indices)
(
cache_miss_forward_count,
unique_cache_miss_count,
) = cc.get_cache_miss_counter().cpu()
self.assertEqual(unique_cache_miss_count, expect_out)
self.assertLessEqual(cache_miss_forward_count, unique_cache_miss_count)
@given(N=st.integers(min_value=1, max_value=8))
@settings(verbosity=Verbosity.verbose, max_examples=MAX_EXAMPLES, deadline=None)
def test_cache_miss_counter(self, N: int) -> None:
# Create an abstract split table
D = 8
T = 2
E = 10**3
Ds = [D] * T
Es = [E] * T
emb_op = SplitTableBatchedEmbeddingBagsCodegen
cc = emb_op(
embedding_specs=[
(
E,
D,
EmbeddingLocation.MANAGED_CACHING,
ComputeDevice.CUDA,
)
for (E, D) in zip(Es, Ds)
],
record_cache_metrics=RecordCacheMetrics(True, True),
)
# Create fake input data and the target output
xs = []
x1 = torch.Tensor([[[1], [1]], [[3], [4]]])
x1 = to_device(torch.tensor(x1, dtype=torch.int64), use_cpu=False)
x2 = torch.Tensor([[[2], [1]], [[3], [4]]])
x2 = to_device(torch.tensor(x2, dtype=torch.int64), use_cpu=False)
x3 = torch.Tensor([[[5], [6]], [[7], [8]]])
x3 = to_device(torch.tensor(x3, dtype=torch.int64), use_cpu=False)
xs.append(x1)
xs.append(x2)
xs.append(x3)
target_counter_list = [[1, 3], [2, 4], [3, 8]]
target_tablewise_cache_miss_list = [[1, 2], [2, 2], [4, 4]]
for x, t_counter, t_tablewise_cache_miss in zip(
xs, target_counter_list, target_tablewise_cache_miss_list
):
(indices, offsets) = get_table_batched_offsets_from_dense(x, use_cpu=False)
for _ in range(N):
cc(indices, offsets)
(
cache_miss_forward_count,
unique_cache_miss_count,
) = cc.get_cache_miss_counter().cpu()
tablewise_cache_miss = cc.get_table_wise_cache_miss().cpu()
self.assertEqual(cache_miss_forward_count, t_counter[0])
self.assertEqual(unique_cache_miss_count, t_counter[1])
for i in range(len(tablewise_cache_miss)):
self.assertEqual(tablewise_cache_miss[i], t_tablewise_cache_miss[i])
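# Checks get_uvm_cache_stats() on SplitTableBatchedEmbeddingBagsCodegen: with the cache
# state reset before every forward call, each request should report only unique misses
# and no conflict misses.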
@given(N=st.integers(min_value=1, max_value=2))
@settings(verbosity=Verbosity.verbose, max_examples=MAX_EXAMPLES, deadline=None)
def test_stb_uvm_cache_stats(self, N: int) -> None:
# Create an abstract split table
D = 8
T = 2
E = 10**3
Ds = [D] * T
Es = [E] * T
emb_op = SplitTableBatchedEmbeddingBagsCodegen
cc = emb_op(
embedding_specs=[
(
E,
D,
EmbeddingLocation.MANAGED_CACHING,
ComputeDevice.CUDA,
)
for (E, D) in zip(Es, Ds)
],
gather_uvm_cache_stats=True,
)
x = torch.Tensor([[[1], [1]], [[3], [4]]])
x = to_device(torch.tensor(x, dtype=torch.int64), use_cpu=False)
for _ in range(N):
indices, offsets = get_table_batched_offsets_from_dense(x, use_cpu=False)
cc.reset_cache_states()
cc.reset_uvm_cache_stats()
cc(indices, offsets)
(
n_calls,
n_requested_indices,
n_unique_indices,
n_unique_misses,
n_conflict_unique_misses,
n_conflict_misses,
) = cc.get_uvm_cache_stats()
self.assertEqual(n_calls, 1)
self.assertEqual(n_requested_indices, len(indices))
self.assertEqual(n_unique_indices, len(set(indices.tolist())))
self.assertEqual(n_unique_misses, len(set(indices.tolist())))
self.assertEqual(n_conflict_unique_misses, 0)
self.assertEqual(n_conflict_misses, 0)
@unittest.skipIf(*gpu_unavailable)
@given(
L=st.integers(min_value=0, max_value=16),
H=st.integers(min_value=512, max_value=1024),
S=st.integers(min_value=0, max_value=128),
)
@settings(verbosity=Verbosity.verbose, max_examples=MAX_EXAMPLES, deadline=None)
def test_nbit_cache_update_function(self, L: int, H: int, S: int) -> None:
# Generate synthetic data
linear_cache_indices_cpu = torch.randint(L, H, (S,))
lxu_cache_locations_cpu = torch.clone(linear_cache_indices_cpu)
indices = [True if np.random.rand() < 0.5 else False for _ in range(S)]
lxu_cache_locations_cpu[indices] = -1
cache_miss_ids = torch.clone(linear_cache_indices_cpu)
cache_miss_ids[lxu_cache_locations_cpu != -1] = -2
# Calculate the correct output
unique_cache_miss_ids = torch.unique(cache_miss_ids)
expect_out = sum(unique_cache_miss_ids >= 0)
linear_cache_indices = linear_cache_indices_cpu.to(torch.int32).cuda()
lxu_cache_locations = lxu_cache_locations_cpu.to(torch.int32).cuda()
expected_unique_access = len(torch.unique(linear_cache_indices_cpu))
expected_total_access = len(linear_cache_indices_cpu)
# Create an abstract split table
D = 8
T = 2
E = 10**3
Ds = [D] * T
Es = [E] * T
cc = IntNBitTableBatchedEmbeddingBagsCodegen(
embedding_specs=[
(
"",
E,
D,
SparseType.INT8,
EmbeddingLocation.MANAGED_CACHING,
)
for (E, D) in zip(Es, Ds)
],
device=torch.cuda.current_device(),
record_cache_metrics=RecordCacheMetrics(True, False),
)
cc.fill_random_weights()
cc._update_cache_miss_counter(lxu_cache_locations, linear_cache_indices)
(
cache_miss_forward_count,
unique_cache_miss_count,
unique_access_count,
total_access_count,
) = cc.get_cache_miss_counter().cpu()
self.assertEqual(unique_cache_miss_count, expect_out)
self.assertLessEqual(cache_miss_forward_count, unique_cache_miss_count)
self.assertEqual(unique_access_count, expected_unique_access)
self.assertEqual(total_access_count, expected_total_access)
@unittest.skipIf(*gpu_unavailable)
@given(N=st.integers(min_value=1, max_value=8))
@settings(verbosity=Verbosity.verbose, max_examples=MAX_EXAMPLES, deadline=None)
def test_nbit_cache_miss_counter(self, N: int) -> None:
# Create an abstract split table
D = 8
T = 2
E = 10**3
Ds = [D] * T
Es = [E] * T
cc = IntNBitTableBatchedEmbeddingBagsCodegen(
embedding_specs=[
(
"",
E,
D,
SparseType.INT8,
EmbeddingLocation.MANAGED_CACHING,
)
for (E, D) in zip(Es, Ds)
],
device=torch.cuda.current_device(),
record_cache_metrics=RecordCacheMetrics(True, True),
)
cc.fill_random_weights()
# Create fake input data and the target output
x1 = torch.Tensor([[[1], [1]], [[3], [4]]]).cuda()
x2 = torch.Tensor([[[2], [1]], [[3], [4]]]).cuda()
x3 = torch.Tensor([[[5], [6]], [[7], [8]]]).cuda()
xs = [x1, x2, x3]
target_counter_list = [[1, 3], [2, 4], [3, 8]]
target_tablewise_cache_miss_list = [[1, 2], [2, 2], [4, 4]]
for x, t_counter, t_tablewise_cache_miss in zip(
xs, target_counter_list, target_tablewise_cache_miss_list
):
(indices, offsets) = get_table_batched_offsets_from_dense(x, use_cpu=False)
for _ in range(N):
cc(indices.int(), offsets.int())
(
cache_miss_forward_count,
unique_cache_miss_count,
_,
_,
) = cc.get_cache_miss_counter().cpu()
tablewise_cache_miss = cc.get_table_wise_cache_miss().cpu()
self.assertEqual(cache_miss_forward_count, t_counter[0])
self.assertEqual(unique_cache_miss_count, t_counter[1])
for i in range(len(tablewise_cache_miss)):
self.assertEqual(tablewise_cache_miss[i], t_tablewise_cache_miss[i])
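# Same uvm_cache_stats checks for the nbit op. The second half restricts the cache to a
# single set so that requesting more than DEFAULT_ASSOC distinct rows forces a known
# number of conflict misses.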
@unittest.skipIf(*gpu_unavailable)
@given(
N=st.integers(min_value=1, max_value=8),
dtype=st.sampled_from([SparseType.INT8, SparseType.INT4, SparseType.INT2]),
)
@settings(verbosity=Verbosity.verbose, max_examples=MAX_EXAMPLES, deadline=None)
def test_nbit_uvm_cache_stats(self, N: int, dtype: SparseType) -> None:
# Create an abstract split table
D = 8
T = 2
E = 10**3
Ds = [D] * T
Es = [E] * T
cc = IntNBitTableBatchedEmbeddingBagsCodegen(
embedding_specs=[
(
"",
E,
D,
dtype,
EmbeddingLocation.MANAGED_CACHING,
)
for (E, D) in zip(Es, Ds)
],
device=torch.cuda.current_device(),
gather_uvm_cache_stats=True,
)
cc.fill_random_weights()
# Create fake input data and the target output
x1 = torch.Tensor([[[1], [1]], [[3], [4]]]).cuda()
x2 = torch.Tensor([[[2], [1]], [[3], [4]]]).cuda()
x3 = torch.Tensor([[[5], [6]], [[7], [8]]]).cuda()
xs = [x1, x2, x3]
# num_unique_indices, num_unique_misses
# Note that these are cumulative over calls, and "unique" is counted per batch.
target_counter_list = [[3, 3], [4, 4], [4, 8]]
num_calls_expected = 0
num_indices_expected = 0
num_unique_indices_expected = 0
for x, t_counter in zip(xs, target_counter_list):
(indices, offsets) = get_table_batched_offsets_from_dense(x, use_cpu=False)
for _ in range(N):
num_calls_expected = num_calls_expected + 1
num_indices_expected = num_indices_expected + len(indices)
cc(indices.int(), offsets.int())
(
num_calls,
num_indices,
num_unique_indices,
num_unique_misses,
num_conflict_unique_miss,
num_conflict_miss,
) = cc.get_uvm_cache_stats().cpu()
# Note num_unique_indices is cumulative stats.
num_unique_indices_expected = num_unique_indices_expected + t_counter[0]
self.assertEqual(num_calls, num_calls_expected)
self.assertEqual(num_indices, num_indices_expected)
self.assertEqual(num_unique_indices, num_unique_indices_expected)
self.assertEqual(num_unique_misses, t_counter[1])
self.assertEqual(num_conflict_unique_miss, 0)
self.assertEqual(num_conflict_miss, 0)
T = 1 # for simplicity
Ds = [D] * T
Es = [E] * T
cc1 = IntNBitTableBatchedEmbeddingBagsCodegen(
embedding_specs=[
(
"",
E,
D,
SparseType.INT8,
EmbeddingLocation.MANAGED_CACHING,
)
for (E, D) in zip(Es, Ds)
],
device=torch.cuda.current_device(),
gather_uvm_cache_stats=True,
cache_sets=1, # Only one set.
)
cc1.fill_random_weights()
associativity = DEFAULT_ASSOC # 32 for NVIDIA / 64 for AMD.
repetition = 17
indices1 = torch.Tensor(
[[list(range(0, associativity))] * repetition]
).cuda() # 0, 1, ..., 31.
indices2 = torch.Tensor(
[[list(range(0, associativity + 1))] * repetition]
).cuda() # 0, 1, ..., 31, 32.
indices3 = torch.Tensor(
[[list(range(0, associativity + 10))] * repetition]
).cuda() # 0, 1, ..., 31, 32, ..., 41.
# num_conflict_unique_miss, num_conflict_miss
expected = [[0, 0], [1, 17], [10, 170]]
for x, e in zip((indices1, indices2, indices3), expected):
(indices, offsets) = get_table_batched_offsets_from_dense(x, use_cpu=False)
for _ in range(N):
cc1(indices.int(), offsets.int())
(
_,
_,
_,
_,
num_conflict_unique_miss,
num_conflict_miss,
) = cc1.get_uvm_cache_stats().cpu()
self.assertEqual(num_conflict_unique_miss, e[0])
self.assertEqual(num_conflict_miss, e[1])
cc1.reset_uvm_cache_stats()
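# Direct-mapped (cache_assoc=1) variant: unique-index/unique-miss stats are not gathered
# in this mode, and num_conflict_unique_miss instead counts the rows actually inserted.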
@unittest.skipIf(*gpu_unavailable)
@given(
N=st.integers(min_value=1, max_value=8),
dtype=st.sampled_from([SparseType.INT8, SparseType.INT4, SparseType.INT2]),
)
@settings(verbosity=Verbosity.verbose, max_examples=MAX_EXAMPLES, deadline=None)
def test_nbit_direct_mapped_uvm_cache_stats(
self, N: int, dtype: SparseType
) -> None:
# Create an abstract split table
D = 8
T = 2
E = 10**3
Ds = [D] * T
Es = [E] * T
cc = IntNBitTableBatchedEmbeddingBagsCodegen(
embedding_specs=[
(
"",
E,
D,
dtype,
EmbeddingLocation.MANAGED_CACHING,
)
for (E, D) in zip(Es, Ds)
],
device=torch.cuda.current_device(),
gather_uvm_cache_stats=True,
cache_assoc=1, # Direct Mapped
)
cc.fill_random_weights()
# Create fake input data and the target output
x1 = torch.Tensor([[[1], [1]], [[3], [4]]]).cuda()
x2 = torch.Tensor([[[2], [1]], [[3], [4]]]).cuda()
x3 = torch.Tensor([[[5], [6]], [[7], [8]]]).cuda()
xs = [x1, x2, x3]
# num_unique_indices, num_unique_misses
# Note that these are cumulative over calls, and "unique" is counted per batch.
target_counter_list = [[3, 3], [4, 4], [4, 8]]
num_calls_expected = 0
num_indices_expected = 0
num_unique_indices_expected = 0
for x, t_counter in zip(xs, target_counter_list):
(indices, offsets) = get_table_batched_offsets_from_dense(x, use_cpu=False)
for _ in range(N):
num_calls_expected = num_calls_expected + 1
num_indices_expected = num_indices_expected + len(indices)
cc(indices.int(), offsets.int())
(
num_calls,
num_indices,
num_unique_indices,
num_unique_misses,
num_conflict_unique_miss,
num_conflict_miss,
) = cc.get_uvm_cache_stats().cpu()
# Note num_unique_indices is cumulative stats.
num_unique_indices_expected = num_unique_indices_expected + t_counter[0]
self.assertEqual(num_calls, num_calls_expected)
self.assertEqual(num_indices, num_indices_expected)
self.assertEqual(num_unique_indices, 0) # N/A for Direct Mapped
self.assertEqual(num_unique_misses, 0) # N/A for Direct Mapped
self.assertEqual(
num_conflict_unique_miss, t_counter[1]
) # number of actually inserted rows for Direct Mapped
self.assertEqual(num_conflict_miss, 0)
T = 1 # for simplicity
Ds = [D] * T
Es = [E] * T
cc1 = IntNBitTableBatchedEmbeddingBagsCodegen(
embedding_specs=[
(
"",
E,
D,
SparseType.INT8,
EmbeddingLocation.MANAGED_CACHING,
)
for (E, D) in zip(Es, Ds)
],
device=torch.cuda.current_device(),
gather_uvm_cache_stats=True,
cache_sets=1, # Only one set.
cache_assoc=1, # Direct Mapped
)
cc1.fill_random_weights()
associativity = 1 # Direct-Mapped
repetition = 17
indices1 = torch.Tensor(
[[list(range(0, associativity))] * repetition]
).cuda() # no conflict miss
indices2 = torch.Tensor(
[[list(range(0, associativity + 1))] * repetition]
).cuda() # 1 * 17 conflict misses per request
indices3 = torch.Tensor(
[[list(range(0, associativity + 10))] * repetition]
).cuda() # 10 * 17 conflict misses per request
# num_conflict_unique_miss, num_conflict_miss
expected = [[1, 0], [1, 17], [1, 170]]
accum_num_conflict_miss = 0
for x, e in zip((indices1, indices2, indices3), expected):
(indices, offsets) = get_table_batched_offsets_from_dense(x, use_cpu=False)
for _ in range(N):
cc1(indices.int(), offsets.int())
(
_,
_,
_,
_,
num_conflict_unique_miss,
num_conflict_miss,
) = cc1.get_uvm_cache_stats().cpu()
# For direct-mapped caches, this represents the number of actually inserted rows.
self.assertEqual(num_conflict_unique_miss, e[0])
accum_num_conflict_miss += e[1]
self.assertEqual(num_conflict_miss, accum_num_conflict_miss)
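# bounds_check_indices behavior covered below: in-bounds indices are left untouched;
# out-of-bounds indices/offsets are fixed up under WARNING/IGNORE (and raise on CPU under
# FATAL); mismatched offsets or weights sizes are expected to raise RuntimeError.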
@given(
T=st.integers(min_value=1, max_value=64),
B=st.integers(min_value=1, max_value=64),
max_L=st.integers(min_value=1, max_value=64),
bounds_check_mode=st.sampled_from(
[
BoundsCheckMode.FATAL,
BoundsCheckMode.WARNING,
BoundsCheckMode.IGNORE,
]
),
use_cpu=st.booleans()
if (gpu_available and not TEST_WITH_ROCM)
else st.just(False)
if (gpu_available and TEST_WITH_ROCM)
else st.just(True),
weighted=st.booleans(),
dtype=st.sampled_from(
[
torch.int64,
torch.int32,
]
),
mixed_B=st.booleans(),
)
@settings(verbosity=Verbosity.verbose, max_examples=MAX_EXAMPLES, deadline=None)
def test_bounds_check( # noqa C901
self,
T: int,
B: int,
max_L: int,
bounds_check_mode: BoundsCheckMode,
use_cpu: bool,
weighted: bool,
dtype: torch.dtype,
mixed_B: bool,
) -> None:
# use_cpu does not support mixed_B
if use_cpu and mixed_B:
mixed_B = False
rows_per_table = torch.tensor(
np.random.randint(low=1, high=1000, size=(T,))
).long()
if not mixed_B:
Bs = [B] * T
else:
low = max(int(0.25 * B), 1)
high = int(B)
if low == high:
Bs = [B] * T
else:
Bs = [np.random.randint(low=low, high=high) for _ in range(T)]
B_offsets = [0] + list(accumulate(Bs))
Ls = np.random.randint(low=0, high=max_L, size=(B_offsets[-1],))
indices = [
np.random.randint(
low=0,
high=rows_per_table[t],
size=sum(Ls[B_offsets[t] : B_offsets[t + 1]]),
)
for t in range(T)
]
indices = torch.tensor(np.concatenate(indices, axis=0)).to(dtype)
weights = (
torch.rand(indices.shape, dtype=torch.float, device=indices.device)
if weighted
else None
)
offsets = torch.tensor([0] + np.cumsum(Ls.flatten()).tolist()).to(dtype)
warning = torch.tensor([0]).long()
if mixed_B:
B_offsets = torch.tensor(B_offsets, device="cuda", dtype=torch.int32)
max_B = max(Bs)
else:
B_offsets = None
max_B = -1
self.assertEqual(indices.numel(), np.sum(Ls).item())
self.assertEqual(offsets[-1], np.sum(Ls).item())
if not use_cpu:
indices, offsets, rows_per_table, warning = (
indices.cuda(),
offsets.cuda(),
rows_per_table.cuda(),
warning.cuda(),
)
if weighted:
# pyre-fixme[16]: `Optional` has no attribute `cuda`.
weights = weights.cuda()
indices_copy = indices.clone()
offsets_copy = offsets.clone()
torch.ops.fbgemm.bounds_check_indices(
rows_per_table,
indices,
offsets,
bounds_check_mode,
warning,
weights,
B_offsets=B_offsets,
max_B=max_B,
)
# we don't modify when we are in-bounds.
torch.testing.assert_close(indices_copy, indices)
indices[:] = torch.iinfo(dtype).max
if bounds_check_mode != BoundsCheckMode.FATAL:
torch.ops.fbgemm.bounds_check_indices(
rows_per_table,
indices,
offsets,
bounds_check_mode,
warning,
weights,
B_offsets=B_offsets,
max_B=max_B,
)
torch.testing.assert_close(indices, torch.zeros_like(indices))
if bounds_check_mode == BoundsCheckMode.WARNING:
self.assertEqual(warning.item(), indices.numel())
else:
if use_cpu and indices.numel():
with self.assertRaises(RuntimeError):
torch.ops.fbgemm.bounds_check_indices(
rows_per_table,
indices,
offsets,
bounds_check_mode,
warning,
weights,
B_offsets=B_offsets,
max_B=max_B,
)
# It would be nice to test the CUDA implementation of BoundsCheckMode==FATAL,
# but the device assert kills the CUDA context and requires a process restart,
# which is a bit inconvenient.
# test offsets bound errors
indices = indices_copy.clone()
offsets = offsets_copy.clone()
if offsets.numel() > 0:
offsets[0] = -100
if offsets.numel() > 1:
offsets[-1] += 100
if bounds_check_mode != BoundsCheckMode.FATAL:
torch.ops.fbgemm.bounds_check_indices(
rows_per_table,
indices,
offsets,
bounds_check_mode,
warning,
weights,
B_offsets=B_offsets,
max_B=max_B,
)
if offsets.numel() > 0:
self.assertEqual(offsets[0].item(), 0)
if offsets.numel() > 1:
self.assertEqual(offsets[-1].item(), indices.numel())
if bounds_check_mode == BoundsCheckMode.WARNING:
# -1 because when we have 2 elements in offsets, we have only 1
# warning for the pair.
self.assertGreaterEqual(warning.item(), min(2, offsets.numel() - 1))
else:
if use_cpu and indices.numel():
with self.assertRaises(RuntimeError):
torch.ops.fbgemm.bounds_check_indices(
rows_per_table,
indices,
offsets,
bounds_check_mode,
warning,
weights,
)
# Test the offsets.size(0) != B * T + 1 case. We test with T >= 2 here,
# because T == 1 always yields an even division.
# (Does not apply to mixed_B = True.)
if not mixed_B and T >= 2:
indices = indices_copy.clone()
offsets = offsets_copy.clone()
offsets = torch.cat(
(
offsets,
torch.tensor(
[indices.numel()] * (T - 1),
dtype=offsets.dtype,
device=offsets.device,
),
),
dim=0,
)
with self.assertRaises(RuntimeError):
torch.ops.fbgemm.bounds_check_indices(
rows_per_table,
indices,
offsets,
bounds_check_mode,
warning,
weights,
)
# test weights.size(0) != indices.size(0) case
weights = torch.rand(
(indices.size(0) + 1,), dtype=torch.float, device=indices.device
)
with self.assertRaises(RuntimeError):
torch.ops.fbgemm.bounds_check_indices(
rows_per_table,
indices,
offsets,
bounds_check_mode,
warning,
weights,
B_offsets=B_offsets,
max_B=max_B,
)
def test_pickle(self) -> None:
tensor_queue = torch.classes.fbgemm.TensorQueue(torch.empty(0))
pickled = pickle.dumps(tensor_queue)
unpickled = pickle.loads(pickled) # noqa: F841
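# linearize_cache_indices maps each index into a single linear space by adding the
# per-table cache_hash_size_cumsum offset. Based on the expected outputs below, indices
# of tables that are not cached (cumsum entry of -1) and pruned indices (-1) are mapped
# to the total cache hash size so they can never hit.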
@unittest.skipIf(*gpu_unavailable)
def test_linearize_cache_indices(self) -> None:
indices = torch.tensor(
[10, 2, 3, 7, 1, 4, 5, 9, 2, 7, 6, 8, 5, 1, 0, 4],
dtype=torch.int,
device="cuda",
)
pruned_indices = torch.tensor(
[10, -1, 3, 7, 1, 4, -1, 9, 2, -1, 6, 8, 5, 1, -1, 4],
dtype=torch.int,
device="cuda",
)
equal_offsets = torch.tensor([0, 4, 8, 12, 16], dtype=torch.int, device="cuda")
varying_offsets = torch.tensor(
[0, 1, 3, 6, 8, 10, 14, 15, 16], dtype=torch.int, device="cuda"
)
# Testing equal sized tables.
cache_hash_size_cumsum_0 = torch.tensor([0, 12, 24, 36, 48]).cuda()
linear_cache_indices_0 = torch.ops.fbgemm.linearize_cache_indices(
cache_hash_size_cumsum_0, indices, equal_offsets
)
self.assertTrue(
torch.equal(
linear_cache_indices_0.cpu(),
torch.tensor(
[10, 2, 3, 7, 13, 16, 17, 21, 26, 31, 30, 32, 41, 37, 36, 40],
dtype=torch.int,
),
)
)
# Testing partially cached tables.
cache_hash_size_cumsum_1 = torch.tensor([0, 12, -1, 24, 36]).cuda()
linear_cache_indices_1 = torch.ops.fbgemm.linearize_cache_indices(
cache_hash_size_cumsum_1, indices, equal_offsets
)
self.assertTrue(
torch.equal(
linear_cache_indices_1.cpu(),
torch.tensor(
[10, 2, 3, 7, 13, 16, 17, 21, 36, 36, 36, 36, 29, 25, 24, 28],
dtype=torch.int,
),
)
)
# Testing batched with varying pooling factor.
cache_hash_size_cumsum_2 = torch.tensor([0, 12, -1, 24, 36]).cuda()
linear_cache_indices_2 = torch.ops.fbgemm.linearize_cache_indices(
cache_hash_size_cumsum_2, indices, varying_offsets
)
self.assertTrue(
torch.equal(
linear_cache_indices_2.cpu(),
torch.tensor(
[10, 2, 3, 19, 13, 16, 17, 21, 36, 36, 36, 36, 36, 36, 24, 28],
dtype=torch.int,
),
)
)
# Testing when multiple features share the same table.
cache_hash_size_cumsum_3 = torch.tensor([0, 0, 12, 12, 24]).cuda()
linear_cache_indices_3 = torch.ops.fbgemm.linearize_cache_indices(
cache_hash_size_cumsum_3, indices, varying_offsets
)
self.assertTrue(
torch.equal(
linear_cache_indices_3.cpu(),
torch.tensor(
[10, 2, 3, 7, 1, 4, 5, 9, 14, 19, 18, 20, 17, 13, 12, 16],
dtype=torch.int,
),
)
)
# Testing equal sized tables + pruned indices
cache_hash_size_cumsum_4 = torch.tensor([0, 12, 24, 36, 48]).cuda()
linear_cache_indices_4 = torch.ops.fbgemm.linearize_cache_indices(
cache_hash_size_cumsum_4, pruned_indices, equal_offsets
)
self.assertTrue(
torch.equal(
linear_cache_indices_4.cpu(),
torch.tensor(
[10, 48, 3, 7, 13, 16, 48, 21, 26, 48, 30, 32, 41, 37, 48, 40],
dtype=torch.int,
),
)
)
# Testing batched with varying pooling factor + pruned indices
cache_hash_size_cumsum_5 = torch.tensor([0, 12, -1, 24, 36]).cuda()
linear_cache_indices_5 = torch.ops.fbgemm.linearize_cache_indices(
cache_hash_size_cumsum_5, pruned_indices, varying_offsets
)
self.assertTrue(
torch.equal(
linear_cache_indices_5.cpu(),
torch.tensor(
[10, 36, 3, 19, 13, 16, 36, 21, 36, 36, 36, 36, 36, 36, 36, 28],
dtype=torch.int,
),
)
)
@unittest.skipIf(*gpu_unavailable)
def test_linearize_cache_indices_from_row_idx(self) -> None:
update_row_indices = torch.tensor(
[10, 2, 3, 7, 1, 4, 5, 9, 2, 7, 6, 8, 5, 1, 0, 4],
dtype=torch.int,
device="cuda",
)
update_table_indices = torch.tensor(
[0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3],
dtype=torch.int,
device="cuda",
)
varying_update_table_indices = torch.tensor(
[0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3],
dtype=torch.int,
device="cuda",
)
# Testing equal sized tables.
cache_hash_size_cumsum_0 = torch.tensor([0, 12, 24, 36, 48]).cuda()
linear_cache_indices_0 = torch.ops.fbgemm.linearize_cache_indices_from_row_idx(
cache_hash_size_cumsum_0,
update_table_indices,
update_row_indices,
)
self.assertTrue(
torch.equal(
linear_cache_indices_0.cpu(),
torch.tensor(
[10, 2, 3, 7, 13, 16, 17, 21, 26, 31, 30, 32, 41, 37, 36, 40],
dtype=torch.int,
),
)
)
# Testing partially cached tables.
cache_hash_size_cumsum_1 = torch.tensor([0, 12, -1, 24, 36]).cuda()
linear_cache_indices_1 = torch.ops.fbgemm.linearize_cache_indices_from_row_idx(
cache_hash_size_cumsum_1,
update_table_indices,
update_row_indices,
)
self.assertTrue(
torch.equal(
linear_cache_indices_1.cpu(),
torch.tensor(
[10, 2, 3, 7, 13, 16, 17, 21, 36, 36, 36, 36, 29, 25, 24, 28],
dtype=torch.int,
),
)
)
# Testing batched with varying pooling factor.
cache_hash_size_cumsum_2 = torch.tensor([0, 12, -1, 24, 36]).cuda()
linear_cache_indices_2 = torch.ops.fbgemm.linearize_cache_indices_from_row_idx(
cache_hash_size_cumsum_2,
varying_update_table_indices,
update_row_indices,
)
self.assertTrue(
torch.equal(
linear_cache_indices_2.cpu(),
torch.tensor(
[10, 2, 3, 19, 13, 16, 17, 21, 36, 36, 36, 36, 36, 36, 24, 28],
dtype=torch.int,
),
)
)
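# lxu_cache_lookup test with a single cache set: indices that are not stored in the set
# should return location -1 (miss), while stored indices return their slot within the set.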
@unittest.skipIf(*gpu_unavailable)
@given(
associativity=st.sampled_from([1, DEFAULT_ASSOC]),
)
@settings(deadline=None)
def test_lxu_cache_lookup(self, associativity: int) -> None:
max_index: int = 8000
# Use a single cache set to avoid dealing with the cache set hash algorithm.
lxu_cache_state_gpu = (
torch.arange(associativity, dtype=torch.int64).unsqueeze(0).cuda()
)
# Testing all miss.
linear_cache_indices_0 = (
torch.tensor([32, 33, 34, 35, 36, 100, 1000, 1725])
if associativity <= 32
else torch.tensor([64, 65, 66, 67, 68, 100, 1000, 1725])
).cuda()
lxu_locations = torch.ops.fbgemm.lxu_cache_lookup(
linear_cache_indices_0, lxu_cache_state_gpu, max_index
)
torch.testing.assert_close(
lxu_locations,
torch.full_like(lxu_locations, -1),
)
# Testing all hits.
cache_indices_1 = torch.randint(0, associativity, (associativity,))
linear_cache_indices_1 = cache_indices_1.cuda()
lxu_locations = torch.ops.fbgemm.lxu_cache_lookup(
linear_cache_indices_1, lxu_cache_state_gpu, max_index
)
torch.testing.assert_close(
lxu_locations.cpu(),
cache_indices_1.int(),
)
# Testing mixture.
miss_cache_indices_0 = torch.randint(associativity, max_index // 2, (10,))
hit_cache_indices_0 = torch.randint(0, associativity, (8,))
miss_cache_indices_1 = torch.randint(max_index // 2, max_index, (16,))
hit_cache_indices_1 = torch.randint(0, associativity, (8,))
linear_cache_indices_2 = torch.cat(
[
miss_cache_indices_0,
hit_cache_indices_0,
miss_cache_indices_1,
hit_cache_indices_1,
]
).cuda()
lxu_locations = torch.ops.fbgemm.lxu_cache_lookup(
linear_cache_indices_2, lxu_cache_state_gpu, max_index
)
expected_result = torch.cat(
[
torch.full_like(miss_cache_indices_0, -1),
hit_cache_indices_0,
torch.full_like(miss_cache_indices_1, -1),
hit_cache_indices_1,
]
).int()
torch.testing.assert_close(
lxu_locations.cpu(),
expected_result,
)
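# lxu_cache_locking_counter_decrement should decrement each referenced cache slot's
# counter exactly once, regardless of how many times that slot appears in
# lxu_cache_locations (hence the reference computation iterates over a set of locations).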
@unittest.skipIf(*gpu_unavailable)
@given(
cache_sets=st.integers(min_value=10, max_value=300),
)
@settings(verbosity=Verbosity.verbose, max_examples=MAX_EXAMPLES, deadline=None)
def test_lxu_cache_locking_counter_decrement(
self,
cache_sets: int,
) -> None:
warp_size = DEFAULT_ASSOC
N = cache_sets * warp_size
lxu_cache_locking_counter = torch.randint(
low=1,
high=3,
size=[cache_sets, warp_size],
device="cuda",
dtype=torch.int32,
)
counter_ref = lxu_cache_locking_counter.tolist()
lxu_cache_locations_list = []
lxu_cache_locations_set = set()
for _ in range(3 * N):
location = random.randrange(-1, N)
lxu_cache_locations_list.append(location)
lxu_cache_locations_set.add(location)
for idx in lxu_cache_locations_set:
if idx >= 0:
q, r = idx // warp_size, idx % warp_size
counter_ref[q][r] -= 1
counter_ref = torch.tensor(counter_ref, device="cuda", dtype=torch.int32)
lxu_cache_locations = torch.tensor(
lxu_cache_locations_list, device="cuda", dtype=torch.int32
)
torch.ops.fbgemm.lxu_cache_locking_counter_decrement(
lxu_cache_locking_counter, lxu_cache_locations
)
self.assertTrue(torch.equal(lxu_cache_locking_counter, counter_ref))
@unittest.skipIf(*gpu_unavailable)
@given(
T=st.integers(min_value=1, max_value=5),
D=st.integers(min_value=2, max_value=64),
log_E=st.integers(min_value=2, max_value=3),
N=st.integers(min_value=0, max_value=50),
weights_ty=st.sampled_from(
[
SparseType.FP32,
SparseType.FP16,
SparseType.INT8,
SparseType.INT4,
SparseType.INT2,
]
),
output_dtype=st.sampled_from(
[
SparseType.FP32,
SparseType.FP16,
SparseType.INT8,
]
),
use_cpu=st.booleans()
if (gpu_available and not TEST_WITH_ROCM)
else st.just(False)
if (gpu_available and TEST_WITH_ROCM)
else st.just(True),
test_internal=st.booleans(),
)
@settings(verbosity=Verbosity.verbose, max_examples=MAX_EXAMPLES, deadline=None)
def test_embedding_inplace_update(
self,
T: int, # num of embedding tables
D: int, # embedding dim
log_E: int, # embedding table row number
N: int, # num of update rows per table
weights_ty: SparseType,
output_dtype: SparseType,
use_cpu: bool,
test_internal: bool, # test with OSS op or internal customized op
) -> None:
D_alignment = max(weights_ty.align_size(), output_dtype.align_size())
D = round_up(D, D_alignment)
Ds = [
round_up(
np.random.randint(low=int(max(0.25 * D, 1)), high=int(1.0 * D)),
D_alignment,
)
for _ in range(T)
]
E = int(10**log_E)
Es = [np.random.randint(low=int(0.5 * E), high=int(2.0 * E)) for _ in range(T)]
row_alignment = 1 if use_cpu else 16
current_device = "cpu" if use_cpu else torch.cuda.current_device()
location = EmbeddingLocation.HOST if use_cpu else EmbeddingLocation.DEVICE
weights_ty_list = [weights_ty] * T
if open_source:
test_internal = False
        # create two embedding bag ops with random weights
locations = [location] * T
op = IntNBitTableBatchedEmbeddingBagsCodegen(
embedding_specs=[
("", E, D, W_TY, L)
for (E, D, W_TY, L) in zip(Es, Ds, weights_ty_list, locations)
],
output_dtype=output_dtype,
device=current_device,
)
op.fill_random_weights()
op_ref = IntNBitTableBatchedEmbeddingBagsCodegen(
embedding_specs=[
("", E, D, W_TY, L)
for (E, D, W_TY, L) in zip(Es, Ds, weights_ty_list, locations)
],
output_dtype=output_dtype,
device=current_device,
)
op_ref.fill_random_weights()
# randomly generate update table and row indices
update_table_indices = []
update_table_indices2 = []
update_row_indices = []
update_row_indices2 = []
for t in range(T):
n = np.random.randint(low=0, high=N) if N > 0 else 0
if n == 0:
continue
update_table_indices.append(t)
update_row_id_list = random.sample(range(Es[t]), n)
update_row_indices.append(update_row_id_list)
update_table_indices2.extend([t] * n)
update_row_indices2.extend(update_row_id_list)
# generate update tensor based on weights from "op_ref" embedding table
update_weights_list = []
ref_split_weights = op_ref.split_embedding_weights(split_scale_shifts=False)
update_weight_size = sum(
[
rounded_row_size_in_bytes(
Ds[t],
weights_ty_list[t],
row_alignment,
)
for t in update_table_indices2
]
)
update_weights_tensor2 = torch.randint(
low=0,
high=255,
size=(update_weight_size,),
dtype=torch.uint8,
device=current_device,
)
update_offsets = 0
for i in range(len(update_table_indices)):
table_idx = update_table_indices[i]
(ref_weights, _) = ref_split_weights[table_idx]
D_bytes = rounded_row_size_in_bytes(
Ds[table_idx], weights_ty_list[table_idx], row_alignment
)
update_weights = []
for row_idx in update_row_indices[i]:
update_weights.append(ref_weights[row_idx].tolist())
update_weights_tensor2[
update_offsets : update_offsets + D_bytes
] = ref_weights[row_idx]
update_offsets += D_bytes
update_weights_tensor = torch.tensor(
update_weights,
device=current_device,
dtype=torch.uint8,
)
update_weights_list.append(update_weights_tensor)
# run inplace update on "op" embedding table
if not test_internal:
# Test scatter_ based OSS solution
op.embedding_inplace_update(
update_table_indices,
update_row_indices,
update_weights_list,
)
else:
# Test customized op
op.embedding_inplace_update_internal(
update_table_indices2,
update_row_indices2,
update_weights_tensor2,
)
        # verify that the updated rows in "op" match the corresponding rows in "op_ref"
split_weights = op.split_embedding_weights(split_scale_shifts=False)
for i in range(len(update_table_indices)):
t = update_table_indices[i]
for r in update_row_indices[i]:
(weights, _) = split_weights[t]
(ref_weights, _) = ref_split_weights[t]
self.assertEqual(weights.size(), ref_weights.size())
torch.testing.assert_close(
weights[r],
ref_weights[r],
rtol=1e-2,
atol=1e-2,
equal_nan=True,
)
@given(
T=st.integers(min_value=1, max_value=5),
D=st.integers(min_value=2, max_value=128),
log_E=st.integers(min_value=2, max_value=3),
weights_precision=st.sampled_from(
[SparseType.FP16, SparseType.FP32, SparseType.INT8]
),
mixed=st.booleans(),
use_cache=st.booleans(),
output_dtype=st.sampled_from([SparseType.FP32, SparseType.FP16]),
num_indices_per_table=st.integers(min_value=1, max_value=5),
)
@settings(
verbosity=Verbosity.verbose,
max_examples=MAX_EXAMPLES,
deadline=None,
)
def test_reset_embedding_weight_momentum(
self,
T: int,
D: int,
log_E: int,
weights_precision: SparseType,
mixed: bool,
use_cache: bool,
output_dtype: SparseType,
num_indices_per_table: int,
) -> None:
emb_op = SplitTableBatchedEmbeddingBagsCodegen
E = int(10**log_E)
D = D * 4
Ds: List[int] = []
Es: List[int] = []
if not mixed:
Ds = [D] * T
Es = [E] * T
else:
Ds = [
round_up(np.random.randint(low=int(0.25 * D), high=int(1.0 * D)), 4)
for _ in range(T)
]
Es = [
np.random.randint(low=int(0.5 * E), high=int(2.0 * E)) for _ in range(T)
]
compute_device = ComputeDevice.CUDA
if use_cache:
managed = [EmbeddingLocation.MANAGED_CACHING] * T
if mixed:
average_D = sum(Ds) // T
for t, d in enumerate(Ds):
managed[t] = (
EmbeddingLocation.DEVICE if d < average_D else managed[t]
)
else:
managed = [
np.random.choice(
[
EmbeddingLocation.DEVICE,
EmbeddingLocation.MANAGED,
]
)
for _ in range(T)
]
optimizer = OptimType.EXACT_ROWWISE_ADAGRAD
cc = emb_op(
embedding_specs=[
(E, D, M, compute_device) for (E, D, M) in zip(Es, Ds, managed)
],
optimizer=optimizer,
weights_precision=weights_precision,
output_dtype=output_dtype,
)
pruned_indices: List[int] = []
pruned_indices_offsets: List[int] = [0]
logical_table_ids: List[int] = []
buffer_ids: List[int] = []
for i in range(len(Es)):
indices = [
np.random.randint(low=1, high=int(Es[i] - 2))
for _ in range(num_indices_per_table)
]
pruned_indices += indices
pruned_indices_offsets.append(
pruned_indices_offsets[i] + num_indices_per_table
)
logical_table_ids.append(i)
buffer_ids.append(i)
pruned_indices_tensor = to_device(
torch.tensor(pruned_indices, dtype=torch.int64, requires_grad=False), False
)
pruned_indices_offsets_tensor = to_device(
torch.tensor(
pruned_indices_offsets, dtype=torch.int64, requires_grad=False
),
False,
)
logical_table_ids_tensor = to_device(
torch.tensor(logical_table_ids, dtype=torch.int32, requires_grad=False),
False,
)
buffer_ids_tensor = to_device(
torch.tensor(buffer_ids, dtype=torch.int32, requires_grad=False), False
)
momentum1: List[Tensor] = [
s for (s,) in cc.split_optimizer_states()
] # List[rows]
weight: List[Tensor] = cc.split_embedding_weights() # List[(rows, dim)]
for t in range(T):
momentum1[t].fill_(1)
weight[t].fill_(1)
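        # Everything starts at 1; check_weight_momentum(1) below confirms that,
        # and check_weight_momentum(0) after reset_embedding_weight_momentum
        # confirms the pruned rows' weight and momentum were zeroed.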
def check_weight_momentum(v: int) -> None:
for i in range(len(pruned_indices)):
logical_id = i // num_indices_per_table
table_momentum1 = momentum1[logical_id]
table_weight = weight[logical_id]
dim = Ds[logical_id]
expected_row_momentum1 = to_device(
torch.tensor(v, dtype=torch.float32), False
)
expected_row_weight = to_device(
torch.tensor([v] * dim, dtype=weights_precision.as_dtype()),
False,
)
pruned_index = pruned_indices[i]
row_weight = table_weight[pruned_index]
if weights_precision == SparseType.INT8:
row_weight = row_weight[:-INT8_EMB_ROW_DIM_OFFSET]
self.assertEqual(table_momentum1[pruned_index], expected_row_momentum1)
torch.testing.assert_close(
row_weight,
expected_row_weight,
rtol=0,
atol=0,
equal_nan=True,
)
check_weight_momentum(1)
cc.reset_embedding_weight_momentum(
pruned_indices_tensor,
pruned_indices_offsets_tensor,
logical_table_ids_tensor,
buffer_ids_tensor,
)
check_weight_momentum(0)
if __name__ == "__main__":
unittest.main()
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import hypothesis.strategies as st
import numpy as np
import torch
from hypothesis import given, settings, Verbosity
try:
# pyre-ignore[21]
from fbgemm_gpu import open_source # noqa: F401
# pyre-ignore[21]
from test_utils import gpu_unavailable
except Exception:
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops")
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops_cpu")
from fbgemm_gpu.test.test_utils import gpu_unavailable
MAX_EXAMPLES = 20
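# Illustrative reference (exposition only; not used by the tests): the tests
# below construct their expected output by splitting the [B, sum(T_r), D]
# gradient per rank, flattening each chunk, and concatenating rank by rank,
# which is the behavior recat_embedding_grad_output is checked against.
def _reference_recat_grad_output(grad_output, num_features_per_rank):
    chunks = grad_output.split(num_features_per_rank, dim=1)
    return torch.cat([chunk.contiguous().view(-1) for chunk in chunks], dim=0)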
class LayoutTransformOpsTest(unittest.TestCase):
@unittest.skipIf(*gpu_unavailable)
# pyre-fixme[56]
@given(
B=st.integers(min_value=1, max_value=20),
T=st.integers(min_value=1, max_value=20),
D=st.integers(min_value=2, max_value=20),
W=st.integers(min_value=1, max_value=20),
)
@settings(verbosity=Verbosity.verbose, max_examples=MAX_EXAMPLES, deadline=None)
def test_recat_embedding_grad_output(self, B: int, T: int, D: int, W: int) -> None:
num_features_per_rank = np.random.randint(low=1, high=20, size=(W,)).tolist()
grad_output = torch.randn(B, sum(num_features_per_rank), D).float().cuda()
grad_outputs_by_rank = grad_output.split(num_features_per_rank, dim=1)
sharded_grad_output = torch.cat(
[
grad_output_by_rank.contiguous().view(-1)
for grad_output_by_rank in grad_outputs_by_rank
],
dim=0,
)
sharded_grad_output_impl = torch.ops.fbgemm.recat_embedding_grad_output(
grad_output, num_features_per_rank
)
torch.testing.assert_close(
sharded_grad_output_impl.cpu(), sharded_grad_output.cpu()
)
@unittest.skipIf(*gpu_unavailable)
# pyre-fixme[56]
@given(
B=st.integers(min_value=1, max_value=20),
W=st.integers(min_value=1, max_value=20),
cuda=st.booleans(),
)
@settings(verbosity=Verbosity.verbose, max_examples=MAX_EXAMPLES, deadline=None)
def test_recat_embedding_grad_output_mixed_D(
self, B: int, W: int, cuda: bool
) -> None:
num_features_per_rank = np.random.randint(low=1, high=20, size=(W,)).tolist()
global_T = sum(num_features_per_rank)
mixed_D_list = np.random.randint(low=1, high=10, size=(global_T,))
grad_output = torch.randn(B, sum(mixed_D_list)).float().cuda()
if cuda:
grad_output = grad_output.cuda()
num_feature_offsets_list = torch.tensor(
[0] + np.cumsum(num_features_per_rank).tolist()
)
dim_sum_per_rank = [
sum(
mixed_D_list[
num_feature_offsets_list[i] : num_feature_offsets_list[i + 1]
]
)
for i in range(W)
]
grad_outputs_by_rank = grad_output.split(dim_sum_per_rank, dim=1)
sharded_grad_output = torch.cat(
[
grad_output_by_rank.contiguous().view(-1)
for grad_output_by_rank in grad_outputs_by_rank
],
dim=0,
)
sharded_grad_output_impl = torch.ops.fbgemm.recat_embedding_grad_output_mixed_D(
grad_output, dim_sum_per_rank
)
torch.testing.assert_close(
sharded_grad_output_impl.cpu(), sharded_grad_output.cpu()
)
@unittest.skipIf(*gpu_unavailable)
# pyre-fixme[56]
@given(
B=st.integers(min_value=1, max_value=20),
W=st.integers(min_value=1, max_value=20),
)
@settings(verbosity=Verbosity.verbose, max_examples=MAX_EXAMPLES, deadline=None)
def test_recat_embedding_grad_output_mixed_D_batch(self, B: int, W: int) -> None:
num_features_per_rank = np.random.randint(low=1, high=20, size=(W,)).tolist()
global_T = sum(num_features_per_rank)
mixed_D_list = np.random.randint(low=1, high=10, size=(global_T,))
grad_output = torch.randn(B, sum(mixed_D_list)).float().cuda()
num_feature_offsets_list = torch.tensor(
[0] + np.cumsum(num_features_per_rank).tolist()
)
dim_sum_per_rank = [
sum(
mixed_D_list[
num_feature_offsets_list[i] : num_feature_offsets_list[i + 1]
]
)
for i in range(W)
]
dim_sum_per_rank_tensor = torch.cuda.LongTensor(dim_sum_per_rank)
cumsum_dim_sum_per_rank_tensor = torch.cuda.LongTensor(
np.cumsum([0] + dim_sum_per_rank)[:-1]
)
grad_outputs_by_rank = grad_output.split(dim_sum_per_rank, dim=1)
sharded_grad_output = torch.cat(
[
grad_output_by_rank.contiguous().view(-1)
for grad_output_by_rank in grad_outputs_by_rank
],
dim=0,
)
sharded_grad_output_impl = (
torch.ops.fbgemm.recat_embedding_grad_output_mixed_D_batch(
grad_output.cuda(),
dim_sum_per_rank_tensor.cuda(),
cumsum_dim_sum_per_rank_tensor.cuda(),
)
)
torch.testing.assert_close(
sharded_grad_output_impl.cpu(), sharded_grad_output.cpu()
)
num_features_per_rank = np.random.randint(low=1, high=20, size=(W,)).tolist()
global_T = sum(num_features_per_rank)
mixed_D_list = np.random.randint(low=1, high=10, size=(global_T,))
grad_output = torch.randn(B, sum(mixed_D_list)).float().cuda()
num_feature_offsets_list = torch.tensor(
[0] + np.cumsum(num_features_per_rank).tolist()
)
dim_sum_per_rank = [
sum(
mixed_D_list[
num_feature_offsets_list[i] : num_feature_offsets_list[i + 1]
]
)
for i in range(W)
]
dim_sum_per_rank_tensor = torch.cuda.LongTensor(dim_sum_per_rank)
cumsum_dim_sum_per_rank_tensor = torch.cuda.LongTensor(
np.cumsum([0] + dim_sum_per_rank)[:-1]
)
grad_outputs_by_rank = grad_output.split(dim_sum_per_rank, dim=1)
sharded_grad_output = torch.cat(
[
grad_output_by_rank.contiguous().view(-1)
for grad_output_by_rank in grad_outputs_by_rank
],
dim=0,
)
sharded_grad_output_impl = (
torch.ops.fbgemm.recat_embedding_grad_output_mixed_D_batch(
grad_output, dim_sum_per_rank_tensor, cumsum_dim_sum_per_rank_tensor
)
)
torch.testing.assert_close(
sharded_grad_output_impl.cpu(), sharded_grad_output.cpu()
)
if __name__ == "__main__":
unittest.main()
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
# pyre-unsafe
"""Check Python source code contains Meta copyright header
"""
from __future__ import annotations
import os
import sys
import click
def process_header(header, comment):
lines = header.split("\n")
new_lines = []
for line in lines:
if line is None or line == "":
new_lines.append(comment)
else:
new_lines.append(comment + " " + line)
return "\n".join(new_lines) + "\n"
HEADER = """Copyright (c) Meta Platforms, Inc. and affiliates.
All rights reserved.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
"""
HEADER_lines = HEADER.splitlines()[1:]
PY_HEADER = process_header(HEADER, "#")
CPP_HEADER = process_header(HEADER, "//")
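# process_header turns HEADER into a comment block: empty lines become a bare
# comment marker and every other line is prefixed with "<comment> ", e.g.
# process_header("A\n\nB", "#") == "# A\n#\n# B\n". Note that HEADER_lines
# drops the first (copyright) line, so dfs() below only requires the remaining
# header lines to be present in each file.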
def dfs(root_path: str) -> list[str]:
"""DFS source code tree to find python files missing header
Parameters
----------
root_path : str
root source directory path
Returns
-------
list[str]
        paths of the files that are missing the header
"""
ret = []
for root, _, files in os.walk(root_path, topdown=False):
for name in files:
path = os.path.join(root, name)
if path.endswith(".py"):
with open(path) as fi:
src = fi.read()
flag = True
for line in HEADER_lines:
if line not in src:
flag = False
break
if not flag:
ret.append(path)
return ret
def fix_header(file_list: list[str]) -> None:
"""Adding Meta header to to source files
Parameters
----------
file_list : list[str]
        paths of the files that are missing the header
"""
for path in file_list:
src = ""
with open(path) as fi:
src = fi.read()
with open(path, "w") as fo:
fo.write(PY_HEADER)
fo.write(src)
@click.command()
@click.option(
"--path", help="Root directory of source to be checked", required=True, type=str
)
@click.option(
"--fixit", default=False, help="Fix missing header", required=False, type=bool
)
def check_header(path, fixit):
ret = dfs(path)
if len(ret) == 0:
sys.exit(0)
print("Need to add Meta header to the following files.")
print("----------------File List----------------")
for line in ret:
print(line)
print("-----------------------------------------")
if fixit:
fix_header(ret)
sys.exit(1)
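# Hypothetical invocation (script name assumed; the flags are the click options
# defined above):
#   python check_meta_header.py --path=fbgemm_gpu --fixit=True
# The command exits 0 when every .py file already contains the header;
# otherwise it lists the offending files, prepends PY_HEADER to them when
# --fixit is set, and exits 1.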
if __name__ == "__main__":
check_header()
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
import os
import subprocess
import sys
import pytorch_sphinx_theme
for dir_i in os.listdir("../.."):
if dir_i == "fbgemm_gpu":
continue
possible_dir = os.path.join("../..", dir_i)
if os.path.isdir(possible_dir):
sys.path.insert(0, possible_dir)
# Doxygen
subprocess.call("doxygen Doxyfile.in", shell=True)
# -- Project information -----------------------------------------------------
highlight_language = "c++"
project = "fbgemm"
copyright = "2022, FBGEMM team"
author = "FBGEMM team"
# The full version, including alpha/beta/rc tags
release = "0.1.2"
# breathe_projects_source = {"auto": ("../src/", ["auto_function.h", "auto_class.h"])}
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ["sphinx.ext.napoleon", "sphinx.ext.autodoc"]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
extensions = ["sphinx.ext.intersphinx", "breathe", "sphinx.ext.autodoc"]
intersphinx_mapping = {"pytorch": ("https://pytorch.org/docs/master", None)}
# Setup absolute paths for communicating with breathe / exhale where
# items are expected / should be trimmed by.
# This file is {repo_root}/docs/cpp/source/conf.py
breathe_projects = {"fbgemm_gpu": "../build/xml/", "codegen": "../build/xml/codegen/"}
breathe_default_project = "fbgemm_gpu"
# Tell sphinx what the primary language being documented is.
primary_domain = "cpp"
# Tell sphinx what the pygments highlight language should be.
highlight_language = "cpp"
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "pytorch_sphinx_theme"
html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()]
html_theme_options = {
"pytorch_project": "fbgemm",
"collapse_navigation": True,
"analytics_id": "UA-117752657-2",
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ["_static"]
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
# flake8: noqa F401
from typing import Optional
try:
# Internal
from .embedding_common_code_generator import *
except ImportError:
# OSS
from embedding_common_code_generator import *
def _generate(**kwargs: Any) -> None:
gen_args = kwargs["args"]
kwargs["args"] = gen_args["cuda"]
optimizer = kwargs.get("optimizer")
# Generate cuda host code
template = env.get_template("embedding_optimizer_split_template.cu")
write(
f"gen_embedding_optimizer_{optimizer}_split_cuda.cu", template.render(**kwargs)
)
# Generate host code
template = env.get_template("embedding_optimizer_split_host_template.cpp")
write(f"gen_embedding_optimizer_{optimizer}_split.cpp", template.render(**kwargs))
template = env.get_template("embedding_optimizer_split_kernel_template.cu")
write(
f"gen_embedding_optimizer_{optimizer}_split_kernel.cu",
template.render(**kwargs),
)
# Generates Python invoker for CUDA
template = env.get_template("split_embedding_optimizer_codegen.template")
write(
f"split_embedding_optimizer_{optimizer}.py",
template.render(is_fbcode=args.is_fbcode, **kwargs),
)
# Generate optimizer kernel
template = env.get_template("embedding_optimizer_split_device_kernel_template.cuh")
write(
f"gen_embedding_optimizer_{optimizer}_split_device_kernel.cuh",
template.render(**kwargs),
)
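# With optimizer="rowwise_adagrad" (the only optimizer generated by
# optimizer_codegen() below, assuming rowwise_adagrad() supplies that name),
# the write() calls above emit:
#   gen_embedding_optimizer_rowwise_adagrad_split_cuda.cu
#   gen_embedding_optimizer_rowwise_adagrad_split.cpp
#   gen_embedding_optimizer_rowwise_adagrad_split_kernel.cu
#   split_embedding_optimizer_rowwise_adagrad.py
#   gen_embedding_optimizer_rowwise_adagrad_split_device_kernel.cuh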
def generate(**kwargs: Any) -> None:
_generate(
optimizer_class_name="".join(
[optim.capitalize() for optim in kwargs["optimizer"].split("_")]
),
**kwargs,
)
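# e.g. optimizer="rowwise_adagrad" yields optimizer_class_name="RowwiseAdagrad".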
def optimizer_codegen(
install_dir: Optional[str] = None, is_fbcode: Optional[bool] = None
) -> None:
if install_dir is not None and len(install_dir) != 0:
args.install_dir = install_dir
if is_fbcode is not None:
args.is_fbcode = is_fbcode
# Generate optimizers
generate(**(rowwise_adagrad()))
def main() -> None:
optimizer_codegen()
if __name__ == "__main__":
main()
|