#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import importlib
import sys
import unittest
from d2go.initializer import initialize_all
from d2go.registry import bootstrap
# manually initialize without bootstrapping the registries
initialize_all(boostrap_registries=False)
def _unimport(package_name):
# remove submodules from sys.modules
modules = [
key
for key in sys.modules
if (
(key == package_name or key.startswith(package_name + "."))
# prevent the parent package of this file from being removed
and not __name__.startswith(key)
)
]
for key in sorted(modules, reverse=True):
sys.modules.pop(key)
# invalidate the cache of removed submodules
importlib.invalidate_caches()
class TestRegistryBootstrap(unittest.TestCase):
def setUp(self):
# NOTE: reload this file since the imported modules (e.g. `d2go.registry.bootstrap`)
# might be "unimported" during `tearDown`.
importlib.reload(sys.modules[__name__])
def tearDown(self):
# NOTE: "unimport" bootstrapped libraries, so that each test runs like starting
# a new python program.
# TODO: match list with the bootstrapped packages
_unimport("d2go.registry")
_unimport("mobile_cv")
_unimport("detectron2")
def test_bootstrap_core_lib(self):
self.assertFalse(bootstrap._IS_BOOTSTRAPPED)
bootstrap.bootstrap_registries(enable_cache=False, catch_exception=False)
self.assertTrue(bootstrap._IS_BOOTSTRAPPED)
def test_bootstrap_with_cache(self):
self.assertFalse(bootstrap._IS_BOOTSTRAPPED)
bootstrap.bootstrap_registries(enable_cache=True, catch_exception=False)
self.assertTrue(bootstrap._IS_BOOTSTRAPPED)
| d2go-main | tests/skip_init/test_registries_bootstrap.py |
d2go-main | tests/trainer/__init__.py |
|
#!/usr/bin/env fbpython
# (c) Meta Platforms, Inc. and affiliates. Confidential and proprietary.
import os
import unittest
from typing import Dict, List
import torch
from d2go.config import CfgNode
from d2go.modeling import modeling_hook as mh
from d2go.registry.builtin import META_ARCH_REGISTRY
from d2go.runner.default_runner import Detectron2GoRunner
from d2go.trainer.activation_checkpointing import (
ActivationCheckpointModelingHook,
add_activation_checkpoint_configs,
)
from d2go.utils.testing.data_loader_helper import create_local_dataset
from d2go.utils.testing.helper import tempdir
from detectron2.structures import ImageList
from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import (
CheckpointWrapper,
)
@META_ARCH_REGISTRY.register()
class MetaArchForTestAC(torch.nn.Module):
def __init__(self, cfg: CfgNode) -> None:
super().__init__()
self.conv = torch.nn.Conv2d(3, 4, kernel_size=3, stride=1, padding=1)
self.bn = torch.nn.BatchNorm2d(4)
self.relu = torch.nn.ReLU(inplace=True)
self.linear = torch.nn.Linear(4, 4)
self.avgpool = torch.nn.AdaptiveAvgPool2d((1, 1))
@property
def device(self) -> torch._C.device:
return self.conv.weight.device
def forward(self, inputs: List[Dict[str, torch.Tensor]]) -> Dict[str, torch.Tensor]:
images = [x["image"] for x in inputs]
images = ImageList.from_tensors(images, 1)
ret = self.conv(images.tensor)
ret = self.bn(ret)
ret = self.relu(ret)
ret = self.avgpool(ret)
return {"loss": ret.norm()}
def _get_cfg(runner, output_dir, dataset_name):
cfg = runner.get_default_cfg()
cfg.MODEL.DEVICE = "cpu"
cfg.MODEL.META_ARCHITECTURE = "MetaArchForTestAC"
cfg.DATASETS.TRAIN = (dataset_name,)
cfg.DATASETS.TEST = (dataset_name,)
cfg.INPUT.MIN_SIZE_TRAIN = (10,)
cfg.INPUT.MIN_SIZE_TEST = (10,)
cfg.SOLVER.MAX_ITER = 3
cfg.SOLVER.STEPS = []
cfg.SOLVER.WARMUP_ITERS = 1
cfg.SOLVER.CHECKPOINT_PERIOD = 3
cfg.SOLVER.IMS_PER_BATCH = 2
cfg.MODEL_EMA.ENABLED = True
cfg.OUTPUT_DIR = output_dir
return cfg
class TestActivationCheckpointing(unittest.TestCase):
def test_ac_config(self) -> None:
cfg = CfgNode()
add_activation_checkpoint_configs(cfg)
self.assertTrue(isinstance(cfg.ACTIVATION_CHECKPOINT, CfgNode))
self.assertEqual(cfg.ACTIVATION_CHECKPOINT.REENTRANT, False)
self.assertEqual(
cfg.ACTIVATION_CHECKPOINT.AUTO_WRAP_POLICY, "always_wrap_policy"
)
self.assertEqual(cfg.ACTIVATION_CHECKPOINT.AUTO_WRAP_LAYER_CLS, [])
def test_ac_modeling_hook_apply(self) -> None:
"""Check that the hook is registered"""
self.assertTrue("ActivationCheckpointModelingHook" in mh.MODELING_HOOK_REGISTRY)
cfg = CfgNode()
add_activation_checkpoint_configs(cfg)
ac_hook = ActivationCheckpointModelingHook(cfg)
model = MetaArchForTestAC(cfg)
ac_hook.apply(model)
children = list(model.children())
self.assertTrue(len(children) == 5)
for child in children:
self.assertTrue(isinstance(child, CheckpointWrapper))
def test_ac_modeling_hook_autowrap(self) -> None:
cfg = CfgNode()
add_activation_checkpoint_configs(cfg)
cfg.ACTIVATION_CHECKPOINT.AUTO_WRAP_POLICY = "layer_based_auto_wrap_policy"
cfg.ACTIVATION_CHECKPOINT.AUTO_WRAP_LAYER_CLS = ["Conv2d", "BatchNorm2d"]
ac_hook = ActivationCheckpointModelingHook(cfg)
model = MetaArchForTestAC(cfg)
ac_hook.apply(model)
self.assertTrue(isinstance(model.conv, CheckpointWrapper))
self.assertTrue(isinstance(model.bn, CheckpointWrapper))
self.assertFalse(isinstance(model.linear, CheckpointWrapper))
@tempdir
def test_ac_runner(self, tmp_dir) -> None:
ds_name = create_local_dataset(tmp_dir, 5, 10, 10)
runner = Detectron2GoRunner()
cfg = _get_cfg(runner, tmp_dir, ds_name)
cfg.MODEL.MODELING_HOOKS = ["ActivationCheckpointModelingHook"]
cfg.ACTIVATION_CHECKPOINT.AUTO_WRAP_POLICY = "layer_based_auto_wrap_policy"
cfg.ACTIVATION_CHECKPOINT.AUTO_WRAP_LAYER_CLS = ["Conv2d", "BatchNorm2d"]
cfg.MODEL_EMA.DECAY_WARM_UP_FACTOR = -1
model = runner.build_model(cfg)
runner.do_train(cfg, model, resume=False)
self.assertTrue(os.path.exists(os.path.join(tmp_dir, "model_0000002.pth")))
# resume training onto a non-AC-wrapped model
cfg.MODEL.MODELING_HOOKS = []
cfg.SOLVER.MAX_ITER = 6
model = runner.build_model(cfg)
runner.do_train(cfg, model, resume=True)
self.assertTrue(os.path.exists(os.path.join(tmp_dir, "model_0000005.pth")))
| d2go-main | tests/trainer/test_activation_checkpointing.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import os
from d2go.initializer import initialize_all
# NOTE: by default a list of initializations will run whenever D2Go is first imported,
# so that users don't need to do any manual initialization other than importing `d2go`.
# An environment variable can be used to skip initialization for special cases like unit tests
skip_initialization = os.environ.get("D2GO_IMPORT_SKIP_INITIALIZATION", "0") == "1"
if not skip_initialization:
initialize_all()
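# --- Usage sketch (not from the repo): skipping import-time initialization. ---
# Grounded in the D2GO_IMPORT_SKIP_INITIALIZATION check above; pairing it with a manual
# initialize_all() call is an assumed pattern for unit tests, not an official recipe.
#
#     import os
#     os.environ["D2GO_IMPORT_SKIP_INITIALIZATION"] = "1"   # set before `import d2go`
#     import d2go                                           # no initialize_all() runs now
#     from d2go.initializer import initialize_all
#     initialize_all(boostrap_registries=False)             # initialize manually when ready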
| d2go-main | d2go/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from d2go.registry.bootstrap import bootstrap_registries
from mobile_cv.common.misc.oss_utils import fb_overwritable
_INITIALIZED = False
def initialize_all(boostrap_registries: bool = False) -> None:
global _INITIALIZED
if _INITIALIZED:
return
_INITIALIZED = True
_initialize_all(boostrap_registries=boostrap_registries)
def _initialize_all(boostrap_registries: bool) -> None:
_setup_env()
_register_builtin_datasets()
_populate_registries()
if boostrap_registries:
bootstrap_registries(enable_cache=True, catch_exception=True)
# fmt: off
@fb_overwritable()
def _setup_env():
# register torch vision ops
from torchvision.ops import nms # noqa
# setup Detectron2 environments
from detectron2.utils.env import setup_environment as setup_d2_environment # isort:skip
setup_d2_environment()
@fb_overwritable()
def _register_builtin_datasets():
# Register D2 builtin datasets
import detectron2.data # noqa F401
@fb_overwritable()
def _populate_registries():
from d2go import optimizer # noqa
from d2go.data import dataset_mappers # noqa
from d2go.modeling.backbone import fbnet_v2 # noqa
# fmt: on
| d2go-main | d2go/initializer.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Extend mobile_cv.torch.utils_pytorch.distributed_helper with D2/D2Go-specific
features; functions in this module share the same signatures as the ones from mobile_cv.
"""
import logging
from dataclasses import dataclass
from datetime import timedelta
from typing import Any, Callable, Dict, Optional, Tuple, TypeVar
import detectron2.utils.comm as d2_comm
import mobile_cv.torch.utils_pytorch.comm as mcv_comm
import torch
from d2go.config import CfgNode, temp_defrost
from d2go.utils.launch_environment import get_launch_environment
from mobile_cv.torch.utils_pytorch.comm import ( # noqa
BaseSharedContext,
get_shared_context,
set_shared_context,
)
from mobile_cv.torch.utils_pytorch.distributed_helper import (
DEFAULT_TIMEOUT,
DistributedParams,
enable_dist_process_groups,
launch as _launch,
launch_deco as _launch_deco,
save_return_deco,
)
logger = logging.getLogger(__name__)
_RT = TypeVar("_RT") # return type
@dataclass
class D2GoSharedContext(BaseSharedContext):
"""
Shared context that can be initialized before launching the workers and is
passed to all workers.
"""
runner_shared_context: Any
# BC-compatible
def get_local_rank() -> int:
return mcv_comm.get_local_rank()
# BC-compatible
def get_num_processes_per_machine() -> int:
return mcv_comm.get_local_size()
def _maybe_convert_to_cpu_run(args, backend):
if get_launch_environment() == "local" and not torch.cuda.is_available():
assert len(args) > 0, args
cfg = args[0]
if isinstance(cfg, CfgNode) and cfg.MODEL.DEVICE == "cuda":
logger.warning(
"Detected that CUDA is not available on this machine, set MODEL.DEVICE"
" to cpu and backend to GLOO"
)
with temp_defrost(cfg):
cfg.MODEL.DEVICE = "cpu"
backend = "GLOO"
return args, backend
# Modify mobile_cv's `default_distributed_worker` to also setup D2's comm module
def distributed_worker(
main_func: Callable[..., _RT],
args: Tuple[Any, ...],
kwargs: Dict[str, Any],
backend: str,
init_method: Optional[str] = None,
dist_params: Optional[DistributedParams] = None,
return_save_file: Optional[str] = None,
timeout: timedelta = DEFAULT_TIMEOUT,
shared_context: Optional[BaseSharedContext] = None,
) -> _RT:
if shared_context:
set_shared_context(
shared_context
) # set the global shared context from the args passed in by mp spawn
dist_params = dist_params or DistributedParams.from_environ()
args, backend = _maybe_convert_to_cpu_run(args, backend)
with enable_dist_process_groups(backend, init_method, dist_params, timeout):
d2_comm._LOCAL_PROCESS_GROUP = mcv_comm._LOCAL_PROCESS_GROUP
# Now the D2's comm module should be fully functional
deco = save_return_deco(return_save_file, dist_params.global_rank)
return deco(main_func)(*args, **kwargs)
def launch_deco(**kwargs):
"""
launch_deco for d2go distributed worker
"""
return _launch_deco(launcher=launch, **kwargs)
def launch(
main_func: Callable[..., _RT],
num_processes_per_machine: int,
num_machines: int = 1,
machine_rank: int = 0,
dist_url: Optional[str] = None,
backend: str = "NCCL",
always_spawn: bool = False,
launch_method: str = "multiprocessing",
shared_context: Optional[D2GoSharedContext] = None,
timeout: timedelta = DEFAULT_TIMEOUT,
args: Tuple[Any, ...] = (),
kwargs: Dict[str, Any] = None,
) -> Dict[int, _RT]:
"""
D2Go's specialized launch method; it does a few more things on top of mcv's launch:
- Automatically convert GPU to CPU if CUDA is not available.
- Add D2Go-specific initialization in the _distributed_worker.
"""
args, backend = _maybe_convert_to_cpu_run(args, backend)
return _launch(
main_func=main_func,
num_processes_per_machine=num_processes_per_machine,
num_machines=num_machines,
machine_rank=machine_rank,
dist_url=dist_url,
backend=backend,
always_spawn=always_spawn,
launch_method=launch_method,
shared_context=shared_context,
timeout=timeout,
args=args,
kwargs=kwargs,
_distributed_worker=distributed_worker,
)
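# --- Usage sketch (not from the repo): launching a worker with d2go.distributed.launch. ---
# The worker function, cfg object, and paths are placeholders; keyword arguments follow the
# launch() signature above. When running locally without CUDA, a CfgNode passed as the first
# positional arg has MODEL.DEVICE rewritten to "cpu" and the backend switched to GLOO.
#
#     from d2go.distributed import launch
#
#     def _train_worker(cfg, output_dir):
#         ...  # per-process training logic
#         return output_dir
#
#     cfg = ...                     # hypothetical CfgNode prepared by the caller
#     results = launch(
#         _train_worker,
#         num_processes_per_machine=2,
#         backend="NCCL",
#         args=(cfg, "/tmp/d2go_out"),
#     )                             # returns a dict of per-process return values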
| d2go-main | d2go/distributed.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import argparse
import logging
import os
import time
from typing import Callable, List, Optional, Tuple, Type, TypeVar, Union
import detectron2.utils.comm as comm
import torch
from d2go.config import (
auto_scale_world_size,
CfgNode,
load_full_config_from_file,
reroute_config_path,
temp_defrost,
)
from d2go.config.utils import get_diff_cfg
from d2go.distributed import (
D2GoSharedContext,
get_local_rank,
get_num_processes_per_machine,
)
from d2go.runner import import_runner
from d2go.runner.api import RunnerV2Mixin
from d2go.runner.default_runner import BaseRunner
from d2go.runner.lightning_task import DefaultTask
from d2go.utils.helper import run_once
from d2go.utils.launch_environment import get_launch_environment
from d2go.utils.logging import initialize_logging, replace_print_with_logging
from detectron2.utils.collect_env import collect_env_info
from detectron2.utils.env import seed_all_rng
from detectron2.utils.file_io import PathManager
from detectron2.utils.logger import setup_logger as _setup_logger
from detectron2.utils.serialize import PicklableWrapper
from mobile_cv.common.misc.py import FolderLock, MultiprocessingPdb, post_mortem_if_fail
logger = logging.getLogger(__name__)
_RT = TypeVar("_RT")
@run_once()
def setup_root_logger(logging_level: int = logging.INFO) -> None:
"""
Sets up the D2Go root logger. Loggers are organized in a tree; if a logger does not
have a level set explicitly, it falls back to the level of its parent. By setting the
root logger level (to the given value, INFO by default), we change that default
behaviour for all loggers.
See https://docs.python.org/3/library/logging.html for a more in-depth
description
"""
initialize_logging(logging_level)
replace_print_with_logging()
def basic_argument_parser(
distributed=True,
requires_output_dir=True,
):
"""Basic cli tool parser for Detectron2Go binaries"""
parser = argparse.ArgumentParser(description="PyTorch Object Detection Training")
parser.add_argument(
"--runner",
type=str,
default="d2go.runner.GeneralizedRCNNRunner",
help="Full class name, i.e. (package.)module.class",
)
parser.add_argument(
"--config-file",
help="path to config file",
default="",
metavar="FILE",
)
parser.add_argument(
"--output-dir",
help="When given, this will override the OUTPUT_DIR in the config-file",
required=requires_output_dir,
default=None,
type=str,
)
parser.add_argument(
"opts",
help="Modify config options using the command-line",
default=None,
nargs=argparse.REMAINDER,
)
parser.add_argument(
"--save-return-file",
help="When given, the main function outputs will be serialized and saved to this file",
default=None,
type=str,
)
parser.add_argument(
"--disable-post-mortem",
action="store_true",
help="whether to NOT connect pdb on failure, which only works locally",
)
if distributed:
parser.add_argument(
"--num-processes", type=int, default=1, help="number of gpus per machine"
)
parser.add_argument("--num-machines", type=int, default=1)
parser.add_argument("--run-as-worker", type=bool, default=False)
parser.add_argument(
"--machine-rank",
type=int,
default=0,
help="the rank of this machine (unique per machine)",
)
parser.add_argument(
"--dist-url", default="file:///tmp/d2go_dist_file_{}".format(time.time())
)
parser.add_argument("--dist-backend", type=str, default="NCCL")
return parser
def build_basic_cli_args(
config_path: Optional[str] = None,
output_dir: Optional[str] = None,
runner_name: Optional[str] = None,
save_return_file: Optional[str] = None,
num_processes: Optional[Union[int, str]] = None,
num_machines: Optional[Union[int, str]] = None,
machine_rank: Optional[Union[int, str]] = None,
dist_url: Optional[str] = None,
dist_backend: Optional[str] = None,
disable_post_mortem: bool = False,
run_as_worker: bool = False,
# Evaluator args below
predictor_path: Optional[str] = None,
num_threads: Optional[int] = None,
caffe2_engine: Optional[int] = None,
caffe2_logging_print_net_summary: Optional[int] = None,
) -> List[str]:
"""
Returns parameters in the form of CLI arguments for the binary using
basic_argument_parser to set up its argument parser.
For the parameters definition and meaning, see basic_argument_parser.
"""
args: List[str] = []
if config_path is not None:
args += ["--config-file", config_path]
if output_dir is not None:
args += ["--output-dir", output_dir]
if runner_name is not None:
args += ["--runner", runner_name]
if save_return_file is not None:
args += ["--save-return-file", str(save_return_file)]
if disable_post_mortem:
args += ["--disable-post-mortem"]
if run_as_worker:
args += ["--run-as-worker", str(run_as_worker)]
if num_processes is not None:
args += ["--num-processes", str(num_processes)]
if num_machines is not None:
args += ["--num-machines", str(num_machines)]
if machine_rank is not None:
args += ["--machine-rank", str(machine_rank)]
if dist_url is not None:
args += ["--dist-url", str(dist_url)]
if dist_backend is not None:
args += ["--dist-backend", str(dist_backend)]
if predictor_path is not None:
args += ["--predictor-path", predictor_path]
if num_threads is not None:
args += ["--num-threads", int(num_threads)]
if caffe2_engine is not None:
args += ["--caffe2-engine", int(caffe2_engine)]
if caffe2_logging_print_net_summary is not None:
args += [
"--caffe2_logging_print_net_summary",
str(caffe2_logging_print_net_summary),
]
return args
def create_cfg_from_cli(
config_file: str,
overwrites: Optional[List[str]],
runner_class: Union[None, str, Type[BaseRunner], Type[DefaultTask]],
) -> CfgNode:
"""
Centralized function to load config object from config file. It currently supports:
- YACS based config (return yacs's CfgNode)
"""
config_file = reroute_config_path(config_file)
with PathManager.open(config_file, "r") as f:
# TODO: switch to logger; note that we need to initialize the logger outside of main
# for running locally.
print("Loaded config file {}:\n{}".format(config_file, f.read()))
if isinstance(runner_class, str):
print(f"Importing runner: {runner_class} ...")
runner_class = import_runner(runner_class)
if runner_class is None or issubclass(runner_class, RunnerV2Mixin):
# Runner-less API
cfg = load_full_config_from_file(config_file)
else:
# backward compatible for old API
cfg = runner_class.get_default_cfg()
cfg.merge_from_file(config_file)
cfg.merge_from_list(overwrites or [])
cfg.freeze()
return cfg
def prepare_for_launch(
args,
) -> Tuple[CfgNode, str, str]:
"""
Load config, figure out working directory, create runner.
- when args.config_file is empty, returned cfg will be the default one
- returned output_dir will always be non empty, args.output_dir has higher
priority than cfg.OUTPUT_DIR.
"""
logger.info(args)
cfg = create_cfg_from_cli(
config_file=args.config_file,
overwrites=args.opts,
runner_class=args.runner,
)
# overwrite the output_dir based on config if output is not set via cli
assert args.output_dir or args.config_file
output_dir = args.output_dir or cfg.OUTPUT_DIR
return cfg, output_dir, args.runner
def maybe_override_output_dir(cfg: CfgNode, output_dir: str):
if cfg.OUTPUT_DIR != output_dir:
with temp_defrost(cfg):
logger.warning(
"Override cfg.OUTPUT_DIR ({}) to be the same as output_dir {}".format(
cfg.OUTPUT_DIR, output_dir
)
)
cfg.OUTPUT_DIR = output_dir
def setup_before_launch(
cfg: CfgNode,
output_dir: str,
runner_class: Union[None, str, Type[BaseRunner], Type[DefaultTask]],
) -> Union[None, D2GoSharedContext]:
"""
Setup logic before spawning workers. Including:
- Shared context initilization to be passed to all workers
"""
if isinstance(runner_class, str):
logger.info(f"Importing runner: {runner_class} ...")
runner_class = import_runner(runner_class)
if hasattr(runner_class, "create_shared_context"):
return runner_class.create_shared_context(cfg)
return None
def setup_after_launch(
cfg: CfgNode,
output_dir: str,
runner_class: Union[None, str, Type[BaseRunner], Type[DefaultTask]],
) -> Union[None, BaseRunner, Type[DefaultTask]]:
"""
Binary-level setup after entering DDP, including
- creating working directory
- setting up logger
- logging environment
- printing and dumping config
- (optional) initializing runner
"""
create_dir_on_global_main_process(output_dir)
setup_loggers(output_dir)
log_system_info()
cfg.freeze()
maybe_override_output_dir(cfg, output_dir)
logger.info("Running with full config:\n{}".format(cfg))
dump_cfg(cfg, os.path.join(output_dir, "config.yaml"))
if isinstance(runner_class, str):
logger.info(f"Importing runner: {runner_class} ...")
runner_class = import_runner(runner_class)
if issubclass(runner_class, DefaultTask):
# TODO(T123679504): merge this with runner code path to return runner instance
logger.info(f"Importing lightning task: {runner_class} ...")
runner = runner_class
elif issubclass(runner_class, BaseRunner):
logger.info(f"Initializing runner: {runner_class} ...")
runner = runner_class()
runner = initialize_runner(runner, cfg)
logger.info("Running with runner: {}".format(runner))
else:
assert runner_class is None, f"Unsupported runner class: {runner_class}"
runner = None
# save the diff config
default_cfg = (
runner_class.get_default_cfg()
if runner_class and not issubclass(runner_class, RunnerV2Mixin)
else cfg.get_default_cfg()
)
dump_cfg(
get_diff_cfg(default_cfg, cfg),
os.path.join(output_dir, "diff_config.yaml"),
)
# scale the config after dumping so that dumped config files keep original world size
auto_scale_world_size(cfg, new_world_size=comm.get_world_size())
# avoid random pytorch and CUDA algorithms during the training
if cfg.SOLVER.DETERMINISTIC:
logging.warning("Using deterministic training for the reproducibility")
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
torch.use_deterministic_algorithms(True)
# reference: https://docs.nvidia.com/cuda/cublas/index.html#cublasApi_reproducibility
os.environ["CUBLAS_WORKSPACE_CONFIG"] = ":4096:8"
if cfg.SEED > 0:
seed_all_rng(cfg.SEED)
return runner
def setup_logger(
module_name: str,
output_dir: str,
abbrev_name: Optional[str] = None,
color: Optional[bool] = None,
) -> logging.Logger:
if not color:
color = get_launch_environment() == "local"
if not abbrev_name:
abbrev_name = module_name
logger = _setup_logger(
output_dir,
distributed_rank=comm.get_rank(),
color=color,
name=module_name,
abbrev_name=abbrev_name,
enable_propagation=True,
configure_stdout=False,
)
return logger
@run_once()
def setup_loggers(output_dir):
# Setup logging in each of the distributed processes.
setup_root_logger()
setup_logger("detectron2", output_dir, abbrev_name="d2")
setup_logger("fvcore", output_dir)
setup_logger("d2go", output_dir)
setup_logger("mobile_cv", output_dir)
# NOTE: all above loggers have FileHandler pointing to the same file as d2_logger.
# Those files are opened upon creation, but it seems fine in 'a' mode.
def log_system_info():
num_processes = get_num_processes_per_machine()
logger.info(
"Using {} processes per machine. Rank of current process: {}".format(
num_processes, comm.get_rank()
)
)
wf_id = os.getenv("WORKFLOW_RUN_ID", None)
if wf_id is not None:
logger.info("FBLearner Flow Run ID: {}".format(wf_id))
logger.info("Environment info:\n" + collect_env_info())
try:
from detectron2.fb.utils import print_fbcode_info
print_fbcode_info()
except ImportError:
pass
def dump_cfg(cfg: CfgNode, path: str) -> None:
if comm.is_main_process():
with PathManager.open(path, "w") as f:
f.write(cfg.dump())
logger.info("Full config saved to {}".format(path))
def create_dir_on_global_main_process(path: str) -> None:
if comm.get_rank() == 0 and path:
PathManager.mkdirs(path)
# Add a barrier to make sure the dir exists for non-master processes
comm.synchronize()
def initialize_runner(runner: BaseRunner, cfg: CfgNode) -> BaseRunner:
assert runner is not None, "now always requires a runner instance"
runner._initialize(cfg)
return runner
def caffe2_global_init(logging_print_net_summary=0, num_threads=None):
if num_threads is None:
if get_num_processes_per_machine() > 1:
# by default use single thread when DDP with multiple processes
num_threads = 1
else:
# GlobalInit will clean PyTorch's num_threads and set it to 1,
# thus keep PyTorch's default value to make it truly default.
num_threads = torch.get_num_threads()
if not get_local_rank() == 0:
logging_print_net_summary = 0 # only enable for local main process
from caffe2.python import workspace
workspace.GlobalInit(
[
"caffe2",
"--caffe2_log_level=2",
"--caffe2_logging_print_net_summary={}".format(logging_print_net_summary),
"--caffe2_omp_num_threads={}".format(num_threads),
"--caffe2_mkl_num_threads={}".format(num_threads),
]
)
logger.info("Using {} threads after GlobalInit".format(torch.get_num_threads()))
def post_mortem_if_fail_for_main(main_func: Callable[..., _RT]) -> Callable[..., _RT]:
def new_main_func(cfg, output_dir, *args, **kwargs) -> _RT:
pdb_ = (
MultiprocessingPdb(FolderLock(output_dir))
if comm.get_world_size() > 1
else None # fallback to use normal pdb for single process
)
return post_mortem_if_fail(pdb_)(main_func)(cfg, output_dir, *args, **kwargs)
return PicklableWrapper(new_main_func)
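# --- Usage sketch (not from the repo): wiring the setup helpers together. ---
# The config path and output dir are placeholders; the call order follows
# prepare_for_launch() and setup_after_launch() defined above.
#
#     from d2go.setup import basic_argument_parser, prepare_for_launch, setup_after_launch
#
#     parser = basic_argument_parser()
#     args = parser.parse_args(
#         ["--config-file", "configs/my_config.yaml", "--output-dir", "/tmp/d2go_out"]
#     )
#     cfg, output_dir, runner_name = prepare_for_launch(args)
#     runner = setup_after_launch(cfg, output_dir, runner_name)  # runner instance or None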
| d2go-main | d2go/setup.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import logging
import uuid
from contextlib import ContextDecorator
from d2go.checkpoint.log_checkpoint import log_checkpoint
logger = logging.getLogger(__name__)
class instrument_checkpoint(ContextDecorator):
def __init__(
self,
checkpoint_type: str,
) -> None:
super().__init__()
self.unique_id = uuid.uuid1().int >> 97
self.checkpoint_type = checkpoint_type
def __enter__(self) -> "instrument_checkpoint":
log_checkpoint(
checkpoint_type=self.checkpoint_type,
unique_id=self.unique_id,
state="begin",
)
return self
def __exit__(self, exc_type, exc_value, tb) -> bool:
log_checkpoint(
checkpoint_type=self.checkpoint_type,
unique_id=self.unique_id,
state="end",
)
if exc_value is not None:
# Re-raising the exception, otherwise it will be swallowed
raise exc_value
return True
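# --- Usage sketch (not from the repo): instrument_checkpoint is a ContextDecorator, so it
# can wrap a checkpoint operation either as a decorator or as a context manager. The two
# functions below are purely illustrative; only the begin/end logging comes from this module.
@instrument_checkpoint("save")
def _example_save(path):
    ...  # hypothetical serialization logic; "begin"/"end" events are logged around it


def _example_load():
    with instrument_checkpoint("load"):
        ...  # hypothetical load logic runs between the "begin" and "end" log events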
| d2go-main | d2go/checkpoint/checkpoint_instrumentation.py |
# (c) Meta Platforms, Inc. and affiliates. Confidential and proprietary.
import json
import os
from typing import Callable, cast, IO
import detectron2.utils.comm as comm
import torch
from d2go.checkpoint.checkpoint_instrumentation import instrument_checkpoint
from d2go.checkpoint.utils import (
gather_ema_state_dict,
gather_optimizer_state_dict,
scatter_ema_state_dict,
scatter_optimizer_state_dict,
)
from d2go.quantization.modeling import QATCheckpointer
from d2go.trainer.fsdp import FSDPWrapper
from d2go.utils.misc import _log_api_usage_on_main_process
from mobile_cv.torch.utils_pytorch.distributed_helper import interleave_by_rank
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
LOG_API_IDENTIFIER = "checkpointing.FSDPCheckpointer"
def get_max_checkpoint_concurrency() -> int:
return comm.get_world_size()
# TODO: replace FSDPCheckpointer with central D2GoCheckpointer
class FSDPCheckpointer(QATCheckpointer):
"""
Extend the Checkpointer to support saving/loading FSDP models
"""
def __init__(
self,
*args,
concurrency_limit_fetcher: Callable[[], int] = get_max_checkpoint_concurrency,
**kwargs,
):
super().__init__(*args, **kwargs)
self._concurrency_limit_fetcher: Callable[[], int] = concurrency_limit_fetcher
def is_distributed(self) -> bool:
return True
@instrument_checkpoint("load")
def load(self, path: str, checkpointables=None):
"""
Add support for loading sharded optimizer states in FSDP.
.. note:: Loading optimizer states from regular checkpoints into FSDP models is currently not supported.
In general users should not resume non-FSDP training with FSDP.
"""
if isinstance(self.model, FSDPWrapper):
load_path = path
if path:
# loading path is a directory: local or sharded state dict is used
if self.path_manager.isdir(path):
# Get state dict type from metadata file
metadata = self._load_metadata(path)
state_dict_type = (
metadata["state_dict_type"] if metadata else "LOCAL_STATE_DICT"
)
assert state_dict_type in ["LOCAL_STATE_DICT", "SHARDED_STATE_DICT"]
self.logger.info(
f"[FSDPCheckpointer] Loading from {state_dict_type} checkpoint ..."
)
self.model.load_state_dict_type = StateDictType[state_dict_type]
load_path = os.path.join(path, f"rank{comm.get_rank()}.pth")
# loading path is a file: full global state dict is used
else:
self.logger.info(
"[FSDPCheckpointer] Loading from FULL_STATE_DICT checkpoint ..."
)
self.model.load_state_dict_type = StateDictType.FULL_STATE_DICT
_log_api_usage_on_main_process(
f"{LOG_API_IDENTIFIER}.load.fsdp.{self.model.load_state_dict_type.name}" # pyre-ignore
)
# Convert local ckpt to global ckpt when we load from a local ckpt but want to save to global ckpt
convert_local_ckpt_to_global = (
path
and self.model.load_state_dict_type == StateDictType.LOCAL_STATE_DICT
and self.model.state_dict_type == StateDictType.FULL_STATE_DICT
)
# Load all checkpointables from local ckpt if we want to convert to global ckpt
checkpointables_iter = (
self.checkpointables.keys()
if checkpointables is None or convert_local_ckpt_to_global
else checkpointables
)
checkpointables_filtered = [
name
for name in checkpointables_iter
if name not in ["optimizer", "ema_state"]
]
checkpoint = super().load(
load_path, checkpointables=checkpointables_filtered
)
if "optimizer" in checkpointables_iter:
self.logger.info(
f"[FSDPCheckpointer] Loading optimizer from {load_path} ..."
)
optimizer = self.checkpointables["optimizer"]
osd = checkpoint.pop("optimizer")
scatter_optimizer_state_dict(optimizer, osd, self.model)
if "ema_state" in checkpointables_iter:
self.logger.info(
f"[FSDPCheckpointer] Loading ema_state from {load_path} ..."
)
ema_state = checkpoint.pop("ema_state")
scatter_ema_state_dict(ema_state, self.model)
# Convert local ckpt by resaving the current state
if convert_local_ckpt_to_global:
self.logger.info(
"[FSDPCheckpointer] Converting local FSDP checkpoint to global checkpoint ..."
)
self.save(os.path.basename(path), tag_last_ckpt=False, **checkpoint)
self.logger.info(
"[FSDPCheckpointer] Local-to-global checkpoint conversion finishes"
)
# return all remaining checkpoints
return checkpoint
else:
_log_api_usage_on_main_process(f"{LOG_API_IDENTIFIER}.load.ddp")
return super().load(path, checkpointables=checkpointables)
@instrument_checkpoint("save")
def save(self, name: str, tag_last_ckpt=True, **kwargs) -> None:
"""
Add support for saving sharding models and optimizers.
The rest of the code is copied from implementation in the superclass
"""
# checkpoint_type is used to annotate preemption checkpoints for internal checkpointer. Ignore it here
kwargs.pop("checkpoint_type", None)
# If no sharding, only the main process enters the saving codepath;
# otherwise, all processes need to call state_dict() to enable state broadcasting among ranks
if not isinstance(self.model, FSDPWrapper):
_log_api_usage_on_main_process(f"{LOG_API_IDENTIFIER}.save.ddp")
if comm.is_main_process():
return super().save(name, **kwargs)
return
_log_api_usage_on_main_process(
f"{LOG_API_IDENTIFIER}.save.fsdp.{self.model.state_dict_type.name}"
)
data = {}
# FSDP: model.state_dict() needs to be called by all ranks before saving
data["model"] = self.model.state_dict()
for key, obj in self.checkpointables.items():
if key == "optimizer":
data[key] = gather_optimizer_state_dict(obj, self.model)
elif key == "ema_state":
data[key] = gather_ema_state_dict(obj, self.model)
else:
data[key] = obj.state_dict()
data.update(kwargs)
# If using full state dict, only the main process does checkpoint saving; Otherwise, all processes do
if self.model.state_dict_type != StateDictType.FULL_STATE_DICT:
# Main process creates directory for local saves
new_save_dir = os.path.join(self.save_dir, name)
if comm.is_main_process():
if not self.path_manager.exists(new_save_dir):
self.path_manager.mkdirs(new_save_dir)
comm.synchronize()
# Saving checkpoints
basename = "rank{}.pth".format(comm.get_rank())
save_file = os.path.join(new_save_dir, basename)
assert os.path.basename(save_file) == basename, basename
# Limit the write concurrency to avoid QPS overload
with interleave_by_rank(
concurrency_limit=self._concurrency_limit_fetcher()
):
self._save_file(data, save_file)
# Main process tags last checkpoint if no errors in all processes
comm.synchronize()
if comm.is_main_process():
self._save_metadata(new_save_dir)
if tag_last_ckpt:
self.tag_last_checkpoint(name)
elif comm.is_main_process():
basename = "{}.pth".format(name)
save_file = os.path.join(self.save_dir, basename)
assert os.path.basename(save_file) == basename, basename
self._save_file(data, save_file)
if tag_last_ckpt:
self.tag_last_checkpoint(basename)
def _save_file(self, data, filename):
self.logger.info("Saving checkpoint to {}".format(filename))
with self.path_manager.open(filename, "wb") as f:
torch.save(data, cast(IO[bytes], f))
def _load_file(self, f: str):
# Limit the read concurrency to avoid QPS overload
with interleave_by_rank(concurrency_limit=self._concurrency_limit_fetcher()):
return super()._load_file(f)
def _save_metadata(self, path):
metadata_file = os.path.join(path, "metadata.json")
obj = {"state_dict_type": self.model.state_dict_type.name}
with self.path_manager.open(metadata_file, "w") as f:
json.dump(obj, f)
def _load_metadata(self, path):
metadata_file = os.path.join(path, "metadata.json")
if self.path_manager.exists(metadata_file):
with self.path_manager.open(metadata_file, "r") as f:
return json.load(f)
else:
return None
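# --- Usage sketch (not from the repo): a save/resume round trip with FSDPCheckpointer. ---
# The tiny model/optimizer and the paths are placeholders, and the constructor call assumes
# the base Checkpointer API (model, save_dir, **checkpointables); treat it as illustrative.
#
#     import torch
#     from d2go.checkpoint import FSDPCheckpointer
#
#     model = torch.nn.Linear(4, 4)
#     optim = torch.optim.SGD(model.parameters(), lr=0.1)
#     checkpointer = FSDPCheckpointer(model, save_dir="/tmp/ckpts", optimizer=optim)
#     checkpointer.save("model_0000000")   # all ranks call save(); non-FSDP models are
#                                          # written by the main process only (see above)
#     extra = checkpointer.load("/tmp/ckpts/model_0000000.pth", checkpointables=["optimizer"])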
| d2go-main | d2go/checkpoint/fsdp_checkpoint.py |
from d2go.checkpoint.api import is_distributed_checkpoint
from d2go.checkpoint.fsdp_checkpoint import FSDPCheckpointer
__all__ = [
"is_distributed_checkpoint",
"FSDPCheckpointer",
]
| d2go-main | d2go/checkpoint/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import logging
from mobile_cv.common.misc.oss_utils import fb_overwritable
logger = logging.getLogger(__name__)
@fb_overwritable()
def log_checkpoint(checkpoint_type: str, unique_id: int, state: str) -> None:
logger.info(f"Checkpoint:{unique_id} {checkpoint_type} {state} ")
| d2go-main | d2go/checkpoint/log_checkpoint.py |
# (c) Meta Platforms, Inc. and affiliates. Confidential and proprietary.
from fvcore.common.checkpoint import Checkpointer
def is_distributed_checkpoint(checkpointer: Checkpointer) -> bool:
"""
Check if checkpointer supports distributed checkpointing,
in which case all ops need to be invoked in every rank.
"""
if hasattr(checkpointer, "is_distributed"):
return checkpointer.is_distributed()
return False
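# --- Usage sketch (not from the repo): gating a save on whether the checkpointer is
# distributed, so FSDP checkpoints are written by every rank while plain checkpoints are
# written by the main process only. `checkpointer` is a placeholder created elsewhere.
#
#     import detectron2.utils.comm as comm
#
#     if is_distributed_checkpoint(checkpointer) or comm.is_main_process():
#         checkpointer.save("model_final")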
| d2go-main | d2go/checkpoint/api.py |
import copy
from d2go.modeling.ema import EMAState
from d2go.trainer.fsdp import FSDPWrapper
from torch.distributed.fsdp.fully_sharded_data_parallel import (
FullyShardedDataParallel as FSDP,
StateDictType,
)
def gather_optimizer_state_dict(optimizer, model: FSDPWrapper):
"""
Get full/local optimizer state dict from an FSDP model.
"""
# FSDP: full_optim_state_dict() needs to be called by all ranks
if model.state_dict_type == StateDictType.FULL_STATE_DICT:
return FSDP.full_optim_state_dict(
model, optim=optimizer, rank0_only=model.rank0_only
)
elif model.state_dict_type == StateDictType.SHARDED_STATE_DICT:
return FSDP.sharded_optim_state_dict(model, optim=optimizer)
return optimizer.state_dict()
def scatter_optimizer_state_dict(optimizer, optim_state_dict, model: FSDPWrapper):
"""
Load a full/local optimizer state dict to a FSDP model.
If using full state dict, shard and scatter the optimizer state dict before loading
"""
if model.load_state_dict_type == StateDictType.FULL_STATE_DICT:
optim_state_dict = FSDP.shard_full_optim_state_dict(
optim_state_dict, model, optim=optimizer
)
elif model.load_state_dict_type == StateDictType.SHARDED_STATE_DICT:
optim_state_dict = FSDP.flatten_sharded_optim_state_dict(
optim_state_dict, model, optim=optimizer
)
optimizer.load_state_dict(optim_state_dict)
def gather_ema_state_dict(ema_state, model: FSDPWrapper):
"""
Get full/local EMA state dict from an FSDP model.
If using full state dict, gather local sharded EMA states from all FSDP processes and aggregate them into a full EMA state dict
"""
if model.state_dict_type == StateDictType.FULL_STATE_DICT:
# Apply local ema states to the model and unshard them
with ema_state.apply_and_restore(model):
with FSDP.summon_full_params(
model,
writeback=False,
offload_to_cpu=model.offload_to_cpu,
rank0_only=model.rank0_only,
):
state = EMAState.FromModel(model)
return state.state
elif model.state_dict_type == StateDictType.SHARDED_STATE_DICT:
with ema_state.apply_and_restore(model):
# must deepcopy the state dict, else we return a reference to the model state
return dict(copy.deepcopy(model.state_dict()))
else:
return ema_state.state_dict()
def scatter_ema_state_dict(ema_state_dict, model: FSDPWrapper):
"""
Load a full/sharded/local EMA state dict to a FSDP model.
If loading full state dict, ema_state_dict needs to be properly sharded for each FSDP process to store locally
Note that, at load-time, model.state_dict_type is automatically set to the type of the state dict being loaded
by accessing metadata, so there's no possibility of a save-load mismatch
"""
if model.load_state_dict_type == StateDictType.FULL_STATE_DICT:
# Store the current model state.
old_local_state = EMAState.FromModel(model)
# Apply ema_state as a FULL state dict to the model so it can be properly sharded
# Currently only [offload_to_cpu=False, rank0_only=False] is supported
with FSDP.summon_full_params(
model,
writeback=True,
offload_to_cpu=False,
rank0_only=False,
):
ema_state = EMAState()
ema_state.load_state_dict(ema_state_dict)
ema_state.apply_to(model)
# Load ema_state from model
model.ema_state.save_from(model)
# Restore the old model state
old_local_state.apply_to(model)
elif model.load_state_dict_type == StateDictType.SHARDED_STATE_DICT:
# Store current model state temporarily
old_state = EMAState.FromModel(model)
# Load the ema state dict into the model
model.load_state_dict(ema_state_dict)
# Save ema state with correct FQNs via EMAState.save_from
model.ema_state.save_from(model)
# restore old model state
old_state.apply_to(model)
else:
model.ema_state.load_state_dict(ema_state_dict)
| d2go-main | d2go/checkpoint/utils.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import itertools
import logging
import os
from typing import Any, Dict, List, Optional, Union
import detectron2.utils.comm as comm
import torch
from d2go.utils.parse_module_params import iterate_module_named_parameters
from detectron2.solver.build import (
maybe_add_gradient_clipping as d2_maybe_add_gradient_clipping,
reduce_param_groups,
)
from detectron2.utils.file_io import PathManager
from detectron2.utils.registry import Registry
D2GO_OPTIM_MAPPER_REGISTRY = Registry("D2GO_OPTIM_MAPPER")
logger = logging.getLogger(__name__)
OptimizerModelsType = Union[torch.nn.Module, torch.nn.parallel.DistributedDataParallel]
def get_optimizer_param_groups(model: OptimizerModelsType, cfg):
"""
Get override optimizer parameter groups
* Get all default parameters
* Get parameter groups for normalization and bias
* Get parameter groups from the model if it implements `get_optimizer_param_groups()`
Parameters appearing later override parameters appearing earlier
"""
# get all parameters that requires gradient
params = get_optimizer_param_groups_default(model)
# parameter groups for lr
params += get_optimizer_param_groups_lr(
model,
base_lr=cfg.SOLVER.BASE_LR,
bias_lr_factor=cfg.SOLVER.BIAS_LR_FACTOR,
lr_multipliers_overwrite=_merge_dict(cfg.SOLVER.LR_MULTIPLIER_OVERWRITE),
)
# parameter groups for normalization, bias, and embedding
params += get_optimizer_param_groups_weight_decay(
model,
weight_decay=cfg.SOLVER.WEIGHT_DECAY,
weight_decay_norm=cfg.SOLVER.WEIGHT_DECAY_NORM,
weight_decay_bias=cfg.SOLVER.WEIGHT_DECAY_BIAS,
weight_decay_embed=cfg.SOLVER.WEIGHT_DECAY_EMBED,
weight_decay_overwrite=_merge_dict(cfg.SOLVER.WEIGHT_DECAY_OVERWRITE),
)
# parameter groups from model function `model.get_optimizer_param_groups(opts)`
if isinstance(model, torch.nn.parallel.DistributedDataParallel):
model = model.module
if hasattr(model, "get_optimizer_param_groups"):
logger.info(
"Getting optimizer parameter groups from model.get_optimizer_param_groups()"
)
params += model.get_optimizer_param_groups(cfg)
return reduce_param_groups(params)
def get_optimizer_param_groups_default(model: OptimizerModelsType):
ret = [
{
"params": list(
filter(
lambda x: x.requires_grad,
model.parameters(),
)
),
"param_names": [
name
for name, _param in filter(
lambda x: x[1].requires_grad, model.named_parameters()
)
],
}
]
return ret
def get_optimizer_param_groups_lr(
model: OptimizerModelsType,
base_lr: float,
bias_lr_factor: float = 1.0,
lr_multipliers_overwrite: Optional[Dict[str, float]] = None,
):
"""
Allow setting up lr for modules
base_lr: lr for all modules
bias_lr_factor: scale factor for lr for bias term
lr_multipliers_overwrite (dict: str-> float):
Applies a different lr multiplier to a set of parameters whose names
contain certain keys. For example, if lr_multipliers_overwrite={'backbone': 0.1},
the LR for the parameters whose names contain 'backbone' will be scaled to 0.1x.
Set lr_multipliers_overwrite=None if no multipliers required.
"""
params: List[Dict[str, Any]] = []
for (
module_name,
_module,
module_param_name,
value,
) in iterate_module_named_parameters(model):
cur_lr = base_lr
if module_param_name == "bias":
cur_lr = base_lr * bias_lr_factor
if lr_multipliers_overwrite is not None:
for kname, mult in lr_multipliers_overwrite.items():
if kname in module_name:
# apply multiplier for the params containing kname, e.g. backbone
cur_lr = cur_lr * mult
params += [
{
"param_names": [module_name + "." + module_param_name],
"params": [value],
"lr": cur_lr,
}
]
return params
def get_optimizer_param_groups_weight_decay(
model: OptimizerModelsType,
weight_decay: Optional[float],
weight_decay_norm: Optional[float] = None,
weight_decay_bias: Optional[float] = None,
weight_decay_embed: Optional[float] = None,
weight_decay_overwrite: Optional[Dict[str, float]] = None,
):
"""
Allow setting up weight decay for normalization, embedding and bias
"""
if weight_decay_norm is None:
weight_decay_norm = weight_decay
if weight_decay_bias is None:
weight_decay_bias = weight_decay
if weight_decay_embed is None:
weight_decay_embed = weight_decay
norm_module_types = (
torch.nn.BatchNorm1d,
torch.nn.BatchNorm2d,
torch.nn.BatchNorm3d,
torch.nn.SyncBatchNorm,
torch.nn.GroupNorm,
torch.nn.InstanceNorm1d,
torch.nn.InstanceNorm2d,
torch.nn.InstanceNorm3d,
torch.nn.LayerNorm,
torch.nn.LocalResponseNorm,
)
params: List[Dict[str, Any]] = []
for (
module_name,
module,
module_param_name,
value,
) in iterate_module_named_parameters(model):
cur_wd = weight_decay
if isinstance(module, norm_module_types):
cur_wd = weight_decay_norm
elif isinstance(module, torch.nn.Embedding):
cur_wd = weight_decay_embed
elif module_param_name == "bias":
cur_wd = weight_decay_bias
if weight_decay_overwrite is not None:
for kname, wd in weight_decay_overwrite.items():
if kname in module_param_name:
cur_wd = wd
if cur_wd is not None:
params += [
{
"param_names": [module_name + "." + module_param_name],
"params": [value],
"weight_decay": cur_wd,
}
]
return params
def get_optimizer_param_groups_override(
model: OptimizerModelsType,
overrides: Optional[Dict[str, Dict[str, float]]] = None,
):
"""
Allow setting up overrides for parameter groups
overrides (dict: str -> (dict: str -> float)):
if not `None`, provides values for optimizer hyperparameters
(LR, weight decay) for module parameters with a given name; e.g.
{"embedding": {"lr": 0.01, "weight_decay": 0.1}} will set the LR and
weight decay values for all module parameters named `embedding` (default: None)
"""
params: List[Dict[str, Any]] = []
if overrides is None:
return params
for (
_module_name,
_module,
module_param_name,
value,
) in iterate_module_named_parameters(model):
schedule_params = {}
if module_param_name in overrides:
schedule_params.update(overrides[module_param_name])
params += [{"params": [value], **schedule_params}]
return params
def maybe_add_gradient_clipping(cfg, optim): # optim: the optimizer class
# detectron2 doesn't have full model gradient clipping now
clip_norm_val = cfg.SOLVER.CLIP_GRADIENTS.CLIP_VALUE
enable = (
cfg.SOLVER.CLIP_GRADIENTS.ENABLED
and cfg.SOLVER.CLIP_GRADIENTS.CLIP_TYPE == "full_model"
and clip_norm_val > 0.0
)
class FullModelGradientClippingOptimizer(optim):
def step(self, closure=None):
all_params = itertools.chain(*[x["params"] for x in self.param_groups])
torch.nn.utils.clip_grad_norm_(all_params, clip_norm_val)
super().step(closure=closure)
if enable:
return FullModelGradientClippingOptimizer
return d2_maybe_add_gradient_clipping(cfg, optim)
def _merge_dict(in_dict):
ret_dict = {}
assert all(isinstance(x, dict) for x in in_dict)
for dic in in_dict:
ret_dict.update(dic)
return ret_dict
@D2GO_OPTIM_MAPPER_REGISTRY.register()
def sgd(cfg, model: torch.nn.Module) -> torch.optim.Optimizer:
"""
Build an optimizer from config.
"""
params = get_optimizer_param_groups(model, cfg)
return maybe_add_gradient_clipping(cfg, torch.optim.SGD)(
params=params,
lr=cfg.SOLVER.BASE_LR,
momentum=cfg.SOLVER.MOMENTUM,
nesterov=cfg.SOLVER.NESTEROV,
foreach=True,
)
@D2GO_OPTIM_MAPPER_REGISTRY.register()
def adam(cfg, model: torch.nn.Module) -> torch.optim.Optimizer:
"""
Build an optimizer from config.
"""
params = get_optimizer_param_groups(model, cfg)
return maybe_add_gradient_clipping(cfg, torch.optim.Adam)(
params=params, lr=cfg.SOLVER.BASE_LR, betas=cfg.SOLVER.BETAS, eps=cfg.SOLVER.EPS
)
@D2GO_OPTIM_MAPPER_REGISTRY.register()
def adamw(cfg, model: torch.nn.Module) -> torch.optim.Optimizer:
"""
Build an optimizer from config.
"""
params = get_optimizer_param_groups(model, cfg)
return maybe_add_gradient_clipping(cfg, torch.optim.AdamW)(
params=params,
lr=cfg.SOLVER.BASE_LR,
betas=cfg.SOLVER.BETAS,
eps=cfg.SOLVER.EPS,
foreach=True if cfg.SOLVER.FUSED is False else False,
fused=True if cfg.SOLVER.FUSED else False,
)
@D2GO_OPTIM_MAPPER_REGISTRY.register()
def sgd_mt(cfg, model: torch.nn.Module) -> torch.optim.Optimizer:
"""
Build a multi_tensor SGD optimizer that works significantly faster.
This version is expected to be the default implementation for SGD
optimizer by end of H1'21. To benefit from the speedup, the number
of parameter groups needs to be reduced using `reduce_param_groups`.
"""
params = get_optimizer_param_groups(model, cfg)
return maybe_add_gradient_clipping(cfg, torch.optim._multi_tensor.SGD)(
params=params,
lr=cfg.SOLVER.BASE_LR,
momentum=cfg.SOLVER.MOMENTUM,
nesterov=cfg.SOLVER.NESTEROV,
)
@D2GO_OPTIM_MAPPER_REGISTRY.register()
def adamw_mt(cfg, model: torch.nn.Module) -> torch.optim.Optimizer:
"""
Build a multi_tensor adamw optimizer that works significantly faster.
This version is expected to be the default implementation for adamw
optimizer by end of H1'21. To benefit from the speedup, the number
of parameter groups needs to be reduced using `reduce_param_groups`.
"""
params = get_optimizer_param_groups(model, cfg)
return maybe_add_gradient_clipping(cfg, torch.optim._multi_tensor.AdamW)(
params=params, lr=cfg.SOLVER.BASE_LR, eps=cfg.SOLVER.EPS
)
def build_optimizer_mapper(cfg, model):
name = cfg.SOLVER.OPTIMIZER
optimizer = D2GO_OPTIM_MAPPER_REGISTRY.get(name.lower())(cfg, model)
def _param_group_str(group, verbose=False):
ret = {x: y for x, y in group.items() if x != "params" and x != "param_names"}
ret["params"] = len(group["params"])
ret = sorted(ret.items())
ret = [f"{x[0]}: {x[1]}" for x in ret]
if verbose and "param_names" in group:
param_name_str = "\n" + "\n".join(group["param_names"]) + "\n"
ret.append(f"param_names: {param_name_str}")
ret = "{" + ", ".join(ret) + "}"
return ret
def _param_groups_str(groups, verbose=False):
ret = ""
for idx, group in enumerate(groups):
ret += f"Param group {idx}: {_param_group_str(group, verbose=verbose)}\n"
return ret
logger.info(f"Using optimizer:\n{optimizer}")
logger.info(
f"optimizer parameter groups:\n{_param_groups_str(optimizer.param_groups)}"
)
if (
comm.is_main_process()
and hasattr(cfg, "OUTPUT_DIR")
and PathManager.isdir(cfg.OUTPUT_DIR)
):
param_groups_str_verbose = _param_groups_str(
optimizer.param_groups, verbose=True
)
output_file = os.path.join(cfg.OUTPUT_DIR, "param_groups.txt")
if PathManager.isfile(output_file):
logger.warning("param_groups.txt already exists")
else:
logger.info(f"Write parameter groups to file: {output_file}")
with PathManager.open(output_file, "w") as f:
f.write(param_groups_str_verbose)
return optimizer
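# --- Usage sketch (not from the repo): registering a custom optimizer so it can be selected
# via cfg.SOLVER.OPTIMIZER. The body mirrors the built-in `sgd` mapper above; the name
# "my_sgd" and the idea of selecting it from a user config are illustrative only.
@D2GO_OPTIM_MAPPER_REGISTRY.register()
def my_sgd(cfg, model: torch.nn.Module) -> torch.optim.Optimizer:
    params = get_optimizer_param_groups(model, cfg)
    return maybe_add_gradient_clipping(cfg, torch.optim.SGD)(
        params=params,
        lr=cfg.SOLVER.BASE_LR,
        momentum=cfg.SOLVER.MOMENTUM,
    )
# With cfg.SOLVER.OPTIMIZER = "my_sgd", build_optimizer_mapper(cfg, model) picks it up.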
| d2go-main | d2go/optimizer/build.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from d2go.optimizer.build import build_optimizer_mapper
__all__ = ["build_optimizer_mapper"]
| d2go-main | d2go/optimizer/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import contextlib
import copy
import logging
from typing import List
from unittest import mock
import yaml
from d2go.config.utils import reroute_config_path, resolve_default_config
from detectron2.config import CfgNode as _CfgNode
from fvcore.common.registry import Registry
logger = logging.getLogger(__name__)
CONFIG_CUSTOM_PARSE_REGISTRY = Registry("CONFIG_CUSTOM_PARSE")
def _opts_to_dict(opts: List[str]):
ret = {}
for full_key, v in zip(opts[0::2], opts[1::2]):
keys = full_key.split(".")
cur = ret
for key in keys[:-1]:
if key not in cur:
cur[key] = {}
cur = cur[key]
cur[keys[-1]] = v
return ret
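# Example (not from the repo): _opts_to_dict(["MODEL.DEVICE", "cpu", "SOLVER.MAX_ITER", "3"])
# returns {"MODEL": {"DEVICE": "cpu"}, "SOLVER": {"MAX_ITER": "3"}}; merge_from_list() below
# then merges that nested dict into the config as a CfgNode.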
class CfgNode(_CfgNode):
@classmethod
def cast_from_other_class(cls, other_cfg):
"""Cast an instance of other CfgNode to D2Go's CfgNode (or its subclass)"""
new_cfg = cls(other_cfg)
# copy all fields inside __dict__; this preserves fields like __deprecated_keys__
for k, v in other_cfg.__dict__.items():
new_cfg.__dict__[k] = v
return new_cfg
def merge_from_file(self, cfg_filename: str, *args, **kwargs):
cfg_filename = reroute_config_path(cfg_filename)
with reroute_load_yaml_with_base():
res = super().merge_from_file(cfg_filename, *args, **kwargs)
self._run_custom_processing(is_dump=False)
return res
def merge_from_list(self, cfg_list: List[str]):
# NOTE: YACS's original merge_from_list cannot handle non-existent keys even if
# new_allowed is set; override the method to support this.
override_cfg = _opts_to_dict(cfg_list)
res = super().merge_from_other_cfg(CfgNode(override_cfg))
self._run_custom_processing(is_dump=False)
return res
def dump(self, *args, **kwargs):
cfg = copy.deepcopy(self)
cfg._run_custom_processing(is_dump=True)
return super(CfgNode, cfg).dump(*args, **kwargs)
@staticmethod
def load_yaml_with_base(filename: str, *args, **kwargs):
filename = reroute_config_path(filename)
with reroute_load_yaml_with_base():
return _CfgNode.load_yaml_with_base(filename, *args, **kwargs)
def __hash__(self):
# dump follows alphabetical order, thus good for hash use
return hash(self.dump())
def _run_custom_processing(self, is_dump=False):
"""Apply config load post custom processing from registry"""
frozen = self.is_frozen()
self.defrost()
for name, process_func in CONFIG_CUSTOM_PARSE_REGISTRY:
logger.info(f"Apply config processing: {name}, is_dump={is_dump}")
process_func(self, is_dump)
if frozen:
self.freeze()
def get_default_cfg(self):
"""Return the defaults for this instance of CfgNode"""
return resolve_default_config(self)
@contextlib.contextmanager
def temp_defrost(cfg):
is_frozen = cfg.is_frozen()
if is_frozen:
cfg.defrost()
yield cfg
if is_frozen:
cfg.freeze()
@contextlib.contextmanager
def temp_new_allowed(cfg: CfgNode):
is_new_allowed = cfg.is_new_allowed()
cfg.set_new_allowed(True)
yield cfg
cfg.set_new_allowed(is_new_allowed)
@contextlib.contextmanager
def reroute_load_yaml_with_base():
BASE_KEY = "_BASE_"
_safe_load = yaml.safe_load
_unsafe_load = yaml.unsafe_load
def _reroute_base(cfg):
if BASE_KEY in cfg:
if isinstance(cfg[BASE_KEY], list):
cfg[BASE_KEY] = [reroute_config_path(x) for x in cfg[BASE_KEY]]
else:
cfg[BASE_KEY] = reroute_config_path(cfg[BASE_KEY])
return cfg
def mock_safe_load(f):
cfg = _safe_load(f)
cfg = _reroute_base(cfg)
return cfg
def mock_unsafe_load(f):
cfg = _unsafe_load(f)
cfg = _reroute_base(cfg)
return cfg
with mock.patch("yaml.safe_load", side_effect=mock_safe_load):
with mock.patch("yaml.unsafe_load", side_effect=mock_unsafe_load):
yield
CONFIG_SCALING_METHOD_REGISTRY = Registry("CONFIG_SCALING_METHOD")
def auto_scale_world_size(cfg, new_world_size):
"""
Usually the config file is written for a specific number of devices, so this method
scales the config (in-place!) according to the actual world size using the
pre-registered scaling methods specified as cfg.SOLVER.AUTO_SCALING_METHODS.
Note for registering scaling methods:
- The method will only be called when scaling is needed. It won't be called
if SOLVER.REFERENCE_WORLD_SIZE is 0 or equal to target world size. Thus
cfg.SOLVER.REFERENCE_WORLD_SIZE will always be positive.
- The method updates cfg in-place, no return is required.
- No need for changing SOLVER.REFERENCE_WORLD_SIZE.
Args:
cfg (CfgNode): original config which contains SOLVER.REFERENCE_WORLD_SIZE and
SOLVER.AUTO_SCALING_METHODS.
new_world_size: the target world size
"""
old_world_size = cfg.SOLVER.REFERENCE_WORLD_SIZE
if old_world_size == 0 or old_world_size == new_world_size:
return cfg
if len(cfg.SOLVER.AUTO_SCALING_METHODS) == 0:
return cfg
original_cfg = cfg.clone()
frozen = original_cfg.is_frozen()
cfg.defrost()
assert len(cfg.SOLVER.AUTO_SCALING_METHODS) > 0, cfg.SOLVER.AUTO_SCALING_METHODS
for scaling_method in cfg.SOLVER.AUTO_SCALING_METHODS:
logger.info("Applying auto scaling method: {}".format(scaling_method))
CONFIG_SCALING_METHOD_REGISTRY.get(scaling_method)(cfg, new_world_size)
assert (
cfg.SOLVER.REFERENCE_WORLD_SIZE == old_world_size
), "Runner's scale_world_size shouldn't change SOLVER.REFERENCE_WORLD_SIZE"
cfg.SOLVER.REFERENCE_WORLD_SIZE = new_world_size
if frozen:
cfg.freeze()
from d2go.config.utils import get_cfg_diff_table
table = get_cfg_diff_table(cfg, original_cfg)
logger.info("Auto-scaled the config according to the actual world size: \n" + table)
def load_full_config_from_file(filename: str) -> CfgNode:
loaded_cfg = CfgNode.load_yaml_with_base(filename)
loaded_cfg = CfgNode(loaded_cfg) # cast Dict to CfgNode
cfg = loaded_cfg.get_default_cfg()
cfg.merge_from_other_cfg(loaded_cfg)
return cfg
def convert_cfg_to_dict(cfg):
if not isinstance(cfg, CfgNode):
return cfg
else:
cfg_dict = dict(cfg)
for k, v in cfg_dict.items():
cfg_dict[k] = convert_cfg_to_dict(v)
return cfg_dict
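# --- Usage sketch (not from the repo): registering a world-size scaling method, following
# the contract documented in auto_scale_world_size() above (update cfg in place, do not
# touch SOLVER.REFERENCE_WORLD_SIZE). The linear-LR rule and the name are illustrative only.
@CONFIG_SCALING_METHOD_REGISTRY.register()
def example_linear_lr_scaling(cfg, new_world_size):
    factor = new_world_size / cfg.SOLVER.REFERENCE_WORLD_SIZE
    cfg.SOLVER.BASE_LR = cfg.SOLVER.BASE_LR * factor
# Listing "example_linear_lr_scaling" in cfg.SOLVER.AUTO_SCALING_METHODS enables it.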
| d2go-main | d2go/config/config.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# forward the namespace to avoid `d2go.config.config`
from d2go.config.config import (
auto_scale_world_size,
CfgNode,
CONFIG_CUSTOM_PARSE_REGISTRY,
CONFIG_SCALING_METHOD_REGISTRY,
convert_cfg_to_dict,
load_full_config_from_file,
temp_defrost,
temp_new_allowed,
)
from d2go.config.utils import reroute_config_path
__all__ = [
"CONFIG_CUSTOM_PARSE_REGISTRY",
"CONFIG_SCALING_METHOD_REGISTRY",
"CfgNode",
"auto_scale_world_size",
"convert_cfg_to_dict",
"load_full_config_from_file",
"reroute_config_path",
"temp_defrost",
"temp_new_allowed",
]
| d2go-main | d2go/config/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import logging
import os
from enum import Enum
from typing import Any, Dict, List
import pkg_resources
from d2go.registry.builtin import CONFIG_UPDATER_REGISTRY
from mobile_cv.common.misc.oss_utils import fb_overwritable
logger = logging.getLogger(__name__)
DEFAULTS_KEY = "_DEFAULTS_"
def reroute_config_path(path: str) -> str:
"""
Supporting rerouting the config files for convenience:
d2go:// -> mobile-vision/d2go/...
detectron2go:// -> mobile-vision/d2go/configs/...
detectron2:// -> vision/fair/detectron2/configs/...
Those configs are considered code, so they'll reflect your current checkout;
try using canary if you have local changes.
"""
assert isinstance(path, str), path
if path.startswith("d2go://"):
rel_path = path[len("d2go://") :]
return pkg_resources.resource_filename("d2go", rel_path)
elif path.startswith("detectron2go://"):
rel_path = path[len("detectron2go://") :]
return pkg_resources.resource_filename(
"d2go", os.path.join("configs", rel_path)
)
elif path.startswith("detectron2://"):
rel_path = path[len("detectron2://") :]
return pkg_resources.resource_filename(
"detectron2.model_zoo", os.path.join("configs", rel_path)
)
else:
return reroute_fb_config_path(path)
@fb_overwritable()
def reroute_fb_config_path(path: str) -> str:
return path
def flatten_config_dict(dic, reorder=True):
"""
Flattens nested dict into single layer dict, for example:
flatten_config_dict({
"MODEL": {
"FBNET_V2": {
"ARCH_DEF": "val0",
"ARCH": "val1:,
},
}
})
=> {"MODEL.FBNET_V2.ARCH_DEF": "val0", "MODEL.FBNET_V2.ARCH": "val1"}
Args:
dic (dict or CfgNode): a nested dict whose keys are strings.
reorder (bool): if True, the returned dict will be sorted according to the keys;
otherwise original order will be preserved.
Returns:
dic: a single-layer dict
"""
return _flatten_config_dict(dic, reorder=reorder, prefix="")
def _flatten_config_dict(x, reorder, prefix):
if not isinstance(x, dict):
return {prefix: x}
d = {}
for k in sorted(x.keys()) if reorder else x.keys():
v = x[k]
new_key = f"{prefix}.{k}" if prefix else k
d.update(_flatten_config_dict(v, reorder, new_key))
return d
def config_dict_to_list_str(config_dict: Dict) -> List[str]:
"""Creates a list of str given configuration dict
This can be useful for generating pretraining or overwrite opts
in D2Go when a user has a config_dict.
"""
d = flatten_config_dict(config_dict)
str_list = []
for k, v in d.items():
str_list.append(k)
str_list.append(str(v))
return str_list
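# Illustrative usage sketch (keys and values below are hypothetical):
#   >>> config_dict_to_list_str({"SOLVER": {"BASE_LR": 0.01, "MAX_ITER": 1000}})
#   ['SOLVER.BASE_LR', '0.01', 'SOLVER.MAX_ITER', '1000']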
def get_from_flattened_config_dict(dic, flattened_key, default=None):
"""
Reads out a value from the nested config dict using a flattened config key (i.e. all
keys from each level joined with the "." separator); the default value is returned
if the flattened key doesn't exist.
e.g. if the config dict is
MODEL:
TEST:
SCORE_THRESHOLD: 0.7
Then to access the value of SCORE_THRESHOLD, this API should be called
>> score_threshold = get_from_flattened_config_dict(cfg, "MODEL.TEST.SCORE_THRESHOLD")
"""
for k in flattened_key.split("."):
if k not in dic:
return default
dic = dic[k]
return dic
def get_cfg_diff_table(cfg, original_cfg):
"""
Print the difference between two config dicts side-by-side in a table
"""
all_old_keys = list(flatten_config_dict(original_cfg, reorder=True).keys())
all_new_keys = list(flatten_config_dict(cfg, reorder=True).keys())
diff_table = []
if all_old_keys != all_new_keys:
logger = logging.getLogger(__name__)
mismatched_old_keys = set(all_old_keys) - set(all_new_keys)
mismatched_new_keys = set(all_new_keys) - set(all_old_keys)
logger.warning(
"Config key mismatched.\n"
f"Mismatched old keys: {mismatched_old_keys}\n"
f"Mismatched new keys: {mismatched_new_keys}"
)
for old_key in mismatched_old_keys:
old_value = get_from_flattened_config_dict(original_cfg, old_key)
diff_table.append([old_key, old_value, "Key not exists"])
for new_key in mismatched_new_keys:
new_value = get_from_flattened_config_dict(cfg, new_key)
diff_table.append([new_key, "Key not exists", new_value])
# filter out mis-matched keys
all_old_keys = [x for x in all_old_keys if x not in mismatched_old_keys]
all_new_keys = [x for x in all_new_keys if x not in mismatched_new_keys]
for full_key in all_new_keys:
old_value = get_from_flattened_config_dict(original_cfg, full_key)
new_value = get_from_flattened_config_dict(cfg, full_key)
if old_value != new_value:
diff_table.append([full_key, old_value, new_value])
from tabulate import tabulate
table = tabulate(
diff_table,
tablefmt="pipe",
headers=["config key", "old value", "new value"],
)
return table
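# Illustrative sketch of the returned string (values are hypothetical); it is a
# `tabulate`-formatted table such as:
#   | config key     | old value | new value |
#   |:---------------|:----------|:----------|
#   | SOLVER.BASE_LR | 0.01      | 0.02      |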
def get_diff_cfg(old_cfg, new_cfg):
"""
outputs a CfgNode containing keys, values appearing in new_cfg and not in old_cfg.
If `new_allowed` is not set, then new keys will throw a KeyError
old_cfg: CfgNode, the original config, usually the default
new_cfg: CfgNode, the full config being passed by the user
if new allowed is not set on new_cfg, key error is raised
returns: CfgNode, a config containing only key, value changes between old_cfg and new_cfg
example:
Cfg1:
SYSTEM:
NUM_GPUS: 2
TRAIN:
SCALES: (1, 2)
DATASETS:
train_2017:
17: 1
18: 1
Cfg2:
SYSTEM:
NUM_GPUS: 2
TRAIN:
SCALES: (4, 5, 8)
DATASETS:
train_2017:
17: 1
18: 1
get_diff_cfg(Cfg1, Cfg2) gives:
TRAIN:
SCALES: (4, 5, 8)
"""
def get_diff_cfg_rec(old_cfg, new_cfg, out):
for key in new_cfg.keys():
if key not in old_cfg.keys() and old_cfg.is_new_allowed():
out[key] = new_cfg[key]
elif old_cfg[key] != new_cfg[key]:
if type(new_cfg[key]) is type(out):
out[key] = out.__class__()
out[key] = get_diff_cfg_rec(old_cfg[key], new_cfg[key], out[key])
else:
out[key] = new_cfg[key]
return out
out = new_cfg.__class__()
diff_cfg = get_diff_cfg_rec(old_cfg, new_cfg, out)
# Keep the `_DEFAULTS_` even though they should be the same
old_defaults = old_cfg.get(DEFAULTS_KEY, None)
new_defaults = new_cfg.get(DEFAULTS_KEY, None)
assert (
old_defaults == new_defaults
), f"{DEFAULTS_KEY} doesn't match! old ({old_defaults}) vs new ({new_defaults})"
if new_defaults is not None:
diff_cfg[DEFAULTS_KEY] = new_defaults
return diff_cfg
def namedtuple_to_dict(obj: Any):
"""Convert NamedTuple or dataclass to dict so it can be used as config"""
res = {}
for k, v in obj.__dict__.items():
if isinstance(v, Enum):
# in case of enum, serialize the enum value
res[k] = v.value
else:
res[k] = v
return res
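# Illustrative usage sketch (the dataclass and enum below are hypothetical):
#   >>> from dataclasses import dataclass
#   >>> from enum import Enum
#   >>> class Color(Enum):
#   ...     RED = "red"
#   >>> @dataclass
#   ... class Opts:
#   ...     lr: float = 0.1
#   ...     color: Color = Color.RED
#   >>> namedtuple_to_dict(Opts())
#   {'lr': 0.1, 'color': 'red'}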
def resolve_default_config(cfg):
if DEFAULTS_KEY not in cfg:
raise ValueError(
f"Can't resolved default config because `{DEFAULTS_KEY}` is"
f" missing from cfg: \n{cfg}"
)
updater_names: List[str] = cfg[DEFAULTS_KEY]
assert isinstance(updater_names, list), updater_names
assert [isinstance(x, str) for x in updater_names], updater_names
logger.info(f"Resolving default config by applying updaters: {updater_names} ...")
# starting from an empty CfgNode, sequentially apply the updaters
cfg = type(cfg)()
for name in updater_names:
updater = CONFIG_UPDATER_REGISTRY.get(name)
cfg = updater(cfg)
# the resolved default config should keep the same default generator
cfg[DEFAULTS_KEY] = updater_names
return cfg
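# Illustrative sketch of `_DEFAULTS_`-based resolution; "Detectron2GoRunner" is
# one of the updaters registered in CONFIG_UPDATER_REGISTRY (see
# d2go/runner/config_defaults.py), the rest of the cfg content is hypothetical:
#   >>> from d2go.config import CfgNode
#   >>> cfg = CfgNode({DEFAULTS_KEY: ["Detectron2GoRunner"]})
#   >>> defaults = resolve_default_config(cfg)
#   >>> defaults[DEFAULTS_KEY]
#   ['Detectron2GoRunner']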
| d2go-main | d2go/config/utils.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import logging
from typing import List
from d2go.config import CfgNode
from d2go.utils.gpu_memory_profiler import log_memory_snapshot, record_memory_history
from detectron2.engine.train_loop import HookBase
from detectron2.utils.registry import Registry
logger = logging.getLogger(__name__)
# List of functions to add hooks for trainer, all functions in the registry will
# be called to add hooks
# func(hooks: List[HookBase]) -> None
TRAINER_HOOKS_REGISTRY = Registry("TRAINER_HOOKS_REGISTRY")
def update_hooks_from_registry(hooks: List[HookBase], cfg: CfgNode):
for name, hook_func in TRAINER_HOOKS_REGISTRY:
logger.info(f"Update trainer hooks from {name}...")
hook_func(hooks, cfg)
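# Illustrative sketch of extending trainer hooks via the registry (the hook class
# below is hypothetical); every registered function is later invoked by
# `update_hooks_from_registry` and may modify the hook list in place:
#   >>> @TRAINER_HOOKS_REGISTRY.register()
#   ... def add_my_hooks(hooks: List[HookBase], cfg: CfgNode) -> None:
#   ...     hooks.append(MyCustomHook(cfg.OUTPUT_DIR))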
class D2GoGpuMemorySnapshot(HookBase):
"""
A profiler that logs GPU memory snapshot during training.
There are three places that logging could happen:
1. start of training
d2go records memory snapshots before model instantiation and logs snapshots after `log_n_steps` iterations.
This is to capture the typical memory peak at model instantiation and the first few iterations
2. during training
d2go records memory snapshots at `log_during_train_at` iteration and logs snapshots after `log_n_steps` iterations.
This is to capture the stabilized memory utilization during training.
3. OOM
Right before OOM, the GPU memory snapshot will be logged to help diagnose OOM issues.
"""
def __init__(
self,
output_dir,
log_n_steps: int = 3,
log_during_train_at: int = 550,
trace_max_entries: int = 1000000,
) -> None:
self.output_dir = output_dir
self.step = 0
self.log_n_steps = log_n_steps
self.log_during_train_at = log_during_train_at
self.trace_max_entries = trace_max_entries
logger.warning(
"WARNING: Memory snapshot profiler is enabled. This may cause ranks to die and training jobs to get stuck. Please use with caution."
)
def before_step(self):
if self.trainer.iter == self.log_during_train_at:
record_memory_history(self.trace_max_entries)
def after_step(self):
if self.step == self.log_n_steps - 1:
log_memory_snapshot(self.output_dir, file_prefix=f"iter{self.trainer.iter}")
if self.trainer.iter == self.log_during_train_at + self.log_n_steps - 1:
log_memory_snapshot(self.output_dir, file_prefix=f"iter{self.trainer.iter}")
self.step += 1
| d2go-main | d2go/runner/training_hooks.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import importlib
from typing import Optional, Type, Union
from d2go.runner.api import RunnerV2Mixin
from d2go.runner.default_runner import (
BaseRunner,
Detectron2GoRunner,
GeneralizedRCNNRunner,
)
from d2go.runner.lightning_task import DefaultTask
from d2go.runner.training_hooks import TRAINER_HOOKS_REGISTRY
__all__ = [
"RunnerV2Mixin",
"BaseRunner",
"Detectron2GoRunner",
"GeneralizedRCNNRunner",
"TRAINER_HOOKS_REGISTRY",
"create_runner",
"import_runner",
]
# TODO: remove this function
def create_runner(
class_full_name: Optional[str], *args, **kwargs
) -> Union[BaseRunner, Type[DefaultTask]]:
"""Constructs a runner instance if class is a d2go runner. Returns class
type if class is a Lightning module.
"""
if class_full_name is None:
runner_class = GeneralizedRCNNRunner
else:
runner_class = import_runner(class_full_name)
if issubclass(runner_class, DefaultTask):
# Return runner class for Lightning module since it requires config
# to construct
return runner_class
return runner_class(*args, **kwargs)
def import_runner(
class_full_name: str, check: bool = True
) -> Type[Union[BaseRunner, DefaultTask]]:
runner_module_name, runner_class_name = class_full_name.rsplit(".", 1)
runner_module = importlib.import_module(runner_module_name)
runner_class = getattr(runner_module, runner_class_name)
if check and not (
issubclass(runner_class, BaseRunner) ^ issubclass(runner_class, DefaultTask)
):
raise ValueError(
f"The runner must be subclass of either `BaseRunner` or `DefaultTaks`,"
f" found: {runner_class}"
)
return runner_class
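# Illustrative usage sketch:
#   >>> runner_class = import_runner("d2go.runner.GeneralizedRCNNRunner")
#   >>> runner = create_runner("d2go.runner.GeneralizedRCNNRunner")
# For Lightning tasks (subclasses of `DefaultTask`), `create_runner` returns the
# class itself rather than an instance, since construction requires a config.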
| d2go-main | d2go/runner/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from typing import final
from d2go.config import CfgNode
class RunnerV2Mixin(object):
"""
Interface for (V2) Runner:
- `get_default_cfg` is not a runner method anymore.
"""
@classmethod
@final
def get_default_cfg(cls) -> CfgNode:
raise NotImplementedError("")
| d2go-main | d2go/runner/api.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from d2go.config import CfgNode as CN
from d2go.data.build import (
add_random_subset_training_sampler_default_configs,
add_weighted_training_sampler_default_configs,
)
from d2go.data.config import add_d2go_data_default_configs
from d2go.modeling.backbone.fbnet_cfg import add_fbnet_v2_default_configs
from d2go.modeling.ema import add_model_ema_configs
from d2go.modeling.kmeans_anchors import add_kmeans_anchors_cfg
from d2go.modeling.meta_arch.fcos import add_fcos_configs
from d2go.modeling.model_freezing_utils import add_model_freezing_configs
from d2go.modeling.subclass import add_subclass_configs
from d2go.quantization.modeling import add_quantization_default_configs
from d2go.registry.builtin import CONFIG_UPDATER_REGISTRY
from d2go.trainer.activation_checkpointing import add_activation_checkpoint_configs
from d2go.trainer.fsdp import add_fsdp_configs
from d2go.utils.gpu_memory_profiler import (
add_memory_profiler_configs,
add_zoomer_default_config,
)
from d2go.utils.visualization import add_tensorboard_default_configs
from detectron2.config import get_cfg as get_d2_cfg
from mobile_cv.common.misc.oss_utils import fb_overwritable
def _add_abnormal_checker_configs(_C: CN) -> None:
_C.ABNORMAL_CHECKER = CN()
# check and log the iteration with bad losses if enabled
_C.ABNORMAL_CHECKER.ENABLED = False
@fb_overwritable()
def _add_detectron2go_runner_default_fb_cfg(_C: CN) -> None:
pass
@fb_overwritable()
def _add_base_runner_default_fb_cfg(_C: CN) -> None:
pass
def add_distillation_configs(_C: CN) -> None:
"""Add default parameters to config
The TEACHER.CONFIG field allows us to build a PyTorch model using an
existing config. We can build any model that is normally supported by
D2Go (e.g., FBNet) because we just use the same config
"""
_C.DISTILLATION = CN()
_C.DISTILLATION.ALGORITHM = "LabelDistillation"
_C.DISTILLATION.HELPER = "BaseDistillationHelper"
_C.DISTILLATION.TEACHER = CN()
_C.DISTILLATION.TEACHER.TORCHSCRIPT_FNAME = ""
_C.DISTILLATION.TEACHER.DEVICE = ""
_C.DISTILLATION.TEACHER.TYPE = "torchscript"
_C.DISTILLATION.TEACHER.CONFIG_FNAME = ""
_C.DISTILLATION.TEACHER.RUNNER_NAME = "d2go.runner.GeneralizedRCNNRunner"
_C.DISTILLATION.TEACHER.OVERWRITE_OPTS = []
def _add_detectron2go_runner_default_cfg(_C: CN) -> None:
# _C.MODEL.FBNET_V2...
add_fbnet_v2_default_configs(_C)
# _C.MODEL.FROZEN_LAYER_REG_EXP
add_model_freezing_configs(_C)
# _C.MODEL other models
add_model_ema_configs(_C)
# _C.D2GO_DATA...
add_d2go_data_default_configs(_C)
# _C.TENSORBOARD...
add_tensorboard_default_configs(_C)
# _C.MODEL.KMEANS...
add_kmeans_anchors_cfg(_C)
# _C.QUANTIZATION
add_quantization_default_configs(_C)
# _C.DATASETS.TRAIN_REPEAT_FACTOR
add_weighted_training_sampler_default_configs(_C)
# _C.DATALOADER.RANDOM_SUBSET_RATIO
add_random_subset_training_sampler_default_configs(_C)
# _C.ABNORMAL_CHECKER
_add_abnormal_checker_configs(_C)
# _C.MODEL.SUBCLASS
add_subclass_configs(_C)
# _C.MODEL.FCOS
add_fcos_configs(_C)
# _C.DISTILLATION
add_distillation_configs(_C)
# _C.FSDP
add_fsdp_configs(_C)
# _C.ACTIVATION_CHECKPOINT
add_activation_checkpoint_configs(_C)
# Set find_unused_parameters for DistributedDataParallel.
_C.MODEL.DDP_FIND_UNUSED_PARAMETERS = False
# Set FP16 gradient compression for DistributedDataParallel.
_C.MODEL.DDP_FP16_GRAD_COMPRESS = False
# Specify the gradients as views
_C.MODEL.DDP_GRADIENT_AS_BUCKET_VIEW = False
# Set default optimizer
_C.SOLVER.OPTIMIZER = "sgd"
_C.SOLVER.LR_MULTIPLIER_OVERWRITE = []
_C.SOLVER.WEIGHT_DECAY_EMBED = 0.0
_C.SOLVER.WEIGHT_DECAY_OVERWRITE = []
assert not _C.SOLVER.AMP.ENABLED
# AMP precision is used by both D2 and lightning backend. Can be "float16" or "bfloat16".
_C.SOLVER.AMP.PRECISION = "float16"
# log the grad scalar to the output
_C.SOLVER.AMP.LOG_GRAD_SCALER = False
# Betas are used in the AdamW optimizer
_C.SOLVER.BETAS = (0.9, 0.999)
_C.SOLVER.EPS = 1e-08
_C.SOLVER.FUSED = False
_C.SOLVER.DETERMINISTIC = False
# RECOMPUTE_BOXES for LSJ Training
_C.INPUT.RECOMPUTE_BOXES = False
# Default world size in D2 is 0, which means scaling is not applied. For D2Go,
# auto scaling is encouraged, so set it to 8
assert _C.SOLVER.REFERENCE_WORLD_SIZE == 0
_C.SOLVER.REFERENCE_WORLD_SIZE = 8
# Besides scaling default D2 configs, also scale quantization configs
_C.SOLVER.AUTO_SCALING_METHODS = [
"default_scale_d2_configs",
"default_scale_quantization_configs",
]
# Modeling hooks
# List of modeling hook names
_C.MODEL.MODELING_HOOKS = []
# Profiler
_C.PROFILERS = ["default_flop_counter"]
# GPU memory profiler
add_memory_profiler_configs(_C)
# Zoomer memory profiling
add_zoomer_default_config(_C)
# Checkpointing-specific config
_C.LOAD_CKPT_TO_GPU = False
# Add FB specific configs
_add_detectron2go_runner_default_fb_cfg(_C)
# Specify whether to perform NUMA binding
_C.NUMA_BINDING = False
# Specify whether to zero the gradients before forward
_C.ZERO_GRAD_BEFORE_FORWARD = False
# Whether to enforce rebuilding data loaders for datasets that have expiration
_C.DATALOADER.ENFORE_EXPIRATION = False
def _add_rcnn_default_config(_C: CN) -> None:
_C.EXPORT_CAFFE2 = CN()
_C.EXPORT_CAFFE2.USE_HEATMAP_MAX_KEYPOINT = False
# Options about how to export the model
_C.RCNN_EXPORT = CN()
# whether or not to include the postprocess (GeneralizedRCNN._postprocess) step
# inside the exported model
_C.RCNN_EXPORT.INCLUDE_POSTPROCESS = False
_C.RCNN_PREPARE_FOR_EXPORT = "default_rcnn_prepare_for_export"
_C.RCNN_PREPARE_FOR_QUANT = "default_rcnn_prepare_for_quant"
_C.register_deprecated_key("RCNN_PREPARE_FOR_QUANT_CONVERT")
@CONFIG_UPDATER_REGISTRY.register("BaseRunner")
def get_base_runner_default_cfg(cfg: CN) -> CN:
assert len(cfg) == 0, f"start from scratch, but previous cfg is non-empty: {cfg}"
cfg = get_d2_cfg()
# upgrade from D2's CfgNode to D2Go's CfgNode
cfg = CN.cast_from_other_class(cfg)
cfg.SOLVER.AUTO_SCALING_METHODS = ["default_scale_d2_configs"]
# Frequency of metric gathering in trainer.
cfg.GATHER_METRIC_PERIOD = 1
# Frequency of metric printer, tensorboard writer, etc.
cfg.WRITER_PERIOD = 20
# Enable async writing metrics to tensorboard and logs to speed up training
cfg.ASYNC_WRITE_METRICS = False
# train_net specific arguments, define in runner but used in train_net
# run evaluation after training is done
cfg.TEST.FINAL_EVAL = True
_add_base_runner_default_fb_cfg(cfg)
return cfg
@CONFIG_UPDATER_REGISTRY.register("Detectron2GoRunner")
def get_detectron2go_runner_default_cfg(cfg: CN) -> CN:
assert len(cfg) == 0, f"start from scratch, but previous cfg is non-empty: {cfg}"
cfg = get_base_runner_default_cfg(cfg)
_add_detectron2go_runner_default_cfg(cfg)
return cfg
@CONFIG_UPDATER_REGISTRY.register("GeneralizedRCNNRunner")
def get_generalized_rcnn_runner_default_cfg(cfg: CN) -> CN:
assert len(cfg) == 0, f"start from scratch, but previous cfg is non-empty: {cfg}"
cfg = get_detectron2go_runner_default_cfg(cfg)
_add_rcnn_default_config(cfg)
return cfg
| d2go-main | d2go/runner/config_defaults.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import contextlib
import logging
import os
from collections import OrderedDict
from functools import lru_cache
from typing import Any, List, Optional, Type, Union
import detectron2.utils.comm as comm
import torch
from d2go.checkpoint.api import is_distributed_checkpoint
from d2go.checkpoint.fsdp_checkpoint import FSDPCheckpointer
from d2go.config import CfgNode, CONFIG_SCALING_METHOD_REGISTRY, temp_defrost
from d2go.config.utils import get_cfg_diff_table
from d2go.data.build import build_d2go_train_loader
from d2go.data.dataset_mappers.build import build_dataset_mapper
from d2go.data.datasets import inject_coco_datasets, register_dynamic_datasets
from d2go.data.transforms.build import build_transform_gen
from d2go.data.utils import (
configure_dataset_creation,
maybe_subsample_n_images,
update_cfg_if_using_adhoc_dataset,
)
from d2go.distributed import D2GoSharedContext
from d2go.evaluation.evaluator import inference_on_dataset
from d2go.modeling import ema
from d2go.modeling.api import build_d2go_model
from d2go.modeling.kmeans_anchors import compute_kmeans_anchors_hook
from d2go.modeling.model_freezing_utils import freeze_matched_bn, set_requires_grad
from d2go.optimizer.build import build_optimizer_mapper
from d2go.quantization.modeling import QATHook, setup_qat_model
from d2go.runner.config_defaults import (
get_base_runner_default_cfg,
get_detectron2go_runner_default_cfg,
get_generalized_rcnn_runner_default_cfg,
)
from d2go.runner.training_hooks import (
D2GoGpuMemorySnapshot,
TRAINER_HOOKS_REGISTRY,
update_hooks_from_registry,
)
from d2go.trainer.fsdp import get_grad_scaler
from d2go.trainer.helper import parse_precision_from_string
from d2go.utils.abnormal_checker import (
AbnormalLossChecker,
AbnormalLossCheckerWrapper,
get_writers,
)
from d2go.utils.flop_calculator import attach_profilers
from d2go.utils.gpu_memory_profiler import attach_oom_logger
from d2go.utils.helper import D2Trainer, TensorboardXWriter
from d2go.utils.misc import get_tensorboard_log_dir
from d2go.utils.visualization import DataLoaderVisWrapper, VisualizationEvaluator
from detectron2.checkpoint import DetectionCheckpointer, PeriodicCheckpointer
from detectron2.data import (
build_detection_test_loader as d2_build_detection_test_loader,
build_detection_train_loader as d2_build_detection_train_loader,
MetadataCatalog,
)
from detectron2.engine import hooks
from detectron2.engine.train_loop import AMPTrainer, SimpleTrainer
from detectron2.evaluation import (
COCOEvaluator,
DatasetEvaluators,
LVISEvaluator,
print_csv_format,
RotatedCOCOEvaluator,
verify_results,
)
from detectron2.modeling import GeneralizedRCNNWithTTA
from detectron2.solver import build_lr_scheduler as d2_build_lr_scheduler
from detectron2.utils.events import CommonMetricPrinter, JSONWriter
from mobile_cv.common.misc.oss_utils import fb_overwritable
from mobile_cv.predictor.api import PredictorWrapper
from torch import nn
logger = logging.getLogger(__name__)
ALL_TB_WRITERS = []
@lru_cache()
def _get_tbx_writer(log_dir, window_size=20):
ret = TensorboardXWriter(log_dir, window_size=window_size)
ALL_TB_WRITERS.append(ret)
return ret
def _close_all_tbx_writers():
for x in ALL_TB_WRITERS:
x.close()
ALL_TB_WRITERS.clear()
@CONFIG_SCALING_METHOD_REGISTRY.register()
def default_scale_d2_configs(cfg, new_world_size):
gpu_scale = new_world_size / cfg.SOLVER.REFERENCE_WORLD_SIZE
base_lr = cfg.SOLVER.BASE_LR
base_lr_end = cfg.SOLVER.BASE_LR_END
max_iter = cfg.SOLVER.MAX_ITER
steps = cfg.SOLVER.STEPS
eval_period = cfg.TEST.EVAL_PERIOD
ims_per_batch_train = cfg.SOLVER.IMS_PER_BATCH
warmup_iters = cfg.SOLVER.WARMUP_ITERS
# lr scale
lr_scales = {
"sgd": gpu_scale,
"sgd_mt": gpu_scale,
}
optim_name = cfg.SOLVER.OPTIMIZER.lower()
# only scale the lr for the optimizers specified in `lr_scales`
lr_scale = lr_scales.get(optim_name, 1.0)
# default configs in D2
cfg.SOLVER.BASE_LR = base_lr * lr_scale
cfg.SOLVER.BASE_LR_END = base_lr_end * lr_scale
cfg.SOLVER.MAX_ITER = int(round(max_iter / gpu_scale))
cfg.SOLVER.STEPS = tuple(int(round(s / gpu_scale)) for s in steps)
cfg.TEST.EVAL_PERIOD = int(round(eval_period / gpu_scale))
cfg.SOLVER.IMS_PER_BATCH = int(round(ims_per_batch_train * gpu_scale))
cfg.SOLVER.WARMUP_ITERS = int(round(warmup_iters / gpu_scale))
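# Illustrative numeric sketch (values are hypothetical): with
# SOLVER.REFERENCE_WORLD_SIZE = 8, BASE_LR = 0.01, MAX_ITER = 90000,
# IMS_PER_BATCH = 16 and new_world_size = 16, gpu_scale is 2; for the "sgd"
# optimizer this yields BASE_LR = 0.02, MAX_ITER = 45000, IMS_PER_BATCH = 32
# (iteration counts shrink while LR and total batch size grow).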
@CONFIG_SCALING_METHOD_REGISTRY.register()
def default_scale_quantization_configs(cfg, new_world_size):
gpu_scale = new_world_size / cfg.SOLVER.REFERENCE_WORLD_SIZE
# Scale QUANTIZATION related configs
cfg.QUANTIZATION.QAT.START_ITER = int(
round(cfg.QUANTIZATION.QAT.START_ITER / gpu_scale)
)
cfg.QUANTIZATION.QAT.ENABLE_OBSERVER_ITER = int(
round(cfg.QUANTIZATION.QAT.ENABLE_OBSERVER_ITER / gpu_scale)
)
cfg.QUANTIZATION.QAT.ENABLE_LEARNABLE_OBSERVER_ITER = int(
round(cfg.QUANTIZATION.QAT.ENABLE_LEARNABLE_OBSERVER_ITER / gpu_scale)
)
cfg.QUANTIZATION.QAT.DISABLE_OBSERVER_ITER = int(
round(cfg.QUANTIZATION.QAT.DISABLE_OBSERVER_ITER / gpu_scale)
)
cfg.QUANTIZATION.QAT.FREEZE_BN_ITER = int(
round(cfg.QUANTIZATION.QAT.FREEZE_BN_ITER / gpu_scale)
)
@TRAINER_HOOKS_REGISTRY.register()
def add_memory_profiler_hook(hooks, cfg: CfgNode):
# Add GPU memory snapshot profiler to diagnose GPU OOM issues and benchmark memory usage during model training
if cfg.get("MEMORY_PROFILER", CfgNode()).get("ENABLED", False):
hooks.append(
D2GoGpuMemorySnapshot(
cfg.OUTPUT_DIR,
log_n_steps=cfg.MEMORY_PROFILER.LOG_N_STEPS,
log_during_train_at=cfg.MEMORY_PROFILER.LOG_DURING_TRAIN_AT,
trace_max_entries=cfg.MEMORY_PROFILER.TRACE_MAX_ENTRIES,
)
)
@fb_overwritable()
def prepare_fb_model(cfg: CfgNode, model: torch.nn.Module) -> torch.nn.Module:
return model
@fb_overwritable()
def get_monitoring_service() -> Any:
return contextlib.nullcontext()
class BaseRunner(object):
def __init__(self):
identifier = f"D2Go.Runner.{self.__class__.__name__}"
torch._C._log_api_usage_once(identifier)
def _initialize(self, cfg):
"""Runner should be initialized in the sub-process in ddp setting"""
if getattr(self, "_has_initialized", False):
logger.warning("Runner has already been initialized, skip initialization.")
return
self._has_initialized = True
self.register(cfg)
def register(self, cfg):
"""
Override `register` in order to run customized code before other things like:
- registering datasets.
- registering model using Registry.
"""
pass
@classmethod
def create_shared_context(cls, cfg) -> D2GoSharedContext:
"""
Override `create_shared_context` in order to run customized code to create distributed shared context that can be accessed by all workers
"""
pass
@classmethod
def get_default_cfg(cls):
return get_base_runner_default_cfg(CfgNode())
def build_model(self, cfg, eval_only=False) -> nn.Module:
# cfg may need to be reused to build trace model again, thus clone
model = build_d2go_model(cfg.clone()).model
if eval_only:
checkpointer = DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR)
checkpointer.load(cfg.MODEL.WEIGHTS)
model.eval()
return model
def do_test(self, *args, **kwargs):
raise NotImplementedError()
def do_train(self, *args, **kwargs):
raise NotImplementedError()
@classmethod
def build_detection_test_loader(cls, *args, **kwargs):
return d2_build_detection_test_loader(*args, **kwargs)
@classmethod
def build_detection_train_loader(cls, *args, **kwargs):
return d2_build_detection_train_loader(*args, **kwargs)
class D2GoDataAPIMixIn:
@staticmethod
def get_mapper(cfg, is_train):
tfm_gens = build_transform_gen(cfg, is_train)
mapper = build_dataset_mapper(cfg, is_train, tfm_gens=tfm_gens)
return mapper
@classmethod
def build_detection_test_loader(
cls, cfg, dataset_name: Union[str, List[str]], mapper=None, collate_fn=None
):
logger.info(
"Building detection test loader for dataset: {} ...".format(dataset_name)
)
with configure_dataset_creation(cfg):
mapper = mapper or cls.get_mapper(cfg, is_train=False)
logger.info("Using dataset mapper:\n{}".format(mapper))
return d2_build_detection_test_loader(
cfg, dataset_name, mapper=mapper, collate_fn=collate_fn
)
@classmethod
def build_detection_train_loader(cls, cfg, *args, mapper=None, **kwargs):
with configure_dataset_creation(cfg):
mapper = mapper or cls.get_mapper(cfg, is_train=True)
data_loader = build_d2go_train_loader(cfg, mapper)
return cls._attach_visualizer_to_data_loader(cfg, data_loader)
@classmethod
def _attach_visualizer_to_data_loader(cls, cfg, data_loader):
if comm.is_main_process():
data_loader_type = cls.get_data_loader_vis_wrapper()
if data_loader_type is not None:
tbx_writer = cls.get_tbx_writer(cfg)
data_loader = data_loader_type(cfg, tbx_writer, data_loader)
return data_loader
@classmethod
def get_tbx_writer(cls, cfg):
return _get_tbx_writer(
get_tensorboard_log_dir(cfg.OUTPUT_DIR),
window_size=cfg.get("WRITER_PERIOD", 20),
)
@staticmethod
def get_data_loader_vis_wrapper() -> Optional[Type[DataLoaderVisWrapper]]:
return DataLoaderVisWrapper
@staticmethod
def get_visualization_evaluator() -> Optional[Type[VisualizationEvaluator]]:
return VisualizationEvaluator
class Detectron2GoRunner(D2GoDataAPIMixIn, BaseRunner):
def register(self, cfg):
super().register(cfg)
self.original_cfg = cfg.clone()
inject_coco_datasets(cfg)
register_dynamic_datasets(cfg)
update_cfg_if_using_adhoc_dataset(cfg)
@classmethod
def get_default_cfg(cls):
return get_detectron2go_runner_default_cfg(CfgNode())
# temporary API
def _build_model(self, cfg, eval_only=False):
# build_model might modify the cfg, thus clone
cfg = cfg.clone()
model = build_d2go_model(cfg).model
ema.may_build_model_ema(cfg, model)
if cfg.QUANTIZATION.QAT.ENABLED:
# Disable fake_quant and observer so that the model will be trained normally
# before QAT being turned on (controlled by QUANTIZATION.QAT.START_ITER).
if hasattr(model, "get_rand_input"):
imsize = cfg.INPUT.MAX_SIZE_TRAIN
rand_input = model.get_rand_input(imsize)
example_inputs = (rand_input, {})
model = setup_qat_model(
cfg,
model,
enable_fake_quant=eval_only,
enable_observer=True,
)
model(*example_inputs)
else:
imsize = cfg.INPUT.MAX_SIZE_TRAIN
model = setup_qat_model(
cfg,
model,
enable_fake_quant=eval_only,
enable_observer=False,
)
if cfg.MODEL.FROZEN_LAYER_REG_EXP:
set_requires_grad(model, cfg.MODEL.FROZEN_LAYER_REG_EXP, False)
model = freeze_matched_bn(model, cfg.MODEL.FROZEN_LAYER_REG_EXP)
if eval_only:
checkpointer = self.build_checkpointer(cfg, model, save_dir=cfg.OUTPUT_DIR)
checkpointer.load(cfg.MODEL.WEIGHTS)
model.eval()
if cfg.MODEL_EMA.ENABLED and cfg.MODEL_EMA.USE_EMA_WEIGHTS_FOR_EVAL_ONLY:
ema.apply_model_ema(model)
return model
def build_model(self, cfg, eval_only=False):
# Attach memory profiler to GPU OOM events
if cfg.get("MEMORY_PROFILER", CfgNode()).get("ENABLED", False):
attach_oom_logger(
cfg.OUTPUT_DIR, trace_max_entries=cfg.MEMORY_PROFILER.TRACE_MAX_ENTRIES
)
model = self._build_model(cfg, eval_only)
model = prepare_fb_model(cfg, model)
# Note: the _visualize_model API is experimental
if comm.is_main_process():
if hasattr(model, "_visualize_model"):
logger.info("Adding model visualization ...")
tbx_writer = self.get_tbx_writer(cfg)
model._visualize_model(tbx_writer)
return model
def build_checkpointer(self, cfg, model, save_dir, **kwargs):
kwargs.update(ema.may_get_ema_checkpointer(cfg, model))
checkpointer = FSDPCheckpointer(model, save_dir=save_dir, **kwargs)
return checkpointer
def build_optimizer(self, cfg, model):
return build_optimizer_mapper(cfg, model)
def build_lr_scheduler(self, cfg, optimizer):
return d2_build_lr_scheduler(cfg, optimizer)
def _create_evaluators(
self,
cfg,
dataset_name,
output_folder,
train_iter,
model_tag,
model=None,
):
evaluator = self.get_evaluator(cfg, dataset_name, output_folder=output_folder)
if not isinstance(evaluator, DatasetEvaluators):
evaluator = DatasetEvaluators([evaluator])
if comm.is_main_process():
# Add evaluator for visualization only to rank 0
tbx_writer = self.get_tbx_writer(cfg)
logger.info("Adding visualization evaluator ...")
mapper = self.get_mapper(cfg, is_train=False)
vis_eval_type = self.get_visualization_evaluator()
if vis_eval_type is not None:
evaluator._evaluators.append(
vis_eval_type(
cfg,
tbx_writer,
mapper,
dataset_name,
train_iter=train_iter,
tag_postfix=model_tag,
)
)
return evaluator
def _do_test(self, cfg, model, train_iter=None, model_tag="default"):
"""train_iter: Current iteration of the model, None means final iteration"""
assert len(cfg.DATASETS.TEST)
assert cfg.OUTPUT_DIR
is_final = (train_iter is None) or (train_iter == cfg.SOLVER.MAX_ITER - 1)
logger.info(
f"Running evaluation for model tag {model_tag} at iter {train_iter}..."
)
def _get_inference_dir_name(base_dir, inference_type, dataset_name):
return os.path.join(
base_dir,
inference_type,
model_tag,
str(train_iter) if train_iter is not None else "final",
dataset_name,
)
attach_profilers(cfg, model)
results = OrderedDict()
results[model_tag] = OrderedDict()
for dataset_name in cfg.DATASETS.TEST:
# Evaluator will create output folder, no need to create here
output_folder = _get_inference_dir_name(
cfg.OUTPUT_DIR, "inference", dataset_name
)
# NOTE: creating evaluator after dataset is loaded as there might be dependency. # noqa
data_loader = self.build_detection_test_loader(cfg, dataset_name)
evaluator = self._create_evaluators(
cfg,
dataset_name,
output_folder,
train_iter,
model_tag,
model.module
if isinstance(model, nn.parallel.DistributedDataParallel)
else model,
)
results_per_dataset = inference_on_dataset(model, data_loader, evaluator)
if comm.is_main_process():
results[model_tag][dataset_name] = results_per_dataset
if is_final:
print_csv_format(results_per_dataset)
if is_final and cfg.TEST.AUG.ENABLED:
# In the end of training, run an evaluation with TTA
# Only support some R-CNN models.
output_folder = _get_inference_dir_name(
cfg.OUTPUT_DIR, "inference_TTA", dataset_name
)
logger.info("Running inference with test-time augmentation ...")
data_loader = self.build_detection_test_loader(
cfg, dataset_name, mapper=lambda x: x
)
evaluator = self.get_evaluator(
cfg, dataset_name, output_folder=output_folder
)
inference_on_dataset(
GeneralizedRCNNWithTTA(cfg, model), data_loader, evaluator
)
if is_final and cfg.TEST.EXPECTED_RESULTS and comm.is_main_process():
assert len(results) == 1, "Results verification only supports one dataset!"
verify_results(cfg, results[model_tag][cfg.DATASETS.TEST[0]])
# write results to tensorboard
if comm.is_main_process() and results:
from detectron2.evaluation.testing import flatten_results_dict
flattened_results = flatten_results_dict(results)
for k, v in flattened_results.items():
tbx_writer = self.get_tbx_writer(cfg)
tbx_writer._writer.add_scalar("eval_{}".format(k), v, train_iter)
if comm.is_main_process():
tbx_writer = self.get_tbx_writer(cfg)
tbx_writer._writer.flush()
return results
def do_test(self, cfg, model, train_iter=None):
"""do_test does not load the weights of the model.
If you want to use it outside the regular training routine,
you will have to load the weights through a checkpointer.
"""
results = OrderedDict()
with maybe_subsample_n_images(cfg) as new_cfg:
# default model
cur_results = self._do_test(
new_cfg, model, train_iter=train_iter, model_tag="default"
)
results.update(cur_results)
# model with ema weights
if cfg.MODEL_EMA.ENABLED and not isinstance(model, PredictorWrapper):
logger.info("Run evaluation with EMA.")
with ema.apply_model_ema_and_restore(model):
cur_results = self._do_test(
new_cfg, model, train_iter=train_iter, model_tag="ema"
)
results.update(cur_results)
return results
def _get_trainer_hooks(
self, cfg, model, optimizer, scheduler, periodic_checkpointer, trainer
):
return [
hooks.IterationTimer(),
ema.EMAHook(cfg, model) if cfg.MODEL_EMA.ENABLED else None,
self._create_data_loader_hook(cfg),
self._create_after_step_hook(
cfg, model, optimizer, scheduler, periodic_checkpointer
),
hooks.EvalHook(
cfg.TEST.EVAL_PERIOD,
lambda: self.do_test(cfg, model, train_iter=trainer.iter),
eval_after_train=False, # done by a separate do_test call in tools/train_net.py
),
compute_kmeans_anchors_hook(self, cfg),
self._create_qat_hook(cfg) if cfg.QUANTIZATION.QAT.ENABLED else None,
]
def do_train(self, cfg, model, resume):
with get_monitoring_service():
# Note that flops at the beginning of training is often inaccurate,
# if a model has input-dependent logic
attach_profilers(cfg, model)
if cfg.NUMA_BINDING is True:
import numa
num_gpus_per_node = comm.get_local_size()
num_sockets = numa.get_max_node() + 1
socket_id = torch.cuda.current_device() // (
max(num_gpus_per_node // num_sockets, 1)
)
node_mask = set([socket_id])
numa.bind(node_mask)
optimizer = self.build_optimizer(cfg, model)
scheduler = self.build_lr_scheduler(cfg, optimizer)
checkpointer = self.build_checkpointer(
cfg,
model,
save_dir=cfg.OUTPUT_DIR,
load_ckpt_to_gpu=cfg.LOAD_CKPT_TO_GPU,
optimizer=optimizer,
scheduler=scheduler,
)
checkpoint = checkpointer.resume_or_load(cfg.MODEL.WEIGHTS, resume=resume)
start_iter = (
checkpoint.get("iteration", -1)
if resume and checkpointer.has_checkpoint()
else -1
)
del checkpoint
# The checkpoint stores the training iteration that just finished, thus we start
# at the next iteration (or iter zero if there's no checkpoint).
start_iter += 1
max_iter = cfg.SOLVER.MAX_ITER
periodic_checkpointer = PeriodicCheckpointer(
checkpointer, cfg.SOLVER.CHECKPOINT_PERIOD, max_iter=max_iter
)
data_loader = self.build_detection_train_loader(cfg)
def _get_model_with_abnormal_checker(model):
if not cfg.ABNORMAL_CHECKER.ENABLED:
return model
tbx_writer = self.get_tbx_writer(cfg)
writers = get_writers(cfg, tbx_writer)
checker = AbnormalLossChecker(start_iter, writers)
ret = AbnormalLossCheckerWrapper(model, checker)
return ret
if cfg.SOLVER.AMP.ENABLED:
trainer = AMPTrainer(
_get_model_with_abnormal_checker(model),
data_loader,
optimizer,
gather_metric_period=cfg.GATHER_METRIC_PERIOD,
zero_grad_before_forward=cfg.ZERO_GRAD_BEFORE_FORWARD,
grad_scaler=get_grad_scaler(cfg),
precision=parse_precision_from_string(
cfg.SOLVER.AMP.PRECISION, lightning=False
),
log_grad_scaler=cfg.SOLVER.AMP.LOG_GRAD_SCALER,
async_write_metrics=cfg.ASYNC_WRITE_METRICS,
)
else:
trainer = SimpleTrainer(
_get_model_with_abnormal_checker(model),
data_loader,
optimizer,
gather_metric_period=cfg.GATHER_METRIC_PERIOD,
zero_grad_before_forward=cfg.ZERO_GRAD_BEFORE_FORWARD,
async_write_metrics=cfg.ASYNC_WRITE_METRICS,
)
if cfg.SOLVER.AMP.ENABLED and torch.cuda.is_available():
# Allow to use the TensorFloat32 (TF32) tensor cores, available on A100 GPUs.
# For more details https://pytorch.org/docs/stable/notes/cuda.html#tf32-on-ampere.
torch.backends.cuda.matmul.allow_tf32 = True
torch.backends.cudnn.allow_tf32 = True
elif cfg.SOLVER.DETERMINISTIC:
torch.backends.cuda.matmul.allow_tf32 = False
torch.backends.cudnn.allow_tf32 = False
trainer_hooks = self._get_trainer_hooks(
cfg, model, optimizer, scheduler, periodic_checkpointer, trainer
)
if comm.is_main_process():
assert (
cfg.GATHER_METRIC_PERIOD <= cfg.WRITER_PERIOD
and cfg.WRITER_PERIOD % cfg.GATHER_METRIC_PERIOD == 0
), "WRITER_PERIOD needs to be divisible by GATHER_METRIC_PERIOD"
tbx_writer = self.get_tbx_writer(cfg)
writers = [
CommonMetricPrinter(max_iter, window_size=cfg.WRITER_PERIOD),
JSONWriter(
os.path.join(cfg.OUTPUT_DIR, "metrics.json"),
window_size=cfg.WRITER_PERIOD,
),
tbx_writer,
]
trainer_hooks.append(hooks.PeriodicWriter(writers, cfg.WRITER_PERIOD))
update_hooks_from_registry(trainer_hooks, cfg)
trainer.register_hooks(trainer_hooks)
trainer.train(start_iter, max_iter)
if hasattr(self, "original_cfg"):
table = get_cfg_diff_table(cfg, self.original_cfg)
logger.info(
"GeneralizeRCNN Runner ignoring training config change: \n" + table
)
trained_cfg = self.original_cfg.clone()
else:
trained_cfg = cfg.clone()
with temp_defrost(trained_cfg):
trained_cfg.MODEL.WEIGHTS = checkpointer.get_checkpoint_file()
return {"model_final": trained_cfg}
@staticmethod
def get_evaluator(cfg, dataset_name, output_folder):
evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type
if evaluator_type in ["coco", "coco_panoptic_seg"]:
# D2 is in the process of reducing the use of cfg.
dataset_evaluators = COCOEvaluator(
dataset_name,
output_dir=output_folder,
kpt_oks_sigmas=cfg.TEST.KEYPOINT_OKS_SIGMAS,
max_dets_per_image=cfg.TEST.DETECTIONS_PER_IMAGE,
)
elif evaluator_type in ["rotated_coco"]:
dataset_evaluators = DatasetEvaluators(
[RotatedCOCOEvaluator(dataset_name, cfg, True, output_folder)]
)
elif evaluator_type in ["lvis"]:
dataset_evaluators = LVISEvaluator(
dataset_name,
output_dir=output_folder,
max_dets_per_image=cfg.TEST.DETECTIONS_PER_IMAGE,
)
else:
dataset_evaluators = D2Trainer.build_evaluator(
cfg, dataset_name, output_folder
)
if not isinstance(dataset_evaluators, DatasetEvaluators):
dataset_evaluators = DatasetEvaluators([dataset_evaluators])
return dataset_evaluators
@staticmethod
def final_model_name():
return "model_final"
def _create_after_step_hook(
self, cfg, model, optimizer, scheduler, periodic_checkpointer
):
"""
Create a hook that performs some pre-defined tasks used in this script
(evaluation, LR scheduling, checkpointing).
"""
def after_step_callback(trainer):
trainer.storage.put_scalar(
"lr", optimizer.param_groups[0]["lr"], smoothing_hint=False
)
if trainer.iter < cfg.SOLVER.MAX_ITER - 1:
# Since scheduler.step() is called after the backward at each iteration,
# this will cause "where = 1.0" in the scheduler after the last iteration,
# which will trigger "IndexError: list index out of range" in StepParamScheduler.
# See test_warmup_stepwithfixedgamma in vision/fair/detectron2/tests:test_scheduler for an example
scheduler.step()
# Note: when precise BN is enabled, some checkpoints will have more precise
# statistics than others, if they are saved immediately after eval.
# Note: FSDP requires all ranks to execute saving/loading logic
if comm.is_main_process() or is_distributed_checkpoint(
periodic_checkpointer.checkpointer
):
periodic_checkpointer.step(trainer.iter)
return hooks.CallbackHook(after_step=after_step_callback)
def _create_data_loader_hook(self, cfg):
"""
Create a hook for manipulating data loader
"""
return None
def _create_qat_hook(self, cfg) -> Optional[QATHook]:
"""
Create a hook to start QAT (during training) and/or change the phase of QAT.
"""
if not cfg.QUANTIZATION.QAT.ENABLED:
return None
return QATHook(cfg, self.build_detection_train_loader)
class GeneralizedRCNNRunner(Detectron2GoRunner):
@classmethod
def get_default_cfg(cls):
return get_generalized_rcnn_runner_default_cfg(CfgNode())
| d2go-main | d2go/runner/default_runner.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import logging
import os
from copy import deepcopy
from enum import Enum
from typing import Any, Dict, List, Optional, Tuple
import pytorch_lightning as pl
import torch
from d2go.config import CfgNode
from d2go.data.datasets import inject_coco_datasets, register_dynamic_datasets
from d2go.data.utils import update_cfg_if_using_adhoc_dataset
from d2go.modeling.api import build_meta_arch
from d2go.modeling.model_freezing_utils import set_requires_grad
from d2go.optimizer.build import build_optimizer_mapper
from d2go.runner.api import RunnerV2Mixin
from d2go.runner.callbacks.quantization import maybe_prepare_for_quantization, PREPARED
from d2go.runner.default_runner import (
_get_tbx_writer,
D2GoDataAPIMixIn,
Detectron2GoRunner,
GeneralizedRCNNRunner,
)
from d2go.utils.ema_state import EMAState
from d2go.utils.misc import get_tensorboard_log_dir
from detectron2.engine.train_loop import HookBase
from detectron2.solver import build_lr_scheduler as d2_build_lr_scheduler
from mobile_cv.common.misc.oss_utils import fb_overwritable
from pytorch_lightning.strategies import DDPStrategy, SingleDeviceStrategy
from pytorch_lightning.utilities import rank_zero_info, rank_zero_only
from pytorch_lightning.utilities.logger import _flatten_dict
_STATE_DICT_KEY = "state_dict"
_OLD_STATE_DICT_KEY = "model"
_OLD_EMA_KEY = "ema_state"
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def _is_lightning_checkpoint(checkpoint: Dict[str, Any]) -> bool:
"""Returns true if we believe this checkpoint to be a Lightning checkpoint."""
return _STATE_DICT_KEY in checkpoint
def _is_d2go_checkpoint(checkpoint: Dict[str, Any]) -> bool:
"""Returns true if we believe this to be a D2Go checkpoint."""
d2_go_keys = [_OLD_STATE_DICT_KEY, "iteration"]
for key in d2_go_keys:
if key not in checkpoint:
return False
return True
def _convert_to_lightning(d2_checkpoint: Dict[str, Any]) -> None:
"""Converst a D2Go Checkpoint to Lightning in-place by renaming keys."""
prefix = "model" # based on DefaultTask.model.
old_keys = list(d2_checkpoint[_OLD_STATE_DICT_KEY])
for key in old_keys:
d2_checkpoint[_OLD_STATE_DICT_KEY][f"{prefix}.{key}"] = d2_checkpoint[
_OLD_STATE_DICT_KEY
][key]
del d2_checkpoint[_OLD_STATE_DICT_KEY][key]
if "model.pixel_mean" in d2_checkpoint[_OLD_STATE_DICT_KEY]:
del d2_checkpoint[_OLD_STATE_DICT_KEY]["model.pixel_mean"]
if "model.pixel_std" in d2_checkpoint[_OLD_STATE_DICT_KEY]:
del d2_checkpoint[_OLD_STATE_DICT_KEY]["model.pixel_std"]
for old, new in zip(
[_OLD_STATE_DICT_KEY, "iteration"], [_STATE_DICT_KEY, "global_step"]
):
d2_checkpoint[new] = d2_checkpoint[old]
del d2_checkpoint[old]
for old, new in zip(
["optimizer", "scheduler"], ["optimizer_states", "lr_schedulers"]
):
if old not in d2_checkpoint:
continue
d2_checkpoint[new] = [d2_checkpoint[old]]
del d2_checkpoint[old]
if _OLD_EMA_KEY in d2_checkpoint:
d2_checkpoint["model_ema"] = d2_checkpoint[_OLD_EMA_KEY]
del d2_checkpoint[_OLD_EMA_KEY]
d2_checkpoint["epoch"] = 0
class ModelTag(str, Enum):
DEFAULT = "default"
EMA = "ema"
@fb_overwritable()
def get_gpu_profiler(cfg: CfgNode) -> Optional[HookBase]:
return None
class DefaultTask(D2GoDataAPIMixIn, pl.LightningModule):
def __init__(self, cfg: CfgNode):
super().__init__()
self.register(cfg)
self.cfg = cfg
self.model = self._build_model()
self.storage = None
# evaluators for validation datasets, split by model tag(default, ema),
# in the order of DATASETS.TEST
self.dataset_evaluators = {ModelTag.DEFAULT: []}
self.save_hyperparameters()
self.eval_res = None
# Support custom training step in meta arch
if hasattr(self.model, "training_step"):
# activate manual optimization for custom training step
self.automatic_optimization = False
self.ema_state: Optional[EMAState] = None
if cfg.MODEL_EMA.ENABLED:
self.ema_state = EMAState(
decay=cfg.MODEL_EMA.DECAY,
device=cfg.MODEL_EMA.DEVICE or cfg.MODEL.DEVICE,
)
self.dataset_evaluators[ModelTag.EMA] = []
self.gpu_profiler: Optional[HookBase] = get_gpu_profiler(cfg)
def _build_model(self) -> torch.nn.Module:
model = build_meta_arch(self.cfg)
if self.cfg.MODEL.FROZEN_LAYER_REG_EXP:
set_requires_grad(model, self.cfg.MODEL.FROZEN_LAYER_REG_EXP, value=False)
return model
@classmethod
def from_config(cls, cfg: CfgNode, eval_only=False):
"""Builds Lightning module including model from config.
To load weights from a pretrained checkpoint, please specify checkpoint
path in `MODEL.WEIGHTS`.
Args:
cfg: D2go config node.
eval_only: True if module should be in eval mode.
"""
if eval_only and not cfg.MODEL.WEIGHTS:
logger.warning("MODEL.WEIGHTS is missing for eval only mode.")
if cfg.MODEL.WEIGHTS:
# only load model weights from checkpoint
logger.info(f"Load model weights from checkpoint: {cfg.MODEL.WEIGHTS}.")
task = cls.load_from_checkpoint(cfg.MODEL.WEIGHTS, cfg=cfg, strict=False)
else:
task = cls(cfg)
if cfg.MODEL_EMA.ENABLED and cfg.MODEL_EMA.USE_EMA_WEIGHTS_FOR_EVAL_ONLY:
assert task.ema_state, "EMA state is not loaded from checkpoint."
task.ema_state.apply_to(task.model)
if eval_only:
task.eval()
return task
def training_step(self, batch, batch_idx):
if hasattr(self.model, "training_step"):
return self._meta_arch_training_step(batch, batch_idx)
return self._standard_training_step(batch, batch_idx)
def _standard_training_step(self, batch, batch_idx):
loss_dict = self.forward(batch)
losses = sum(loss_dict.values())
loss_dict["total_loss"] = losses
self.storage.step()
self.log_dict(loss_dict, prog_bar=True)
return losses
def _meta_arch_training_step(self, batch, batch_idx):
opt = self.optimizers()
loss_dict = self.model.training_step(
batch, batch_idx, opt, self.manual_backward
)
sch = self.lr_schedulers()
sch.step()
self.storage.step()
self.log_dict(loss_dict, prog_bar=True)
return loss_dict
def test_step(self, batch, batch_idx: int, dataloader_idx: int = 0) -> None:
self._evaluation_step(batch, batch_idx, dataloader_idx)
def validation_step(self, batch, batch_idx: int, dataloader_idx: int = 0) -> None:
self._evaluation_step(batch, batch_idx, dataloader_idx)
def _evaluation_step(self, batch, batch_idx: int, dataloader_idx: int) -> None:
if not isinstance(batch, List):
batch = [batch]
outputs = self.forward(batch)
self.dataset_evaluators[ModelTag.DEFAULT][dataloader_idx].process(
batch, outputs
)
if self.ema_state:
ema_outputs = self.model_ema(batch)
self.dataset_evaluators[ModelTag.EMA][dataloader_idx].process(
batch, ema_outputs
)
def _log_dataset_evaluation_results(self) -> None:
nested_res = {}
for tag, evaluators in self.dataset_evaluators.items():
res = {}
for idx, evaluator in enumerate(evaluators):
dataset_name = self.cfg.DATASETS.TEST[idx]
res[dataset_name] = evaluator.evaluate()
nested_res[tag.lower()] = res
self.eval_res = nested_res
flattened = _flatten_dict(nested_res)
if self.trainer.global_rank:
assert (
len(flattened) == 0
), "evaluation results should have been reduced on rank 0."
self.log_dict(flattened, rank_zero_only=True)
def test_epoch_end(self, _outputs) -> None:
self._evaluation_epoch_end()
def validation_epoch_end(self, _outputs) -> None:
self._evaluation_epoch_end()
def _evaluation_epoch_end(self) -> None:
self._log_dataset_evaluation_results()
self._reset_dataset_evaluators()
def configure_optimizers(
self,
) -> Tuple[List[torch.optim.Optimizer], List]:
model = self.model
if hasattr(self, PREPARED):
# train the prepared model for FX quantization
model = getattr(self, PREPARED)
optim = build_optimizer_mapper(self.cfg, model)
lr_scheduler = d2_build_lr_scheduler(self.cfg, optim)
return [optim], [{"scheduler": lr_scheduler, "interval": "step"}]
def train_dataloader(self):
return self.build_detection_train_loader(self.cfg)
def _reset_dataset_evaluators(self):
"""reset validation dataset evaluator to be run in EVAL_PERIOD steps"""
assert isinstance(self.trainer.strategy, (SingleDeviceStrategy, DDPStrategy)), (
"Only Single Device or DDP strategies are supported,"
f" instead found: {self.trainer.strategy}"
)
def _get_inference_dir_name(
base_dir, inference_type, dataset_name, model_tag: ModelTag
):
next_eval_iter = self.trainer.global_step + self.cfg.TEST.EVAL_PERIOD
if self.trainer.global_step == 0:
next_eval_iter -= 1
return os.path.join(
base_dir,
inference_type,
model_tag,
str(next_eval_iter),
dataset_name,
)
@rank_zero_only
def _setup_visualization_evaluator(
evaluator,
dataset_name: str,
model_tag: ModelTag,
) -> None:
logger.info("Adding visualization evaluator ...")
mapper = self.get_mapper(self.cfg, is_train=False)
vis_eval_type = self.get_visualization_evaluator()
# TODO: replace tbx_writer with Lightning's self.logger.experiment
tbx_writer = _get_tbx_writer(get_tensorboard_log_dir(self.cfg.OUTPUT_DIR))
if vis_eval_type is not None:
evaluator._evaluators.append(
vis_eval_type(
self.cfg,
tbx_writer,
mapper,
dataset_name,
train_iter=self.trainer.global_step,
tag_postfix=model_tag,
)
)
for tag, dataset_evaluators in self.dataset_evaluators.items():
dataset_evaluators.clear()
assert self.cfg.OUTPUT_DIR, "Expect output_dir to be specified in config"
for dataset_name in self.cfg.DATASETS.TEST:
# setup evaluator for each dataset
output_folder = _get_inference_dir_name(
self.cfg.OUTPUT_DIR, "inference", dataset_name, tag
)
evaluator = self.get_evaluator(
self.cfg, dataset_name, output_folder=output_folder
)
evaluator.reset()
dataset_evaluators.append(evaluator)
_setup_visualization_evaluator(evaluator, dataset_name, tag)
def _evaluation_dataloader(self):
# TODO: Support subsample n images
assert len(self.cfg.DATASETS.TEST)
dataloaders = []
for dataset_name in self.cfg.DATASETS.TEST:
dataloaders.append(self.build_detection_test_loader(self.cfg, dataset_name))
self._reset_dataset_evaluators()
return dataloaders
def test_dataloader(self):
return self._evaluation_dataloader()
def val_dataloader(self):
return self._evaluation_dataloader()
def forward(self, input):
return self.model(input)
# ---------------------------------------------------------------------------
# Runner methods
# ---------------------------------------------------------------------------
def register(self, cfg: CfgNode):
inject_coco_datasets(cfg)
register_dynamic_datasets(cfg)
update_cfg_if_using_adhoc_dataset(cfg)
@classmethod
def build_model(cls, cfg: CfgNode, eval_only=False):
"""Builds D2go model instance from config.
NOTE: For backward compatible with existing D2Go tools. Prefer
`from_config` in other use cases.
Args:
cfg: D2go config node.
eval_only: True if model should be in eval mode.
"""
task = cls.from_config(cfg, eval_only)
if hasattr(task, PREPARED):
task = getattr(task, PREPARED)
return task.model
@classmethod
def get_default_cfg(cls):
return Detectron2GoRunner.get_default_cfg()
@staticmethod
def _initialize(cfg: CfgNode):
pass
@staticmethod
def get_evaluator(cfg: CfgNode, dataset_name: str, output_folder: str):
return Detectron2GoRunner.get_evaluator(
cfg=cfg, dataset_name=dataset_name, output_folder=output_folder
)
# ---------------------------------------------------------------------------
# Hooks
# ---------------------------------------------------------------------------
def on_fit_start(self) -> None:
if self.cfg.MODEL_EMA.ENABLED:
if self.ema_state and self.ema_state.has_inited():
# ema_state could have been loaded from checkpoint
# move to the current CUDA device if not on CPU
self.ema_state.to(self.ema_state.device)
return
self.ema_state = EMAState.from_model(
self.model,
decay=self.cfg.MODEL_EMA.DECAY,
device=self.cfg.MODEL_EMA.DEVICE or self.cfg.MODEL.DEVICE,
)
def on_train_batch_start(self, *_) -> None:
if self.gpu_profiler is not None:
self.gpu_profiler.before_step()
def on_train_batch_end(self, *_) -> None:
if self.ema_state:
self.ema_state.update(self.model)
if self.gpu_profiler is not None:
# NOTE: keep this last in function to include all ops in this iteration of the trace
self.gpu_profiler.after_step()
def on_test_epoch_start(self):
self._on_evaluation_epoch_start()
def on_validation_epoch_start(self):
self._on_evaluation_epoch_start()
def _on_evaluation_epoch_start(self):
if self.ema_state:
self.model_ema = deepcopy(self.model)
self.ema_state.apply_to(self.model_ema)
def on_validation_epoch_end(self):
if self.ema_state and hasattr(self, "model_ema"):
del self.model_ema
def on_test_epoch_end(self):
if self.ema_state and hasattr(self, "model_ema"):
del self.model_ema
def on_save_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
if self.ema_state:
checkpoint["model_ema"] = self.ema_state.state_dict()
def on_load_checkpoint(self, checkpointed_state: Dict[str, Any]) -> None:
"""
Called before model state is restored. Explicitly handles old model
states so we can resume training from D2Go checkpoints transparently.
Args:
checkpointed_state: The raw checkpoint state as returned by torch.load
or equivalent.
"""
# If this is a non-Lightning checkpoint, we need to convert it.
if not _is_lightning_checkpoint(checkpointed_state) and not _is_d2go_checkpoint(
checkpointed_state
):
raise ValueError(
f"Invalid checkpoint state with keys: {checkpointed_state.keys()}"
)
if not _is_lightning_checkpoint(checkpointed_state):
_convert_to_lightning(checkpointed_state)
maybe_prepare_for_quantization(self, checkpointed_state)
if self.ema_state:
if "model_ema" not in checkpointed_state:
rank_zero_info(
"EMA is enabled but EMA state is not found in given checkpoint"
)
else:
self.ema_state = EMAState(
decay=self.cfg.MODEL_EMA.DECAY,
device=self.cfg.MODEL_EMA.DEVICE or self.cfg.MODEL.DEVICE,
)
self.ema_state.load_state_dict(checkpointed_state["model_ema"])
rank_zero_info("Loaded EMA state from checkpoint.")
# TODO(T123654122): subclass of DefaultTask will be refactored
class GeneralizedRCNNTask(DefaultTask):
@classmethod
def get_default_cfg(cls):
return GeneralizedRCNNRunner.get_default_cfg()
# TODO(T123654122): subclass of DefaultTask will be refactored
class GeneralizedRCNNTaskNoDefaultConfig(RunnerV2Mixin, DefaultTask):
"""
Similar to `GeneralizedRCNNTask` but allows specifying the default config in yaml via `_defaults_`
"""
pass
| d2go-main | d2go/runner/lightning_task.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import os
import random
import torch
import torch.nn as nn
from d2go.quantization.modeling import QATCheckpointer
from d2go.runner.default_runner import BaseRunner
from d2go.utils.visualization import add_tensorboard_default_configs
from detectron2.utils.file_io import PathManager
class DebugRunner(BaseRunner):
@classmethod
def get_default_cfg(cls):
_C = super().get_default_cfg()
# _C.TENSORBOARD...
add_tensorboard_default_configs(_C)
# target metric
_C.TEST.TARGET_METRIC = "dataset0:dummy0:metric1"
return _C
def build_model(self, cfg, eval_only=False):
return nn.Sequential()
def do_test(self, cfg, model, train_iter=None):
return {
"dataset0": {
"dummy0": {"metric0": random.random(), "metric1": random.random()}
}
}
def do_train(self, cfg, model, resume):
# save a dummy checkpoint file
save_file = os.path.join(cfg.OUTPUT_DIR, "model_123.pth")
with PathManager.open(save_file, "wb") as f:
torch.save({"model": model.state_dict()}, f)
save_file = os.path.join(cfg.OUTPUT_DIR, "model_12345.pth")
with PathManager.open(save_file, "wb") as f:
torch.save({"model": model.state_dict()}, f)
save_file = os.path.join(cfg.OUTPUT_DIR, "model_final.pth")
with PathManager.open(save_file, "wb") as f:
torch.save({"model": model.state_dict()}, f)
def build_checkpointer(self, cfg, model, save_dir, **kwargs):
checkpointer = QATCheckpointer(model, save_dir=save_dir, **kwargs)
return checkpointer
@staticmethod
def final_model_name():
return "model_final"
| d2go-main | d2go/runner/debug_runner.py |
# pyre-ignore-all-errors
import functools
from abc import ABC
from copy import deepcopy
from dataclasses import dataclass
from types import MethodType
from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Union
import torch
from d2go.config import CfgNode
from d2go.quantization.modeling import prepare_fake_quant_model
from d2go.utils.misc import mode
from mobile_cv.arch.quantization.observer import update_stat as observer_update_stat
from pytorch_lightning import LightningModule, Trainer
from pytorch_lightning.callbacks import Callback
from pytorch_lightning.utilities import rank_zero_info
from torch.ao.quantization.qconfig import (
get_default_qat_qconfig,
get_default_qconfig,
QConfig,
QConfigDynamic,
)
from torch.ao.quantization.quant_type import QuantType
from torch.ao.quantization.quantize_fx import convert_fx, prepare_fx, prepare_qat_fx
from torch.ao.quantization.utils import get_fqn_to_example_inputs, get_quant_type
QConfigDicts = Dict[str, Dict[str, Union[QConfig, QConfigDynamic]]]
PREPARED = "_prepared"
def rsetattr(obj: Any, attr: str, val: Any) -> None:
"""Same as setattr but supports deeply nested objects."""
pre, _, post = attr.rpartition(".")
return setattr(rgetattr(obj, pre) if pre else obj, post, val)
def rgetattr(obj: Any, attr: str, *args) -> Any:
"""Same as getattr but supports deeply nested objects."""
def _getattr(obj, attr):
return getattr(obj, attr, *args)
return functools.reduce(_getattr, [obj] + attr.split("."))
def rhasattr(obj: Any, attr: str, *args) -> bool:
"""Same as hasattr but supports deeply nested objects."""
try:
_ = rgetattr(obj, attr, *args)
except AttributeError:
return False
return True
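# Illustrative usage sketch (the objects below are hypothetical):
#   >>> import types
#   >>> obj = types.SimpleNamespace(child=types.SimpleNamespace(value=1))
#   >>> rgetattr(obj, "child.value")
#   1
#   >>> rsetattr(obj, "child.value", 2)
#   >>> rhasattr(obj, "child.missing")
#   False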
def _quantized_forward(self, *args, **kwargs):
"""Forward method for a quantized module."""
if not self.training and hasattr(self, "_quantized"):
return self._quantized(*args, **kwargs)
return self._prepared(*args, **kwargs)
def _requires_calibration(config_dicts: QConfigDicts) -> bool:
"""Returns whether the given config_dicts for quantization requires calibration.
A config_dicts requires calibration if at least one of the configs in the
dictionary is a QConfig with an activation observer.
Args:
config_dicts: The config dictionaries to check.
Returns:
Boolean as described.
"""
for qconfig_dict in config_dicts.values():
for qconfig in qconfig_dict.values():
qtype = get_quant_type(qconfig)
if qtype == QuantType.STATIC:
return True
return False
def checkpoint_has_prepared(checkpoint: Dict[str, Any]) -> bool:
return any(k.startswith(PREPARED) for k in checkpoint["state_dict"].keys())
def maybe_prepare_for_quantization(model: LightningModule, checkpoint: Dict[str, Any]):
if checkpoint_has_prepared(checkpoint) and not hasattr(model, PREPARED):
# model has been prepared for QAT before saving into checkpoint
copied = deepcopy(model)
prepared = prepare_fake_quant_model(copied.cfg, copied.model, is_qat=True)
copied.model = prepared
setattr(model, PREPARED, copied)
class QuantizationMixin(ABC):
"""Mixin defining an overrideable API for quantization customization.
For example, suppose our model contains traceable and non-traceable modules:
>>> class MyNonTraceableModel(LightningModule):
... def __init__(self):
... self.traceable = ...
... self.non_traceable = ...
...
... def forward(self, x):
... x = self.traceable(x)
... return self.non_traceable(x)
Then using FX-mode quantization, we can only quantize the traceable pieces.
As such, we could do something like the below, shown here for QAT.
>>> class MyQuantizationCallback(QuantizationAwareTraining):
... def prepare(self, model, config, attrs):
... model.traceable = prepare_qat_fx(model.traceable, config)
... return model
...
... def convert(self, model, attr):
... model.traceable = convert_fx(model.traceable)
... return model
We can then use this callback as with any other:
Example::
>>> model = MyNonTraceableModel(...)
>>> quantization = MyQuantizationCallback()
>>> trainer = Trainer(
... callbacks=[quantization],
... )
>>> trainer.fit(model)
"""
def prepare(
self, root: LightningModule, configs: QConfigDicts, attrs: Set[str]
) -> torch.nn.Module:
"""Prepares the root user modules for quantization.
By default, this tries to prepare the entire LightningModule. If this is
not possible (e.g., due to traceability, etc.), the recommended approach is
to override the `prepare` method to prepare the root as appropriate, and
also override the `convert` method to only convert the prepared pieces of
the root.
Args:
root: The LightningModule as given to the lightning Trainer in train mode.
configs: Specification to be used when preparing the model, as provided by the user.
It is guaranteed that no key is a suffix of another.
attrs: The list of attributes to maintain for the module.
Returns:
The prepared Module to be used for quantized aware training.
"""
is_qat = isinstance(self, QuantizationAwareTraining)
self._convert_fx_callback = None
if hasattr(root.model, "custom_prepare_fx"):
prepared, convert_fx_callback = root.model.custom_prepare_fx(
root.cfg, is_qat
)
self._convert_fx_callback = convert_fx_callback
root.model = prepared
return root
prep_fn = prepare_qat_fx if is_qat else prepare_fx
old_attrs = {
attr: rgetattr(root, attr) for attr in attrs if rhasattr(root, attr)
}
prepared = root
if "" in configs:
prepared = prep_fn(root, configs[""], root.example_input_array)
else:
fqn_to_example_inputs = get_fqn_to_example_inputs(
root, root.example_input_array
)
for name, config in configs.items():
submodule = rgetattr(root, name)
rsetattr(
root, name, prep_fn(submodule, config, fqn_to_example_inputs[name])
)
for attr, value in old_attrs.items():
rsetattr(prepared, attr, value)
return prepared
def convert(
self, root: torch.nn.Module, submodules: Set[str], attrs: Set[str]
) -> torch.nn.Module:
"""Quantizes a previously prepared module (as returned by `prepare`).
By default, this simply quantizes the entire root. If the `prepare`
method was customized, this will need to be changed as well.
Args:
root: The prepared model as returned by `prepare`, after training.
submodules: An iterator of fully qualified submodule names that require
converting.
attrs: The list of attributes to maintain for the module across this call.
Returns:
The quantized model.
"""
if self._convert_fx_callback is not None:
return self._convert_fx_callback(root)
old_attrs = {
attr: rgetattr(root, attr) for attr in attrs if rhasattr(root, attr)
}
converted = root
if "" in submodules:
converted = convert_fx(root)
else:
for name in submodules:
prepared = rgetattr(root, name)
rsetattr(root, name, convert_fx(prepared))
for attr, value in old_attrs.items():
rsetattr(converted, attr, value)
rsetattr(root, attr, value)
return converted
@dataclass(frozen=True)
class ModelTransform:
"""Defines a step or interval at which fn should be .apply(fn)'ed and a message to log.
Properties:
fn: The function to apply. Must be passable to torch.nn.Module.apply(fn).
step: Exactly one of `step` or `interval` must be defined. If `step` is defined,
`fn` will be applied exactly once, right before training step `step` begins.
interval: Exactly one of `step` or `interval` must be defined. If `interval`
is defined, the transform will be applied periodically, every
`interval` steps.
message: A short non-punctuated message to log in the master worker when
this transform is triggered.
"""
fn: Callable[[torch.nn.Module], None]
message: str
step: Optional[int] = None
interval: Optional[int] = None
def __post_init__(self) -> None:
"""Validate a few properties for early failure."""
if (self.step is None and self.interval is None) or (
self.step is not None and self.interval is not None
):
raise TypeError("Exactly one of step or interval must be defined.")
if self.step is not None and self.step < 0:
raise ValueError("step must be non-negative.")
if self.interval is not None and self.interval <= 0:
raise ValueError("interval must be positive.")
class QuantizationAwareTraining(Callback, QuantizationMixin):
"""Enable QAT of a model using the STL Trainer.
Note that this callback makes changes during training in order to properly
quantize the provided LightningModule.
Example::
>>> from stl.lightning.callbacks.quantization import QuantizationAwareTraining
>>> from pytorch_lightning import Trainer
>>> from stl.lightning.utilities.model import mode
...
# MyLightningModule must define val_dataloader() which is used both for
# validation as well as calibration of the quantized model.
>>> model = MyLightningModule(...)
>>> qat = QuantizationAwareTraining()
>>> trainer = Trainer(
... callbacks=[qat],
... )
# This will convert the model into one that is quantizable, train it,
# and then quantize it after training is done.
>>> trainer.fit(model)
# You can use the model directly.
>>> input = ...
>>> with mode(model, training=False) as m:
... quantized_out = m(input)
If you only wish to quantize parts of your model, please see QuantizationMixin
for an example of how to do this.
Properties:
transforms: A list of ModelTransform's applied to the model during training
as specified (one-shot or periodic). Example transforms are enabling/disabling
observers/quants, which are added to this list based on the init
parameters to this callback. Users can further augment the list
with more custom modules.
prepared: If set, this is the prepared model. Only available
after .fit() starts.
qconfig_dicts:
This is a map from the `module_qualified_name` to the corresponding QConfigDict
to apply to that module. For example, suppose your LightningModule contains
two submodules module.scriptable and module.not_scriptable. You'd provide
a qconfig_dicts like:
{
"scriptable": ...
}
This will quantize just module.scriptable using the provided QConfigDict,
or a default one. If you wish to quantize the entire LightningModule,
simply use "" as the qualified name. The name should match the names
returned by module.named_modules().
quantized: If set, this is the fully quantized model. Only available
after .fit() finishes.
"""
def __init__(
self,
start_step: int = 0,
enable_observer: Tuple[int, Optional[int]] = (0, None),
freeze_bn_step: Optional[int] = None,
qconfig_dicts: Optional[
Dict[str, Optional[Dict[str, Union[QConfig, QConfigDynamic]]]]
] = None,
preserved_attrs: Optional[List[str]] = None,
skip_conversion: bool = False,
) -> None:
"""
Args:
start_step: The training step at which QAT is enabled. The model is
always mutated with the appropriate stubs, but they are disabled
until the start of this training step.
See FakeQuantizeBase.fake_quant_enabled
enable_observer: The half-open interval [a, b) in steps during which the
observers are enabled. See FakeQuantizeBase.observer_enabled. If
b is None, the observer is never disabled once enabled.
freeze_bn_step: If specified, the step at which we freeze the
collection of batch normalization layer statistics for QAT.
qconfig_dicts: If given, used for quantization of the model during training.
preserved_attrs: If provided, a list of attributes to preserve across
quantized modules. These are preserved only if they already exist.
"""
if start_step < 0:
raise ValueError(
f"The starting step of QAT must be non-negative. Got {start_step}."
)
start_observer, end_observer = enable_observer
if start_observer < 0:
raise ValueError(
f"The starting step for the observer must be non-negative. Got {start_observer}."
)
if end_observer and end_observer <= start_observer:
raise ValueError(
f"The observation interval must contain at least one step. Got [{start_step}, {end_observer})."
)
if freeze_bn_step and freeze_bn_step < 0:
raise ValueError(
f"The step at which batch norm layers are frozen must be non-negative. Got {freeze_bn_step}."
)
self.transforms: List[ModelTransform] = []
if start_step > 0:
self.transforms.extend(
[
# Enabled by default, so the assumption for > 0 is that the
# user wants it disabled then enabled.
ModelTransform(
fn=torch.ao.quantization.disable_fake_quant,
step=0,
message="Disable fake quant",
),
ModelTransform(
fn=torch.ao.quantization.enable_fake_quant,
step=start_step,
message="Enable fake quant to start QAT",
),
]
)
if start_observer > 0:
self.transforms.extend(
# See comment for start_step above.
[
ModelTransform(
fn=torch.ao.quantization.disable_observer,
step=0,
message="Disable observer",
),
ModelTransform(
fn=torch.ao.quantization.enable_observer,
step=start_observer,
message="Start observer",
),
]
)
if end_observer is not None:
self.transforms.append(
ModelTransform(
fn=torch.ao.quantization.disable_observer,
step=end_observer,
message="End observer",
)
)
if freeze_bn_step is not None:
self.transforms.append(
ModelTransform(
fn=torch.nn.intrinsic.qat.freeze_bn_stats,
step=freeze_bn_step,
message="Freeze BN",
)
)
self.prepared: Optional[torch.nn.Module] = None
self.preserved_attrs = set([] if preserved_attrs is None else preserved_attrs)
if not qconfig_dicts:
self.qconfig_dicts: QConfigDicts = {"": {"": get_default_qat_qconfig()}}
else:
self.qconfig_dicts: QConfigDicts = {
key: value if value else {"": get_default_qat_qconfig()}
for key, value in qconfig_dicts.items()
}
self.quantized: Optional[torch.nn.Module] = None
self.skip_conversion = skip_conversion
@classmethod
def from_config(cls, cfg: CfgNode):
qat = cfg.QUANTIZATION.QAT
callback = cls(
qconfig_dicts={submodule: None for submodule in cfg.QUANTIZATION.MODULES}
if cfg.QUANTIZATION.MODULES
else None,
# We explicitly pass this to maintain properties for now.
preserved_attrs=["model.backbone.size_divisibility"],
start_step=qat.START_ITER,
enable_observer=(qat.ENABLE_OBSERVER_ITER, qat.DISABLE_OBSERVER_ITER),
freeze_bn_step=qat.FREEZE_BN_ITER,
skip_conversion=True, # convert_fx will be handled by D2Go exporter
)
if qat.UPDATE_OBSERVER_STATS_PERIODICALLY:
callback.transforms.append(
ModelTransform(
interval=qat.UPDATE_OBSERVER_STATS_PERIOD,
fn=observer_update_stat,
message="Updating observers.",
)
)
return callback
def setup(self, trainer: Trainer, pl_module: LightningModule, stage: str) -> None:
"""Override the model with a quantized-aware version on setup.
This is the earliest place we can override this model which allows for
appropriate behavior when restoring from checkpoints, as well as connecting
to accelerators, etc.
The model is only prepared once.
"""
# Only prepare the model once.
if hasattr(pl_module, "_prepared"):
return
with mode(pl_module, training=True) as train:
prepared = self.prepare(
deepcopy(train),
configs=self.qconfig_dicts,
attrs=self.preserved_attrs,
)
# freeze the original model since only the prepared model will
# participate in forward.
for x in train.parameters():
x.requires_grad = False
pl_module._prepared = prepared
pl_module.forward = MethodType(_quantized_forward, pl_module)
self.prepared = pl_module._prepared
def on_train_batch_start(
self,
trainer: Trainer,
pl_module: LightningModule,
batch: Any,
batch_idx: int,
) -> None:
"""Applies model transforms at as specified during training."""
apply_only_once = []
current_step = trainer.global_step
for i, transform in enumerate(self.transforms):
if (transform.step is not None and transform.step <= current_step) or (
transform.interval is not None
and current_step % transform.interval == 0
):
self.prepared.apply(transform.fn)
rank_zero_info(
f"[QAT] {transform.message} at step={trainer.global_step}."
)
if transform.step is not None and transform.step <= current_step:
apply_only_once.append(i)
if apply_only_once:
self.transforms = [
transform
for i, transform in enumerate(self.transforms)
if i not in set(apply_only_once)
]
def on_fit_end(self, trainer: Trainer, pl_module: LightningModule) -> None:
"""Quantize the weights since training has finalized."""
if hasattr(pl_module, "_quantized") or self.skip_conversion:
return
pl_module._quantized = self.convert(
pl_module._prepared, self.qconfig_dicts.keys(), attrs=self.preserved_attrs
)
self.quantized = pl_module._quantized
def on_test_start(self, trainer: Trainer, pl_module: LightningModule) -> None:
"""Make sure we have a quantized version.
This handles the edge case where a user does .test() without .fit() first.
"""
if hasattr(pl_module, "_quantized"):
return
pl_module._quantized = self.convert(
pl_module._prepared, self.qconfig_dicts.keys(), attrs=self.preserved_attrs
)
self.quantized = pl_module._quantized
class PostTrainingQuantization(Callback, QuantizationMixin):
"""Enable post-training quantization, such as dynamic, static, and weight-only.
This is an idempotent callback (to contrast with QuantizationAwareTraining).
If calibration is required, we will use the validation data set provided to
the STL Trainer, and this occurs on each validation run.
The quantized model is made available as a property of the callback.
Example::
>>> from stl.lightning.callbacks.quantization import PostTrainingQuantization
>>> from pytorch_lightning import Trainer
>>> from stl.lightning.utilities.model import mode
...
# MyLightningModule must define val_dataloader() which is used both for
# validation as well as calibration of the quantized model.
>>> model = MyLightningModule(...)
>>> post_training_quant = PostTrainingQuantization()
>>> trainer = Trainer(
... callbacks=[post_training_quant],
... )
# This will both train the model + create a *separate* quantized version.
# The original model is left unchanged.
>>> trainer.fit(model)
# You can access the quantized version of the model directly.
>>> input = ...
>>> with mode(post_training_quant.quantized, training=False) as m:
... quantized_out = m(input)
If you only wish to quantize parts of your model, please see QuantizationMixin
for an example of how to do this.
Properties:
prepared: If set, this is the prepared model which can be used for
calibration. Only available after validation start.
qconfig_dicts: See `QuantizationAwareTraining` for full description.
quantized: If set, this is the fully quantized model calibrated using
the validation data. Only available after validation has ended.
"""
def __init__(
self,
qconfig_dicts: Optional[QConfigDicts] = None,
preserved_attrs: Optional[List[str]] = None,
) -> None:
"""Initialize the callback."""
self.qconfig_dicts = qconfig_dicts or {"": {"": get_default_qconfig()}}
self.preserved_attrs = set([] if preserved_attrs is None else preserved_attrs)
self.prepared: Optional[torch.nn.Module] = None
self.quantized: Optional[torch.nn.Module] = None
self.should_calibrate = _requires_calibration(self.qconfig_dicts)
@classmethod
def from_config(cls, cfg: CfgNode):
return cls(
qconfig_dicts={submodule: None for submodule in cfg.QUANTIZATION.MODULES}
if cfg.QUANTIZATION.MODULES
else None,
# We explicitly pass this to maintain properties for now.
preserved_attrs=["model.backbone.size_divisibility"],
)
def on_validation_start(self, trainer: Trainer, pl_module: LightningModule) -> None:
"""
On validation start, prepare a module for quantization by adding
observers and loading weights from current model.
"""
# Pass a copy to quantization APIs.
self.prepared = self.prepare(
deepcopy(pl_module).eval(),
configs=self.qconfig_dicts,
attrs=self.preserved_attrs,
)
def on_validation_end(self, trainer: Trainer, pl_module: LightningModule) -> None:
"""Convert the calibrated model to its finalized quantized version."""
self.quantized = self.convert(
self.prepared, self.qconfig_dicts.keys(), attrs=self.preserved_attrs
)
def on_validation_batch_end(
self,
trainer: Trainer,
pl_module: LightningModule,
outputs: Any,
batch: Any,
batch_idx: int,
dataloader_idx: int,
) -> None:
"""Also run the validation batch through the quantized model for calibration."""
if self.should_calibrate:
with torch.no_grad():
self.prepared(batch)
| d2go-main | d2go/runner/callbacks/quantization.py |
d2go-main | d2go/runner/callbacks/__init__.py |
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import builtins
import logging
import sys
import time
import uuid
from functools import wraps
from typing import Any, Callable, Optional, TypeVar
from mobile_cv.common.misc.oss_utils import fb_overwritable
# Saving the builtin print to wrap it up later.
BUILTIN_PRINT = builtins.print
_T = TypeVar("_T")
@fb_overwritable()
def initialize_logging(logging_level: int) -> None:
root_logger = logging.getLogger()
root_logger.setLevel(logging_level)
def replace_print_with_logging() -> None:
builtins.print = _print_to_logging
def _print_to_logging(
*objects: Any,
sep: Optional[str] = " ",
end: Optional[str] = "\n",
file: Optional[Any] = None,
flush: bool = False,
) -> None:
"""Wraps built-in print to replace it with using the logging module. Only
writing to stdout and stderr are replaced, printing to a file will be
executed unmodified.
This function is on the module level because otherwise numba breaks.
"""
# Mimicking the behavior of Python's built-in print function.
if sep is None:
sep = " "
if end is None:
end = "\n"
# Don't replace prints to files.
if file is not None and file != sys.stdout and file != sys.stderr:
BUILTIN_PRINT(*objects, sep=sep, end=end, file=file, flush=flush)
return
logging.info(sep.join(map(str, objects)), stacklevel=3)
@fb_overwritable()
def _log_enter(category: str, name: str, unique_id: str) -> None:
logging.info(f"Entering logging context, {category=}, {name=}, {unique_id=}")
@fb_overwritable()
def _log_exit(category: str, name: str, unique_id: str, duration: float) -> None:
logging.info(
f"Exiting logging context, {category=}, {name=}, {unique_id=}, {duration=}"
)
def log_interval(
category: Optional[str] = None, name: Optional[str] = None
) -> Callable[[Callable[..., _T]], Callable[..., _T]]:
_unique_id = uuid.uuid1().int >> 97
_overwrite_category = category
_overwrite_name = name
def log_interval_deco(func: Callable[..., _T]) -> Callable[..., _T]:
_category = _overwrite_category or func.__qualname__.split(".")[0]
_name = _overwrite_name or func.__name__
@wraps(func)
def wrapper(*args, **kwargs) -> _T:
_log_enter(_category, _name, _unique_id)
_start = time.perf_counter()
ret = func(*args, **kwargs)
_log_exit(_category, _name, _unique_id, time.perf_counter() - _start)
return ret
return wrapper
return log_interval_deco
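# Minimal usage sketch: the decorated function logs an enter/exit pair with its duration.
# The category/name labels below are arbitrary.
@log_interval(category="Example", name="toy_workload")
def _example_toy_workload(n: int) -> int:
    total = 0
    for i in range(n):
        total += i
    return total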
| d2go-main | d2go/utils/logging.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import logging
import os
import warnings
from contextlib import contextmanager
from typing import Any, Callable, Dict, Iterator, Optional
import detectron2.utils.comm as comm
import torch
from d2go.config.config import CfgNode
from d2go.utils.tensorboard_log_util import get_tensorboard_log_dir # noqa: forwarding
from detectron2.utils.file_io import PathManager
from tabulate import tabulate
logger = logging.getLogger(__name__)
# Subdirectory with model configurations dumped by the training binary.
TRAINED_MODEL_CONFIGS_DIR: str = "trained_model_configs"
def check_version(library, min_version, warning_only=False):
"""Check the version of the library satisfies the provided minimum version.
An exception is thrown if the check does not pass.
Parameters
----------
min_version : str
Minimum version
warning_only : bool
Printing a warning instead of throwing an exception.
"""
from distutils.version import LooseVersion
version = library.__version__
bad_version = LooseVersion(version) < LooseVersion(min_version)
if bad_version:
msg = (
f"Installed {library.__name__} version {version} does not satisfy the "
f"minimum required version {min_version}"
)
if warning_only:
warnings.warn(msg)
else:
raise AssertionError(msg)
return False
return True
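# Minimal usage sketch, assuming torch is the library being checked; the minimum version
# string below is arbitrary.
def _example_check_torch_version() -> bool:
    return check_version(torch, "1.10.0", warning_only=True)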
def metrics_dict_to_metrics_table(dic):
assert isinstance(dic, dict)
ret = []
for key in sorted(dic.keys()):
value = dic[key]
if isinstance(value, dict):
for sub_metrics in metrics_dict_to_metrics_table(value):
ret.append([key] + sub_metrics)
else:
ret.append([key, value])
return ret
def print_metrics_table(metrics_dic):
metrics_table = metrics_dict_to_metrics_table(metrics_dic)
metrics_tabulate = tabulate(
metrics_table,
tablefmt="pipe",
headers=["model", "dataset", "task", "metric", "score"],
)
logger.info("Metrics table: \n" + metrics_tabulate)
def dump_trained_model_configs(
output_dir: str, trained_cfgs: Dict[str, CfgNode]
) -> Dict[str, str]:
"""Writes trained model config files to output_dir.
Args:
output_dir: output file directory.
trained_cfgs: map from model name to the config of trained model.
Returns:
A map of model name to model config path.
"""
trained_model_configs = {}
trained_model_config_dir = os.path.join(output_dir, TRAINED_MODEL_CONFIGS_DIR)
PathManager.mkdirs(trained_model_config_dir)
for name, trained_cfg in trained_cfgs.items():
config_file = os.path.join(trained_model_config_dir, "{}.yaml".format(name))
trained_model_configs[name] = config_file
if comm.is_main_process():
logger.info("Dumping trained config file: {}".format(config_file))
with PathManager.open(config_file, "w") as f:
f.write(trained_cfg.dump())
comm.synchronize()
logger.info("Finished dumping trained config file")
return trained_model_configs
def save_binary_outputs(filename: str, outputs: Any) -> None:
"""Helper function to serialize and save function outputs in binary format."""
with PathManager.open(filename, "wb") as f:
torch.save(outputs, f)
def load_binary_outputs(filename: str) -> Any:
"""Helper function to load and deserialize function outputs saved in binary format."""
with PathManager.open(filename, "rb") as f:
return torch.load(f)
@contextmanager
def mode(net: torch.nn.Module, training: bool) -> Iterator[torch.nn.Module]:
"""Temporarily switch to training/evaluation mode."""
istrain = net.training
try:
net.train(training)
yield net
finally:
net.train(istrain)
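# Minimal usage sketch: run a forward pass with the model temporarily in eval mode;
# the original training/eval state is restored on exit. `net` and `batch` are placeholders.
def _example_eval_forward(net: torch.nn.Module, batch: torch.Tensor) -> torch.Tensor:
    with mode(net, training=False) as m, torch.no_grad():
        return m(batch)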
def _log_api_usage(identifier: str):
"""
Internal function used to log the usage of different d2go components
inside facebook's infra.
"""
torch._C._log_api_usage_once("d2go." + identifier)
def _log_api_usage_on_main_process(identifier: str):
"""
Log the usage of d2go API only on the main process.
"""
if comm.is_main_process():
_log_api_usage(identifier)
| d2go-main | d2go/utils/misc.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
def iterate_module_named_parameters(model, check_requires_grad=True):
"""Iterate over all parameters for the model"""
memo = set()
for module_name, module in model.named_modules():
for module_param_name, value in module.named_parameters(recurse=False):
if check_requires_grad and not value.requires_grad:
continue
# Avoid duplicating parameters
if value in memo:
continue
memo.add(value)
yield module_name, module, module_param_name, value
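# Minimal usage sketch: collect fully qualified names of all trainable, de-duplicated
# parameters. The model argument is any torch.nn.Module.
def _example_trainable_param_names(model):
    names = []
    for module_name, _module, param_name, _value in iterate_module_named_parameters(model):
        names.append(f"{module_name}.{param_name}" if module_name else param_name)
    return names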
| d2go-main | d2go/utils/parse_module_params.py |
# Copyright (c) Facebook, Inc. and its affiliates.
from collections import deque
import cv2
import detectron2.data.transforms as T
import torch
from d2go.model_zoo import model_zoo
from detectron2.data import MetadataCatalog
from detectron2.utils.video_visualizer import VideoVisualizer
from detectron2.utils.visualizer import ColorMode, Visualizer
class DemoPredictor:
def __init__(self, model, min_size_test=224, max_size_test=320, input_format="RGB"):
self.model = model
self.model.eval()
self.aug = T.ResizeShortestEdge([min_size_test, min_size_test], max_size_test)
self.input_format = input_format
def __call__(self, original_image):
"""
Args:
original_image (np.ndarray): an image of shape (H, W, C) (in BGR order).
Returns:
predictions (dict):
the output of the model for one image only.
See :doc:`/tutorials/models` for details about the format.
"""
with torch.no_grad(): # https://github.com/sphinx-doc/sphinx/issues/4258
# Apply pre-processing to image.
if self.input_format == "RGB":
# whether the model expects BGR inputs or RGB
original_image = original_image[:, :, ::-1]
height, width = original_image.shape[:2]
image = self.aug.get_transform(original_image).apply_image(original_image)
image = torch.as_tensor(image.astype("float32").transpose(2, 0, 1))
inputs = {"image": image, "height": height, "width": width}
predictions = self.model([inputs])[0]
return predictions
class VisualizationDemo(object):
def __init__(self, cfg, config_file, instance_mode=ColorMode.IMAGE, parallel=False):
"""
Args:
cfg (CfgNode):
instance_mode (ColorMode):
parallel (bool): whether to run the model in different processes from visualization.
Useful since the visualization logic can be slow.
"""
self.metadata = MetadataCatalog.get(
cfg.DATASETS.TEST[0] if len(cfg.DATASETS.TEST) else "__unused"
)
self.cpu_device = torch.device("cpu")
self.instance_mode = instance_mode
self.parallel = parallel
model = model_zoo.get(config_file, trained=True) # runner.build_model(cfg)
self.predictor = DemoPredictor(model)
def run_on_image(self, image):
"""
Args:
image (np.ndarray): an image of shape (H, W, C) (in BGR order).
This is the format used by OpenCV.
Returns:
predictions (dict): the output of the model.
vis_output (VisImage): the visualized image output.
"""
vis_output = None
predictions = self.predictor(image)
# Convert image from OpenCV BGR format to Matplotlib RGB format.
image = image[:, :, ::-1]
visualizer = Visualizer(image, self.metadata, instance_mode=self.instance_mode)
if "panoptic_seg" in predictions:
panoptic_seg, segments_info = predictions["panoptic_seg"]
vis_output = visualizer.draw_panoptic_seg_predictions(
panoptic_seg.to(self.cpu_device), segments_info
)
else:
if "sem_seg" in predictions:
vis_output = visualizer.draw_sem_seg(
predictions["sem_seg"].argmax(dim=0).to(self.cpu_device)
)
if "instances" in predictions:
instances = predictions["instances"].to(self.cpu_device)
vis_output = visualizer.draw_instance_predictions(predictions=instances)
return predictions, vis_output
def _frame_from_video(self, video):
while video.isOpened():
success, frame = video.read()
if success:
yield frame
else:
break
def run_on_video(self, video):
"""
Visualizes predictions on frames of the input video.
Args:
video (cv2.VideoCapture): a :class:`VideoCapture` object, whose source can be
either a webcam or a video file.
Yields:
ndarray: BGR visualizations of each video frame.
"""
video_visualizer = VideoVisualizer(self.metadata, self.instance_mode)
def process_predictions(frame, predictions):
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
if "panoptic_seg" in predictions:
panoptic_seg, segments_info = predictions["panoptic_seg"]
vis_frame = video_visualizer.draw_panoptic_seg_predictions(
frame, panoptic_seg.to(self.cpu_device), segments_info
)
elif "instances" in predictions:
predictions = predictions["instances"].to(self.cpu_device)
vis_frame = video_visualizer.draw_instance_predictions(
frame, predictions
)
elif "sem_seg" in predictions:
vis_frame = video_visualizer.draw_sem_seg(
frame, predictions["sem_seg"].argmax(dim=0).to(self.cpu_device)
)
# Converts Matplotlib RGB format to OpenCV BGR format
vis_frame = cv2.cvtColor(vis_frame.get_image(), cv2.COLOR_RGB2BGR)
return vis_frame
frame_gen = self._frame_from_video(video)
if self.parallel:
buffer_size = self.predictor.default_buffer_size
frame_data = deque()
for cnt, frame in enumerate(frame_gen):
frame_data.append(frame)
self.predictor.put(frame)
if cnt >= buffer_size:
frame = frame_data.popleft()
predictions = self.predictor.get()
yield process_predictions(frame, predictions)
while len(frame_data):
frame = frame_data.popleft()
predictions = self.predictor.get()
yield process_predictions(frame, predictions)
else:
for frame in frame_gen:
yield process_predictions(frame, self.predictor(frame))
| d2go-main | d2go/utils/demo_predictor.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import logging
import os
import detectron2.utils.comm as comm
import torch
from d2go.utils.visualization import VisualizerWrapper
from detectron2.utils.file_io import PathManager
logger = logging.getLogger(__name__)
def get_rel_loss_checker(rel_thres=1.0):
def _loss_delta_exceeds_thresh(prev_loss, loss):
if prev_loss is None:
return True
prev_sum = sum(prev_loss.values())
cur_sum = sum(loss.values())
if prev_sum <= 0:
return True
if (cur_sum - prev_sum) / prev_sum >= rel_thres:
return False
return True
return _loss_delta_exceeds_thresh
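# Minimal sketch of the checker semantics: a step is flagged when the summed loss grows
# by at least rel_thres (relative to the previous step). The loss values are made up.
def _example_rel_loss_check() -> None:
    checker = get_rel_loss_checker(rel_thres=1.0)
    prev = {"loss_cls": 0.5, "loss_box": 0.5}
    cur = {"loss_cls": 1.5, "loss_box": 1.0}
    assert checker(None, prev)  # the first step is always considered valid
    assert not checker(prev, cur)  # (2.5 - 1.0) / 1.0 >= 1.0, so this step is abnormal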
class TrainImageWriter(object):
def __init__(self, cfg, tbx_writer, max_count=5):
"""max_count: max number of data written to tensorboard, additional call
will be ignored
"""
self.visualizer = VisualizerWrapper(cfg)
self.writer = tbx_writer
self.max_count = max_count
self.counter = 0
def __call__(self, all_data):
if self.max_count > 0 and self.counter >= self.max_count:
return
data = all_data["data"]
step = all_data["step"]
for idx, cur_data in enumerate(data):
name = f"train_abnormal_losses/{step}/img_{idx}/{cur_data['file_name']}"
vis_img = self.visualizer.visualize_train_input(cur_data)
self.writer._writer.add_image(name, vis_img, step, dataformats="HWC")
logger.warning(
"Train images with bad losses written to tensorboard 'train_abnormal_losses'"
)
self.counter += 1
class FileWriter(object):
def __init__(self, output_dir, max_count=5):
"""max_count: max number of data written to tensorboard, additional call
will be ignored
"""
self.output_dir = output_dir
self.max_count = max_count
self.counter = 0
def __call__(self, all_data):
if self.max_count > 0 and self.counter >= self.max_count:
return
output_dir = self.output_dir
step = all_data["step"]
losses = all_data["losses"]
file_name = f"train_abnormal_losses_{step}_{comm.get_rank()}.pth"
out_file = os.path.join(output_dir, file_name)
with PathManager.open(out_file, "wb") as fp:
torch.save(all_data, fp)
logger.warning(
f"Iteration {step} has bad losses {losses}. "
f"all information saved to {out_file}."
)
self.counter += 1
def get_writers(cfg, tbx_writer):
writers = [TrainImageWriter(cfg, tbx_writer), FileWriter(cfg.OUTPUT_DIR)]
return writers
class AbnormalLossChecker(object):
def __init__(self, start_iter, writers, valid_loss_checker=None):
self.valid_loss_checker = valid_loss_checker or get_rel_loss_checker()
self.writers = writers or []
assert isinstance(self.writers, list)
self.prev_index = start_iter
self.prev_loss = None
def check_step(self, losses, data=None, model=None):
with torch.no_grad():
is_valid = self.valid_loss_checker(self.prev_loss, losses)
if not is_valid:
self._write_invalid_info(losses, data, model)
self.prev_index += 1
self.prev_loss = losses
return is_valid
def _write_invalid_info(self, losses, data, model):
all_info = {
"losses": losses,
"data": data,
"model": getattr(model, "module", model),
"prev_loss": self.prev_loss,
"step": self.prev_index + 1,
}
for writer in self.writers:
writer(all_info)
class AbnormalLossCheckerWrapper(torch.nn.Module):
def __init__(self, model, checker):
super().__init__()
self.checker = checker
self.model = model
self.training = model.training
def forward(self, x):
losses = self.model(x)
self.checker.check_step(losses, data=x, model=self.model)
return losses
| d2go-main | d2go/utils/abnormal_checker.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from mobile_cv.common.misc.oss_utils import fb_overwritable
@fb_overwritable()
def get_model_zoo_storage_prefix() -> str:
return "https://mobile-cv.s3-us-west-2.amazonaws.com/d2go/models/"
@fb_overwritable()
def get_launch_environment():
return "local"
MODEL_ZOO_STORAGE_PREFIX = get_model_zoo_storage_prefix()
| d2go-main | d2go/utils/launch_environment.py |
#!/usr/bin/env python3
import itertools
from typing import Any, Dict, Optional
import torch
import torch.nn as nn
class EMAState(object):
"""Stores Exponential Moving Average state for a model.
Args:
decay: EMA decay factor, should be in [0, 1]. A decay of 0 corresponds to
always using the latest value (no EMA) and a decay of 1 corresponds to
not updating weights after initialization. Default to 0.999.
device: If not None, move model EMA state to device.
"""
def __init__(self, decay: float = 0.999, device: Optional[str] = None):
if decay < 0 or decay > 1.0:
raise ValueError(f"Decay should be in [0, 1], {decay} was given.")
self.decay: float = decay
self.state: Dict[str, Any] = {}
self.device: Optional[str] = device
@classmethod
def from_model(
cls,
model: nn.Module,
decay: float = 0.999,
device: Optional[str] = None,
) -> "EMAState":
"""Constructs model state from the model and move to device if given."""
ret = cls(decay, device)
ret.load_from(model)
return ret
def load_from(self, model: nn.Module) -> None:
"""Load state from the model."""
self.state.clear()
for name, val in self._get_model_state_iterator(model):
val = val.detach().clone()
self.state[name] = val.to(self.device) if self.device else val
def has_inited(self) -> bool:
return len(self.state) > 0
def apply_to(self, model: nn.Module) -> None:
"""Apply EMA state to the model."""
with torch.no_grad():
for name, val in self._get_model_state_iterator(model):
assert (
name in self.state
), f"Name {name} does not exist, available names are {self.state.keys()}"
val.copy_(self.state[name])
def state_dict(self) -> Dict[str, Any]:
return self.state
def load_state_dict(self, state_dict: Dict[str, Any]) -> None:
self.state.clear()
for name, val in state_dict.items():
self.state[name] = val.to(self.device) if self.device else val
def to(self, device: torch.device) -> None:
"""moves EMA state to device."""
for name, val in self.state.items():
self.state[name] = val.to(device)
def _get_model_state_iterator(self, model: nn.Module):
param_iter = model.named_parameters()
# pyre-fixme[16]: `nn.Module` has no attribute `named_buffers`.
buffer_iter = model.named_buffers()
return itertools.chain(param_iter, buffer_iter)
def update(self, model: nn.Module) -> None:
with torch.no_grad():
for name, val in self._get_model_state_iterator(model):
ema_val = self.state[name]
if self.device:
val = val.to(self.device)
ema_val.copy_(ema_val * self.decay + val * (1.0 - self.decay))
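# Minimal usage sketch: keep an EMA copy of the weights during training and copy it back
# before evaluation. The model, decay, and training loop are placeholders.
def _example_ema_usage() -> None:
    model = nn.Linear(4, 2)
    ema = EMAState.from_model(model, decay=0.99)
    for _ in range(10):  # stand-in for optimizer steps
        ema.update(model)
    ema.apply_to(model)  # overwrite the live weights with the averaged ones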
| d2go-main | d2go/utils/ema_state.py |
import logging
import os
import pickle
import torch
from d2go.config import CfgNode as CN
from detectron2.utils.file_io import PathManager
from mobile_cv.torch.utils_pytorch import comm
from torch.cuda._memory_viz import segment_plot, trace_plot
logger: logging.Logger = logging.getLogger(__name__)
def add_memory_profiler_configs(_C: CN):
_C.MEMORY_PROFILER = CN()
_C.MEMORY_PROFILER.ENABLED = False
# max number of trace entries in memory snapshot
_C.MEMORY_PROFILER.TRACE_MAX_ENTRIES = 1000000
# Configs to be used by d2go.utils.gpu_memory_profiler.D2GoGpuMemorySnapshot
# determine the number of iterations to log memory snapshots for
_C.MEMORY_PROFILER.LOG_N_STEPS = 3
# determine at what iteration to start recording gpu memory
_C.MEMORY_PROFILER.LOG_DURING_TRAIN_AT = 550
def add_zoomer_default_config(_C: CN):
_C.ZOOMER = CN()
_C.ZOOMER.ENABLE_STACK_TRACING = (
False # Do not enable by default, since it may cause performance regression
)
_C.ZOOMER.ENABLE_MEMORY_PROFILING = False
def omm_logger_wrapper(output_dir):
def oom_logger(
device: int, alloc: int, device_alloc: int, device_free: int
) -> None:
"""
Log memory snapshot in the event of CUDA OOM.
"""
logger.info(
f"Saving memory snapshot device: {device}, alloc: {alloc}, device_alloc: {device_alloc}, device_free: {device_free}"
)
try:
log_memory_snapshot(output_dir, file_prefix="oom")
except Exception as e:
logger.error(f"Failed to log memory snapshot during OOM {e}")
return oom_logger
def log_memory_snapshot(output_dir: str, file_prefix: str = "") -> None:
"""
Log memory snapshots to output_dir
"""
if not torch.cuda.is_available():
logger.info("CUDA unavailable. Not logging snapshot")
return
try:
rank = comm.get_rank()
save_dir = os.path.join(
output_dir, "memory_snapshot", f"{file_prefix}_rank{rank}"
)
logger.info(f"Logging memory snapshot to {save_dir}")
snapshot = torch.cuda.memory._snapshot()
dump_snapshot(save_dir, snapshot)
except Exception as e:
logger.error(f"Failed to log memory snapshot to {save_dir}: {e}")
def dump_snapshot(save_dir: str, snapshot):
"""
Dump memory snapshot and useful plots to save_dir.
This is a rewrite of torch.cuda.memory._dump_snapshot() with PathManager.
"""
if not PathManager.exists(save_dir):
PathManager.mkdirs(save_dir)
with PathManager.open(os.path.join(save_dir, "snapshot.pickle"), "wb") as f:
pickle.dump(snapshot, f)
with PathManager.open(os.path.join(save_dir, "trace_plot.html"), "w") as f:
f.write(trace_plot(snapshot))
with PathManager.open(os.path.join(save_dir, "segment_plot.html"), "w") as f:
f.write(segment_plot(snapshot))
logger.info(f"Saved memory snapshot to {save_dir}")
def record_memory_history(trace_max_entries=1000000) -> None:
"""
Start recording memory history and stack traces.
"""
if not torch.cuda.is_available():
logger.info("CUDA unavailable. Not recording memory history")
return
torch.cuda.memory._record_memory_history(
enabled="all", max_entries=trace_max_entries
)
logger.info("Started recording memory history")
def attach_oom_logger(output_dir, trace_max_entries=1000000) -> None:
"""
Start recording memory history and attach the OOM logger.
"""
if not torch.cuda.is_available():
logger.info("CUDA unavailable. Not attaching OOM logger")
return
record_memory_history(trace_max_entries)
torch._C._cuda_attach_out_of_memory_observer(omm_logger_wrapper(output_dir))
logger.info("Attached GPU OOM logger")
| d2go-main | d2go/utils/gpu_memory_profiler.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# Populating registries
from d2go.utils import flop_calculator as _flop_calculator # noqa
# @fb-only: from d2go.utils import fb as _fb # isort:skip # noqa
| d2go-main | d2go/utils/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import copy
import logging
import os
import traceback
import detectron2.utils.comm as comm
import mobile_cv.lut.lib.pt.flops_utils as flops_utils
import torch
from d2go.utils.helper import run_once
from detectron2.utils.analysis import FlopCountAnalysis
from detectron2.utils.file_io import PathManager
from detectron2.utils.registry import Registry
from fvcore.nn import flop_count_str, flop_count_table
PROFILER_REGISTRY = Registry("PROFILER")
logger = logging.getLogger(__name__)
@torch.no_grad()
def dump_flops_info(model, inputs, output_dir, use_eval_mode=True):
"""
Dump flops information about model, using the given model inputs.
Information are dumped to output_dir using various flop counting tools
in different formats. Only a simple table is printed to terminal.
Args:
inputs: a tuple of positional arguments used to call model with.
use_eval_mode: turn the model into eval mode for flop counting. Otherwise,
will use the original mode. It's recommended to use eval mode, because
training mode typically follows a different codepath.
"""
if not comm.is_main_process():
return
logger.info("Evaluating model's number of parameters and FLOPS")
try:
model = copy.deepcopy(model)
except Exception:
logger.info("Failed to deepcopy the model and skip FlopsEstimation.")
return
# Delete other forward_pre_hooks so they are not simultaneously called.
# The keys are wrapped in a list to avoid mutating ordered_dict during iteration.
# See https://github.com/pytorch/pytorch/issues/49739 for more details.
for hook_key in list(model._forward_pre_hooks.keys()):
logger.warning(f"Forward hook with key {hook_key} was removed in flop counter.")
model._forward_pre_hooks.pop(hook_key)
if use_eval_mode:
model.eval()
inputs = copy.deepcopy(inputs)
# 1. using mobile_cv flop counter
try:
fest = flops_utils.FlopsEstimation(model)
with fest.enable():
model(*inputs)
fest.add_flops_info()
model_str = str(model)
output_file = os.path.join(output_dir, "flops_str_mobilecv.txt")
with PathManager.open(output_file, "w") as f:
f.write(model_str)
logger.info(f"Flops info written to {output_file}")
except Exception:
logger.exception("Failed to estimate flops using mobile_cv's FlopsEstimation")
# 2. using d2/fvcore's flop counter
output_file = os.path.join(output_dir, "flops_str_fvcore.txt")
try:
flops = FlopCountAnalysis(model, inputs)
# 2.1: dump as model str
model_str = flop_count_str(flops)
with PathManager.open(output_file, "w") as f:
f.write(model_str)
logger.info(f"Flops info written to {output_file}")
# 2.2: dump as table
flops_table = flop_count_table(flops, max_depth=10)
output_file = os.path.join(output_dir, "flops_table_fvcore.txt")
with PathManager.open(output_file, "w") as f:
f.write(flops_table)
logger.info(f"Flops table (full version) written to {output_file}")
# 2.3: print a table with a shallow depth
flops_table = flop_count_table(flops, max_depth=3)
logger.info("Flops table:\n" + flops_table)
except Exception:
with PathManager.open(output_file, "w") as f:
traceback.print_exc(file=f)
logger.warning(
"Failed to estimate flops using detectron2's FlopCountAnalysis. "
f"Error written to {output_file}."
)
flops = float("nan")
return flops
def add_flop_printing_hook(
model,
output_dir: str,
):
"""
Add a pytorch module forward hook that will print/save flops of the whole model
at the first time the model is called.
Args:
output_dir: directory to save more detailed flop info
"""
def hook(module, input):
handle.remove()
dump_flops_info(module, input, output_dir)
return input
handle = model.register_forward_pre_hook(hook)
@PROFILER_REGISTRY.register()
def default_flop_counter(model, cfg):
from torch.distributed.fsdp.fully_sharded_data_parallel import (
FullyShardedDataParallel as FSDP,
)
# TODO: deepcopy() not supported for FSDP yet (https://github.com/pytorch/pytorch/issues/82070), so we disable flop counter for now
if isinstance(model, FSDP):
logger.warn(
"Default flop counter is disabled because it's not supported for FSDP yet. "
)
return
return add_flop_printing_hook(model, cfg.OUTPUT_DIR)
# NOTE: the logging can be too long and messy when printing flops multiple
# times, especially when running eval during training, thus using `run_once`
# to limit it. `dump_flops_info` can log flops more concisely.
@run_once()
def add_print_flops_callback(cfg, model, disable_after_callback=True):
def _print_flops_callback(self, model, model_data):
self.add_flops_info()
logger.info("Callback: model flops info:\n{}".format(model))
def _guess_batch_size():
# Inputs are meta-arch dependent, the most general solution will be
# adding a function like `get_batch_size()` to each meta arch
ret = 1
try:
model_input_shapes = model_data(model)["input_shapes"]
assert isinstance(model_input_shapes, list)
assert len(model_input_shapes) > 0
# assuming the first input is a list of images
ret = len(model_input_shapes[0])
except Exception:
ret = cfg.SOLVER.IMS_PER_BATCH // comm.get_world_size()
logger.warning(
"Could not get batch size, compute from"
f" `cfg.SOLVER.IMS_PER_BATCH`={ret}"
)
pass
return ret
nparams, nflops = self.get_flops()
batch_size = _guess_batch_size()
nflops_single = nflops / batch_size
logger.info(
f"Model parameters (M): {nparams}, "
f"MFlops (batch_size={batch_size}): {nflops}, "
f"MFlops (batch_size=1): {nflops_single}"
)
if disable_after_callback:
self.set_enable(False)
fest = flops_utils.FlopsEstimation(model).set_callback(_print_flops_callback)
logger.info("Added callback to log flops info after the first inference")
fest.set_enable(True)
return fest
def attach_profiler(profiler_name):
return PROFILER_REGISTRY.get(profiler_name)
def attach_profilers(cfg, model):
for profiler in cfg.PROFILERS:
attach_profiler(profiler)(model, cfg)
| d2go-main | d2go/utils/flop_calculator.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import logging
import os
from functools import lru_cache
from mobile_cv.common.misc.oss_utils import fb_overwritable
@fb_overwritable()
def get_tensorboard_log_dir(output_dir):
return output_dir
| d2go-main | d2go/utils/tensorboard_log_util.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from typing import Optional, Type
from d2go.config import CfgNode as CN
from d2go.registry.builtin import META_ARCH_REGISTRY
from detectron2.data import DatasetCatalog, detection_utils as utils, MetadataCatalog
from detectron2.evaluation import DatasetEvaluator
from detectron2.utils.events import get_event_storage
from detectron2.utils.visualizer import Visualizer
def add_tensorboard_default_configs(_C):
_C.TENSORBOARD = CN()
# Output from dataloader will be written to tensorboard at this frequency
_C.TENSORBOARD.TRAIN_LOADER_VIS_WRITE_PERIOD = 20
# This controls the max number of images over all batches; be careful when
# increasing this number because it takes disk space and slows down training
_C.TENSORBOARD.TRAIN_LOADER_VIS_MAX_IMAGES = 16
# This controls the max number of images to visualize each write period
_C.TENSORBOARD.TRAIN_LOADER_VIS_MAX_BATCH_IMAGES = 16
# Max number of images per dataset to visualize in tensorboard during evaluation
_C.TENSORBOARD.TEST_VIS_MAX_IMAGES = 16
# Frequency of sending data to tensorboard during evaluation
_C.TENSORBOARD.TEST_VIS_WRITE_PERIOD = 1
# TENSORBOARD.LOG_DIR will be determined solely by OUTPUT_DIR
_C.register_deprecated_key("TENSORBOARD.LOG_DIR")
return _C
class VisualizerWrapper(object):
"""
D2's Visualizer provides low-level APIs to draw common structures, such as
draw_instance_predictions/draw_sem_seg/overlay_instances. This class provides
the high-level interface for visualizing.
"""
def __init__(self, cfg, custom_visualizer: Optional[Type[Visualizer]] = None):
self.cfg = cfg
self.visualizer = custom_visualizer or Visualizer
def _get_meta_arch_class(self):
return META_ARCH_REGISTRY.get(self.cfg.MODEL.META_ARCHITECTURE)
def visualize_train_input(self, input_dict):
"""
Visualize a single input image of the model (also the output from the train
loader) used for training; this includes the data augmentation.
"""
per_image = input_dict
cfg = self.cfg
# customization
if hasattr(self._get_meta_arch_class(), "visualize_train_input"):
return self._get_meta_arch_class().visualize_train_input(self, input_dict)
img = per_image["image"].permute(1, 2, 0).cpu().detach().numpy()
img = utils.convert_image_to_rgb(img, cfg.INPUT.FORMAT)
if "dataset_name" in input_dict:
metadata = MetadataCatalog.get(input_dict["dataset_name"])
else:
metadata = MetadataCatalog.get(cfg.DATASETS.TRAIN[0])
scale = 2.0
visualizer = self.visualizer(img, metadata=metadata, scale=scale)
if "instances" in per_image:
target_fields = per_image["instances"].get_fields()
labels = [metadata.thing_classes[i] for i in target_fields["gt_classes"]]
visualizer.overlay_instances(
labels=labels,
boxes=target_fields.get("gt_boxes", None),
masks=target_fields.get("gt_masks", None),
keypoints=target_fields.get("gt_keypoints", None),
)
if "sem_seg" in per_image:
visualizer.draw_sem_seg(per_image["sem_seg"], area_threshold=0, alpha=0.5)
return visualizer.get_output().get_image()
def visualize_test_output(
self, dataset_name, dataset_mapper, input_dict, output_dict
):
"""
Visualize the output of model
"""
# customization
if hasattr(self._get_meta_arch_class(), "visualize_test_output"):
return self._get_meta_arch_class().visualize_test_output(
self, dataset_name, dataset_mapper, input_dict, output_dict
)
image = dataset_mapper._read_image(input_dict, "RGB")
visualizer = self.visualizer(image, metadata=MetadataCatalog.get(dataset_name))
if "panoptic_seg" in output_dict:
panoptic_seg, segments_info = output_dict["panoptic_seg"]
visualizer.draw_panoptic_seg_predictions(
panoptic_seg.to("cpu"), segments_info
)
if "instances" in output_dict:
visualizer.draw_instance_predictions(output_dict["instances"].to("cpu"))
if "sem_seg" in output_dict:
visualizer.draw_sem_seg(
output_dict["sem_seg"].argmax(dim=0).to("cpu"),
area_threshold=0,
alpha=0.5,
)
return visualizer.get_output().get_image()
def visualize_dataset_dict(self, dataset_name, dataset_mapper, dataset_dict):
"""
Visualize the dataset_dict
"""
image = dataset_mapper._read_image(dataset_dict, "RGB")
visualizer = self.visualizer(image, metadata=MetadataCatalog.get(dataset_name))
visualizer.draw_dataset_dict(dataset_dict)
return visualizer.get_output().get_image()
class DataLoaderVisWrapper:
"""
Wrap the data loader to visualize its output via TensorBoardX at a given frequency.
"""
def __init__(
self,
cfg,
tbx_writer,
data_loader,
visualizer: Optional[Type[VisualizerWrapper]] = None,
):
self.tbx_writer = tbx_writer
self.data_loader = data_loader
self._visualizer = visualizer(cfg) if visualizer else VisualizerWrapper(cfg)
self.log_frequency = cfg.TENSORBOARD.TRAIN_LOADER_VIS_WRITE_PERIOD
self.log_limit = cfg.TENSORBOARD.TRAIN_LOADER_VIS_MAX_IMAGES
self.batch_log_limit = cfg.TENSORBOARD.TRAIN_LOADER_VIS_MAX_BATCH_IMAGES
assert self.log_frequency >= 0
assert self.log_limit >= 0
assert self.batch_log_limit >= 0
self._remaining = self.log_limit
def __iter__(self):
for data in self.data_loader:
self._maybe_write_vis(data)
yield data
def _maybe_write_vis(self, data):
try:
storage = get_event_storage()
except AssertionError:
# wrapped data loader might be used outside EventStorage, don't visualize
# anything
return
if (
self.log_frequency == 0
or not storage.iter % self.log_frequency == 0
or self._remaining <= 0
):
return
length = min(len(data), min(self.batch_log_limit, self._remaining))
data = data[:length]
self._remaining -= length
for i, per_image in enumerate(data):
vis_image = self._visualizer.visualize_train_input(per_image)
tag = [f"train_loader_batch_{storage.iter}"]
if "dataset_name" in per_image:
tag += [per_image["dataset_name"]]
if "file_name" in per_image:
tag += [f"img_{i}", per_image["file_name"]]
if isinstance(vis_image, dict):
for k in vis_image:
self.tbx_writer._writer.add_image(
tag="/".join(tag + [k]),
img_tensor=vis_image[k],
global_step=storage.iter,
dataformats="HWC",
)
else:
self.tbx_writer._writer.add_image(
tag="/".join(tag),
img_tensor=vis_image,
global_step=storage.iter,
dataformats="HWC",
)
class VisualizationEvaluator(DatasetEvaluator):
"""
Visualize GT and predictions during evaluation. It doesn't calculate any
metrics; it just uses the evaluator's interface as a hook.
"""
# NOTE: the evaluator will be created for every eval (during training and
# after training), so the images will be logged multiple times; use a global
# counter to differentiate them in TB.
_counter = 0
def __init__(
self,
cfg,
tbx_writer,
dataset_mapper,
dataset_name,
train_iter=None,
tag_postfix=None,
visualizer: Optional[Type[VisualizerWrapper]] = None,
):
self.tbx_writer = tbx_writer
self.dataset_mapper = dataset_mapper
self.dataset_name = dataset_name
self._visualizer = visualizer(cfg) if visualizer else VisualizerWrapper(cfg)
self.train_iter = train_iter or VisualizationEvaluator._counter
self.tag_postfix = tag_postfix or ""
self.log_limit = max(cfg.TENSORBOARD.TEST_VIS_MAX_IMAGES, 0)
self.log_frequency = cfg.TENSORBOARD.TEST_VIS_WRITE_PERIOD
self._metadata = None
self._dataset_dict = None
self._file_name_to_dataset_dict = None
if self.log_limit > 0:
self._initialize_dataset_dict(dataset_name)
VisualizationEvaluator._counter += 1
self.reset()
def _initialize_dataset_dict(self, dataset_name: str) -> None:
# Enable overriding defaults in case the dataset hasn't been registered.
self._metadata = MetadataCatalog.get(dataset_name)
# NOTE: Since there's no GT from test loader, we need to get GT from
# the dataset_dict, this assumes the test data loader uses the item from
# dataset_dict in the default way.
self._dataset_dict = DatasetCatalog.get(dataset_name)
self._file_name_to_dataset_dict = {
dic["file_name"]: dic for dic in self._dataset_dict
}
def reset(self):
self._iter = 0
self._log_remaining = self.log_limit
def process(self, inputs, outputs):
if (
self.log_frequency == 0
or self._iter % self.log_frequency != 0
or self._log_remaining <= 0
):
self._iter += 1
return
for input, output in zip(inputs, outputs):
file_name = input["file_name"]
dataset_dict = self._file_name_to_dataset_dict[file_name]
gt_img = self._visualizer.visualize_dataset_dict(
self.dataset_name, self.dataset_mapper, dataset_dict
)
pred_img = self._visualizer.visualize_test_output(
self.dataset_name, self.dataset_mapper, input, output
)
tag_base = f"{self.dataset_name}{self.tag_postfix}/eval_iter_{self._iter}/{file_name}"
self.tbx_writer._writer.add_image(
f"{tag_base}/GT",
gt_img,
self.train_iter,
dataformats="HWC",
)
if not isinstance(pred_img, dict):
pred_img = {"Pred": pred_img}
for img_type in pred_img.keys():
self.tbx_writer._writer.add_image(
f"{tag_base}/{img_type}",
pred_img[img_type],
self.train_iter,
dataformats="HWC",
)
self._log_remaining -= 1
self._iter += 1
def has_finished_process(self):
return True
| d2go-main | d2go/utils/visualization.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#!/usr/bin/python
import importlib
import os
from functools import wraps
from typing import Any, Callable, List, TypeVar
import detectron2.utils.comm as comm
import torch
from detectron2.data import MetadataCatalog
from detectron2.engine.defaults import DefaultTrainer
from detectron2.evaluation import (
CityscapesInstanceEvaluator,
CityscapesSemSegEvaluator,
COCOEvaluator,
COCOPanopticEvaluator,
DatasetEvaluators,
LVISEvaluator,
PascalVOCDetectionEvaluator,
SemSegEvaluator,
)
from detectron2.utils.events import TensorboardXWriter
from mobile_cv.common.misc.oss_utils import fb_overwritable
T = TypeVar("T")
FuncType = Callable[..., Any]
F = TypeVar("F", bound=FuncType)
__all__ = [
"run_once",
"retryable",
"get_dir_path",
"TensorboardXWriter", # TODO: move to D2Go's vis utils if needed
"D2Trainer", # TODO: move to trainer folder
]
class MultipleFunctionCallError(Exception):
pass
@fb_overwritable()
def run_once(
raise_on_multiple: bool = False,
# pyre-fixme[34]: `Variable[T]` isn't present in the function's parameters.
) -> Callable[[Callable[..., T]], Callable[..., T]]:
"""
A decorator to wrap a function such that it only ever runs once.
Useful, for example, with exit handlers that could be run via atexit or
via a signal handler. The decorator will cache the result of the first call
and return it on subsequent calls. If `raise_on_multiple` is set, any call
to the function after the first one will raise a
`MultipleFunctionCallError`.
"""
def decorator(func: Callable[..., T]) -> (Callable[..., T]):
signal: List[T] = []
@wraps(func)
def wrapper(*args, **kwargs) -> T:
if signal:
if raise_on_multiple:
raise MultipleFunctionCallError(
"Function %s was called multiple times" % func.__name__
)
return signal[0]
signal.append(func(*args, **kwargs))
return signal[0]
return wrapper
return decorator
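# Minimal usage sketch: the wrapped function runs once and its result is cached; later
# calls return the cached value (or raise if raise_on_multiple=True). The body is arbitrary.
@run_once()
def _example_load_once() -> dict:
    return {"initialized": True}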
@fb_overwritable()
class retryable(object):
"""Fake retryable function"""
def __init__(self, num_tries=1, sleep_time=0.1):
pass
def __call__(self, func: F) -> F:
return func
@fb_overwritable()
def get_dir_path(relative_path):
"""Return a path for a directory in this package, extracting if necessary
For an entire directory within the par file (zip, fastzip) or lpar
structure, this function will check to see if the contents are extracted;
extracting each file that has not been extracted. It returns the path of
a directory containing the expected contents, making sure permissions are
correct.
Returns a string path; throws an exception on error.
"""
return os.path.dirname(importlib.import_module(relative_path).__file__)
class D2Trainer(DefaultTrainer):
@classmethod
def build_evaluator(cls, cfg, dataset_name, output_folder=None):
"""
Create evaluator(s) for a given dataset.
This uses the special metadata "evaluator_type" associated with each builtin dataset.
For your own dataset, you can simply create an evaluator manually in your
script and do not have to worry about the hacky if-else logic here.
"""
if output_folder is None:
output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
evaluator_list = []
evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type
if evaluator_type in ["sem_seg", "coco_panoptic_seg"]:
evaluator_list.append(
SemSegEvaluator(
dataset_name,
distributed=True,
output_dir=output_folder,
)
)
if evaluator_type in ["coco", "coco_panoptic_seg"]:
evaluator_list.append(COCOEvaluator(dataset_name, output_dir=output_folder))
if evaluator_type == "coco_panoptic_seg":
evaluator_list.append(COCOPanopticEvaluator(dataset_name, output_folder))
if evaluator_type == "cityscapes_instance":
assert (
torch.cuda.device_count() >= comm.get_rank()
), "CityscapesEvaluator currently do not work with multiple machines."
return CityscapesInstanceEvaluator(dataset_name)
if evaluator_type == "cityscapes_sem_seg":
assert (
torch.cuda.device_count() >= comm.get_rank()
), "CityscapesEvaluator currently do not work with multiple machines."
return CityscapesSemSegEvaluator(dataset_name)
elif evaluator_type == "pascal_voc":
return PascalVOCDetectionEvaluator(dataset_name)
elif evaluator_type == "lvis":
return LVISEvaluator(dataset_name, output_dir=output_folder)
if len(evaluator_list) == 0:
raise NotImplementedError(
"no Evaluator for the dataset {} with the type {}".format(
dataset_name, evaluator_type
)
)
elif len(evaluator_list) == 1:
return evaluator_list[0]
return DatasetEvaluators(evaluator_list)
| d2go-main | d2go/utils/helper.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import logging
import os
import re
import time
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.utils.file_io import PathManager
from pytorch_lightning.callbacks.model_checkpoint import ModelCheckpoint
logger = logging.getLogger(__name__)
def fetch_checkpoints_till_final(checkpoint_dir):
"""
A generator that yields all checkpoint paths under the given directory, it'll
keep refreshing until model_final is found.
"""
MIN_SLEEP_INTERVAL = 1.0 # in seconds
MAX_SLEEP_INTERVAL = 60.0 # in seconds
sleep_interval = MIN_SLEEP_INTERVAL
finished_checkpoints = set()
def _add_and_log(path):
finished_checkpoints.add(path)
logger.info("Found checkpoint: {}".format(path))
return path
def _log_and_sleep(sleep_interval):
logger.info(
"Sleep {} seconds while waiting for model_final.pth".format(sleep_interval)
)
time.sleep(sleep_interval)
return min(sleep_interval * 2, MAX_SLEEP_INTERVAL)
def _get_lightning_checkpoints(path: str):
return [
os.path.join(path, x)
for x in PathManager.ls(path)
if x.endswith(ModelCheckpoint.FILE_EXTENSION)
and not x.startswith(ModelCheckpoint.CHECKPOINT_NAME_LAST)
]
while True:
if not PathManager.exists(checkpoint_dir):
sleep_interval = _log_and_sleep(sleep_interval)
continue
checkpoint_paths = DetectionCheckpointer(
None, save_dir=checkpoint_dir
).get_all_checkpoint_files()
checkpoint_paths = [
cpt_path
for cpt_path in checkpoint_paths
if os.path.basename(cpt_path).startswith("model")
]
checkpoint_paths.extend(_get_lightning_checkpoints(checkpoint_dir))
final_model_path = None
periodic_checkpoints = []
for path in sorted(checkpoint_paths):
if path.endswith("model_final.pth") or path.endswith("model_final.ckpt"):
final_model_path = path
continue
if path.endswith(ModelCheckpoint.FILE_EXTENSION):
# Lightning checkpoint
model_iter = int(
re.findall(
r"(?<=step=)\d+(?={})".format(ModelCheckpoint.FILE_EXTENSION),
path,
)[0]
)
else:
model_iter = int(re.findall(r"(?<=model_)\d+(?=\.pth)", path)[0])
periodic_checkpoints.append((path, model_iter))
periodic_checkpoints = [
pc for pc in periodic_checkpoints if pc[0] not in finished_checkpoints
]
periodic_checkpoints = sorted(periodic_checkpoints, key=lambda x: x[1])
for pc in periodic_checkpoints:
yield _add_and_log(pc[0])
sleep_interval = MIN_SLEEP_INTERVAL
if final_model_path is None:
sleep_interval = _log_and_sleep(sleep_interval)
else:
yield _add_and_log(final_model_path)
break
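# Example: a minimal sketch of consuming the generator above; `evaluate_checkpoint`
# is a hypothetical callback supplied by the caller.
def _example_consume_checkpoints(checkpoint_dir, evaluate_checkpoint):
    for checkpoint_path in fetch_checkpoints_till_final(checkpoint_dir):
        # each yielded path is either a periodic checkpoint or the final model
        evaluate_checkpoint(checkpoint_path)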
| d2go-main | d2go/utils/validation_monitor.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import logging
from typing import Callable, TypeVar
from torch.distributed.elastic.multiprocessing.errors import (
_NOT_AVAILABLE,
ChildFailedError,
get_error_handler,
)
logger = logging.getLogger(__name__)
_RT = TypeVar("_RT")
def mast_error_handler(func: Callable[..., _RT]) -> Callable[..., _RT]:
def wrapper(*args, **kwargs) -> _RT:
logger.info("Starting main")
error_handler = get_error_handler()
logger.debug(f"Error handler is: {type(error_handler)=}, {error_handler=}")
error_handler.initialize()
logger.debug("Error handler has been initialized")
try:
logger.debug("Entered main for d2go")
return func(*args, **kwargs)
except ChildFailedError as e:
logger.info(f"Got a ChildFailedError: {e=}")
rank, failure = e.get_first_failure()
if failure.error_file != _NOT_AVAILABLE:
error_handler.dump_error_file(failure.error_file, failure.exitcode)
else:
logger.info(
(
f"local_rank {rank} FAILED with no error file."
f" Decorate your entrypoint fn with @record for traceback info."
f" See: https://pytorch.org/docs/stable/elastic/errors.html"
)
)
raise
except Exception as e:
logger.info(f"Caught a generic exception: {e=}")
error_handler.record_exception(e)
raise
return wrapper
def gather_mast_errors(func: Callable[..., _RT]) -> Callable[..., _RT]:
def wrapper(*args, **kwargs) -> _RT:
logger.info("Starting CLI application")
try:
return func(*args, **kwargs)
finally:
logging.info("Entering final reply file generation step")
import glob
import os
import shutil
torchx_reply_files = glob.glob("/tmp/torchx_*/**/*.json", recursive=True)
logger.info(
f"Found the following reply files on this host: {torchx_reply_files}"
)
first_reply_file = None
first_reply_file_st = float("Inf")
for f in torchx_reply_files:
if (mtime := os.stat(f).st_mtime) < first_reply_file_st:
first_reply_file = f
first_reply_file_st = mtime
if first_reply_file and os.environ.get("MAST_HPC_TASK_FAILURE_REPLY_FILE"):
logger.info(
f'Copying {first_reply_file=} to {os.environ["MAST_HPC_TASK_FAILURE_REPLY_FILE"]}'
)
shutil.copyfile(
first_reply_file, os.environ["MAST_HPC_TASK_FAILURE_REPLY_FILE"]
)
return wrapper
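# Example: a minimal sketch of stacking the two decorators above on a CLI entry
# point; `_example_main` and the stacking order shown here are assumptions for
# illustration only.
@gather_mast_errors
@mast_error_handler
def _example_main() -> int:
    return 0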
| d2go-main | d2go/utils/mast.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import torch
def generate_test_input(height, width, is_train, num_classes, super_classes=None):
random_image = torch.rand(3, height, width).to(torch.float32)
ret = {"image": random_image}
if is_train:
mask_size = (
(height, width)
if super_classes is None
else (len(super_classes), height, width)
)
random_mask = torch.randint(low=0, high=num_classes, size=mask_size).to(
torch.int64
)
ret["sem_seg"] = random_mask
return ret
def validate_test_output(output, height, width, num_classes, super_classes=None):
sem_seg_per_image = output["sem_seg"]
if super_classes is None: # None MCS case
detect_c_out, detect_h_out, detect_w_out = sem_seg_per_image.size()
assert detect_c_out == num_classes, detect_c_out
assert detect_h_out == height, (detect_h_out, height)
assert detect_w_out == width, (detect_w_out, width)
else: # MCS case
assert isinstance(sem_seg_per_image, dict)
assert all(k in super_classes for k in sem_seg_per_image), (
sem_seg_per_image.keys(),
super_classes,
)
for class_name, mask in sem_seg_per_image.items():
assert isinstance(class_name, str)
detect_c_out, detect_h_out, detect_w_out = mask.size()
assert detect_c_out == num_classes, detect_c_out
assert detect_h_out == height, (detect_h_out, height)
assert detect_w_out == width, (detect_w_out, width)
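# Example: a minimal sketch pairing the two helpers above for the non-MCS case;
# the "prediction" is a random tensor and only illustrates the expected shapes.
def _example_sem_seg_round_trip():
    height, width, num_classes = 16, 24, 3
    _ = generate_test_input(height, width, is_train=True, num_classes=num_classes)
    fake_output = {"sem_seg": torch.rand(num_classes, height, width)}
    validate_test_output(fake_output, height, width, num_classes)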
| d2go-main | d2go/utils/testing/sem_seg_helper.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import contextlib
import copy
import shutil
import tempfile
import unittest
from typing import Optional
import d2go.data.transforms.box_utils as bu
import torch
from d2go.export.exporter import convert_and_export_predictor
from d2go.runner.default_runner import GeneralizedRCNNRunner
from d2go.utils.testing.data_loader_helper import (
create_detection_data_loader_on_toy_dataset,
)
from detectron2.structures import Boxes, Instances
from mobile_cv.predictor.api import create_predictor
from parameterized import parameterized
def _get_image_with_box(image_size, boxes: Optional[Boxes] = None):
"""Draw boxes on the image, one box per channel, use values 10, 20, ..."""
ret = torch.zeros((3, image_size[0], image_size[1]))
if boxes is None:
return ret
assert len(boxes) <= ret.shape[0]
for idx, box in enumerate(boxes):
x0, y0, x1, y1 = box.int().tolist()
ret[idx, y0:y1, x0:x1] = (idx + 1) * 10
return ret
def _get_boxes_from_image(image, scale_xy=None):
"""Extract boxes from image created by `_get_image_with_box()`"""
cur_img_int = ((image / 10.0 + 0.5).int().float() * 10.0).int()
values = torch.unique(cur_img_int)
gt_values = [x * 10 for x in range(len(values))]
assert set(values.tolist()) == set(gt_values)
boxes = []
for idx in range(cur_img_int.shape[0]):
val = torch.unique(cur_img_int[idx]).tolist()
val = max(val)
if val == 0:
continue
# mask = (cur_img_int[idx, :, :] == val).int()
mask = (cur_img_int[idx, :, :] > 0).int()
box_xywh = bu.get_box_from_mask(mask.numpy())
boxes.append(bu.to_boxes_from_xywh(box_xywh))
ret = Boxes.cat(boxes)
if scale_xy is not None:
ret.scale(*scale_xy)
return ret
def get_batched_inputs(
num_images,
image_size=(1920, 1080),
resize_size=(398, 224),
boxes: Optional[Boxes] = None,
):
"""Get batched inputs in the format from d2/d2go data mapper
Draw the boxes on the images if `boxes` is not None
"""
ret = []
for idx in range(num_images):
cur = {
"file_name": f"img_{idx}.jpg",
"image_id": idx,
"dataset_name": "test_dataset",
"height": image_size[0],
"width": image_size[1],
"image": _get_image_with_box(resize_size, boxes),
}
ret.append(cur)
return ret
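# Example: a minimal round-trip sketch, drawing two boxes into a synthetic batch
# with `get_batched_inputs` and reading them back with `_get_boxes_from_image`;
# the box coordinates are arbitrary.
def _example_box_round_trip():
    boxes = Boxes(torch.Tensor([[10, 10, 40, 40], [60, 20, 100, 80]]))
    batch = get_batched_inputs(
        1, image_size=(224, 224), resize_size=(224, 224), boxes=boxes
    )
    recovered = _get_boxes_from_image(batch[0]["image"])
    assert len(recovered) == 2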
def _get_keypoints_from_boxes(boxes: Boxes, num_keypoints: int):
"""Use box center as keypoints"""
centers = boxes.get_centers()
kpts = torch.cat((centers, torch.ones(centers.shape[0], 1)), dim=1)
kpts = kpts.repeat(1, num_keypoints).reshape(len(boxes), num_keypoints, 3)
return kpts
def _get_scale_xy(output_size_hw, instance_size_hw):
return (
output_size_hw[1] / instance_size_hw[1],
output_size_hw[0] / instance_size_hw[0],
)
def get_detected_instances_from_image(batched_inputs, scale_xy=None):
"""Get detected instances from batched_inputs, the results are in the same
format as GeneralizedRCNN.inference()
The images in the batched_inputs are created by `get_batched_inputs()` with
`boxes` provided.
"""
ret = []
for item in batched_inputs:
cur_img = item["image"]
img_hw = cur_img.shape[1:]
boxes = _get_boxes_from_image(cur_img, scale_xy=scale_xy)
num_boxes = len(boxes)
fields = {
"pred_boxes": boxes,
"scores": torch.Tensor([1.0] * num_boxes),
"pred_classes": torch.Tensor([0] * num_boxes).int(),
"pred_keypoints": _get_keypoints_from_boxes(boxes, 21),
"pred_keypoint_heatmaps": torch.ones([num_boxes, 21, 24, 24]),
}
ins = Instances(img_hw, **fields)
ret.append(ins)
return ret
def get_detected_instances(num_images, num_instances, resize_size=(392, 224)):
"""Create an detected instances for unit test"""
assert num_instances in [1, 2]
ret = []
for _idx in range(num_images):
fields = {
"pred_boxes": Boxes(torch.Tensor([[50, 40, 100, 80], [150, 60, 200, 120]])),
"scores": torch.Tensor([1.0, 1.0]),
"pred_classes": torch.Tensor([0, 0]).int(),
"pred_keypoints": torch.Tensor(
[70, 60, 1.5] * 21 + [180, 100, 2.0] * 21
).reshape(2, 21, 3),
"pred_keypoint_heatmaps": torch.ones([2, 21, 24, 24]),
}
ins = Instances(resize_size, **fields)[:num_instances]
ret.append(ins)
return ret
class MockRCNNInference(object):
"""Use to mock the GeneralizedRCNN.inference()"""
def __init__(self, image_size, resize_size):
self.image_size = image_size
self.resize_size = resize_size
@property
def device(self):
return torch.device("cpu")
def __call__(
self,
batched_inputs,
detected_instances=None,
do_postprocess: bool = True,
):
return self.inference(
batched_inputs,
detected_instances,
do_postprocess,
)
def inference(
self,
batched_inputs,
detected_instances=None,
do_postprocess: bool = True,
):
scale_xy = (
_get_scale_xy(self.image_size, self.resize_size) if do_postprocess else None
)
results = get_detected_instances_from_image(batched_inputs, scale_xy=scale_xy)
# when do_postprocess is True, the result instances is stored inside a dict
if do_postprocess:
results = [{"instances": r} for r in results]
return results
def _validate_outputs(inputs, outputs):
assert len(inputs) == len(outputs)
# TODO: figure out how to validate outputs
def get_quick_test_config_opts(
fixed_single_proposals=True,
small_pooler_resolution=True,
small_resize_resolution=True,
):
ret = []
if fixed_single_proposals:
epsilon = 1e-4
ret.extend(
[
"MODEL.RPN.POST_NMS_TOPK_TEST",
1,
"TEST.DETECTIONS_PER_IMAGE",
1,
"MODEL.PROPOSAL_GENERATOR.MIN_SIZE",
0,
"MODEL.RPN.NMS_THRESH",
1.0 + epsilon,
"MODEL.ROI_HEADS.NMS_THRESH_TEST",
1.0 + epsilon,
"MODEL.ROI_HEADS.SCORE_THRESH_TEST",
0.0 - epsilon,
]
)
if small_pooler_resolution:
ret.extend(
[
"MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION",
1,
"MODEL.ROI_MASK_HEAD.POOLER_RESOLUTION",
1,
"MODEL.ROI_KEYPOINT_HEAD.POOLER_RESOLUTION",
1,
]
)
if small_resize_resolution:
ret.extend(
[
"INPUT.MIN_SIZE_TRAIN",
(8,),
"INPUT.MAX_SIZE_TRAIN",
9,
"INPUT.MIN_SIZE_TEST",
10,
"INPUT.MAX_SIZE_TEST",
11,
]
)
return [str(x) for x in ret]
def get_export_test_name(testcase_func, param_num, param):
predictor_type, compare_match = param.args
assert isinstance(predictor_type, str)
assert isinstance(compare_match, bool)
return "{}_{}".format(
testcase_func.__name__, parameterized.to_safe_name(predictor_type)
)
class RCNNBaseTestCases:
@staticmethod
def expand_parameterized_test_export(*args, **kwargs):
if "name_func" not in kwargs:
kwargs["name_func"] = get_export_test_name
return parameterized.expand(*args, **kwargs)
class TemplateTestCase(unittest.TestCase): # TODO: maybe subclass from TestMetaArch
def setUp(self):
self.setup_test_dir()
assert hasattr(self, "test_dir")
self.setup_custom_test()
assert hasattr(self, "runner")
assert hasattr(self, "cfg")
self.force_apply_overwrite_opts()
self.test_model = self.runner.build_model(self.cfg, eval_only=True)
def setup_test_dir(self):
self.test_dir = tempfile.mkdtemp(prefix="test_export_")
self.addCleanup(shutil.rmtree, self.test_dir)
def _get_test_image_sizes_default(self, is_train):
            # model should work for any size, so don't always use power of 2 or multiple
# of size_divisibility for testing.
side_length = max(self.test_model.backbone.size_divisibility, 10)
# make it non-square to cover error caused by messing up width & height
h, w = side_length, side_length * 2
return h, w
def _get_test_image_size_no_resize(self, is_train):
# use cfg.INPUT to make sure data loader doesn't resize the image
if is_train:
assert len(self.cfg.INPUT.MAX_SIZE_TRAIN) == 1
h = self.cfg.INPUT.MIN_SIZE_TRAIN[0]
w = self.cfg.INPUT.MAX_SIZE_TRAIN
else:
h = self.cfg.INPUT.MIN_SIZE_TEST
w = self.cfg.INPUT.MAX_SIZE_TEST
return h, w
def _get_test_image_sizes(self, is_train):
"""override this method to use other image size strategy"""
return self._get_test_image_sizes_default(is_train)
def setup_custom_test(self):
"""
Override this when using different runner, using different base config file,
or setting specific config for certain test.
"""
self.runner = GeneralizedRCNNRunner()
self.cfg = self.runner.get_default_cfg()
# subclass can call: self.cfg.merge_from_file(...)
def force_apply_overwrite_opts(self):
"""
            Recommend only overriding this for a group of tests, while individual tests
should have its own `setup_custom_test`.
"""
# update config to make the model run fast
self.cfg.merge_from_list(get_quick_test_config_opts())
# forcing test on CPU
self.cfg.merge_from_list(["MODEL.DEVICE", "cpu"])
@contextlib.contextmanager
def _create_data_loader(self, is_train):
"""
Creating the data loader used for the test case. Note that it's better
to use "fake" data for quick test and isolating I/O.
"""
image_height, image_width = self._get_test_image_sizes(is_train=False)
with create_detection_data_loader_on_toy_dataset(
self.cfg,
image_height,
image_width,
is_train=is_train,
runner=self.runner,
) as data_loader:
yield data_loader
def _test_export(self, predictor_type, compare_match=True):
with self._create_data_loader(is_train=False) as data_loader:
inputs = next(iter(data_loader))
                # TODO: the export may change the model itself, need to fix this
model_to_export = copy.deepcopy(self.test_model)
predictor_path = convert_and_export_predictor(
self.cfg,
model_to_export,
predictor_type,
self.test_dir,
data_loader,
)
predictor = create_predictor(predictor_path)
predictor_outputs = predictor(inputs)
_validate_outputs(inputs, predictor_outputs)
if compare_match:
with torch.no_grad():
pytorch_outputs = self.test_model(inputs)
from detectron2.utils.testing import assert_instances_allclose
assert_instances_allclose(
predictor_outputs[0]["instances"],
pytorch_outputs[0]["instances"],
size_as_tensor=True,
)
return predictor_path
# TODO: add test_train
def _test_inference(self):
with self._create_data_loader(is_train=False) as data_loader:
inputs = next(iter(data_loader))
with torch.no_grad():
outputs = self.test_model(inputs)
_validate_outputs(inputs, outputs)
| d2go-main | d2go/utils/testing/rcnn_helper.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import torch
from d2go.quantization.qconfig import set_backend_and_create_qconfig
from d2go.registry.builtin import META_ARCH_REGISTRY
from d2go.utils.testing.data_loader_helper import create_local_dataset
from detectron2.structures import Boxes, ImageList, Instances
from torch.ao.quantization.quantize_fx import convert_fx, prepare_qat_fx
@META_ARCH_REGISTRY.register()
class DetMetaArchForTest(torch.nn.Module):
def __init__(self, cfg):
super().__init__()
self.conv = torch.nn.Conv2d(3, 4, kernel_size=3, stride=1, padding=1)
self.bn = torch.nn.BatchNorm2d(4)
self.relu = torch.nn.ReLU(inplace=True)
self.avgpool = torch.nn.AdaptiveAvgPool2d((1, 1))
        # weights that will be updated in forward() during training, used to simulate
        # weight updates in the optimization step
self.register_buffer("scale_weight", torch.Tensor([0.0]))
@property
def device(self):
return self.conv.weight.device
def forward(self, inputs):
if not self.training:
return self.inference(inputs)
images = [x["image"].to(self.device) for x in inputs]
images = ImageList.from_tensors(images, 1)
ret = self.conv(images.tensor)
ret = self.bn(ret)
ret = self.relu(ret)
ret = self.avgpool(ret)
# simulate weight updates
self.scale_weight.fill_(1.0)
return {"loss": ret.norm()}
def inference(self, inputs):
instance = Instances((10, 10))
instance.pred_boxes = Boxes(
torch.tensor([[2.5, 2.5, 7.5, 7.5]], device=self.device) * self.scale_weight
)
instance.scores = torch.tensor([0.9], device=self.device)
instance.pred_classes = torch.tensor([1], dtype=torch.int32, device=self.device)
ret = [{"instances": instance}]
return ret
def custom_prepare_fx(self, cfg, is_qat, example_input=None):
example_inputs = (torch.rand(1, 3, 3, 3),)
self.avgpool = prepare_qat_fx(
self.avgpool,
{"": set_backend_and_create_qconfig(cfg, is_train=self.training)},
example_inputs,
)
def convert_fx_callback(model):
model.avgpool = convert_fx(model.avgpool)
return model
return self, convert_fx_callback
def get_det_meta_arch_cfg(cfg, dataset_name, output_dir):
cfg.MODEL.DEVICE = "cpu"
cfg.MODEL.META_ARCHITECTURE = "DetMetaArchForTest"
cfg.DATASETS.TRAIN = (dataset_name,)
cfg.DATASETS.TEST = (dataset_name,)
cfg.INPUT.MIN_SIZE_TRAIN = (10,)
cfg.INPUT.MIN_SIZE_TEST = (10,)
cfg.SOLVER.MAX_ITER = 5
cfg.SOLVER.STEPS = [2]
cfg.SOLVER.WARMUP_ITERS = 1
cfg.SOLVER.CHECKPOINT_PERIOD = 1
cfg.SOLVER.IMS_PER_BATCH = 2
cfg.SOLVER.REFERENCE_WORLD_SIZE = 0
cfg.OUTPUT_DIR = output_dir
return cfg
def create_detection_cfg(runner, output_dir):
ds_name = create_local_dataset(output_dir, 5, 10, 10)
cfg = runner.get_default_cfg()
return get_det_meta_arch_cfg(cfg, ds_name, output_dir)
| d2go-main | d2go/utils/testing/meta_arch_helper.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
| d2go-main | d2go/utils/testing/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import contextlib
import itertools
import json
import math
import os
import uuid
from d2go.data.datasets import register_dataset_split
from d2go.runner import create_runner
from detectron2.data import DatasetCatalog, MetadataCatalog
from mobile_cv.common.misc.file_utils import make_temp_directory
from PIL import Image
IM_DIR = "image_directory"
ANN_FN = "annotation_file"
def create_toy_dataset(
image_generator, num_images, num_classes=-1, num_keypoints=0, is_rotated=False
):
"""given image_generator, create a dataset with toy annotations and catagories"""
categories = []
images = []
annotations = []
meta_data = {}
if num_classes == -1:
num_classes = num_images
for i in range(num_images):
image_generator.prepare_image(i)
image_dict = image_generator.get_image_dict(i)
width = image_dict["width"]
height = image_dict["height"]
images.append(image_dict)
if i < num_classes:
categories.append({"name": "class_{}".format(i), "id": i})
bbox = (
[width / 4, height / 4, width / 2, height / 2] # XYWH_ABS
if not is_rotated
else [width / 2, height / 2, width / 2, height / 2, 45] # cXcYWHO_ABS
)
keypoints = list(
itertools.chain.from_iterable(
[
math.cos(2 * math.pi * x / num_keypoints) * width / 4 + width / 2,
math.sin(2 * math.pi * x / num_keypoints) * height / 4 + height / 2,
1,
]
for x in range(num_keypoints)
)
)
no_pts = 10
segmentation = list(
itertools.chain.from_iterable(
[
math.cos(2 * math.pi * x / no_pts) * width / 4 + width / 2,
math.sin(2 * math.pi * x / no_pts) * height / 4 + height / 2,
]
for x in range(no_pts)
)
)
annotations.append(
{
"image_id": i,
"category_id": i % num_classes,
"id": i + 1,
"bbox": bbox,
"keypoints": keypoints,
"area": width * height,
"iscrowd": 0,
"ignore": 0,
"segmentation": [segmentation],
}
)
if num_keypoints > 0:
keypoint_names = [f"kp_{idx}" for idx in range(num_keypoints)]
meta_data.update({"keypoint_names": keypoint_names, "keypoint_flip_map": ()})
return (
{"categories": categories, "images": images, "annotations": annotations},
meta_data,
)
@contextlib.contextmanager
def _register_toy_dataset(
dataset_name, image_generator, num_images, num_classes=-1, num_keypoints=0
):
json_dataset, meta_data = create_toy_dataset(
image_generator,
num_images=num_images,
num_classes=num_classes,
num_keypoints=num_keypoints,
)
with make_temp_directory("detectron2go_tmp_dataset") as tmp_dir:
json_file = os.path.join(tmp_dir, "{}.json".format(dataset_name))
with open(json_file, "w") as f:
json.dump(json_dataset, f)
split_dict = {
IM_DIR: image_generator.get_image_dir(),
ANN_FN: json_file,
"meta_data": meta_data,
}
register_dataset_split(dataset_name, split_dict)
try:
yield
finally:
DatasetCatalog.remove(dataset_name)
MetadataCatalog.remove(dataset_name)
@contextlib.contextmanager
def register_toy_coco_dataset(
dataset_name, num_images=3, image_size=(5, 10), num_classes=-1, num_keypoints=0
):
width, height = image_size
with make_temp_directory("detectron2go_tmp_dataset") as dataset_dir:
image_dir = os.path.join(dataset_dir, "images")
os.makedirs(image_dir)
image_generator = LocalImageGenerator(image_dir, width=width, height=height)
with _register_toy_dataset(
dataset_name,
image_generator,
num_images=num_images,
num_classes=num_classes,
num_keypoints=num_keypoints,
):
yield
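# Example: a minimal sketch of using the context manager above; the dataset name
# is hypothetical and the registration is removed once the block exits.
def _example_register_toy_dataset():
    with register_toy_coco_dataset("_example_toy_ds_", num_images=2, image_size=(8, 8)):
        dicts = DatasetCatalog.get("_example_toy_ds_")
        # two images were registered, so two dataset dicts are expected
        assert len(dicts) == 2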
def create_local_dataset(
out_dir,
num_images,
image_width,
image_height,
num_classes=-1,
num_keypoints=0,
is_rotated=False,
):
dataset_name = "_test_ds_" + str(uuid.uuid4())
img_gen = LocalImageGenerator(out_dir, image_width, image_height)
json_dataset, meta_data = create_toy_dataset(
img_gen,
num_images=num_images,
num_classes=num_classes,
num_keypoints=num_keypoints,
)
json_file = os.path.join(out_dir, "{}.json".format(dataset_name))
with open(json_file, "w") as f:
json.dump(json_dataset, f)
split_dict = {
IM_DIR: img_gen.get_image_dir(),
ANN_FN: json_file,
"meta_data": meta_data,
}
if is_rotated:
split_dict["evaluator_type"] = "rotated_coco"
register_dataset_split(dataset_name, split_dict)
return dataset_name
class LocalImageGenerator:
def __init__(self, image_dir, width, height):
self._width = width
self._height = height
self._image_dir = image_dir
def get_image_dir(self):
return self._image_dir
def get_image_dict(self, i):
return {
"file_name": "{}.jpg".format(i),
"width": self._width,
"height": self._height,
"id": i,
}
def prepare_image(self, i):
image = Image.new("RGB", (self._width, self._height))
image.save(os.path.join(self._image_dir, self.get_image_dict(i)["file_name"]))
@contextlib.contextmanager
def create_detection_data_loader_on_toy_dataset(
cfg, height, width, is_train, runner=None
):
"""
Args:
cfg (CfgNode): the config used to create data loader, it can control things like
resizing, augmentation.
height, width (int): the height/width of the image files (not the resized image size)
is_train (bool): training or testing
"""
if runner is None:
runner = create_runner("d2go.runner.GeneralizedRCNNRunner")
# change dataset name to toy dataset
cfg.DATASETS.TRAIN = ["_toy_dataset_train_"]
cfg.DATASETS.TEST = ["_toy_dataset_test_"]
if is_train:
with register_toy_coco_dataset(
"_toy_dataset_train_", num_images=3, image_size=(width, height)
):
train_loader = runner.build_detection_train_loader(cfg)
yield train_loader
else:
with register_toy_coco_dataset(
"_toy_dataset_test_", num_images=3, image_size=(width, height)
):
test_loader = runner.build_detection_test_loader(
cfg, dataset_name="_toy_dataset_test_"
)
yield test_loader
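# Example: a minimal sketch of pulling one batch from a toy test loader; the
# GeneralizedRCNN runner and the image size are illustrative choices only.
def _example_toy_test_loader():
    runner = create_runner("d2go.runner.GeneralizedRCNNRunner")
    cfg = runner.get_default_cfg()
    with create_detection_data_loader_on_toy_dataset(
        cfg, height=16, width=16, is_train=False, runner=runner
    ) as data_loader:
        batch = next(iter(data_loader))
        assert "image" in batch[0]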
| d2go-main | d2go/utils/testing/data_loader_helper.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import pytorch_lightning as pl # type: ignore
from detectron2.utils.events import EventStorage
from pytorch_lightning.callbacks.model_checkpoint import ModelCheckpoint
def get_lt_trainer(output_dir: str, cfg):
checkpoint_callback = ModelCheckpoint(dirpath=output_dir, save_last=True)
return pl.Trainer(
max_epochs=10**8,
max_steps=cfg.SOLVER.MAX_ITER,
val_check_interval=cfg.TEST.EVAL_PERIOD
if cfg.TEST.EVAL_PERIOD > 0
else cfg.SOLVER.MAX_ITER,
callbacks=[checkpoint_callback],
logger=False,
)
def lt_train(task, trainer):
with EventStorage() as storage:
task.storage = storage
trainer.fit(task)
def lt_test(task, trainer):
with EventStorage() as storage:
task.storage = storage
trainer.test(task)
return task.eval_res
| d2go-main | d2go/utils/testing/lightning_train_helper.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import importlib
import os
import socket
import uuid
from functools import wraps
from tempfile import TemporaryDirectory
from typing import Optional
import torch
from d2go.distributed import distributed_worker, DistributedParams
def get_resource_path(file: Optional[str] = None):
path_list = [
os.path.dirname(importlib.import_module("d2go.tests").__file__),
"resources",
]
if file is not None:
path_list.append(file)
return os.path.join(*path_list)
def skip_if_no_gpu(func):
"""Decorator that can be used to skip GPU tests on non-GPU machines."""
func.skip_if_no_gpu = True
@wraps(func)
def wrapper(*args, **kwargs):
if not torch.cuda.is_available():
return
if torch.cuda.device_count() <= 0:
return
return func(*args, **kwargs)
return wrapper
def enable_ddp_env(backend="gloo"):
def _enable_ddp_env(func):
@wraps(func)
def wrapper(*args, **kwargs):
def find_free_port() -> str:
s = socket.socket()
s.bind(("localhost", 0)) # Bind to a free port provided by the host.
return str(s.getsockname()[1])
os.environ["MASTER_ADDR"] = "localhost"
os.environ["MASTER_PORT"] = find_free_port()
return distributed_worker(
main_func=func,
args=args,
kwargs=kwargs,
backend=backend,
init_method="file:///tmp/detectron2go_test_ddp_init_{}".format(
uuid.uuid4().hex
),
dist_params=DistributedParams(
local_rank=0,
machine_rank=0,
global_rank=0,
num_processes_per_machine=1,
world_size=1,
),
return_save_file=None, # don't save file
)
return wrapper
return _enable_ddp_env
def tempdir(func):
"""A decorator for creating a tempory directory that is cleaned up after function execution."""
@wraps(func)
def wrapper(self, *args, **kwargs):
with TemporaryDirectory() as temp:
return func(self, temp, *args, **kwargs)
return wrapper
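# Example: a minimal sketch of `tempdir` injecting a scratch directory into a
# method; the helper class exists only for illustration.
class _ExampleTempdirUser:
    @tempdir
    def write_marker(self, tmp_dir):
        marker = os.path.join(tmp_dir, "marker.txt")
        with open(marker, "w") as f:
            f.write("ok")
        return os.path.exists(marker)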
| d2go-main | d2go/utils/testing/helper.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# pyre-unsafe
from typing import Optional
import torch
from pytorch_lightning import LightningModule
from torch.utils.data.dataset import Dataset
class RandomDataset(Dataset):
def __init__(self, size, length):
self.len = length
self.data = torch.randn(length, size)
def __getitem__(self, index):
return self.data[index]
def __len__(self):
return self.len
class TestModule(LightningModule):
def __init__(self, epoch_min_loss_override: Optional[int] = None):
"""LightningModule for testing purposes
Args:
epoch_min_loss_override (int, optional): Pass in an epoch that will be set to the minimum
validation loss for testing purposes (zero based). If None this is ignored. Defaults to None.
"""
super().__init__()
self.layer = torch.nn.Linear(in_features=32, out_features=2)
self.another_layer = torch.nn.Linear(in_features=2, out_features=2)
self.epoch_min_loss_override = epoch_min_loss_override
def forward(self, x):
x = self.layer(x)
return self.another_layer(x)
def loss(self, batch, prediction):
# An arbitrary loss to have a loss that updates the model weights during `Trainer.fit` calls
return torch.nn.functional.mse_loss(prediction, torch.ones_like(prediction))
def training_step(self, batch, batch_idx):
output = self.forward(batch)
loss = self.loss(batch, output)
return {"output": output.detach(), "loss": loss, "checkpoint_on": loss.detach()}
def validation_step(self, batch, batch_idx):
output = self.forward(batch)
loss = self.loss(batch, output)
return {"output": output.detach(), "loss": loss, "checkpoint_on": loss.detach()}
def test_step(self, batch, batch_idx):
output = self.forward(batch)
loss = self.loss(batch, output)
return {"output": output.detach(), "loss": loss}
def training_epoch_end(self, outputs) -> None:
avg_loss = torch.stack([x["loss"] for x in outputs]).mean()
self.log("avg_loss", avg_loss)
def validation_epoch_end(self, outputs) -> None:
avg_val_loss = torch.stack(
[torch.randn(1, requires_grad=True) for _ in outputs]
).mean()
# For testing purposes allow a nominated epoch to have a low loss
if self.current_epoch == self.epoch_min_loss_override:
avg_val_loss -= 1e10
self.log("val_loss", avg_val_loss)
self.log("checkpoint_on", avg_val_loss)
def test_epoch_end(self, outputs) -> None:
avg_loss = torch.stack(
[torch.randn(1, requires_grad=True) for _ in outputs]
).mean()
self.log("val_loss", avg_loss)
def configure_optimizers(self):
optimizer = torch.optim.SGD(self.parameters(), lr=0.001)
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1)
return [optimizer], [lr_scheduler]
def train_dataloader(self):
return torch.utils.data.DataLoader(RandomDataset(32, 64))
def val_dataloader(self):
return torch.utils.data.DataLoader(RandomDataset(32, 64))
def test_dataloader(self):
return torch.utils.data.DataLoader(RandomDataset(32, 64))
| d2go-main | d2go/utils/testing/lightning_test_module.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import os
from typing import Optional
import pkg_resources
import torch
from d2go.runner import create_runner
from d2go.utils.launch_environment import MODEL_ZOO_STORAGE_PREFIX
from detectron2.checkpoint import DetectionCheckpointer
class _ModelZooUrls(object):
"""
Mapping from names to officially released D2Go pre-trained models.
"""
CONFIG_PATH_TO_URL_SUFFIX = {
"faster_rcnn_fbnetv3a_C4.yaml": "268421013/model_final.pth",
"faster_rcnn_fbnetv3a_dsmask_C4.yaml": "268412271/model_0499999.pth",
"faster_rcnn_fbnetv3g_fpn.yaml": "250356938/model_0374999.pth",
"mask_rcnn_fbnetv3a_C4.yaml": "268421013/model_final.pth",
"mask_rcnn_fbnetv3a_dsmask_C4.yaml": "268412271/model_0499999.pth",
"mask_rcnn_fbnetv3g_fpn.yaml": "287445123/model_0409999.pth",
"keypoint_rcnn_fbnetv3a_dsmask_C4.yaml": "250430934/model_0389999.pth",
}
def get_checkpoint_url(config_path):
"""
Returns the URL to the model trained using the given config
Args:
config_path (str): config file name relative to d2go's "configs/"
directory, e.g., "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml"
Returns:
str: a URL to the model
"""
name = config_path.replace(".yaml", "")
if config_path in _ModelZooUrls.CONFIG_PATH_TO_URL_SUFFIX:
suffix = _ModelZooUrls.CONFIG_PATH_TO_URL_SUFFIX[config_path]
return MODEL_ZOO_STORAGE_PREFIX + suffix
raise RuntimeError("{} not available in Model Zoo!".format(name))
def get_config_file(config_path):
"""
Returns path to a builtin config file.
Args:
config_path (str): config file name relative to d2go's "configs/"
directory, e.g., "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml"
Returns:
str: the real path to the config file.
"""
cfg_file = pkg_resources.resource_filename(
"d2go", os.path.join("configs", config_path)
)
if not os.path.exists(cfg_file):
raise RuntimeError("{} not available in Model Zoo!".format(config_path))
return cfg_file
def get_config(
config_path, trained: bool = False, runner="d2go.runner.GeneralizedRCNNRunner"
):
"""
Returns a config object for a model in model zoo.
Args:
config_path (str): config file name relative to d2go's "configs/"
directory, e.g., "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml"
trained (bool): If True, will set ``MODEL.WEIGHTS`` to trained model zoo weights.
If False, the checkpoint specified in the config file's ``MODEL.WEIGHTS`` is used
instead; this will typically (though not always) initialize a subset of weights using
an ImageNet pre-trained model, while randomly initializing the other weights.
Returns:
CfgNode: a config object
"""
cfg_file = get_config_file(config_path)
runner = create_runner(runner)
cfg = runner.get_default_cfg()
cfg.merge_from_file(cfg_file)
if trained:
cfg.MODEL.WEIGHTS = get_checkpoint_url(config_path)
return cfg
def get(
config_path,
trained: bool = False,
device: Optional[str] = None,
runner="d2go.runner.GeneralizedRCNNRunner",
):
"""
    Get a model specified by relative path under D2Go's official ``configs/`` directory.
Args:
config_path (str): config file name relative to d2go's "configs/"
directory, e.g., "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml"
trained (bool): see :func:`get_config`.
device (str or None): overwrite the device in config, if given.
Returns:
nn.Module: a d2go model. Will be in training mode.
Example:
::
from d2go import model_zoo
model = model_zoo.get("faster_rcnn_fbnetv3a_C4.yaml", trained=True)
"""
cfg = get_config(config_path, trained)
if device is not None:
cfg.MODEL.DEVICE = device
elif not torch.cuda.is_available():
cfg.MODEL.DEVICE = "cpu"
runner = create_runner(runner)
model = runner.build_model(cfg)
DetectionCheckpointer(model).load(cfg.MODEL.WEIGHTS)
return model
| d2go-main | d2go/model_zoo/model_zoo.py |
d2go-main | d2go/model_zoo/__init__.py |
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import logging
from typing import List
import detectron2.utils.comm as comm
import numpy as np
import torch
from d2go.config import CfgNode as CN, temp_defrost
from detectron2.engine import hooks
from detectron2.layers import ShapeSpec
from detectron2.modeling import GeneralizedRCNN
from detectron2.modeling.anchor_generator import (
ANCHOR_GENERATOR_REGISTRY,
BufferList,
DefaultAnchorGenerator,
)
from detectron2.modeling.proposal_generator.rpn import RPN
from detectron2.structures.boxes import Boxes
logger = logging.getLogger(__name__)
def add_kmeans_anchors_cfg(_C):
_C.MODEL.KMEANS_ANCHORS = CN()
_C.MODEL.KMEANS_ANCHORS.KMEANS_ANCHORS_ON = False
_C.MODEL.KMEANS_ANCHORS.NUM_CLUSTERS = 0
_C.MODEL.KMEANS_ANCHORS.NUM_TRAINING_IMG = 0
_C.MODEL.KMEANS_ANCHORS.DATASETS = ()
_C.MODEL.ANCHOR_GENERATOR.OFFSET = 0.0
_C.MODEL.KMEANS_ANCHORS.RNG_SEED = 3
return _C
def compute_kmeans_anchors_hook(runner, cfg):
"""
This function will create a before_train hook, it will:
1: create a train loader using provided KMEANS_ANCHORS.DATASETS.
        2: collect statistics of boxes using outputs from the train loader, using up
            to KMEANS_ANCHORS.NUM_TRAINING_IMG images.
3: compute K-means using KMEANS_ANCHORS.NUM_CLUSTERS clusters
4: update the buffers in anchor_generator.
"""
def before_train_callback(trainer):
if not cfg.MODEL.KMEANS_ANCHORS.KMEANS_ANCHORS_ON:
return
new_cfg = cfg.clone()
with temp_defrost(new_cfg):
new_cfg.DATASETS.TRAIN = cfg.MODEL.KMEANS_ANCHORS.DATASETS
data_loader = runner.build_detection_train_loader(new_cfg)
anchors = compute_kmeans_anchors(cfg, data_loader)
anchors = anchors.tolist()
assert isinstance(trainer.model, GeneralizedRCNN)
assert isinstance(trainer.model.proposal_generator, RPN)
anchor_generator = trainer.model.proposal_generator.anchor_generator
assert isinstance(anchor_generator, KMeansAnchorGenerator)
anchor_generator.update_cell_anchors(anchors)
return hooks.CallbackHook(before_train=before_train_callback)
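# Example: a minimal config sketch for enabling k-means anchors; the dataset name
# and the cluster/image counts are hypothetical, and the hook returned above is
# meant to be added to the trainer's hook list before training starts.
def _example_enable_kmeans_anchors(cfg):
    cfg = add_kmeans_anchors_cfg(cfg)
    cfg.MODEL.ANCHOR_GENERATOR.NAME = "KMeansAnchorGenerator"
    cfg.MODEL.KMEANS_ANCHORS.KMEANS_ANCHORS_ON = True
    cfg.MODEL.KMEANS_ANCHORS.NUM_CLUSTERS = 5
    cfg.MODEL.KMEANS_ANCHORS.NUM_TRAINING_IMG = 100
    cfg.MODEL.KMEANS_ANCHORS.DATASETS = ("my_train_dataset",)
    return cfg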
@ANCHOR_GENERATOR_REGISTRY.register()
class KMeansAnchorGenerator(DefaultAnchorGenerator):
"""Generate anchors using pre-computed KMEANS_ANCHORS.COMPUTED_ANCHORS"""
def __init__(self, cfg, input_shape: List[ShapeSpec]):
torch.nn.Module.__init__(self)
self.strides = [x.stride for x in input_shape]
self.offset = cfg.MODEL.ANCHOR_GENERATOR.OFFSET
assert 0.0 <= self.offset < 1.0, self.offset
# kmeans anchors
num_features = len(cfg.MODEL.RPN.IN_FEATURES)
        assert num_features == 1, "Doesn't support multiple feature maps"
# NOTE: KMEANS anchors are only computed at training time, when initialized,
# set anchors to correct shape but invalid value as place holder.
computed_anchors = [[float("Inf")] * 4] * cfg.MODEL.KMEANS_ANCHORS.NUM_CLUSTERS
cell_anchors = [torch.Tensor(computed_anchors)]
self.cell_anchors = BufferList(cell_anchors)
def update_cell_anchors(self, computed_anchors):
assert len(self.cell_anchors) == 1
for buf in self.cell_anchors.buffers():
assert len(buf) == len(computed_anchors)
buf.data = torch.Tensor(computed_anchors).to(buf.device)
logger.info("Updated cell anchors")
def forward(self, *args, **kwargs):
for base_anchors in self.cell_anchors:
assert torch.isfinite(base_anchors).all(), (
"The anchors are not initialized yet, please providing COMPUTED_ANCHORS"
" when creating the model and/or loading the valid weights."
)
return super().forward(*args, **kwargs)
def collect_boxes_size_stats(data_loader, max_num_imgs, _legacy_plus_one=False):
logger.info(
"Collecting size of boxes, loading up to {} images from data loader ...".format(
max_num_imgs
)
)
    # data_loader might have infinite length, so we can't loop over all images;
    # max_num_imgs == 0 means 0 images rather than the whole dataset
assert max_num_imgs >= 0
box_sizes = []
remaining_num_imgs = max_num_imgs
total_batches = 0
for i, batched_inputs in enumerate(data_loader):
total_batches += len(batched_inputs)
batch_size = min(remaining_num_imgs, len(batched_inputs))
batched_inputs = batched_inputs[:batch_size]
for x in batched_inputs:
boxes = x["instances"].gt_boxes # xyxy
assert isinstance(boxes, Boxes)
for t in boxes.tensor:
box_sizes += [[t[2] - t[0], t[3] - t[1]]]
# NOTE: previous implementation didn't apply +1, thus to match
# previous (incorrect) results we have to minus the im_scale
if _legacy_plus_one: # only for matching old tests
im_scale = x["image"].shape[1] / x["height"] # image is chw
box_sizes[-1][0] -= im_scale
box_sizes[-1][1] -= im_scale
estimated_iters = max_num_imgs / total_batches * (i + 1)
remaining_num_imgs -= batch_size
if i % max(1, int(estimated_iters / 20)) == 0:
# log 20 times at most
percentage = 100.0 * i / estimated_iters
logger.info(
"Processed batch {} ({:.2f}%) from data_loader, got {} boxes,"
" remaining number of images: {}/{}".format(
i, percentage, len(box_sizes), remaining_num_imgs, max_num_imgs
)
)
if remaining_num_imgs <= 0:
assert remaining_num_imgs == 0
break
box_sizes = np.array(box_sizes)
logger.info(
"Collected {} boxes from {} images".format(len(box_sizes), max_num_imgs)
)
return box_sizes
def compute_kmeans_anchors(
cfg, data_loader, sort_by_area=True, _stride=0, _legacy_plus_one=False
):
assert (
cfg.MODEL.KMEANS_ANCHORS.NUM_TRAINING_IMG > 0
), "Please provide positive MODEL.KMEANS_ANCHORS.NUM_TRAINING_IMG"
num_training_img = cfg.MODEL.KMEANS_ANCHORS.NUM_TRAINING_IMG
div_i, mod_i = divmod(num_training_img, comm.get_world_size())
num_training_img_i = div_i + (comm.get_rank() < mod_i)
box_sizes_i = collect_boxes_size_stats(
data_loader,
num_training_img_i,
_legacy_plus_one=_legacy_plus_one,
)
all_box_sizes = comm.all_gather(box_sizes_i)
box_sizes = np.concatenate(all_box_sizes)
logger.info("Collected {} boxes from all gpus".format(len(box_sizes)))
assert (
cfg.MODEL.KMEANS_ANCHORS.NUM_CLUSTERS > 0
), "Please provide positive MODEL.KMEANS_ANCHORS.NUM_CLUSTERS"
from sklearn.cluster import KMeans # delayed import
default_anchors = (
KMeans(
n_clusters=cfg.MODEL.KMEANS_ANCHORS.NUM_CLUSTERS,
random_state=cfg.MODEL.KMEANS_ANCHORS.RNG_SEED,
)
.fit(box_sizes)
.cluster_centers_
)
anchors = []
for anchor in default_anchors:
w, h = anchor
# center anchor boxes at (stride/2,stride/2)
new_anchors = np.hstack(
(
_stride / 2 - 0.5 * w,
_stride / 2 - 0.5 * h,
_stride / 2 + 0.5 * w,
_stride / 2 + 0.5 * h,
)
)
anchors.append(new_anchors)
anchors = np.array(anchors)
# sort anchors by area
areas = (anchors[:, 2] - anchors[:, 0]) * (anchors[:, 3] - anchors[:, 1])
sqrt_areas = np.sqrt(areas)
if sort_by_area:
indices = np.argsort(sqrt_areas)
anchors = anchors[indices]
sqrt_areas = sqrt_areas[indices].tolist()
display_str = "\n".join(
[
s + "\t sqrt area: {:.2f}".format(a)
for s, a in zip(str(anchors).split("\n"), sqrt_areas)
]
)
logger.info(
"Compuated kmeans anchors (sorted by area: {}):\n{}".format(
sort_by_area, display_str
)
)
return anchors
| d2go-main | d2go/modeling/kmeans_anchors.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from abc import abstractmethod
from typing import List, Tuple
import torch
from d2go.registry.builtin import MODELING_HOOK_REGISTRY
class ModelingHook(object):
"""Modeling hooks provide a way to modify the model during the model building
process. It is simple but allows users to modify the model by creating wrapper,
override member functions, adding additional components, and loss etc.. It
could be used to implement features such as QAT, model transformation for training,
distillation/semi-supervised learning, and customization for loading pre-trained
weights.
"""
def __init__(self, cfg):
self.cfg = cfg
@abstractmethod
def apply(self, model: torch.nn.Module) -> torch.nn.Module:
"""This function will called during the model building process to modify
the behavior of the input model.
The created model will be
model == create meta arch -> model_hook_1.apply(model) ->
model_hook_2.apply(model) -> ...
"""
pass
@abstractmethod
def unapply(self, model: torch.nn.Module) -> torch.nn.Module:
"""This function will be called when the users called model.unapply_modeling_hooks()
after training. The main use case of the function is to remove the changes
applied to the model in `apply`. The hooks will be called in reverse order
as follow:
model.unapply_modeling_hooks() == model_hook_N.unapply(model) ->
model_hook_N-1.unapply(model) -> ... -> model_hook_1.unapply(model)
"""
pass
def _build_modeling_hooks(cfg, hook_names: List[str]) -> List[ModelingHook]:
"""Build the hooks from cfg"""
ret = [MODELING_HOOK_REGISTRY.get(name)(cfg) for name in hook_names]
return ret
def _unapply_modeling_hook(
model: torch.nn.Module, hooks: List[ModelingHook]
) -> torch.nn.Module:
"""Call unapply on the hooks in reversed order"""
for hook in reversed(hooks):
model = hook.unapply(model)
return model
def _apply_modeling_hooks(
model: torch.nn.Module, hooks: List[ModelingHook]
) -> torch.nn.Module:
"""Apply hooks on the model, users could call model.unapply_modeling_hooks()
to return the model that removes all the hooks
"""
if len(hooks) == 0:
return model
for hook in hooks:
model = hook.apply(model)
assert not hasattr(model, "_modeling_hooks")
model._modeling_hooks = hooks
def _unapply_modeling_hooks(self):
assert hasattr(self, "_modeling_hooks")
model = _unapply_modeling_hook(self, self._modeling_hooks)
return model
# add a function that could be used to unapply the modeling hooks
assert not hasattr(model, "unapply_modeling_hooks")
model.unapply_modeling_hooks = _unapply_modeling_hooks.__get__(model)
return model
def build_and_apply_modeling_hooks(
model: torch.nn.Module, cfg, hook_names: List[str]
) -> Tuple[torch.nn.Module, List[ModelingHook]]:
"""Build modeling hooks from cfg and apply hooks on the model. Users could
call model.unapply_modeling_hooks() to return the model that removes all
the hooks.
"""
hooks = _build_modeling_hooks(cfg, hook_names)
model = _apply_modeling_hooks(model, hooks)
return model, hooks
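# Example: a minimal sketch of a custom hook; a real implementation would be
# registered via @MODELING_HOOK_REGISTRY.register() and listed by name in the
# config so that build_and_apply_modeling_hooks can find it. The attribute set
# below is purely illustrative.
class _ExampleNoOpHook(ModelingHook):
    def apply(self, model: torch.nn.Module) -> torch.nn.Module:
        model._example_hook_applied = True
        return model
    def unapply(self, model: torch.nn.Module) -> torch.nn.Module:
        del model._example_hook_applied
        return model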
| d2go-main | d2go/modeling/modeling_hook.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# misc.py
# modules that are used in different places but are not a specific type (e.g., backbone)
from typing import Any, Callable, Optional
import torch
import torch.nn as nn
class SplitAndConcat(nn.Module):
"""Split the data from split_dim and concatenate in concat_dim.
@param split_dim from which axis the data will be chunk
@param concat_dim to which axis the data will be concatenated
@param chunk size of the data to be chunk/concatenated
"""
def __init__(self, split_dim: int = 1, concat_dim: int = 0, chunk: int = 2):
super(SplitAndConcat, self).__init__()
self.split_dim = split_dim
self.concat_dim = concat_dim
self.chunk = chunk
def forward(self, x):
x = torch.chunk(x, self.chunk, dim=self.split_dim)
x = torch.cat(x, dim=self.concat_dim)
return x
def extra_repr(self):
return (
f"split_dim={self.split_dim}, concat_dim={self.concat_dim}, "
f"chunk={self.chunk}"
)
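# Example: a minimal shape sketch for SplitAndConcat with its default arguments:
# the channel dimension is split into two chunks that are stacked along the batch
# dimension, so (4, 6, 8, 8) becomes (8, 3, 8, 8).
def _example_split_and_concat():
    module = SplitAndConcat(split_dim=1, concat_dim=0, chunk=2)
    out = module(torch.zeros(4, 6, 8, 8))
    assert out.shape == (8, 3, 8, 8)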
class AddCoordChannels(nn.Module):
"""Appends coordinate location values to the channel dimension.
@param with_r include radial distance from centroid as additional channel (default: False)
"""
def __init__(self, with_r: bool = False) -> None:
super().__init__()
self.with_r = with_r
def forward(self, input_tensor):
batch_size_shape, channel_in_shape, dim_y, dim_x = input_tensor.shape
device = input_tensor.device
xx_ones = torch.ones([1, 1, 1, dim_x], dtype=torch.int32)
yy_ones = torch.ones([1, 1, 1, dim_y], dtype=torch.int32)
xx_range = torch.arange(dim_y, dtype=torch.int32)
yy_range = torch.arange(dim_x, dtype=torch.int32)
xx_range = xx_range[None, None, :, None]
yy_range = yy_range[None, None, :, None]
xx_channel = torch.matmul(xx_range, xx_ones)
yy_channel = torch.matmul(yy_range, yy_ones)
# transpose y
yy_channel = yy_channel.permute(0, 1, 3, 2)
xx_channel = xx_channel.float() / (dim_y - 1)
yy_channel = yy_channel.float() / (dim_x - 1)
xx_channel = xx_channel * 2 - 1
yy_channel = yy_channel * 2 - 1
xx_channel = xx_channel.repeat(batch_size_shape, 1, 1, 1)
yy_channel = yy_channel.repeat(batch_size_shape, 1, 1, 1)
out = torch.cat(
[input_tensor, xx_channel.to(device), yy_channel.to(device)], dim=1
)
if self.with_r:
rr = torch.sqrt(
torch.pow(xx_channel - 0.5, 2) + torch.pow(yy_channel - 0.5, 2)
)
out = torch.cat([out, rr], dim=1)
return out
def inplace_delegate(
self,
api_name: str,
sub_module_name: str,
setter_fn: Optional[Callable],
*args,
**kwargs,
) -> Any:
"""Helper function to delegate API calls to its submodule"""
sub_module = getattr(self, sub_module_name)
api_name = f"delegate_{api_name}"
if hasattr(sub_module, api_name):
func = getattr(sub_module, api_name)
orig_ret = func(*args, **kwargs)
if setter_fn is None:
# Assume the return of `func` will replace the submodule
setattr(self, sub_module_name, orig_ret)
return self
else:
return setter_fn(self, sub_module_name, orig_ret)
else:
raise RuntimeError(
f"It seems the {sub_module_name} doesn't implement {api_name},"
" quantization might fail."
)
| d2go-main | d2go/modeling/misc.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import logging
from abc import ABC, abstractmethod
from typing import Any, Dict, List
import numpy as np
import torch
from d2go.config import CfgNode as CN
from d2go.data.dataset_mappers.build import D2GO_DATA_MAPPER_REGISTRY
from d2go.data.dataset_mappers.d2go_dataset_mapper import D2GoDatasetMapper
from detectron2.layers import cat
from detectron2.modeling import ROI_HEADS_REGISTRY, StandardROIHeads
from detectron2.utils.registry import Registry
from mobile_cv.torch.utils_toffee.alias import alias
from torch import nn
from torch.nn import functional as F
logger = logging.getLogger(__name__)
SUBCLASS_FETCHER_REGISTRY = Registry("SUBCLASS_FETCHER")
def add_subclass_configs(cfg):
_C = cfg
_C.MODEL.SUBCLASS = CN()
_C.MODEL.SUBCLASS.SUBCLASS_ON = False
_C.MODEL.SUBCLASS.NUM_SUBCLASSES = 0 # must be set
_C.MODEL.SUBCLASS.NUM_LAYERS = 1
_C.MODEL.SUBCLASS.SUBCLASS_ID_FETCHER = "SubclassFetcher" # ABC, must be set
_C.MODEL.SUBCLASS.SUBCLASS_MAPPING = (
[]
) # subclass mapping from model output to annotation
class SubclassFetcher(ABC):
"""Fetcher class to read subclass id annotations from dataset and prepare for train/eval.
Subclass this and register with `@SUBCLASS_FETCHER_REGISTRY.register()` decorator
to use with custom projects.
"""
def __init__(self, cfg):
raise NotImplementedError()
@property
@abstractmethod
def subclass_names(self) -> List[str]:
"""Overwrite this member with any new mappings' subclass names, which
may be useful for specific evaluation purposes.
len(self.subclass_names) should be equal to the expected number
of subclass head outputs (cfg.MODEL.SUBCLASS.NUM_SUBCLASSES + 1).
"""
pass
def remap(self, subclass_id: int) -> int:
"""Map subclass ids read from dataset to new label id"""
return subclass_id
def fetch_subclass_ids(self, dataset_dict: Dict[str, Any]) -> List[int]:
"""Get all the subclass_ids in a dataset dict"""
extras_list = [anno.get("extras") for anno in dataset_dict["annotations"]]
subclass_ids = [extras["subclass_id"] for extras in extras_list]
return subclass_ids
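# Example: a minimal sketch of a concrete fetcher; a real one would be decorated
# with @SUBCLASS_FETCHER_REGISTRY.register() and selected through
# MODEL.SUBCLASS.SUBCLASS_ID_FETCHER. The subclass names below are hypothetical.
class _ExampleSubclassFetcher(SubclassFetcher):
    def __init__(self, cfg):
        self._names = ["background", "example_subclass"]
    @property
    def subclass_names(self) -> List[str]:
        return self._names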
@D2GO_DATA_MAPPER_REGISTRY.register()
class SubclassDatasetMapper(D2GoDatasetMapper):
"""
Wrap any dataset mapper, encode gt_subclasses to the instances.
"""
def __init__(self, cfg, is_train, tfm_gens=None, subclass_fetcher=None):
super().__init__(cfg, is_train=is_train, tfm_gens=tfm_gens)
if subclass_fetcher is None:
fetcher_name = cfg.MODEL.SUBCLASS.SUBCLASS_ID_FETCHER
self.subclass_fetcher = SUBCLASS_FETCHER_REGISTRY.get(fetcher_name)(cfg)
logger.info(
f"Initialized {self.__class__.__name__} with "
f"subclass fetcher '{self.subclass_fetcher.__class__.__name__}'"
)
else:
assert isinstance(subclass_fetcher, SubclassFetcher), subclass_fetcher
self.subclass_fetcher = subclass_fetcher
logger.info(f"Set subclass fetcher to {self.subclass_fetcher}")
        # NOTE: field doesn't exist when loading an (old) caffe2 model.
# self.subclass_on = cfg.MODEL.SUBCLASS.SUBCLASS_ON
self.subclass_on = True
def _original_call(self, dataset_dict):
"""
Map the dataset dict with D2GoDatasetMapper, then augment with subclass gt tensors.
"""
# Transform removes key 'annotations' from the dataset dict
mapped_dataset_dict = super()._original_call(dataset_dict)
if self.is_train and self.subclass_on:
subclass_ids = self.subclass_fetcher.fetch_subclass_ids(dataset_dict)
subclasses = torch.tensor(subclass_ids, dtype=torch.int64)
mapped_dataset_dict["instances"].gt_subclasses = subclasses
return mapped_dataset_dict
def build_subclass_head(cfg, in_chann, out_chann):
# fully connected layers: n-1 in_chann x in_chann layers, and 1 in_chann x out_chann layer
layers = [
nn.Linear(in_chann, in_chann) for _ in range(cfg.MODEL.SUBCLASS.NUM_LAYERS - 1)
]
layers.append(nn.Linear(in_chann, out_chann))
return nn.Sequential(*layers)
@ROI_HEADS_REGISTRY.register()
class StandardROIHeadsWithSubClass(StandardROIHeads):
"""
A Standard ROIHeads which contains an addition of subclass head.
"""
def __init__(self, cfg, input_shape):
super().__init__(cfg, input_shape)
self.subclass_on = cfg.MODEL.SUBCLASS.SUBCLASS_ON
if not self.subclass_on:
return
self.num_subclasses = cfg.MODEL.SUBCLASS.NUM_SUBCLASSES
self.subclass_head = build_subclass_head(
cfg, self.box_head.output_shape.channels, self.num_subclasses + 1
)
for layer in self.subclass_head:
nn.init.normal_(layer.weight, std=0.01)
nn.init.constant_(layer.bias, 0.0)
def forward(self, images, features, proposals, targets=None):
"""
Same as StandardROIHeads.forward but add logic for subclass.
"""
if not self.subclass_on:
return super().forward(images, features, proposals, targets)
# --- start copy -------------------------------------------------------
del images
if self.training:
proposals = self.label_and_sample_proposals(proposals, targets)
# NOTE: `has_gt` = False for negatives and we must manually register `gt_subclasses`,
# because custom gt_* fields will not be automatically registered in sampled proposals.
for pp_per_im in proposals:
if not pp_per_im.has("gt_subclasses"):
background_subcls_idx = 0
pp_per_im.gt_subclasses = torch.cuda.LongTensor(
len(pp_per_im)
).fill_(background_subcls_idx)
del targets
features_list = [features[f] for f in self.in_features]
box_features = self.box_pooler(
features_list, [x.proposal_boxes for x in proposals]
)
box_features = self.box_head(box_features)
predictions = self.box_predictor(box_features)
# --- end copy ---------------------------------------------------------
# NOTE: don't delete box_features, keep it temporarily
# del box_features
box_features = box_features.view(
box_features.shape[0], np.prod(box_features.shape[1:])
)
pred_subclass_logits = self.subclass_head(box_features)
if self.training:
losses = self.box_predictor.losses(predictions, proposals)
# During training the proposals used by the box head are
# used by the mask, keypoint (and densepose) heads.
losses.update(self._forward_mask(features, proposals))
losses.update(self._forward_keypoint(features, proposals))
# subclass head
gt_subclasses = cat([p.gt_subclasses for p in proposals], dim=0)
loss_subclass = F.cross_entropy(
pred_subclass_logits, gt_subclasses, reduction="mean"
)
losses.update({"loss_subclass": loss_subclass})
return proposals, losses
else:
pred_instances, kept_indices = self.box_predictor.inference(
predictions, proposals
)
# During inference cascaded prediction is used: the mask and keypoints
# heads are only applied to the top scoring box detections.
pred_instances = self.forward_with_given_boxes(features, pred_instances)
# subclass head
probs = F.softmax(pred_subclass_logits, dim=-1)
for pred_instances_i, kept_indices_i in zip(pred_instances, kept_indices):
pred_instances_i.pred_subclass_prob = torch.index_select(
probs,
dim=0,
index=kept_indices_i.to(torch.int64),
)
if torch.onnx.is_in_onnx_export():
assert len(pred_instances) == 1
pred_instances[0].pred_subclass_prob = alias(
pred_instances[0].pred_subclass_prob, "subclass_prob_nms"
)
return pred_instances, {}
| d2go-main | d2go/modeling/subclass.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import copy
import itertools
import logging
from contextlib import contextmanager
from typing import Iterator, List
import torch
from detectron2.engine.train_loop import HookBase
from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import (
_CHECKPOINT_PREFIX,
)
logger = logging.getLogger(__name__)
class EMAState(object):
def __init__(self, include_frozen=True, include_buffer=True):
self.include_frozen = include_frozen
self.include_buffer = include_buffer
self.state = {}
# HACK: This hack is needed to strip checkpoint wrapper prefix from fqns so it doesn't affect loading.
# TODO: Remove this hack by rewriting EMAState to use model.state_dict()
self.prefix_to_remove = [_CHECKPOINT_PREFIX]
@classmethod
def FromModel(cls, model: torch.nn.Module, device: str = "", **kwargs):
ret = cls(**kwargs)
ret.save_from(model, device)
return ret
def save_from(self, model: torch.nn.Module, device: str = ""):
"""Save model state from `model` to this object"""
for name, val in self.get_model_state_iterator(model):
val = val.detach().clone()
self.state[name] = val.to(device) if device else val
def apply_to(self, model: torch.nn.Module):
"""Apply state to `model` from this object"""
with torch.no_grad():
for name, val in self.get_model_state_iterator(model):
assert (
name in self.state
), f"Name {name} not existed, available names {self.state.keys()}"
val.copy_(self.state[name])
@contextmanager
def apply_and_restore(self, model):
old_state = EMAState.FromModel(model, self.device)
self.apply_to(model)
yield old_state
old_state.apply_to(model)
def get_ema_model(self, model):
ret = copy.deepcopy(model)
self.apply_to(ret)
return ret
@property
def device(self):
if not self.has_inited():
return None
return next(iter(self.state.values())).device
def to(self, device):
for name in self.state:
self.state[name] = self.state[name].to(device)
return self
def has_inited(self):
return self.state
def clear(self):
self.state.clear()
return self
def _get_model_parameter_iterator(self, model):
"""
Return iterator for model parameters. Remove frozen parameters if needed.
"""
for name, params in model.named_parameters():
if params.requires_grad or self.include_frozen:
yield name, params
def get_model_state_iterator(self, model):
param_iter = self._get_model_parameter_iterator(model)
if self.include_buffer:
param_iter = itertools.chain(param_iter, model.named_buffers())
return _remove_prefix(param_iter, self.prefix_to_remove)
def state_dict(self):
return self.state
def load_state_dict(self, state_dict, strict: bool = True):
self.clear()
for x, y in state_dict.items():
self.state[x] = y
return torch.nn.modules.module._IncompatibleKeys(
missing_keys=[], unexpected_keys=[]
)
def __repr__(self):
ret = f"EMAState(state=[{','.join(self.state.keys())}])"
return ret
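# NOTE: minimal usage sketch added for illustration (not part of the original module);
# `my_model` is a placeholder for any nn.Module.
def _example_ema_state_usage(my_model: torch.nn.Module) -> torch.nn.Module:
    # snapshot the current parameters/buffers onto CPU
    snapshot = EMAState.FromModel(my_model, device="cpu")
    # ... `my_model` may be trained / modified here ...
    # build a separate copy of the model with the snapshot weights applied
    ema_model = snapshot.get_ema_model(my_model)
    return ema_model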
class EMAUpdater(object):
"""Model Exponential Moving Average
Keep a moving average of everything in the model state_dict (parameters and
buffers). This is intended to allow functionality like
https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage
Note: It's very important to set EMA for ALL network parameters (instead of
parameters that require gradient), including batch-norm moving average mean
and variance. This leads to significant improvement in accuracy.
For example, for EfficientNetB3, with default setting (no mixup, lr exponential
decay) without bn_sync, the EMA accuracy with EMA on params that requires
gradient is 79.87%, while the corresponding accuracy with EMA on all params
is 80.61%.
Also, bn sync should be switched on for EMA.
"""
def __init__(
self,
state: EMAState,
decay: float = 0.999,
device: str = "",
use_lerp: bool = False,
decay_warm_up_factor: int = -1,
):
self.decay = decay
self.device = device
self.state = state
self.use_lerp = use_lerp
self.debug_lerp = False
self._num_updates: int = -1
self.decay_warm_up_factor = decay_warm_up_factor
if self.decay_warm_up_factor >= 0:
self._num_updates = 0
def init_state(self, model):
self.state.clear()
self.state.save_from(model, self.device)
def update(self, model):
# compute decay
decay = self.decay
if self._num_updates >= 0:
self._num_updates += 1
decay = min(
self.decay,
(1 + self._num_updates)
/ (self.decay_warm_up_factor + self._num_updates),
)
# update moving average
with torch.no_grad():
ema_param_list = []
param_list = []
for name, val in self.state.get_model_state_iterator(model):
ema_val = self.state.state[name]
if self.device:
val = val.to(self.device)
if val.dtype in [torch.float32, torch.float16]:
ema_param_list.append(ema_val)
param_list.append(val)
else:
ema_val.copy_(ema_val * decay + val * (1.0 - decay))
self._ema_avg(ema_param_list, param_list, decay)
def _ema_avg(
self,
averaged_model_parameters: List[torch.Tensor],
model_parameters: List[torch.Tensor],
decay: float,
) -> None:
"""
Function to perform exponential moving average:
x_avg = alpha * x_avg + (1-alpha)* x_t
"""
if self.use_lerp:
if self.debug_lerp:
orig_averaged_model_parameters = torch._foreach_mul(
averaged_model_parameters, decay
)
torch._foreach_add_(
orig_averaged_model_parameters, model_parameters, alpha=1 - decay
)
torch._foreach_lerp_(
averaged_model_parameters, model_parameters, 1.0 - decay
)
if self.debug_lerp:
for (orig_val, lerp_val) in zip(
orig_averaged_model_parameters, averaged_model_parameters
):
assert torch.allclose(orig_val, lerp_val, rtol=1e-4, atol=1e-3)
else:
torch._foreach_mul_(averaged_model_parameters, decay)
torch._foreach_add_(
averaged_model_parameters, model_parameters, alpha=1 - decay
)
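# NOTE: minimal sketch added for illustration (not part of the original module) of how
# EMAState/EMAUpdater are typically wired into a training loop; `model`, `optimizer` and
# `data_loader` are placeholders, and the model is assumed to return a dict of loss tensors.
def _example_ema_update_loop(model, optimizer, data_loader):
    ema_state = EMAState()
    updater = EMAUpdater(ema_state, decay=0.999)
    updater.init_state(model)  # copy the initial weights into the EMA state
    for batch in data_loader:
        losses = model(batch)  # assumed D2-style dict of losses in training mode
        loss = sum(losses.values())
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        updater.update(model)  # blend the new weights into the moving average
    return ema_state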
def add_model_ema_configs(_C):
_C.MODEL_EMA = type(_C)()
_C.MODEL_EMA.ENABLED = False
_C.MODEL_EMA.DECAY = 0.999
# Whether to include frozen parameters in EMA
_C.MODEL_EMA.INCLUDE_FROZEN = True
# Whether to include model buffers in EMA
_C.MODEL_EMA.INCLUDE_BUFFER = True
# use the same as MODEL.DEVICE when empty
_C.MODEL_EMA.DEVICE = ""
# When True, loading the ema weight to the model when eval_only=True in build_model()
_C.MODEL_EMA.USE_EMA_WEIGHTS_FOR_EVAL_ONLY = False
# Whether to use LERP to compute EMA
_C.MODEL_EMA.USE_LERP = False
# Whether to put EMA to the backward pass
_C.MODEL_EMA.AFTER_BACKWARD = False
# Whether to warmup the EMA update process
_C.MODEL_EMA.DECAY_WARM_UP_FACTOR = -1
def _remove_ddp(model):
from torch.nn.parallel import DistributedDataParallel
if isinstance(model, DistributedDataParallel):
return model.module
return model
def _remove_prefix(named_iterator: Iterator, prefix_to_remove: List[str]) -> Iterator:
"""
    Remove a list of prefixes from a named_module iterator
"""
for name, params in named_iterator:
for prefix in prefix_to_remove:
name = name.replace(prefix, "")
yield name, params
def may_build_model_ema(cfg, model):
if not cfg.MODEL_EMA.ENABLED:
return
model = _remove_ddp(model)
assert not hasattr(
model, "ema_state"
), "Name `ema_state` is reserved for model ema."
model.ema_state = EMAState(
include_frozen=cfg.MODEL_EMA.INCLUDE_FROZEN,
include_buffer=cfg.MODEL_EMA.INCLUDE_BUFFER,
)
logger.info("Using Model EMA.")
def may_get_ema_checkpointer(cfg, model):
if not cfg.MODEL_EMA.ENABLED:
return {}
model = _remove_ddp(model)
return {"ema_state": model.ema_state}
def get_model_ema_state(model):
"""Return the ema state stored in `model`"""
model = _remove_ddp(model)
assert hasattr(model, "ema_state")
ema = model.ema_state
return ema
def apply_model_ema(model, state=None, save_current=False):
"""Apply ema stored in `model` to model and returns a function to restore
the weights are applied
"""
model = _remove_ddp(model)
if state is None:
state = get_model_ema_state(model)
if save_current:
# save current model state
old_state = EMAState.FromModel(model, state.device)
state.apply_to(model)
if save_current:
return old_state
return None
@contextmanager
def apply_model_ema_and_restore(model, state=None):
"""Apply ema stored in `model` to model and returns a function to restore
the weights are applied
"""
model = _remove_ddp(model)
if state is None:
state = get_model_ema_state(model)
old_state = EMAState.FromModel(model, state.device)
state.apply_to(model)
yield old_state
old_state.apply_to(model)
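# NOTE: minimal sketch added for illustration (not part of the original module); assumes
# `may_build_model_ema` has been called so `model.ema_state` exists, and `evaluator` is
# a placeholder callable.
def _example_eval_with_ema_weights(model, evaluator):
    with apply_model_ema_and_restore(model):
        results = evaluator(model)  # the model temporarily carries the EMA weights here
    return results  # the original weights are restored once the context exits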
class EMAHook(HookBase):
def __init__(self, cfg, model):
model = _remove_ddp(model)
assert cfg.MODEL_EMA.ENABLED
assert hasattr(
model, "ema_state"
), "Call `may_build_model_ema` first to initilaize the model ema"
self.model = model
self.ema = self.model.ema_state
self.device = cfg.MODEL_EMA.DEVICE or cfg.MODEL.DEVICE
self.is_after_backward = cfg.MODEL_EMA.AFTER_BACKWARD
self.ema_updater = EMAUpdater(
self.model.ema_state,
decay=cfg.MODEL_EMA.DECAY,
device=self.device,
use_lerp=cfg.MODEL_EMA.USE_LERP,
decay_warm_up_factor=cfg.MODEL_EMA.DECAY_WARM_UP_FACTOR,
)
def before_train(self):
if self.ema.has_inited():
self.ema.to(self.device)
else:
self.ema_updater.init_state(self.model)
def after_train(self):
pass
def before_step(self):
pass
def after_backward(self):
if not self.is_after_backward:
return
self._update()
def after_step(self):
if self.is_after_backward:
return
self._update()
def _update(self):
        if not self.model.training:  # `training` is the nn.Module mode flag (`train` is a method)
return
self.ema_updater.update(self.model)
| d2go-main | d2go/modeling/ema.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# Populating registries
from d2go.modeling import ( # noqa
backbone as _backbone,
meta_arch as _meta_arch,
modeldef as _modeldef,
)
| d2go-main | d2go/modeling/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from dataclasses import dataclass
from typing import List
import torch
import torch.nn as nn
from d2go.config import CfgNode
from d2go.modeling import modeling_hook as mh
from d2go.registry.builtin import META_ARCH_REGISTRY
from d2go.utils.misc import _log_api_usage
from detectron2.modeling import META_ARCH_REGISTRY as D2_META_ARCH_REGISTRY
@dataclass
class D2GoModelBuildResult:
"""Class to store the output of build_d2go_model.
It stores the model, a key-value mapping of modeling hooks and can be further
extended with other fields, e.g. state_dict.
"""
# Stores model with applied modeling hooks.
    # If modeling hooks (e.g. EMA) are not enabled in the config,
    # the modeling hooks will be no-ops (i.e. return the original model).
model: nn.Module
modeling_hooks: List[mh.ModelingHook]
def build_meta_arch(cfg):
"""
Build the whole model architecture, defined by ``cfg.MODEL.META_ARCHITECTURE``.
Note that it does not load any weights from ``cfg``.
"""
# initialize the meta-arch and cast to the device
meta_arch = cfg.MODEL.META_ARCHITECTURE
# NOTE: during transition we also check if meta_arch is registered as D2 MetaArch
# TODO: remove this check after Sep 2022.
if meta_arch not in META_ARCH_REGISTRY and meta_arch in D2_META_ARCH_REGISTRY:
raise KeyError(
f"Can't find '{meta_arch}' in D2Go's META_ARCH_REGISTRY, although it is in"
f" D2's META_ARCH_REGISTRY, now D2Go uses its own registry, please register"
f" it in D2Go's META_ARCH_REGISTRY."
)
model = META_ARCH_REGISTRY.get(meta_arch)(cfg)
model.to(torch.device(cfg.MODEL.DEVICE))
_log_api_usage("modeling.meta_arch." + meta_arch)
return model
def build_d2go_model(
cfg: CfgNode,
) -> D2GoModelBuildResult:
model = build_meta_arch(cfg)
modeling_hooks: List[mh.ModelingHook] = []
# apply modeling hooks
# some custom projects bypass d2go's default config so may not have the
# MODELING_HOOKS key
if hasattr(cfg.MODEL, "MODELING_HOOKS"):
hook_names = cfg.MODEL.MODELING_HOOKS
model, modeling_hooks = mh.build_and_apply_modeling_hooks(
model, cfg, hook_names
)
return D2GoModelBuildResult(model=model, modeling_hooks=modeling_hooks)
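# NOTE: minimal usage sketch added for illustration (not part of the original module);
# assumes `cfg` is a D2Go CfgNode with MODEL.META_ARCHITECTURE, MODEL.DEVICE and
# (optionally) MODEL.MODELING_HOOKS set.
def _example_build_d2go_model(cfg: CfgNode):
    build_result = build_d2go_model(cfg)
    # model with modeling hooks (e.g. EMA) already applied
    model = build_result.model
    # the hooks themselves; empty if cfg.MODEL.MODELING_HOOKS is empty or missing
    modeling_hooks = build_result.modeling_hooks
    return model, modeling_hooks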
| d2go-main | d2go/modeling/api.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import logging
import re
import torch.nn as nn
from detectron2.layers import FrozenBatchNorm2d
logger = logging.getLogger(__name__)
def add_model_freezing_configs(_C):
_C.MODEL.FROZEN_LAYER_REG_EXP = []
def set_requires_grad(model, reg_exps, value):
total_num_parameters = 0
unmatched_parameters = []
unmatched_parameter_names = []
matched_parameters = []
matched_parameter_names = []
for name, parameter in model.named_parameters():
total_num_parameters += 1
matched = False
for frozen_layers_regex in reg_exps:
if re.match(frozen_layers_regex, name):
matched = True
parameter.requires_grad = value
matched_parameter_names.append(name)
matched_parameters.append(parameter)
break
if not matched:
unmatched_parameter_names.append(name)
unmatched_parameters.append(parameter)
logger.info(
"Matched layers (require_grad={}): {}".format(value, matched_parameter_names)
)
logger.info("Unmatched layers: {}".format(unmatched_parameter_names))
return matched_parameter_names, unmatched_parameter_names
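# NOTE: minimal usage sketch added for illustration (not part of the original module);
# the regular expression is a placeholder mirroring a MODEL.FROZEN_LAYER_REG_EXP entry.
def _example_freeze_backbone_params(model: nn.Module):
    # freeze every parameter whose fully-qualified name starts with "backbone."
    matched, unmatched = set_requires_grad(model, [r"backbone\."], value=False)
    return matched, unmatched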
def _freeze_matched_bn(module, name, reg_exps, matched_names, unmatched_names):
"""
Recursive function to freeze bn layers that match specified regular expressions.
"""
res = module
# Base case: current module is a leaf node
if len(list(module.children())) == 0:
if isinstance(module, nn.modules.batchnorm._BatchNorm):
matched = False
for frozen_layers_regex in reg_exps:
if re.match(frozen_layers_regex, name):
matched = True
matched_names.append(name)
# Convert to frozen batch norm
res = FrozenBatchNorm2d.convert_frozen_batchnorm(module)
if not matched:
unmatched_names.append(name)
return res
# Recursion: current module has children
for child_name, child in module.named_children():
_name = name + "." + child_name if name != "" else child_name
new_child = _freeze_matched_bn(
child, _name, reg_exps, matched_names, unmatched_names
)
if new_child is not child:
res.add_module(child_name, new_child)
return res
def freeze_matched_bn(module, reg_exps):
"""
Convert matching batchnorm layers in module into FrozenBatchNorm2d.
Args:
module: nn.Module
reg_exps: list of regular expressions to match
Returns:
If module is an instance of batchnorm and it matches the reg exps,
returns a new FrozenBatchNorm2d module.
Otherwise, in-place converts the matching batchnorm child modules to FrozenBatchNorm2d
and returns the main module.
"""
matched_names = []
unmatched_names = []
res = _freeze_matched_bn(module, "", reg_exps, matched_names, unmatched_names)
logger.info("Matched BN layers are frozen: {}".format(matched_names))
logger.info("Unmatched BN layers: {}".format(unmatched_names))
return res
| d2go-main | d2go/modeling/model_freezing_utils.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import d2go.data.transforms.crop as tfm_crop
import d2go.data.transforms.tensor as tfm_tensor
import detectron2.data.transforms as transforms
import torch
from detectron2.data.transforms.augmentation import AugmentationList
from torch import nn
class ImagePooler(nn.Module):
"""Get a subset of image
Returns the transforms that could be used to inverse the image/boxes/keypoints
as well.
Only available for inference. The code is not tracable/scriptable.
"""
def __init__(
self,
resize_type="resize_shortest",
resize_short=None,
resize_max=None,
box_scale_factor=1.0,
):
super().__init__()
assert resize_type in ["resize_shortest", "resize", "None", None]
resizer = None
if resize_type == "resize_shortest":
resizer = transforms.ResizeShortestEdge(resize_short, resize_max)
elif resize_type == "resize":
resizer = transforms.Resize(resize_short)
self.aug = [
tfm_tensor.Tensor2Array(),
tfm_crop.CropBoxAug(box_scale_factor=box_scale_factor),
*([resizer] if resizer else []),
tfm_tensor.Array2Tensor(),
]
def forward(self, x: torch.Tensor, box: torch.Tensor):
"""box: 1 x 4 tensor in XYXY format"""
assert not self.training
assert isinstance(x, torch.Tensor)
assert isinstance(box, torch.Tensor)
# box: 1 x 4 in xyxy format
inputs = tfm_tensor.AugInput(image=x.cpu(), boxes=box.cpu())
transforms = AugmentationList(self.aug)(inputs)
return (
inputs.image.to(x.device),
torch.Tensor(inputs.boxes).to(box.device),
transforms,
)
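# NOTE: minimal usage sketch added for illustration (not part of the original module);
# the image size, box and resize settings are placeholders, and the module must be in
# eval mode since ImagePooler is inference-only.
def _example_image_pooler():
    pooler = ImagePooler(resize_type="resize_shortest", resize_short=224, resize_max=448)
    pooler.eval()
    image = torch.rand(3, 480, 640)  # CHW float tensor
    box = torch.tensor([[100.0, 120.0, 300.0, 360.0]])  # 1 x 4, XYXY
    cropped_image, cropped_box, tfms = pooler(image, box)
    return cropped_image, cropped_box, tfms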
| d2go-main | d2go/modeling/image_pooler.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# This is the main definition of distillation components in D2Go. This
# includes:
# DistillationModelingHook => how we update the student model to obtain
# distillation methods and properties (e.g., override model.forward)
# DistillationAlgorithm => how we define what occurs during distillation
# (e.g., specific forward func, teacher weights updates)
# DistillationHelper => main class users should use to customize their
# distillation (e.g., define how to pseudo label inputs)
#
# We use two additional registries so that users can select their
# distillation algorithms in configs: DISTILLATION_ALGORITHM, DISTILLATION_HELPER
import logging
from abc import abstractmethod
from dataclasses import dataclass
from typing import Callable, Dict, Iterable, List, Optional, Set, Tuple, Union
import torch
import torch.nn as nn
from d2go.config import CfgNode as CN
from d2go.modeling import modeling_hook as mh
from d2go.registry.builtin import (
DISTILLATION_ALGORITHM_REGISTRY,
DISTILLATION_HELPER_REGISTRY,
MODELING_HOOK_REGISTRY,
)
from detectron2.utils.file_io import PathManager
from mobile_cv.common.misc.mixin import dynamic_mixin, remove_dynamic_mixin
logger = logging.getLogger(__name__)
ModelOutput = Union[None, torch.Tensor, Iterable["ModelOutput"]]
@dataclass
class LayerLossMetadata:
loss: nn.Module
name: str
layer0: str
layer1: str
class PseudoLabeler:
@abstractmethod
def label(self, x):
"""
We expect all pseudolabelers to implement a func called label which
will then be run on the input before passing the func output to the
model
This is typically something like running a teacher model on the input
to generate new ground truth which we can use to override the input
gt
"""
pass
class NoopPseudoLabeler(PseudoLabeler):
def label(self, x):
return x
class RelabelTargetInBatch(PseudoLabeler):
"""Run the teacher model on the batched inputs, replace targets.
We expect the batched_inputs to be a list of dicts:
batched_inputs = [
{"input": ..., "target": ...},
{"input": ..., "target": ...},
...
]
where there is a single label "target" that needs to be replaced
The teacher can take this batch of inputs directly and return a tensor
    of size nchw where n corresponds to the index of the input
We return updated batched_inputs with the new target
new_batched_inputs = [
{"input": ..., "target": teacher_output[0, :]},
{"input": ..., "target": teacher_output[1, :]},
...
]
Note that the output of the teacher is a tensor of NCHW while we assume
the target is CHW. Create a new pseudo_labeler if a different input
output is needed.
"""
def __init__(self, teacher: nn.Module):
"""Assume that a teacher is passed to the psuedolabeler
As an example in distillation, the distillaiton helper should create
or pass along a teacher to the psuedo labeler
"""
self.teacher = teacher
def label(self, batched_inputs: List) -> List:
batched_inputs = [
{"input": d["input"].to(self.teacher.device), "target": d["target"]}
for d in batched_inputs
]
with torch.no_grad():
batched_outputs = self.teacher(batched_inputs)
for i, input in enumerate(batched_inputs):
input["target"] = batched_outputs[i, :]
return batched_inputs
@DISTILLATION_HELPER_REGISTRY.register()
class BaseDistillationHelper:
"""Example of what distillation helper can provide
Users should inherit this class and replace any functions with whatever they
    need in order to customize their distillation given a specific distillation
algorithm (e.g., user wants to change the name of the label in the inputs).
The distillation helper is an object passed to the distillation algorithm so
any functionality in the helper can be accessed in the algorithm
"""
def __init__(self, cfg: CN, teacher: nn.Module):
self.cfg = cfg
self.teacher = teacher
def get_pseudo_labeler(self) -> PseudoLabeler:
"""
pseudo_labeler should update the labels in batched_inputs with teacher model
results
This dummy psuedo_labeler returns the batched_inputs without modification
"""
return NoopPseudoLabeler()
def get_teacher(self) -> nn.Module:
"""Return a teacher that can be run by the algorithm"""
return self.teacher
def get_layer_losses(
self, model: Optional[nn.Module] = None
) -> List[LayerLossMetadata]:
"""Return losses that are run on layers
Layer parameters may be dependent on model parameters so option to pass
in a model
"""
return []
def get_preprocess_student_input(self) -> Callable:
"""Return a function that allows user to modify the dataloader output
before passing to the student
The output of this function will be directly passed to the student model.
Example use cases include:
* dataloader returns a large image used by the teacher model but the
student model needs a lower resolution version
* dataloader returns both labeled and unlabeled data and the student
requires labeled data
"""
return lambda x: x
def get_preprocess_teacher_input(self) -> Callable:
"""Return a function that allows user to modify dataloader output before
passing to teacher
The output of this function will be directly passed to the teacher model.
"""
return lambda x: x
def get_combine_losses(self) -> Callable:
"""Return a function that takes as input a dictionary of losses and
modifies the loss as required
The default trainer sums the losses at the end so typically this
function is used to change the relative contribution of losses
Example:
def combine_losses(losses)
alpha = 0.1
losses["nll"] *= alpha
losses["kd_loss"] *= (1 - alpha)
return losses
student_losses = {"nll": ...}
student_losses.update({"kl_loss": ...})
losses = combine_losses(student_losses)
"""
return lambda x: x
def get_preprocess_domain0_input(self) -> Callable:
"""Return a function that allows user to modify the dataloader output
before passing to the model
The output of this function will be directly passed to the model.
Example use cases include:
* dataloader returns a dictionary of real and synthetic images. use
this function to return only the real data (domain0) to the model
"""
return lambda x: x
def get_preprocess_domain1_input(self) -> Callable:
"""Same as get_preprocess_domain0_input but returns domain1 inputs
Example:
* dataloader returns a dictionary of real and synthetic images. use
this function to return only synthetic data (domain1) to the model
"""
return lambda x: x
@DISTILLATION_HELPER_REGISTRY.register()
class ExampleDistillationHelper(BaseDistillationHelper):
"""
This is an example of a user customizing distillation.
We return a pseudo labeler that can be used with a specific project
where the training input is a list of dicts with a label called target
"""
def get_pseudo_labeler(self) -> PseudoLabeler:
return RelabelTargetInBatch(self.teacher)
class BaseDistillationAlgorithm(nn.Module):
"""
Base distillation algorithm
All distillation algorithms will be initialized with the same inputs including the
teacher model, distillation helper and student class. Require user to define forward
which overrides student model forward.
Note that the init is unused when we use mixin. We manually set these attributes in
the modeling hook. However we keep the init to make it clear what attributes the
class will contain.
"""
def dynamic_mixin_init(
self,
distillation_helper: BaseDistillationHelper,
):
# check if we might override user attrs with same name
# add any new distillation method attrs to this list
assert not hasattr(
self, "distillation_helper"
), "Distillation attempting to override attribute that already exists: distillation_helper"
self.distillation_helper = distillation_helper
def remove_dynamic_mixin(self):
del self.distillation_helper
@abstractmethod
def forward(self, *args, **kwargs):
"""User required to override forward to implement distillation"""
# must call super to ensure student forward is used when calling the
# super in the algorithm (i.e., DistillationAlgorithm.super().forward())
# this is because distillation algorithms inherit this base class so
# the MRO of the mixin class is something like:
# [DistillationAlgorithm, BaseDistillationAlgorithm, StudentModel]
# DistillationAlgorithm forward uses super().forward to call the
# student model but the BaseDistillationAlgorithm is the next class
# in the MRO so we make sure to call super on BaseDistillationAlgorithm
# so we can access the StudentModel forward.
return super().forward(*args, **kwargs)
@DISTILLATION_ALGORITHM_REGISTRY.register()
class LabelDistillation(BaseDistillationAlgorithm):
"""Basic distillation uses a teacher model to generate new labels used
by the student
We modify the forward to replace the input labels with teacher outputs when
the model is training and run the student at inference
"""
def dynamic_mixin_init(self, distillation_helper: BaseDistillationHelper):
"""Init pseudo labeler"""
super().dynamic_mixin_init(distillation_helper)
self.pseudo_labeler = self.distillation_helper.get_pseudo_labeler()
def remove_dynamic_mixin(self):
super().remove_dynamic_mixin()
del self.pseudo_labeler
def forward(self, batched_inputs: List):
"""If training, overrides input labels with teacher outputs
During inference, runs the student.
Note: The "student" model can be accessed by calling super(). In order
to run the student forward method, we call super().forward(input) as opposed
to super()(input) as super objects are not callable. We avoid calling
super().__call__(input) as this leads to infinite recursion. We can call
super().forward(input) without worrying about ignoring hooks as we should
be calling this model as model(input) which will then activate the hooks.
"""
if not self.training:
return super().forward(batched_inputs)
new_batched_inputs = self.pseudo_labeler.label(batched_inputs)
return super().forward(new_batched_inputs)
@DISTILLATION_ALGORITHM_REGISTRY.register()
class KnowledgeDistillation(BaseDistillationAlgorithm):
"""Knowledge distillation applies loss over the outputs of the student
and teacher models
"""
def dynamic_mixin_init(self, distillation_helper: BaseDistillationHelper):
"""Note all variables use _ to avoid name conflicts with existing
variable names in the model
Consider adding a check to avoid variable name reuse
"""
super().dynamic_mixin_init(distillation_helper)
self._teacher = WrappedTeacher(self.distillation_helper.get_teacher())
self._student_preprocess_input = (
self.distillation_helper.get_preprocess_student_input()
)
self._teacher_preprocess_input = (
self.distillation_helper.get_preprocess_teacher_input()
)
ll = self.distillation_helper.get_layer_losses(self)
self._layer_losses = register_layer_losses_and_to_device(ll, self)
self._student_cache = record_layers(
self, [ll.layer0 for ll in self._layer_losses]
)
self._teacher_cache = record_layers(
self._teacher.model, [ll.layer1 for ll in self._layer_losses]
)
self._combine_losses = self.distillation_helper.get_combine_losses()
def remove_dynamic_mixin(self):
super().remove_dynamic_mixin()
unrecord_layers(self, [ll.layer0 for ll in self._layer_losses])
unrecord_layers(self._teacher.model, [ll.layer1 for ll in self._layer_losses])
del self._teacher
del self._layer_losses
del self._student_cache
del self._teacher_cache
del self._student_preprocess_input
del self._teacher_preprocess_input
del self._combine_losses
def forward(self, batched_inputs: List):
"""Run teacher, then student and compute losses"""
student_input = self._student_preprocess_input(batched_inputs)
if not self.training:
return super().forward(student_input)
teacher_input = self._teacher_preprocess_input(batched_inputs)
with torch.no_grad():
self._teacher(teacher_input)
student_losses = super().forward(student_input)
distillation_losses = compute_layer_losses(
self._layer_losses, self._student_cache, self._teacher_cache
)
student_losses.update(distillation_losses)
losses = self._combine_losses(student_losses)
return losses
@DISTILLATION_ALGORITHM_REGISTRY.register()
class DomainAdaptation(BaseDistillationAlgorithm):
"""Domain adaptation applies loss over the inputs of domain0 and domain1"""
def dynamic_mixin_init(self, distillation_helper: BaseDistillationHelper):
super().dynamic_mixin_init(distillation_helper)
self._domain0_preprocess_input = (
self.distillation_helper.get_preprocess_domain0_input()
)
self._domain1_preprocess_input = (
self.distillation_helper.get_preprocess_domain1_input()
)
ll = self.distillation_helper.get_layer_losses(self)
self._layer_losses = register_layer_losses_and_to_device(ll, self)
# we ignore the cache dict returned by record_layers as we need to
# manually set the dict at every iteration in the forward
self._domain0_cache = {}
self._domain1_cache = {}
# since domain adaptation uses the same model in both domains, we
# only need to add CachedLayers once
record_layers(self, [ll.layer0 for ll in self._layer_losses])
self._combine_losses = self.distillation_helper.get_combine_losses()
def remove_dynamic_mixin(self):
super().remove_dynamic_mixin()
unrecord_layers(self, [ll.layer0 for ll in self._layer_losses])
del self._layer_losses
del self._domain0_cache
del self._domain1_cache
del self._domain0_preprocess_input
del self._domain1_preprocess_input
del self._combine_losses
def forward(self, batched_inputs: List):
"""Run domain0 input, domain1 input and compute losses"""
domain0_input = self._domain0_preprocess_input(batched_inputs)
if not self.training:
return super().forward(domain0_input)
# run domain0
set_cache_dict(self, self._domain0_cache)
domain0_losses = super().forward(domain0_input)
# run domain1
domain1_input = self._domain1_preprocess_input(batched_inputs)
set_cache_dict(self, self._domain1_cache)
domain1_losses = super().forward(domain1_input)
# calculate losses
domain_adaptation_losses = compute_layer_losses(
self._layer_losses, self._domain0_cache, self._domain1_cache
)
# combine losses
# note we currently assume that the loss combiner uses training iteration
losses = self._combine_losses(
domain0_losses,
domain1_losses,
domain_adaptation_losses,
getattr(self, "_training_iteration", -1),
)
return losses
@MODELING_HOOK_REGISTRY.register()
class DistillationModelingHook(mh.ModelingHook):
"""Wrapper hook that allows us to apply different distillation algorithms
based on config
This is meant to be used after creating a model:
def build_model(cfg):
model = d2_build_model(cfg)
distillation_modeling_hook = DistillationModelingHook(cfg)
d2go.modeling_hook.apply_modeling_hooks(model, distillation_modeling_hook)
    The returned model will then be updated with a forward func that corresponds
to the distillation method in the cfg as well as any new methods
"""
def __init__(self, cfg):
"""
Set the three major components
distillation_algorithm_class => the distillation algorithm to be used, we
only get the class as the apply() will mixin the class
distillation_helper => user customization of the algorithm
teacher => all distillation algorithms utilize an additional model to
modify inputs
"""
super().__init__(cfg)
self.teacher = _build_teacher(cfg)
self.distillation_algorithm_class = DISTILLATION_ALGORITHM_REGISTRY.get(
cfg.DISTILLATION.ALGORITHM
)
self.distillation_helper = DISTILLATION_HELPER_REGISTRY.get(
cfg.DISTILLATION.HELPER
)(cfg, self.teacher)
def apply(self, model: nn.Module) -> nn.Module:
"""Use dynamic mixin to apply the distillation class
As opposed to wrapping the model, dynamic mixin allows us to override the
model methods so that the model retains all existing attributes the user expects
        (e.g., if the user thinks there is an attr called model.my_attr then dynamic mixin
retains that property). This has the advantage over directly overriding the model
forward as we can still call the original model forward using super:
old_model: MyModel
new_model: MyDistillationClass = DistillationModelingHook(...).apply(old_model)
class MyDistillationClass:
def forward(self, ...):
# do some processing
...
super().forward(...) # call MyModel.forward
...
"""
logger.info("Applying distillation")
dynamic_mixin(
model,
self.distillation_algorithm_class,
init_dict={
"distillation_helper": self.distillation_helper,
},
)
return model
def unapply(self, model: nn.Module) -> nn.Module:
"""Remove distillation class using dynamic mixin with saved original class"""
remove_dynamic_mixin(model)
return model
def _build_teacher(cfg) -> nn.Module:
"""Create teacher using config settings
Supports torchscript or creating pytorch model using config.
"""
_validate_teacher_config(cfg)
if cfg.DISTILLATION.TEACHER.TYPE == "torchscript":
with PathManager.open(cfg.DISTILLATION.TEACHER.TORCHSCRIPT_FNAME, "rb") as f:
model = torch.jit.load(f)
elif cfg.DISTILLATION.TEACHER.TYPE == "config":
from d2go.runner import import_runner
from d2go.setup import create_cfg_from_cli
# teacher config may be set to cuda
# if user wants to run teacher on cpu only machine by specifying teacher.device,
# need to override device to cpu before building model
if cfg.DISTILLATION.TEACHER.DEVICE:
cfg.DISTILLATION.TEACHER.OVERWRITE_OPTS.extend(
["MODEL.DEVICE", cfg.DISTILLATION.TEACHER.DEVICE]
)
teacher_cfg = create_cfg_from_cli(
cfg.DISTILLATION.TEACHER.CONFIG_FNAME,
cfg.DISTILLATION.TEACHER.OVERWRITE_OPTS,
cfg.DISTILLATION.TEACHER.RUNNER_NAME,
)
runner = import_runner(cfg.DISTILLATION.TEACHER.RUNNER_NAME)()
model = runner.build_model(teacher_cfg, eval_only=True)
elif cfg.DISTILLATION.TEACHER.TYPE == "no_teacher":
model = nn.Identity()
else:
raise ValueError(f"Unexpected teacher type: {cfg.DISTILLATION.TEACHER.TYPE}")
# move teacher to same device as student unless specified
device = torch.device(cfg.DISTILLATION.TEACHER.DEVICE or cfg.MODEL.DEVICE)
model = _set_device(model, device)
model.eval()
return model
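# NOTE: minimal sketch added for illustration (not part of the original module) of the
# DISTILLATION.TEACHER fields consumed by `_build_teacher` above; it assumes the
# DISTILLATION config node already exists and the file name is a placeholder.
def _example_teacher_config(cfg: CN) -> CN:
    cfg.DISTILLATION.TEACHER.TYPE = "torchscript"  # "torchscript" | "config" | "no_teacher"
    cfg.DISTILLATION.TEACHER.TORCHSCRIPT_FNAME = "/path/to/teacher_model.jit"  # placeholder
    cfg.DISTILLATION.TEACHER.DEVICE = ""  # empty -> follow cfg.MODEL.DEVICE
    return cfg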
def _set_device(model: nn.Module, device: torch.device) -> nn.Module:
"""Set the device of the model
Some D2Go models have device as a property of the model (e.g., GeneralizedRCNN)
whereas others are missing this attribute which is assumed by distillation
to exist (e.g., we may call teacher.device to move inputs)
This helper function guarantees that the model.device attribute exists
and runs model.to(device)
"""
model = model.to(device)
if not hasattr(model, "device"):
model.device = device
return model
def _validate_teacher_config(cfg: CN) -> None:
"""We support torchscript or PyTorch checkpoint as teacher models
If torchscript, need:
* torchscript_filename
If config, needs:
* config_fname
Bypass allowed if setting teacher.type = "no_teacher". This can be
useful in cases where we only have the student model
(e.g., domain adaptation)
"""
if cfg.DISTILLATION.TEACHER.TYPE == "torchscript":
assert (
cfg.DISTILLATION.TEACHER.TORCHSCRIPT_FNAME
), "Trying to load torchscript model without fname"
elif cfg.DISTILLATION.TEACHER.TYPE == "config":
assert (
cfg.DISTILLATION.TEACHER.CONFIG_FNAME
), "Trying to load D2Go teacher model without config"
elif cfg.DISTILLATION.TEACHER.TYPE == "no_teacher":
pass
else:
raise ValueError(
f"Unrecognized DISTILLATION.TEACHER.TYPE: {cfg.DISTILLATION.TEACHER.TYPE}"
)
class CachedLayer(nn.Module):
"""Cached layer records the output of a layer
This is meant to be used with dynamic mixin. The layer overrides the forward
of the original layer such that the input and the output is the same but
the output of the layer is saved to a dict that can be retrieved later
"""
def dynamic_mixin_init(
self,
label: str,
cache: Dict[str, ModelOutput],
):
self.label = label
self.cache = cache
def remove_dynamic_mixin(self):
del self.label
del self.cache
def forward(self, *args, **kwargs):
"""Run the original layer and save the output
We clone the output to avoid the case where a subsequent module
runs an inplace operation. However, this limits what the cache
can support as we can only run clone on a tensor so we need to
check the type of the output.
Support of the output type is limited to None type and arbitrary nested
collections of List, Tuple and Dict of tensor.
"""
output = super().forward(*args, **kwargs)
self.cache[self.label] = CachedLayer._clone(output)
return output
@staticmethod
def _clone(output: ModelOutput) -> ModelOutput:
if output is None:
return None
elif isinstance(output, torch.Tensor):
return output.clone()
elif isinstance(output, List) or isinstance(output, Tuple):
cloned_output = []
for x in output:
cloned_output.append(CachedLayer._clone(x))
if isinstance(output, Tuple):
return tuple(cloned_output)
return cloned_output
elif isinstance(output, Dict):
cloned_output = {}
for k, v in output.items():
cloned_output[k] = CachedLayer._clone(v)
return cloned_output
else:
raise ValueError(f"Unexpected type to save: {type(output)}")
def set_cache_dict(model: nn.Module, cache: ModelOutput) -> None:
"""Sets the cache in all CachedLayers to input cache"""
for module in model.modules():
if isinstance(module, CachedLayer):
module.cache = cache
def record_layers(model: nn.Module, layer_names: Set[str]) -> ModelOutput:
"""Save the outputs of layer_names in model
Iterates over all named layers in model, applies cached layer to layers in
layer_names. Returns dict which is used by the cached layers.
"""
cache = {}
for name, module in model.named_modules():
if name in layer_names:
dynamic_mixin(
module,
CachedLayer,
init_dict={"label": name, "cache": cache},
)
return cache
def unrecord_layers(model: nn.Module, layer_names: Set[str]) -> None:
"""Remove cached layers based on the layer_names"""
for name, module in model.named_modules():
if name in layer_names:
remove_dynamic_mixin(module)
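# NOTE: minimal sketch added for illustration (not part of the original module);
# "backbone" is a placeholder layer name and must match an entry of model.named_modules().
def _example_record_intermediate_output(model: nn.Module, batched_inputs: List):
    cache = record_layers(model, {"backbone"})
    model(batched_inputs)  # CachedLayer stores a cloned copy of the layer output
    backbone_output = cache["backbone"]
    unrecord_layers(model, {"backbone"})  # restore the original layer classes
    return backbone_output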
def compute_layer_losses(
layer_losses: List[LayerLossMetadata],
layer0_cache: ModelOutput,
layer1_cache: ModelOutput,
) -> Dict[str, torch.Tensor]:
"""Compute loss over layers specified in layer_loss
layer0_cache and layer1_cache should contain the data required to compute
the losses specified in layer_loss
"""
losses = {}
for ll in layer_losses:
if ll.layer0 not in layer0_cache:
raise ValueError(f"Missing saved layer {ll.layer0} in layer0_cache")
if ll.layer1 not in layer1_cache:
raise ValueError(f"Missing saved layer {ll.layer1} in layer1_cache")
losses[ll.name] = ll.loss(layer0_cache[ll.layer0], layer1_cache[ll.layer1])
return losses
class WrappedTeacher:
"""Used to remove the teacher model from the student module list
See: DistillationMiscTests.test_teacher_outside_updated_parameters to get
more details on avoiding adding the teacher as a module
"""
def __init__(self, model: nn.Module):
self.model = model
def __call__(self, *args, **kwargs):
return self.model(*args, **kwargs)
def get_default_kd_image_classification_layer_losses() -> List[LayerLossMetadata]:
"""Return some typical values used in knowledge distillation
Assumes student model is ImageClassificationMetaArch and teacher model is the same
or a wrapped torchscript model with the same output layer name
"""
return [
LayerLossMetadata(
loss=nn.CrossEntropyLoss(),
name="kd",
layer0="classifier",
layer1="", # use empty layer name to indicate last layer
)
]
class DefaultLossCombiner:
"""Returns a weighted sum of the losses based on the name_weight
name_weight is a dictionary indicating the name of the loss and the
weight associated with that loss
Example:
name_weight = {"nll": 0.1, "kd": 0.9}
"""
def __init__(self, name_weight: Dict[str, float]):
self.name_weight = name_weight
def __call__(self, losses: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
output = {}
for k, v in losses.items():
if k not in self.name_weight:
raise ValueError(f"Unexpected weight in loss dict: {k}")
output[k] = v * self.name_weight[k]
return output
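# NOTE: minimal usage sketch added for illustration (not part of the original module);
# the loss names and weights are illustrative and must match the keys of the loss dict.
def _example_combine_losses(losses: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
    combiner = DefaultLossCombiner(name_weight={"nll": 0.1, "kd": 0.9})
    return combiner(losses)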
def register_layer_losses_and_to_device(
layer_losses: List[LayerLossMetadata], model: nn.Module
) -> List[LayerLossMetadata]:
"""Register loss modules in layerlossemtadata to model and move to device"""
registered_losses = []
for ll in layer_losses:
loss_on_device = ll.loss.to(model.device)
model.add_module(ll.name, loss_on_device)
registered_losses.append(
LayerLossMetadata(
loss_on_device,
ll.name,
ll.layer0,
ll.layer1,
)
)
return registered_losses
| d2go-main | d2go/modeling/distillation.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import functools
import inspect
import json
import logging
import math
from typing import Any, Optional, Tuple
import numpy as np
import torch
import torch.nn as nn
from d2go.config import CfgNode
from d2go.config.utils import flatten_config_dict
from d2go.export.api import PredictorExportConfig
from d2go.quantization.qconfig import set_backend_and_create_qconfig
from d2go.registry.builtin import META_ARCH_REGISTRY
from detectron2.modeling import (
GeneralizedRCNN as _GeneralizedRCNN,
ProposalNetwork as _ProposalNetwork,
)
from detectron2.modeling.backbone.fpn import FPN
from detectron2.modeling.postprocessing import detector_postprocess
from detectron2.projects.point_rend import PointRendMaskHead
from detectron2.structures import Boxes, Instances, Keypoints, PolygonMasks
from detectron2.utils.events import EventStorage
from detectron2.utils.registry import Registry
from mobile_cv.arch.utils import fuse_utils
from mobile_cv.arch.utils.quantize_utils import (
QuantWrapper,
wrap_non_quant_group_norm,
wrap_quant_subclass,
)
from mobile_cv.predictor.api import FuncInfo
from torch.ao.quantization.quantize_fx import convert_fx, prepare_fx, prepare_qat_fx
from torch.ao.quantization.utils import get_fqn_to_example_inputs
logger = logging.getLogger(__name__)
# NOTE: Customized heads are often used in the GeneralizedRCNN, this leads to the needs
# for also customizating export/quant APIs, therefore registries are provided for easy
# override without creating new meta-archs. For other less general meta-arch, this type
# of registries might be over-kill.
RCNN_PREPARE_FOR_EXPORT_REGISTRY = Registry("RCNN_PREPARE_FOR_EXPORT")
RCNN_PREPARE_FOR_QUANT_REGISTRY = Registry("RCNN_PREPARE_FOR_QUANT")
# Re-register D2's meta-arch in D2Go with updated APIs
@META_ARCH_REGISTRY.register()
class GeneralizedRCNN(_GeneralizedRCNN):
def prepare_for_export(self, cfg, *args, **kwargs):
func = RCNN_PREPARE_FOR_EXPORT_REGISTRY.get(cfg.RCNN_PREPARE_FOR_EXPORT)
return func(self, cfg, *args, **kwargs)
def prepare_for_quant(self, cfg, *args, **kwargs):
func = RCNN_PREPARE_FOR_QUANT_REGISTRY.get(cfg.RCNN_PREPARE_FOR_QUANT)
return func(self, cfg, *args, **kwargs)
def custom_prepare_fx(self, cfg, is_qat, example_input=None):
return default_rcnn_custom_prepare_fx(self, cfg, is_qat, example_input)
def _cast_model_to_device(self, device):
return _cast_detection_model(self, device)
# Re-register D2's meta-arch in D2Go with updated APIs
@META_ARCH_REGISTRY.register()
class ProposalNetwork(_ProposalNetwork):
pass
@RCNN_PREPARE_FOR_EXPORT_REGISTRY.register()
def default_rcnn_prepare_for_export(self, cfg, inputs, predictor_type):
pytorch_model = self
if (
"@c2_ops" in predictor_type
or "caffe2" in predictor_type
or "onnx" in predictor_type
):
from detectron2.export.caffe2_modeling import META_ARCH_CAFFE2_EXPORT_TYPE_MAP
C2MetaArch = META_ARCH_CAFFE2_EXPORT_TYPE_MAP[cfg.MODEL.META_ARCHITECTURE]
c2_compatible_model = C2MetaArch(cfg, pytorch_model)
preprocess_info = FuncInfo.gen_func_info(
D2Caffe2MetaArchPreprocessFunc,
params=D2Caffe2MetaArchPreprocessFunc.get_params(cfg, c2_compatible_model),
)
postprocess_info = FuncInfo.gen_func_info(
D2Caffe2MetaArchPostprocessFunc,
params=D2Caffe2MetaArchPostprocessFunc.get_params(cfg, c2_compatible_model),
)
preprocess_func = preprocess_info.instantiate()
model_export_kwargs = {}
if "torchscript" in predictor_type:
model_export_kwargs["force_disable_tracing_adapter"] = True
return PredictorExportConfig(
model=c2_compatible_model,
# Caffe2MetaArch takes a single tuple as input (which is the return of
# preprocess_func), data_generator requires all positional args as a tuple.
data_generator=lambda x: (preprocess_func(x),),
model_export_method=predictor_type.replace("@c2_ops", "", 1),
model_export_kwargs=model_export_kwargs,
preprocess_info=preprocess_info,
postprocess_info=postprocess_info,
)
else:
do_postprocess = cfg.RCNN_EXPORT.INCLUDE_POSTPROCESS
preprocess_info = FuncInfo.gen_func_info(
D2RCNNInferenceWrapper.Preprocess, params={}
)
preprocess_func = preprocess_info.instantiate()
return PredictorExportConfig(
model=D2RCNNInferenceWrapper(
pytorch_model,
do_postprocess=do_postprocess,
),
data_generator=lambda x: (preprocess_func(x),),
model_export_method=predictor_type,
preprocess_info=preprocess_info,
postprocess_info=FuncInfo.gen_func_info(
D2RCNNInferenceWrapper.Postprocess,
params={"detector_postprocess_done_in_model": do_postprocess},
),
)
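# NOTE: minimal usage sketch added for illustration (not part of the original module);
# `cfg` and `inputs` are placeholders and "torchscript" is just one possible predictor type.
def _example_rcnn_prepare_for_export(model: GeneralizedRCNN, cfg, inputs):
    # dispatches to the function registered under cfg.RCNN_PREPARE_FOR_EXPORT
    export_config = model.prepare_for_export(cfg, inputs, "torchscript")
    return export_config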
def _apply_eager_mode_quant(cfg, model):
if isinstance(model, GeneralizedRCNN):
"""Wrap each quantized part of the model to insert Quant and DeQuant in-place"""
# Wrap backbone and proposal_generator
if isinstance(model.backbone, FPN):
            # HACK: currently the quantization won't pick up D2's Conv2d, which is
            # used by D2's default FPN (same as FBNetV2FPN); this causes problems if we
            # wrap the entire backbone as a whole. The current solution is to only quantize
            # bottom_up and leave the other parts un-quantized. TODO (T109761730): However,
            # we need to re-visit this if using another (fbnet-based) FPN module, since the
            # new FPN module might be picked up by quantization.
model.backbone.bottom_up = wrap_quant_subclass(
model.backbone.bottom_up,
n_inputs=1,
n_outputs=len(model.backbone.bottom_up._out_features),
)
else:
model.backbone = wrap_quant_subclass(
model.backbone, n_inputs=1, n_outputs=len(model.backbone._out_features)
)
model.proposal_generator.rpn_head = wrap_quant_subclass(
model.proposal_generator.rpn_head,
n_inputs=len(cfg.MODEL.RPN.IN_FEATURES),
n_outputs=len(cfg.MODEL.RPN.IN_FEATURES) * 2,
)
# Wrap the roi_heads, box_pooler is not quantized
if hasattr(model.roi_heads, "box_head"):
model.roi_heads.box_head = wrap_quant_subclass(
model.roi_heads.box_head,
n_inputs=1,
n_outputs=1,
)
# for faster_rcnn_R_50_C4
if hasattr(model.roi_heads, "res5"):
model.roi_heads.res5 = wrap_quant_subclass(
model.roi_heads.res5,
n_inputs=1,
n_outputs=1,
)
model.roi_heads.box_predictor = wrap_quant_subclass(
model.roi_heads.box_predictor, n_inputs=1, n_outputs=2
)
# Optionally wrap keypoint and mask heads, pools are not quantized
if hasattr(model.roi_heads, "keypoint_head"):
model.roi_heads.keypoint_head = wrap_quant_subclass(
model.roi_heads.keypoint_head,
n_inputs=1,
n_outputs=1,
wrapped_method_name="layers",
)
if hasattr(model.roi_heads, "mask_head"):
model.roi_heads.mask_head = wrap_quant_subclass(
model.roi_heads.mask_head,
n_inputs=1,
n_outputs=1,
wrapped_method_name="layers",
)
# StandardROIHeadsWithSubClass uses a subclass head
if hasattr(model.roi_heads, "subclass_head"):
q_subclass_head = QuantWrapper(model.roi_heads.subclass_head)
model.roi_heads.subclass_head = q_subclass_head
else:
raise NotImplementedError(
"Eager mode for {} is not supported".format(type(model))
)
# TODO: wrap the normalizer and make it quantizable
# NOTE: GN is not quantizable, assuming all GN follows a quantized conv,
# wrap them with dequant-quant
model = wrap_non_quant_group_norm(model)
return model
def _lcm(x: Optional[int], y: Optional[int]) -> int:
if x is None or x == 0:
return y
if y is None or y == 0:
return x
return x * y // math.gcd(x, y)
def _get_example_rcnn_input(image_tensor_size: int):
def _get_batch():
# example input image
# TODO: do not hard-code channel size 3
image = torch.randn(3, image_tensor_size, image_tensor_size)
# example GT instances
num_instances = 2
gt_boxes = torch.tensor([[0.0, 0.0, 10.0, 10.0]] * num_instances)
gt_boxes = Boxes(gt_boxes)
gt_classes = torch.tensor([0] * num_instances)
polygon = np.array([0.0, 0.0, 10.0, 0.0, 10.0, 10.0]) # x1,y1,x2,y2,x3,y3
gt_masks = PolygonMasks([[polygon]] * num_instances)
        # TODO: make keypoints inside the box and set visibility
# TODO: do not hard-code num_keypoints 17
keypoints = torch.randn(num_instances, 17, 3)
gt_keypoints = Keypoints(keypoints)
        # NOTE: currently supports faster/mask/keypoint RCNN
instances = Instances(
image_size=(10, 10),
gt_boxes=gt_boxes,
gt_classes=gt_classes,
gt_masks=gt_masks,
gt_keypoints=gt_keypoints,
)
return {
# `file_name` and `image_id` are not used, can be any value.
"file_name": "fake_example_image.jpg",
"image_id": 42,
# `height` and `width` are used in post-processing to scale predictions back
# to original size, not used during training.
"height": 10,
"width": 10,
"image": image,
"instances": instances,
# NOTE: proposals are not supported
}
return [_get_batch(), _get_batch()]
def _set_qconfig(model, cfg, is_qat):
model.qconfig = set_backend_and_create_qconfig(cfg, is_train=is_qat)
# skip quantization for point rend head
if (
hasattr(model, "roi_heads")
and hasattr(model.roi_heads, "mask_head")
and isinstance(model.roi_heads.mask_head, PointRendMaskHead)
):
model.roi_heads.mask_head.qconfig = None
logger.info("Setup the model with qconfig:\n{}".format(model.qconfig))
@RCNN_PREPARE_FOR_QUANT_REGISTRY.register()
def default_rcnn_prepare_for_quant(self, cfg):
model = self
_set_qconfig(model, cfg, model.training)
# Modify the model for eager mode
model = _apply_eager_mode_quant(cfg, model)
model = fuse_utils.fuse_model(
model,
is_qat=cfg.QUANTIZATION.QAT.ENABLED,
inplace=True,
)
return model
def default_rcnn_custom_prepare_fx(self, cfg, is_qat, example_input=None):
model = self
_set_qconfig(model, cfg, is_qat)
# construct example input for FX when not provided
if example_input is None:
assert (
is_qat
), "Currently only (FX mode) QAT requires user-provided `example_input`"
# make sure the image size can be divided by all strides and size_divisibility
required_strides = [model.backbone.size_divisibility] + [
shape_spec.stride for shape_spec in model.backbone.output_shape().values()
]
image_tensor_size = functools.reduce(_lcm, required_strides)
example_input = _get_example_rcnn_input(image_tensor_size)
_fx_quant_prepare(model, cfg, is_qat, example_input)
def convert_fx_callback(model):
return default_rcnn_custom_convert_fx(model, cfg)
return model, convert_fx_callback
def _fx_quant_prepare(self, cfg, is_qat, example_input):
prep_fn = prepare_qat_fx if is_qat else prepare_fx
qconfig = {"": self.qconfig}
assert not isinstance(self.backbone, FPN), "FPN is not supported in FX mode"
    with EventStorage() as _:  # D2's rcnn requires EventStorage when computing losses
with torch.no_grad():
fqn_to_example_inputs = get_fqn_to_example_inputs(self, (example_input,))
self.backbone = prep_fn(
self.backbone,
qconfig,
fqn_to_example_inputs["backbone"],
prepare_custom_config={
"preserved_attributes": ["size_divisibility", "padding_constraints"],
# keep the output of backbone quantized, to avoid
# redundant dequant
# TODO: output of backbone is a dict and currently this will keep all output
# quantized, when we fix the implementation of "output_quantized_idxs"
# we'll need to change this
"output_quantized_idxs": [0],
},
)
self.proposal_generator.rpn_head.rpn_feature = prep_fn(
self.proposal_generator.rpn_head.rpn_feature,
qconfig,
fqn_to_example_inputs["proposal_generator.rpn_head.rpn_feature"],
prepare_custom_config={
# rpn_feature expecting quantized input, this is used to avoid redundant
# quant
"input_quantized_idxs": [0]
},
)
self.proposal_generator.rpn_head.rpn_regressor.cls_logits = prep_fn(
self.proposal_generator.rpn_head.rpn_regressor.cls_logits,
qconfig,
fqn_to_example_inputs["proposal_generator.rpn_head.rpn_regressor.cls_logits"],
)
self.proposal_generator.rpn_head.rpn_regressor.bbox_pred = prep_fn(
self.proposal_generator.rpn_head.rpn_regressor.bbox_pred,
qconfig,
fqn_to_example_inputs["proposal_generator.rpn_head.rpn_regressor.bbox_pred"],
)
self.roi_heads.box_head.roi_box_conv = prep_fn(
self.roi_heads.box_head.roi_box_conv,
qconfig,
fqn_to_example_inputs["roi_heads.box_head.roi_box_conv"],
prepare_custom_config={
"output_quantized_idxs": [0],
},
)
self.roi_heads.box_head.avgpool = prep_fn(
self.roi_heads.box_head.avgpool,
qconfig,
(torch.randn(1, 3, 224, 224),),
prepare_custom_config={
"input_quantized_idxs": [0],
"output_quantized_idxs": [0],
},
)
self.roi_heads.box_predictor.cls_score = prep_fn(
self.roi_heads.box_predictor.cls_score,
qconfig,
fqn_to_example_inputs["roi_heads.box_predictor.cls_score"],
prepare_custom_config={"input_quantized_idxs": [0]},
)
self.roi_heads.box_predictor.bbox_pred = prep_fn(
self.roi_heads.box_predictor.bbox_pred,
qconfig,
fqn_to_example_inputs["roi_heads.box_predictor.bbox_pred"],
prepare_custom_config={"input_quantized_idxs": [0]},
)
def default_rcnn_custom_convert_fx(self, cfg):
assert not isinstance(self.backbone, FPN), "FPN is not supported in FX mode"
self.backbone = convert_fx(
self.backbone,
convert_custom_config={
"preserved_attributes": ["size_divisibility", "padding_constraints"]
},
)
self.proposal_generator.rpn_head.rpn_feature = convert_fx(
self.proposal_generator.rpn_head.rpn_feature
)
self.proposal_generator.rpn_head.rpn_regressor.cls_logits = convert_fx(
self.proposal_generator.rpn_head.rpn_regressor.cls_logits
)
self.proposal_generator.rpn_head.rpn_regressor.bbox_pred = convert_fx(
self.proposal_generator.rpn_head.rpn_regressor.bbox_pred
)
self.roi_heads.box_head.roi_box_conv = convert_fx(
self.roi_heads.box_head.roi_box_conv
)
self.roi_heads.box_head.avgpool = convert_fx(self.roi_heads.box_head.avgpool)
self.roi_heads.box_predictor.cls_score = convert_fx(
self.roi_heads.box_predictor.cls_score
)
self.roi_heads.box_predictor.bbox_pred = convert_fx(
self.roi_heads.box_predictor.bbox_pred
)
return self
class D2Caffe2MetaArchPreprocessFunc(object):
def __init__(self, size_divisibility, device):
self.size_divisibility = size_divisibility
self.device = device
def __call__(self, inputs):
from detectron2.export.caffe2_modeling import (
convert_batched_inputs_to_c2_format,
)
data, im_info = convert_batched_inputs_to_c2_format(
inputs, self.size_divisibility, self.device
)
return (data, im_info)
@staticmethod
def get_params(cfg, model):
from caffe2.proto import caffe2_pb2
from detectron2.export.shared import get_pb_arg_vali, get_pb_arg_vals
fake_predict_net = caffe2_pb2.NetDef()
model.encode_additional_info(fake_predict_net, None)
size_divisibility = get_pb_arg_vali(fake_predict_net, "size_divisibility", 0)
device = get_pb_arg_vals(fake_predict_net, "device", b"cpu").decode("ascii")
return {
"size_divisibility": size_divisibility,
"device": device,
}
class D2Caffe2MetaArchPostprocessFunc(object):
def __init__(self, external_input, external_output, encoded_info):
self.external_input = external_input
self.external_output = external_output
self.encoded_info = encoded_info
def __call__(self, inputs, tensor_inputs, tensor_outputs):
from caffe2.proto import caffe2_pb2
from detectron2.export.caffe2_modeling import META_ARCH_CAFFE2_EXPORT_TYPE_MAP
from detectron2.export.shared import get_pb_arg_vals
encoded_info = self.encoded_info.encode("ascii")
fake_predict_net = caffe2_pb2.NetDef().FromString(encoded_info)
meta_architecture = get_pb_arg_vals(fake_predict_net, "meta_architecture", None)
meta_architecture = meta_architecture.decode("ascii")
model_class = META_ARCH_CAFFE2_EXPORT_TYPE_MAP[meta_architecture]
convert_outputs = model_class.get_outputs_converter(fake_predict_net, None)
c2_inputs = tensor_inputs
c2_results = dict(zip(self.external_output, tensor_outputs))
return convert_outputs(inputs, c2_inputs, c2_results)
@staticmethod
def get_params(cfg, model):
from caffe2.proto import caffe2_pb2
# NOTE: the post processing has different values for different meta
        # architectures; here we simply rely on the Caffe2 meta architecture to encode the
        # info into a NetDef and store it as a whole.
fake_predict_net = caffe2_pb2.NetDef()
model.encode_additional_info(fake_predict_net, None)
encoded_info = fake_predict_net.SerializeToString().decode("ascii")
# HACK: Caffe2MetaArch's post processing requires the blob name of model output,
        # this information is missing for torchscript. There's no easy way to know this
# unless using NamedTuple for tracing.
external_input = ["data", "im_info"]
if cfg.MODEL.META_ARCHITECTURE == "GeneralizedRCNN":
external_output = ["bbox_nms", "score_nms", "class_nms"]
if cfg.MODEL.MASK_ON:
external_output.extend(["mask_fcn_probs"])
if cfg.MODEL.KEYPOINT_ON:
if cfg.EXPORT_CAFFE2.USE_HEATMAP_MAX_KEYPOINT:
external_output.extend(["keypoints_out"])
else:
external_output.extend(["kps_score"])
else:
raise NotImplementedError("")
return {
"external_input": external_input,
"external_output": external_output,
"encoded_info": encoded_info,
}
class D2RCNNInferenceWrapper(nn.Module):
def __init__(
self,
model,
do_postprocess=False,
):
super().__init__()
self.model = model
self.do_postprocess = do_postprocess
def forward(self, image):
"""
        This function describes what happens during the tracing. Note that the output
        contains non-tensor values, therefore TracingAdaptedTorchscriptExport must be used in
order to convert the output back from flattened tensors.
"""
if self.do_postprocess:
inputs = [
{
"image": image,
# NOTE: the width/height is not available since the model takes a
# single image tensor as input. Therefore even though post-process
# is specified, the wrapped model doesn't resize the output to its
# original width/height.
# TODO: If this is needed, we might make the model take extra
# width/height info like the C2-style inputs.
}
]
return self.model.forward(inputs)[0]["instances"]
else:
inputs = [{"image": image}]
return self.model.inference(inputs, do_postprocess=False)[0]
@staticmethod
class Preprocess(object):
"""
        This function describes how to convert the original input (from the data loader)
to the inputs used during the tracing (i.e. the inputs of forward function).
"""
def __call__(self, batch):
assert len(batch) == 1, "only support single batch"
return batch[0]["image"]
class Postprocess(object):
def __init__(self, detector_postprocess_done_in_model=False):
"""
Args:
detector_postprocess_done_in_model (bool): whether `detector_postprocess`
has already applied in the D2RCNNInferenceWrapper
"""
self.detector_postprocess_done_in_model = detector_postprocess_done_in_model
def __call__(self, batch, inputs, outputs):
"""
This function describes how to run the predictor using exported model. Note
that `tracing_adapter_wrapper` runs the traced model under the hood and
behaves exactly the same as the forward function.
"""
assert len(batch) == 1, "only support single batch"
width, height = batch[0]["width"], batch[0]["height"]
if self.detector_postprocess_done_in_model:
image_shape = batch[0]["image"].shape # chw
if image_shape[1] != height or image_shape[2] != width:
raise NotImplementedError(
f"Image tensor (shape: {image_shape}) doesn't match the"
f" input width ({width}) height ({height}). Since post-process"
f" has been done inside the torchscript without width/height"
f" information, can't recover the post-processed output to "
f"orignail resolution."
)
return [{"instances": outputs}]
else:
r = detector_postprocess(outputs, height, width)
return [{"instances": r}]
# TODO: model.to(device) might not work for detection meta-arch; this function is the
# workaround. In general, we might need a meta-arch API for this if needed.
def _cast_detection_model(model, device):
# check model is an instance of one of the meta arch
from detectron2.export.caffe2_modeling import Caffe2MetaArch
if isinstance(model, Caffe2MetaArch):
model._wrapped_model = _cast_detection_model(model._wrapped_model, device)
return model
assert isinstance(model, tuple(META_ARCH_REGISTRY._obj_map.values()))
model.to(device)
# cast normalizer separately
if hasattr(model, "normalizer") and not (
hasattr(model, "pixel_mean") and hasattr(model, "pixel_std")
):
pixel_mean = inspect.getclosurevars(model.normalizer).nonlocals["pixel_mean"]
pixel_std = inspect.getclosurevars(model.normalizer).nonlocals["pixel_std"]
pixel_mean = pixel_mean.to(device)
pixel_std = pixel_std.to(device)
model.normalizer = lambda x: (x - pixel_mean) / pixel_std
return model
def _update_export_config_with_extra_files(export_config, extra_files):
export_config_dict = export_config._asdict()
if export_config_dict["model_export_kwargs"] is None:
export_config_dict["model_export_kwargs"] = {}
export_config_dict["model_export_kwargs"]["_extra_files"] = extra_files
return PredictorExportConfig(**export_config_dict)
@RCNN_PREPARE_FOR_EXPORT_REGISTRY.register()
def prepare_for_export_with_inference_config(
self, cfg: CfgNode, inputs: Optional[Tuple[Any]], predictor_type: str
) -> PredictorExportConfig:
"""
For certain tasks, the exported model needs to encode config as part of the extra
files.
"""
export_config = default_rcnn_prepare_for_export(self, cfg, inputs, predictor_type)
# Add "inference_config.json" for the _extra_files as part of model_export_kwargs
extra_files = {"inference_config.json": json.dumps(flatten_config_dict(cfg))}
return _update_export_config_with_extra_files(export_config, extra_files)
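# Hedged usage sketch (added comment, not part of the original file): the extra file
# attached above can be recovered from the exported torchscript bundle, e.g.
#
#   extra_files = {"inference_config.json": ""}
#   ts_model = torch.jit.load("model.jit", _extra_files=extra_files)
#   inference_cfg = json.loads(extra_files["inference_config.json"])
#
# The file name "model.jit" is an assumption for illustration; only the
# "inference_config.json" key is defined by the code above.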
| d2go-main | d2go/modeling/meta_arch/rcnn.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from d2go.registry.builtin import META_ARCH_REGISTRY
from detectron2.modeling import PanopticFPN as _PanopticFPN
# Re-register D2's meta-arch in D2Go with updated APIs
@META_ARCH_REGISTRY.register()
class PanopticFPN(_PanopticFPN):
def prepare_for_export(self, cfg, inputs, predictor_type):
raise NotImplementedError
| d2go-main | d2go/modeling/meta_arch/panoptic_fpn.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# Populating registries
from d2go.modeling.meta_arch import ( # noqa
fcos as _fcos,
panoptic_fpn as _panoptic_fpn,
rcnn as _rcnn,
retinanet as _retinanet,
semantic_seg as _semantic_seg,
)
# @fb-only: from d2go.modeling.meta_arch import fb as _fb # isort:skip # noqa
| d2go-main | d2go/modeling/meta_arch/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import logging
import torch.nn as nn
from d2go.config import CfgNode as CN
from d2go.export.api import PredictorExportConfig
from d2go.modeling.meta_arch.rcnn import D2RCNNInferenceWrapper
from d2go.quantization.qconfig import set_backend_and_create_qconfig
from d2go.registry.builtin import META_ARCH_REGISTRY
from detectron2.config import configurable
from detectron2.layers.batch_norm import CycleBatchNormList
from detectron2.modeling.backbone import build_backbone
from detectron2.modeling.backbone.fpn import FPN
from detectron2.modeling.meta_arch.fcos import FCOS as d2_FCOS, FCOSHead
from detectron2.utils.registry import Registry
from mobile_cv.arch.utils import fuse_utils
from mobile_cv.arch.utils.quantize_utils import (
wrap_non_quant_group_norm,
wrap_quant_subclass,
)
from mobile_cv.predictor.api import FuncInfo
logger = logging.getLogger(__name__)
# Registry to store custom export logic
FCOS_PREPARE_FOR_EXPORT_REGISTRY = Registry("FCOS_PREPARE_FOR_EXPORT")
class FCOSInferenceWrapper(nn.Module):
def __init__(
self,
model,
):
super().__init__()
self.model = model
def forward(self, image):
inputs = [{"image": image}]
return self.model.forward(inputs)[0]["instances"]
def add_fcos_configs(cfg):
cfg.MODEL.FCOS = CN()
# the number of foreground classes.
cfg.MODEL.FCOS.NUM_CLASSES = 80
cfg.MODEL.FCOS.IN_FEATURES = ["p3", "p4", "p5", "p6", "p7"]
cfg.MODEL.FCOS.NUM_CONVS = 4
cfg.MODEL.FCOS.HEAD_NORM = "GN"
# inference parameters
cfg.MODEL.FCOS.SCORE_THRESH_TEST = 0.04
cfg.MODEL.FCOS.TOPK_CANDIDATES_TEST = 1000
cfg.MODEL.FCOS.NMS_THRESH_TEST = 0.6
# Focal loss parameters
cfg.MODEL.FCOS.FOCAL_LOSS_ALPHA = 0.25
cfg.MODEL.FCOS.FOCAL_LOSS_GAMMA = 2.0
# Export method
cfg.FCOS_PREPARE_FOR_EXPORT = "default_fcos_prepare_for_export"
# Re-register D2's meta-arch in D2Go with updated APIs
@META_ARCH_REGISTRY.register()
class FCOS(d2_FCOS):
"""
Implement config->argument translation for FCOS model.
"""
@configurable
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@classmethod
def from_config(cls, cfg):
backbone = build_backbone(cfg)
backbone_shape = backbone.output_shape()
try:
feature_shapes = [backbone_shape[f] for f in cfg.MODEL.FCOS.IN_FEATURES]
except KeyError:
raise KeyError(
f"Available keys: {backbone_shape.keys()}. Requested keys: {cfg.MODEL.FCOS.IN_FEATURES}"
)
head = FCOSHead(
input_shape=feature_shapes,
num_classes=cfg.MODEL.FCOS.NUM_CLASSES,
conv_dims=[feature_shapes[0].channels] * cfg.MODEL.FCOS.NUM_CONVS,
norm=cfg.MODEL.FCOS.HEAD_NORM,
)
return {
"backbone": backbone,
"head": head,
"pixel_mean": cfg.MODEL.PIXEL_MEAN,
"pixel_std": cfg.MODEL.PIXEL_STD,
"num_classes": cfg.MODEL.FCOS.NUM_CLASSES,
"head_in_features": cfg.MODEL.FCOS.IN_FEATURES,
# Loss parameters:
"focal_loss_alpha": cfg.MODEL.FCOS.FOCAL_LOSS_ALPHA,
"focal_loss_gamma": cfg.MODEL.FCOS.FOCAL_LOSS_GAMMA,
# Inference parameters:
"test_score_thresh": cfg.MODEL.FCOS.SCORE_THRESH_TEST,
"test_topk_candidates": cfg.MODEL.FCOS.TOPK_CANDIDATES_TEST,
"test_nms_thresh": cfg.MODEL.FCOS.NMS_THRESH_TEST,
"max_detections_per_image": cfg.TEST.DETECTIONS_PER_IMAGE,
}
    # HACK: default FCOS export shares the same prepare functions w/ RCNN under certain constraints
def prepare_for_export(self, cfg, *args, **kwargs):
func = FCOS_PREPARE_FOR_EXPORT_REGISTRY.get(cfg.FCOS_PREPARE_FOR_EXPORT)
return func(self, cfg, *args, **kwargs)
def prepare_for_quant(self, cfg, *args, **kwargs):
"""Wrap each quantized part of the model to insert Quant and DeQuant in-place"""
model = self
qconfig = set_backend_and_create_qconfig(
cfg, is_train=cfg.QUANTIZATION.QAT.ENABLED
)
logger.info("Setup the model with qconfig:\n{}".format(qconfig))
model.backbone.qconfig = qconfig
model.head.qconfig = qconfig
# Wrap the backbone based on the architecture type
if isinstance(model.backbone, FPN):
# Same trick in RCNN's _apply_eager_mode_quant
model.backbone.bottom_up = wrap_quant_subclass(
model.backbone.bottom_up,
n_inputs=1,
n_outputs=len(model.backbone.bottom_up._out_features),
)
else:
model.backbone = wrap_quant_subclass(
model.backbone, n_inputs=1, n_outputs=len(model.backbone._out_features)
)
def unpack_cyclebatchnormlist(module):
            # HACK: This function flattens CycleBatchNormList for quantization purposes
if isinstance(module, CycleBatchNormList):
if len(module) > 1:
# TODO: add quantization support of CycleBatchNormList
raise NotImplementedError(
"CycleBatchNormList w/ more than one element cannot be quantized"
)
else:
num_channel = module.weight.size(0)
new_module = nn.BatchNorm2d(num_channel, affine=True)
new_module.weight = module.weight
new_module.bias = module.bias
new_module.running_mean = module[0].running_mean
new_module.running_var = module[0].running_var
module = new_module
else:
for name, child in module.named_children():
new_child = unpack_cyclebatchnormlist(child)
if new_child is not child:
module.add_module(name, new_child)
return module
model.head = unpack_cyclebatchnormlist(model.head)
# Wrap the FCOS head
model.head = wrap_quant_subclass(
model.head,
n_inputs=len(cfg.MODEL.FCOS.IN_FEATURES),
n_outputs=len(cfg.MODEL.FCOS.IN_FEATURES) * 3,
)
model = fuse_utils.fuse_model(
model,
is_qat=cfg.QUANTIZATION.QAT.ENABLED,
inplace=True,
)
model = wrap_non_quant_group_norm(model)
return model
@FCOS_PREPARE_FOR_EXPORT_REGISTRY.register()
def default_fcos_prepare_for_export(self, cfg, inputs, predictor_type):
pytorch_model = self
preprocess_info = FuncInfo.gen_func_info(
D2RCNNInferenceWrapper.Preprocess, params={}
)
preprocess_func = preprocess_info.instantiate()
return PredictorExportConfig(
model=FCOSInferenceWrapper(pytorch_model),
data_generator=lambda x: (preprocess_func(x),),
model_export_method=predictor_type,
preprocess_info=preprocess_info,
postprocess_info=FuncInfo.gen_func_info(
D2RCNNInferenceWrapper.Postprocess,
params={"detector_postprocess_done_in_model": True},
),
)
| d2go-main | d2go/modeling/meta_arch/fcos.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from d2go.registry.builtin import META_ARCH_REGISTRY
from detectron2.modeling import RetinaNet as _RetinaNet
# Re-register D2's meta-arch in D2Go with updated APIs
@META_ARCH_REGISTRY.register()
class RetinaNet(_RetinaNet):
def prepare_for_export(self, cfg, inputs, predictor_type):
raise NotImplementedError
| d2go-main | d2go/modeling/meta_arch/retinanet.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from typing import Any, Dict, List
import torch
import torch.nn as nn
from d2go.export.api import PredictorExportConfig
from d2go.registry.builtin import META_ARCH_REGISTRY
from detectron2.modeling import SemanticSegmentor as _SemanticSegmentor
from detectron2.modeling.postprocessing import sem_seg_postprocess
from detectron2.structures import ImageList
from mobile_cv.predictor.api import FuncInfo
# Re-register D2's meta-arch in D2Go with updated APIs
@META_ARCH_REGISTRY.register()
class SemanticSegmentor(_SemanticSegmentor):
def prepare_for_export(self, cfg, inputs, predictor_type):
preprocess_info = FuncInfo.gen_func_info(
PreprocessFunc,
params={
"size_divisibility": self.backbone.size_divisibility,
"device": str(self.device),
},
)
postprocess_info = FuncInfo.gen_func_info(
PostprocessFunc,
params={},
)
preprocess_func = preprocess_info.instantiate()
return PredictorExportConfig(
model=ModelWrapper(self),
data_generator=lambda x: (preprocess_func(x),),
preprocess_info=preprocess_info,
postprocess_info=postprocess_info,
)
class ModelWrapper(nn.Module):
def __init__(self, segmentor):
super().__init__()
self.segmentor = segmentor
def forward(self, x):
x = (x - self.segmentor.pixel_mean) / self.segmentor.pixel_std
features = self.segmentor.backbone(x)
results, losses = self.segmentor.sem_seg_head(features, targets=None)
return results
class PreprocessFunc(object):
"""
A common preprocessing module for semantic segmentation models.
"""
def __init__(self, size_divisibility, device):
self.size_divisibility = size_divisibility
self.device = device
def __call__(self, batched_inputs: List[Dict[str, Any]]) -> torch.Tensor:
"""
        Retrieve image tensors from dataloader batches.
Args:
batched_inputs: (List[Dict[str, Tensor]]): output from a
D2Go train or test data loader.
Returns:
input images (torch.Tensor): ImageList-wrapped NCHW tensor
(i.e. with padding and divisibility alignment) of batches' images.
"""
images = [x["image"].to(self.device) for x in batched_inputs]
images = ImageList.from_tensors(images, self.size_divisibility)
return images.tensor
class PostprocessFunc(object):
"""
A common postprocessing module for semantic segmentation models.
"""
def __call__(
self,
batched_inputs: List[Dict[str, Any]],
tensor_inputs: torch.Tensor,
tensor_outputs: torch.Tensor,
) -> List[Dict[str, Any]]:
"""
Rescales sem_seg logits to original image input resolution,
and packages the logits into D2Go's expected output format.
Args:
inputs (List[Dict[str, Tensor]]): batched inputs from the dataloader.
tensor_inputs (Tensor): tensorized inputs, e.g. from `PreprocessFunc`.
tensor_outputs (Tensor): sem seg logits tensor from the model to process.
Returns:
processed_results (List[Dict]): List of D2Go output dicts ready to be used
downstream in an Evaluator, for export, etc.
"""
results = tensor_outputs # nchw
processed_results = []
for result, input_per_image in zip(results, batched_inputs):
height = input_per_image.get("height")
width = input_per_image.get("width")
image_tensor_shape = input_per_image["image"].shape
image_size = (image_tensor_shape[1], image_tensor_shape[2])
# D2's sem_seg_postprocess rescales sem seg masks to the
# provided original input resolution.
r = sem_seg_postprocess(result, image_size, height, width)
processed_results.append({"sem_seg": r})
return processed_results
| d2go-main | d2go/modeling/meta_arch/semantic_seg.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import copy
from d2go.modeling.modeldef.fbnet_modeldef_registry import FBNetV2ModelArch
from d2go.registry.bootstrap import lazy_on_bootstrap
from mobile_cv.arch.fbnet_v2.modeldef_utils import _ex, e1, e1p, e2, e3, e4, e6
def _mutated_tuple(tp, pos, value):
tp_list = list(tp)
tp_list[pos] = value
return tuple(tp_list)
@lazy_on_bootstrap
def _repeat_last(stage, n=None):
"""
Repeat the last "layer" of given stage, i.e. a (op_type, c, s, n_repeat, ...)
tuple, reset n_repeat if specified otherwise kept the original value.
"""
assert isinstance(stage, list)
assert all(isinstance(x, tuple) for x in stage)
last_layer = copy.deepcopy(stage[-1])
if n is not None:
last_layer = _mutated_tuple(last_layer, 3, n)
return last_layer
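# Illustrative example (added comment, not part of the original file): with
#   stage = [("conv_k3", 32, 2, 1), ("ir_k3", 16, 1, 1, e1)]
# _repeat_last(stage) returns a copy of the last tuple, ("ir_k3", 16, 1, 1, e1), while
# _repeat_last(stage, n=2) resets the n_repeat field (index 3) to 2, yielding
# ("ir_k3", 16, 1, 2, e1). This is how the MODEL_ARCH_BUILTIN entries below derive an
# RPN head definition from the last trunk stage.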
_BASIC_ARGS = {
    # skip norm and activation for the depthwise conv in the IRF module; this makes
    # the model easier to quantize.
"dw_skip_bnrelu": True,
# uncomment below (always_pw and bias) to match model definition of the
# FBNetV1 builder.
# "always_pw": True,
# "bias": False,
# temporarily disable zero_last_bn_gamma
"zero_last_bn_gamma": False,
}
DEFAULT_STAGES = [
# NOTE: each stage is a list of (op_type, out_channels, stride, n_repeat, ...)
# resolution stage 0, equivalent to 224->112
[("conv_k3", 32, 2, 1), ("ir_k3", 16, 1, 1, e1)],
# resolution stage 1, equivalent to 112->56
[("ir_k3", 24, 2, 2, e6)],
# resolution stage 2, equivalent to 56->28
[("ir_k3", 32, 2, 3, e6)],
# resolution stage 3, equivalent to 28->14
[("ir_k3", 64, 2, 4, e6), ("ir_k3", 96, 1, 3, e6)],
# resolution stage 4, equivalent to 14->7
[("ir_k3", 160, 2, 3, e6), ("ir_k3", 320, 1, 1, e6)],
# final stage, equivalent to 7->1, ignored
]
IRF_CFG = {"less_se_channels": False}
FBNetV3_A_dsmask = [
[("conv_k3", 16, 2, 1), ("ir_k3", 16, 1, 1, {"expansion": 1}, IRF_CFG)],
[
("ir_k5", 32, 2, 1, {"expansion": 4}, IRF_CFG),
("ir_k5", 32, 1, 1, {"expansion": 2}, IRF_CFG),
],
[
("ir_k5", 40, 2, 1, {"expansion": 4}, IRF_CFG),
("ir_k3", 40, 1, 3, {"expansion": 3}, IRF_CFG),
],
[
("ir_k5", 72, 2, 1, {"expansion": 4}, IRF_CFG),
("ir_k3", 72, 1, 3, {"expansion": 3}, IRF_CFG),
("ir_k5", 112, 1, 1, {"expansion": 4}, IRF_CFG),
("ir_k5", 112, 1, 3, {"expansion": 4}, IRF_CFG),
],
[
("ir_k5", 184, 2, 1, {"expansion": 4}, IRF_CFG),
("ir_k3", 184, 1, 4, {"expansion": 4}, IRF_CFG),
("ir_k5", 200, 1, 1, {"expansion": 6}, IRF_CFG),
],
]
FBNetV3_A_dsmask_tiny = [
[("conv_k3", 8, 2, 1), ("ir_k3", 8, 1, 1, {"expansion": 1}, IRF_CFG)],
[
("ir_k5", 16, 2, 1, {"expansion": 3}, IRF_CFG),
("ir_k5", 16, 1, 1, {"expansion": 2}, IRF_CFG),
],
[
("ir_k5", 24, 2, 1, {"expansion": 4}, IRF_CFG),
("ir_k3", 24, 1, 2, {"expansion": 3}, IRF_CFG),
],
[
("ir_k5", 40, 2, 1, {"expansion": 4}, IRF_CFG),
("ir_k3", 40, 1, 2, {"expansion": 3}, IRF_CFG),
("ir_k5", 64, 1, 1, {"expansion": 4}, IRF_CFG),
("ir_k5", 64, 1, 2, {"expansion": 3}, IRF_CFG),
],
[
("ir_k5", 92, 2, 1, {"expansion": 4}, IRF_CFG),
("ir_k3", 92, 1, 2, {"expansion": 4}, IRF_CFG),
("ir_k5", 92, 1, 1, {"expansion": 6}, IRF_CFG),
],
]
FBNetV3_A = [
# FBNetV3 arch without hs
[("conv_k3", 16, 2, 1), ("ir_k3", 16, 1, 2, {"expansion": 1}, IRF_CFG)],
[
("ir_k5", 24, 2, 1, {"expansion": 4}, IRF_CFG),
("ir_k5", 24, 1, 3, {"expansion": 3}, IRF_CFG),
],
[
("ir_k5_se", 32, 2, 1, {"expansion": 4}, IRF_CFG),
("ir_k3_se", 32, 1, 3, {"expansion": 3}, IRF_CFG),
],
[
("ir_k5", 64, 2, 1, {"expansion": 4}, IRF_CFG),
("ir_k3", 64, 1, 3, {"expansion": 3}, IRF_CFG),
("ir_k5_se", 112, 1, 1, {"expansion": 4}, IRF_CFG),
("ir_k5_se", 112, 1, 5, {"expansion": 3}, IRF_CFG),
],
[
("ir_k5_se", 184, 2, 1, {"expansion": 4}, IRF_CFG),
("ir_k3_se", 184, 1, 4, {"expansion": 4}, IRF_CFG),
("ir_k5_se", 200, 1, 1, {"expansion": 6}, IRF_CFG),
],
]
FBNetV3_B = [
[("conv_k3", 16, 2, 1), ("ir_k3", 16, 1, 2, {"expansion": 1}, IRF_CFG)],
[
("ir_k5", 24, 2, 1, {"expansion": 4}, IRF_CFG),
("ir_k5", 24, 1, 3, {"expansion": 2}, IRF_CFG),
],
[
("ir_k5_se", 40, 2, 1, {"expansion": 5}, IRF_CFG),
("ir_k5_se", 40, 1, 4, {"expansion": 3}, IRF_CFG),
],
[
("ir_k5", 72, 2, 1, {"expansion": 5}, IRF_CFG),
("ir_k3", 72, 1, 4, {"expansion": 3}, IRF_CFG),
("ir_k3_se", 120, 1, 1, {"expansion": 5}, IRF_CFG),
("ir_k5_se", 120, 1, 5, {"expansion": 3}, IRF_CFG),
],
[
("ir_k3_se", 184, 2, 1, {"expansion": 6}, IRF_CFG),
("ir_k5_se", 184, 1, 5, {"expansion": 4}, IRF_CFG),
("ir_k5_se", 224, 1, 1, {"expansion": 6}, IRF_CFG),
],
]
FBNetV3_C = [
[("conv_k3", 16, 2, 1), ("ir_k3", 16, 1, 2, {"expansion": 1}, IRF_CFG)],
[
("ir_k5", 24, 2, 1, {"expansion": 5}, IRF_CFG),
("ir_k3", 24, 1, 4, {"expansion": 3}, IRF_CFG),
],
[
("ir_k5_se", 48, 2, 1, {"expansion": 5}, IRF_CFG),
("ir_k5_se", 48, 1, 4, {"expansion": 2}, IRF_CFG),
],
[
("ir_k5", 88, 2, 1, {"expansion": 4}, IRF_CFG),
("ir_k3", 88, 1, 4, {"expansion": 3}, IRF_CFG),
("ir_k3_se", 120, 1, 1, {"expansion": 4}, IRF_CFG),
("ir_k5_se", 120, 1, 5, {"expansion": 3}, IRF_CFG),
],
[
("ir_k5_se", 216, 2, 1, {"expansion": 5}, IRF_CFG),
("ir_k5_se", 216, 1, 5, {"expansion": 5}, IRF_CFG),
("ir_k5_se", 216, 1, 1, {"expansion": 6}, IRF_CFG),
],
]
FBNetV3_D = [
[("conv_k3", 24, 2, 1), ("ir_k3", 16, 1, 2, {"expansion": 1}, IRF_CFG)],
[
("ir_k3", 24, 2, 1, {"expansion": 5}, IRF_CFG),
("ir_k3", 24, 1, 5, {"expansion": 2}, IRF_CFG),
],
[
("ir_k5_se", 40, 2, 1, {"expansion": 4}, IRF_CFG),
("ir_k3_se", 40, 1, 4, {"expansion": 3}, IRF_CFG),
],
[
("ir_k3", 72, 2, 1, {"expansion": 5}, IRF_CFG),
("ir_k3", 72, 1, 4, {"expansion": 3}, IRF_CFG),
("ir_k3_se", 128, 1, 1, {"expansion": 5}, IRF_CFG),
("ir_k5_se", 128, 1, 6, {"expansion": 3}, IRF_CFG),
],
[
("ir_k3_se", 208, 2, 1, {"expansion": 6}, IRF_CFG),
("ir_k5_se", 208, 1, 5, {"expansion": 5}, IRF_CFG),
("ir_k5_se", 240, 1, 1, {"expansion": 6}, IRF_CFG),
],
]
FBNetV3_E = [
[("conv_k3", 24, 2, 1), ("ir_k3", 16, 1, 3, {"expansion": 1}, IRF_CFG)],
[
("ir_k5", 24, 2, 1, {"expansion": 4}, IRF_CFG),
("ir_k5", 24, 1, 4, {"expansion": 2}, IRF_CFG),
],
[
("ir_k5_se", 48, 2, 1, {"expansion": 4}, IRF_CFG),
("ir_k5_se", 48, 1, 4, {"expansion": 3}, IRF_CFG),
],
[
("ir_k5", 80, 2, 1, {"expansion": 5}, IRF_CFG),
("ir_k3", 80, 1, 4, {"expansion": 3}, IRF_CFG),
("ir_k3_se", 128, 1, 1, {"expansion": 5}, IRF_CFG),
("ir_k5_se", 128, 1, 7, {"expansion": 3}, IRF_CFG),
],
[
("ir_k3_se", 216, 2, 1, {"expansion": 6}, IRF_CFG),
("ir_k5_se", 216, 1, 5, {"expansion": 5}, IRF_CFG),
("ir_k5_se", 240, 1, 1, {"expansion": 6}, IRF_CFG),
],
]
FBNetV3_F = [
[("conv_k3", 24, 2, 1), ("ir_k3", 24, 1, 3, {"expansion": 1}, IRF_CFG)],
[
("ir_k5", 32, 2, 1, {"expansion": 4}, IRF_CFG),
("ir_k5", 32, 1, 4, {"expansion": 2}, IRF_CFG),
],
[
("ir_k5_se", 56, 2, 1, {"expansion": 4}, IRF_CFG),
("ir_k5_se", 56, 1, 4, {"expansion": 3}, IRF_CFG),
],
[
("ir_k5", 88, 2, 1, {"expansion": 5}, IRF_CFG),
("ir_k3", 88, 1, 4, {"expansion": 3}, IRF_CFG),
("ir_k3_se", 144, 1, 1, {"expansion": 5}, IRF_CFG),
("ir_k5_se", 144, 1, 8, {"expansion": 3}, IRF_CFG),
],
[
("ir_k3_se", 248, 2, 1, {"expansion": 6}, IRF_CFG),
("ir_k5_se", 248, 1, 6, {"expansion": 5}, IRF_CFG),
("ir_k5_se", 272, 1, 1, {"expansion": 6}, IRF_CFG),
],
]
FBNetV3_G = [
[("conv_k3", 32, 2, 1), ("ir_k3", 24, 1, 3, {"expansion": 1}, IRF_CFG)],
[
("ir_k5", 40, 2, 1, {"expansion": 4}, IRF_CFG),
("ir_k5", 40, 1, 4, {"expansion": 2}, IRF_CFG),
],
[
("ir_k5_se", 56, 2, 1, {"expansion": 4}, IRF_CFG),
("ir_k5_se", 56, 1, 4, {"expansion": 3}, IRF_CFG),
],
[
("ir_k5", 104, 2, 1, {"expansion": 5}, IRF_CFG),
("ir_k3", 104, 1, 4, {"expansion": 3}, IRF_CFG),
("ir_k3_se", 160, 1, 1, {"expansion": 5}, IRF_CFG),
("ir_k5_se", 160, 1, 8, {"expansion": 3}, IRF_CFG),
],
[
("ir_k3_se", 264, 2, 1, {"expansion": 6}, IRF_CFG),
("ir_k5_se", 264, 1, 6, {"expansion": 5}, IRF_CFG),
("ir_k5_se", 288, 1, 2, {"expansion": 6}, IRF_CFG),
],
]
FBNetV3_H = [
[("conv_k3", 48, 2, 1), ("ir_k3", 32, 1, 4, {"expansion": 1}, IRF_CFG)],
[
("ir_k5", 64, 2, 1, {"expansion": 4}, IRF_CFG),
("ir_k5", 64, 1, 6, {"expansion": 2}, IRF_CFG),
],
[
("ir_k5_se", 80, 2, 1, {"expansion": 4}, IRF_CFG),
("ir_k5_se", 80, 1, 6, {"expansion": 3}, IRF_CFG),
],
[
("ir_k5", 160, 2, 1, {"expansion": 5}, IRF_CFG),
("ir_k3", 160, 1, 6, {"expansion": 3}, IRF_CFG),
("ir_k3_se", 240, 1, 1, {"expansion": 5}, IRF_CFG),
("ir_k5_se", 240, 1, 12, {"expansion": 3}, IRF_CFG),
],
[
("ir_k3_se", 400, 2, 1, {"expansion": 6}, IRF_CFG),
("ir_k5_se", 400, 1, 8, {"expansion": 5}, IRF_CFG),
("ir_k5_se", 480, 1, 3, {"expansion": 6}, IRF_CFG),
],
]
FBNetV3_A_no_se = [
# FBNetV3 without hs and SE (SE is not quantization friendly)
[("conv_k3", 16, 2, 1), ("ir_k3", 16, 1, 2, {"expansion": 1}, IRF_CFG)],
[
("ir_k5", 24, 2, 1, {"expansion": 4}, IRF_CFG),
("ir_k5", 24, 1, 3, {"expansion": 3}, IRF_CFG),
],
[
("ir_k5", 32, 2, 1, {"expansion": 4}, IRF_CFG),
("ir_k3", 32, 1, 3, {"expansion": 3}, IRF_CFG),
],
[
("ir_k5", 64, 2, 1, {"expansion": 4}, IRF_CFG),
("ir_k3", 64, 1, 3, {"expansion": 3}, IRF_CFG),
("ir_k5", 112, 1, 1, {"expansion": 4}, IRF_CFG),
("ir_k5", 112, 1, 5, {"expansion": 3}, IRF_CFG),
],
[
("ir_k5", 184, 2, 1, {"expansion": 4}, IRF_CFG),
("ir_k3", 184, 1, 4, {"expansion": 4}, IRF_CFG),
("ir_k5", 200, 1, 1, {"expansion": 6}, IRF_CFG),
],
]
FBNetV3_B_no_se = [
[("conv_k3", 16, 2, 1), ("ir_k3", 16, 1, 2, {"expansion": 1}, IRF_CFG)],
[
("ir_k5", 24, 2, 1, {"expansion": 4}, IRF_CFG),
("ir_k5", 24, 1, 3, {"expansion": 2}, IRF_CFG),
],
[
("ir_k5", 40, 2, 1, {"expansion": 5}, IRF_CFG),
("ir_k5", 40, 1, 4, {"expansion": 3}, IRF_CFG),
],
[
("ir_k5", 72, 2, 1, {"expansion": 5}, IRF_CFG),
("ir_k3", 72, 1, 4, {"expansion": 3}, IRF_CFG),
("ir_k3", 120, 1, 1, {"expansion": 5}, IRF_CFG),
("ir_k5", 120, 1, 5, {"expansion": 3}, IRF_CFG),
],
[
("ir_k3", 184, 2, 1, {"expansion": 6}, IRF_CFG),
("ir_k5", 184, 1, 5, {"expansion": 4}, IRF_CFG),
("ir_k5", 224, 1, 1, {"expansion": 6}, IRF_CFG),
],
]
# FBNetV3_B model, a lighter version for real-time inference
FBNetV3_B_light_no_se = [
[("conv_k3", 16, 2, 1), ("ir_k3", 16, 1, 2, {"expansion": 1}, IRF_CFG)],
[
("ir_k5", 24, 2, 1, {"expansion": 4}, IRF_CFG),
("ir_k5", 24, 1, 2, {"expansion": 2}, IRF_CFG),
],
[
("ir_k5", 40, 2, 1, {"expansion": 5}, IRF_CFG),
("ir_k5", 40, 1, 3, {"expansion": 3}, IRF_CFG),
],
[
("ir_k5", 72, 2, 1, {"expansion": 5}, IRF_CFG),
("ir_k3", 72, 1, 4, {"expansion": 3}, IRF_CFG),
("ir_k3", 120, 1, 1, {"expansion": 5}, IRF_CFG),
("ir_k5", 120, 1, 5, {"expansion": 3}, IRF_CFG),
],
[
("ir_k3", 184, 2, 1, {"expansion": 6}, IRF_CFG),
("ir_k5", 184, 1, 5, {"expansion": 4}, IRF_CFG),
("ir_k5", 224, 1, 1, {"expansion": 6}, IRF_CFG),
],
]
LARGE_BOX_HEAD_STAGES = [
[("ir_k3", 160, 2, 1, e4), ("ir_k3", 160, 1, 2, e6), ("ir_k3", 240, 1, 1, e6)],
]
SMALL_BOX_HEAD_STAGES = [
[("ir_k3", 128, 2, 1, e4), ("ir_k3", 128, 1, 2, e6), ("ir_k3", 160, 1, 1, e6)],
]
TINY_BOX_HEAD_STAGES = [
[("ir_k3", 64, 2, 1, e4), ("ir_k3", 64, 1, 2, e4), ("ir_k3", 80, 1, 1, e4)],
]
LARGE_UPSAMPLE_HEAD_STAGES = [
[("ir_k3", 160, 1, 1, e4), ("ir_k3", 160, 1, 3, e6), ("ir_k3", 80, -2, 1, e3)],
]
LARGE_UPSAMPLE_HEAD_D21_STAGES = [
[("ir_k3", 192, 1, 1, e4), ("ir_k3", 192, 1, 5, e3), ("ir_k3", 96, -2, 1, e3)],
]
SMALL_UPSAMPLE_HEAD_STAGES = [
[("ir_k3", 128, 1, 1, e4), ("ir_k3", 128, 1, 3, e6), ("ir_k3", 64, -2, 1, e3)],
]
# NOTE: Compared with SMALL_UPSAMPLE_HEAD_STAGES, this does one more down-sample
# in the first "layer" and then up-samples twice.
SMALL_DS_UPSAMPLE_HEAD_STAGES = [
[
("ir_k3", 128, 2, 1, e4),
("ir_k3", 128, 1, 2, e6),
("ir_k3", 128, -2, 1, e6),
("ir_k3", 64, -2, 1, e3),
], # noqa
]
TINY_DS_UPSAMPLE_HEAD_STAGES = [
[
("ir_k3", 64, 2, 1, e4),
("ir_k3", 64, 1, 2, e4),
("ir_k3", 64, -2, 1, e4),
("ir_k3", 40, -2, 1, e3),
], # noqa
]
FPN_UPSAMPLE_HEAD_STAGES = [
[("ir_k3", 96, 1, 1, e6), ("ir_k3", 160, 1, 3, e6), ("ir_k3", 80, -2, 1, e3)],
]
MODEL_ARCH_BUILTIN = {
"default": {
"trunk": DEFAULT_STAGES[0:4],
"rpn": [[_repeat_last(DEFAULT_STAGES[3])]],
"bbox": LARGE_BOX_HEAD_STAGES,
"mask": LARGE_UPSAMPLE_HEAD_STAGES,
"kpts": LARGE_UPSAMPLE_HEAD_STAGES,
"basic_args": _BASIC_ARGS,
},
"default_dsmask": {
"trunk": DEFAULT_STAGES[0:4],
"rpn": [[_repeat_last(DEFAULT_STAGES[3])]],
"bbox": SMALL_BOX_HEAD_STAGES,
"mask": SMALL_DS_UPSAMPLE_HEAD_STAGES,
"kpts": SMALL_DS_UPSAMPLE_HEAD_STAGES,
"basic_args": _BASIC_ARGS,
},
"FBNetV3_A": {
"trunk": FBNetV3_A[0:4],
"rpn": [[_repeat_last(FBNetV3_A[3])]],
"bbox": [FBNetV3_A[4]],
"mask": SMALL_DS_UPSAMPLE_HEAD_STAGES,
"basic_args": _BASIC_ARGS,
},
"FBNetV3_A_C5": {
"trunk": FBNetV3_A[0:5],
"rpn": [[_repeat_last(FBNetV3_A[3])]],
"bbox": [FBNetV3_A[4]],
"mask": SMALL_DS_UPSAMPLE_HEAD_STAGES,
"basic_args": _BASIC_ARGS,
},
"FBNetV3_B": {
"trunk": FBNetV3_B[0:4],
"rpn": [[_repeat_last(FBNetV3_B[3])]],
"bbox": [FBNetV3_B[4]],
"basic_args": _BASIC_ARGS,
},
"FBNetV3_B_C5": {
"trunk": FBNetV3_B[0:5],
"rpn": [[_repeat_last(FBNetV3_B[3])]],
"bbox": [FBNetV3_B[4]],
"basic_args": _BASIC_ARGS,
},
"FBNetV3_C": {
"trunk": FBNetV3_C[0:4],
"rpn": [[_repeat_last(FBNetV3_C[3])]],
"bbox": [FBNetV3_C[4]],
"basic_args": _BASIC_ARGS,
},
"FBNetV3_C_C5": {
"trunk": FBNetV3_C[0:5],
"rpn": [[_repeat_last(FBNetV3_C[3])]],
"bbox": [FBNetV3_C[4]],
"basic_args": _BASIC_ARGS,
},
"FBNetV3_D": {
"trunk": FBNetV3_D[0:4],
"rpn": [[_repeat_last(FBNetV3_D[3])]],
"bbox": [FBNetV3_D[4]],
"basic_args": _BASIC_ARGS,
},
"FBNetV3_E": {
"trunk": FBNetV3_E[0:4],
"rpn": [[_repeat_last(FBNetV3_E[3])]],
"bbox": [FBNetV3_E[4]],
"basic_args": _BASIC_ARGS,
},
"FBNetV3_F": {
"trunk": FBNetV3_F[0:4],
"rpn": [[_repeat_last(FBNetV3_F[3])]],
"bbox": [FBNetV3_F[4]],
"basic_args": _BASIC_ARGS,
},
"FBNetV3_F_C5": {
"trunk": FBNetV3_F[0:5],
"rpn": [[_repeat_last(FBNetV3_F[3])]],
"bbox": [FBNetV3_F[4]],
"basic_args": _BASIC_ARGS,
},
"FBNetV3_G": {
"trunk": FBNetV3_G[0:4],
"rpn": [[_repeat_last(FBNetV3_G[3])]],
"bbox": [FBNetV3_G[4]],
"mask": LARGE_UPSAMPLE_HEAD_STAGES,
"kpts": LARGE_UPSAMPLE_HEAD_D21_STAGES,
"basic_args": _BASIC_ARGS,
},
"FBNetV3_G_C5": {
"trunk": FBNetV3_G[0:5],
"rpn": [[_repeat_last(FBNetV3_G[3])]],
"bbox": [FBNetV3_G[4]],
"mask": LARGE_UPSAMPLE_HEAD_STAGES,
"kpts": LARGE_UPSAMPLE_HEAD_D21_STAGES,
"basic_args": _BASIC_ARGS,
},
"FBNetV3_H": {
"trunk": FBNetV3_H[0:4],
"rpn": [[_repeat_last(FBNetV3_H[3])]],
"bbox": [FBNetV3_H[4]],
"basic_args": _BASIC_ARGS,
},
"FBNetV3_A_dsmask_C5": {
"trunk": FBNetV3_A_dsmask,
"rpn": [[_repeat_last(FBNetV3_A_dsmask[3])]],
"bbox": SMALL_BOX_HEAD_STAGES,
"mask": SMALL_DS_UPSAMPLE_HEAD_STAGES,
"kpts": SMALL_DS_UPSAMPLE_HEAD_STAGES,
"basic_args": _BASIC_ARGS,
},
"FBNetV3_A_dsmask": {
"trunk": FBNetV3_A_dsmask[0:4],
"rpn": [[_repeat_last(FBNetV3_A_dsmask[3])]],
"bbox": SMALL_BOX_HEAD_STAGES,
"mask": SMALL_DS_UPSAMPLE_HEAD_STAGES,
"kpts": SMALL_DS_UPSAMPLE_HEAD_STAGES,
"basic_args": _BASIC_ARGS,
},
"FBNetV3_A_dsmask_tiny": {
"trunk": FBNetV3_A_dsmask_tiny[0:4],
"rpn": [[_repeat_last(FBNetV3_A_dsmask_tiny[3])]],
"bbox": TINY_BOX_HEAD_STAGES,
"mask": TINY_DS_UPSAMPLE_HEAD_STAGES,
"kpts": TINY_DS_UPSAMPLE_HEAD_STAGES,
"basic_args": _BASIC_ARGS,
},
"FBNetV3_B_light_large": {
"trunk": FBNetV3_B_light_no_se[0:4],
"rpn": [[_repeat_last(FBNetV3_B_light_no_se[3])]],
"bbox": SMALL_BOX_HEAD_STAGES,
"mask": SMALL_DS_UPSAMPLE_HEAD_STAGES,
"kpts": LARGE_UPSAMPLE_HEAD_D21_STAGES,
"basic_args": _BASIC_ARGS,
},
"FBNetV3_B_light_no_se_C5": {
"trunk": FBNetV3_B_light_no_se[0:5],
"rpn": [[_repeat_last(FBNetV3_B_light_no_se[3])]],
"bbox": [FBNetV3_B_light_no_se[4]],
"basic_args": _BASIC_ARGS,
},
"FBNetV3_G_fpn": {
"trunk": FBNetV3_G[0:5], # FPN uses all 5 stages
"rpn": [[_repeat_last(FBNetV3_G[3], n=1)]],
"bbox": [FBNetV3_G[4]],
"mask": FPN_UPSAMPLE_HEAD_STAGES,
"kpts": LARGE_UPSAMPLE_HEAD_D21_STAGES,
"basic_args": _BASIC_ARGS,
},
}
FBNetV2ModelArch.add_archs(MODEL_ARCH_BUILTIN)
| d2go-main | d2go/modeling/modeldef/modeldef.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
This is the centralized place to define modeldef for all projects under D2Go.
"""
# Populating registries
from d2go.modeling.modeldef import modeldef as _modeldef # noqa
# @fb-only: from d2go.modeling.modeldef import fb as _fb # isort:skip # noqa
| d2go-main | d2go/modeling/modeldef/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import copy
class FBNetV2ModelArch(object):
_MODEL_ARCH = {}
@staticmethod
def add(name, arch):
assert (
name not in FBNetV2ModelArch._MODEL_ARCH
), "Arch name '{}' is already existed".format(name)
FBNetV2ModelArch._MODEL_ARCH[name] = arch
@staticmethod
def add_archs(archs):
for name, arch in archs.items():
FBNetV2ModelArch.add(name, arch)
@staticmethod
def get(name):
return copy.deepcopy(FBNetV2ModelArch._MODEL_ARCH[name])
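# Hedged usage sketch (added comment, not part of the original file):
#
#   my_arch = {"trunk": [[("conv_k3", 32, 2, 1)]], "basic_args": {}}
#   FBNetV2ModelArch.add("my_arch", my_arch)     # name must be unique
#   arch = FBNetV2ModelArch.get("my_arch")       # returns a deep copy, safe to mutate
#
# The arch dict layout above is only an illustration; see
# d2go/modeling/modeldef/modeldef.py for the real architecture definitions.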
| d2go-main | d2go/modeling/modeldef/fbnet_modeldef_registry.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# Populating registries
from d2go.modeling.backbone import fbnet_v2 as _fbnet_v2 # noqa
# @fb-only: from d2go.modeling.backbone import fb as _fb # isort:skip # noqa
| d2go-main | d2go/modeling/backbone/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import copy
import itertools
import logging
from typing import List
import torch
import torch.nn as nn
from d2go.modeling.backbone.modules import (
KeypointRCNNConvUpsamplePredictorNoUpscale,
KeypointRCNNIRFPredictorNoUpscale,
KeypointRCNNPredictor,
KeypointRCNNPredictorNoUpscale,
MaskRCNNConv1x1Predictor,
RPNHeadConvRegressor,
)
from d2go.modeling.modeldef.fbnet_modeldef_registry import FBNetV2ModelArch
from detectron2.layers import ShapeSpec
from detectron2.modeling import (
Backbone,
BACKBONE_REGISTRY,
build_anchor_generator,
RPN_HEAD_REGISTRY,
)
from detectron2.modeling.backbone.fpn import FPN, LastLevelMaxPool, LastLevelP6P7
from detectron2.modeling.roi_heads import box_head, keypoint_head, mask_head
from detectron2.utils.logger import log_first_n
from mobile_cv.arch.fbnet_v2 import fbnet_builder as mbuilder
from mobile_cv.arch.utils.helper import format_dict_expanding_list_values
logger = logging.getLogger(__name__)
FBNET_BUILDER_IDENTIFIER = "fbnetv2"
def _get_builder_norm_args(cfg):
norm_name = cfg.MODEL.FBNET_V2.NORM
norm_args = {"name": norm_name}
assert all(isinstance(x, dict) for x in cfg.MODEL.FBNET_V2.NORM_ARGS)
for dic in cfg.MODEL.FBNET_V2.NORM_ARGS:
norm_args.update(dic)
return norm_args
def _merge_fbnetv2_arch_def(cfg):
arch_def = {}
assert all(
isinstance(x, dict) for x in cfg.MODEL.FBNET_V2.ARCH_DEF
), cfg.MODEL.FBNET_V2.ARCH_DEF
for dic in cfg.MODEL.FBNET_V2.ARCH_DEF:
arch_def.update(dic)
return arch_def
def _parse_arch_def(cfg):
arch = cfg.MODEL.FBNET_V2.ARCH
arch_def = cfg.MODEL.FBNET_V2.ARCH_DEF
assert (arch != "" and not arch_def) ^ (
not arch and arch_def != []
), "Only allow one unset node between MODEL.FBNET_V2.ARCH ({}) and MODEL.FBNET_V2.ARCH_DEF ({})".format(
arch, arch_def
)
arch_def = FBNetV2ModelArch.get(arch) if arch else _merge_fbnetv2_arch_def(cfg)
# NOTE: arch_def is a dictionary describing the CNN architecture for creating
# the detection model. It can describe a wide range of models including the
# original FBNet. Each key-value pair expresses either a sub part of the model
# like trunk or head, or stores other meta information.
message = 'Using un-unified arch_def for ARCH "{}" (without scaling):\n{}'.format(
arch, format_dict_expanding_list_values(arch_def)
)
log_first_n(logging.INFO, message, n=1, key="message")
return arch_def
def _get_fbnet_builder_and_arch_def(cfg):
arch_def = _parse_arch_def(cfg)
    # NOTE: one can store extra information in arch_def to configure FBNetBuilder;
    # after this point, builder and arch_def will become independent.
basic_args = arch_def.pop("basic_args", {})
builder = mbuilder.FBNetBuilder(
width_ratio=cfg.MODEL.FBNET_V2.SCALE_FACTOR,
width_divisor=cfg.MODEL.FBNET_V2.WIDTH_DIVISOR,
bn_args=_get_builder_norm_args(cfg),
)
builder.add_basic_args(**basic_args)
return builder, arch_def
def _get_stride_per_stage(blocks):
"""
    Count the accumulated stride per stage given a list of blocks. The mbuilder
    provides an API for counting the per-block accumulated stride; this function
    leverages it to count the per-stage accumulated stride.
Input: a list of blocks from the unified arch_def. Note that the stage_idx
must be contiguous (not necessarily starting from 0), and can be
non-ascending (not tested).
Output: a list of accumulated stride per stage, starting from lowest stage_idx.
"""
stride_per_block = mbuilder.count_stride_each_block(blocks)
assert len(stride_per_block) == len(blocks)
stage_idx_set = {s["stage_idx"] for s in blocks}
# assume stage idx are contiguous, eg. 1, 2, 3, ...
assert max(stage_idx_set) - min(stage_idx_set) + 1 == len(stage_idx_set)
start_stage_id = min(stage_idx_set)
ids_per_stage = [
[i for i, s in enumerate(blocks) if s["stage_idx"] == stage_idx]
for stage_idx in range(start_stage_id, start_stage_id + len(stage_idx_set))
] # eg. [[0], [1, 2], [3, 4, 5, 6], ...]
block_stride_per_stage = [
[stride_per_block[i] for i in ids] for ids in ids_per_stage
] # eg. [[1], [2, 1], [2, 1, 1, 1], ...]
stride_per_stage = [
list(itertools.accumulate(s, lambda x, y: x * y))[-1]
for s in block_stride_per_stage
] # eg. [1, 2, 2, ...]
accum_stride_per_stage = list(
itertools.accumulate(stride_per_stage, lambda x, y: x * y)
) # eg. [first*1, first*2, first*4, ...]
assert accum_stride_per_stage[-1] == mbuilder.count_strides(blocks)
return accum_stride_per_stage
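# Illustrative worked example (added comment, not part of the original file): for blocks
# with stage_idx [0, 1, 1] and per-block strides [2, 2, 1], ids_per_stage is [[0], [1, 2]],
# block_stride_per_stage is [[2], [2, 1]], stride_per_stage is [2, 2], and the returned
# accumulated strides are [2, 4].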
def fbnet_identifier_checker(func):
"""Can be used to decorate _load_from_state_dict"""
def wrapper(self, state_dict, prefix, *args, **kwargs):
possible_keys = [k for k in state_dict.keys() if k.startswith(prefix)]
if not all(FBNET_BUILDER_IDENTIFIER in k for k in possible_keys):
logger.warning(
"Couldn't match FBNetV2 pattern given prefix {}, possible keys: \n{}".format(
prefix, "\n".join(possible_keys)
)
)
if any("xif" in k for k in possible_keys):
raise RuntimeError(
"Seems a FBNetV1 trained checkpoint is loaded by FBNetV2 model,"
" which is not supported. Please consider re-train your model"
" using the same setup as before (it will be FBNetV2). If you"
" need to run the old FBNetV1 models, those configs can be"
" still found, see D19477651 as example."
)
return func(self, state_dict, prefix, *args, **kwargs)
return wrapper
# pyre-fixme[11]: Annotation `Sequential` is not defined as a type.
class FBNetModule(nn.Sequential):
@fbnet_identifier_checker
def _load_from_state_dict(self, *args, **kwargs):
return super()._load_from_state_dict(*args, **kwargs)
def build_fbnet(cfg, name, in_channels):
"""
Create a FBNet module using FBNet V2 builder.
Args:
cfg (CfgNode): the config that contains MODEL.FBNET_V2.
name (str): the key in arch_def that represents a subpart of network
in_channels (int): input channel size
Returns:
        nn.Sequential: the first return is a nn.Sequential, where each element
            corresponds to a stage in arch_def.
        List[ShapeSpec]: the second return is a list of ShapeSpec containing the
            output channels and accumulated stride for each stage.
"""
builder, raw_arch_def = _get_fbnet_builder_and_arch_def(cfg)
# Reset the last_depth for this builder (might have been cached), this is
# the only mutable member variable.
builder.last_depth = in_channels
# NOTE: Each sub part of the model consists of several stages and each stage
# has several blocks. "Raw" arch_def (Dict[str, List[List[Tuple]]]) uses a
# list of stages to describe the architecture, which is more compact and
# thus written as builtin metadata (inside FBNetV2ModelArch) or config
# (MODEL.FBNET_V2.ARCH_DEF). "Unified" arch_def (Dict[str, List[Dict]])
# uses a list blocks from all stages instead, which is recognized by builder.
arch_def = mbuilder.unify_arch_def(raw_arch_def, [name])
arch_def = {name: arch_def[name]}
logger.info(
"Build FBNet using unified arch_def:\n{}".format(
format_dict_expanding_list_values(arch_def)
)
)
arch_def_blocks = arch_def[name]
stages = []
trunk_stride_per_stage = _get_stride_per_stage(arch_def_blocks)
shape_spec_per_stage = []
for i, stride_i in enumerate(trunk_stride_per_stage):
stages.append(
builder.build_blocks(
arch_def_blocks,
stage_indices=[i],
prefix_name=FBNET_BUILDER_IDENTIFIER + "_",
)
)
shape_spec_per_stage.append(
ShapeSpec(
channels=builder.last_depth,
stride=stride_i,
)
)
return FBNetModule(*stages), shape_spec_per_stage
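# Hedged usage sketch (added comment, not part of the original file), mirroring how
# FBNetV2Backbone below consumes this helper:
#
#   stages, shape_specs = build_fbnet(cfg, name="trunk", in_channels=3)
#   for stage, spec in zip(stages, shape_specs):
#       print(spec.channels, spec.stride)  # per-stage output channels / accumulated stride
#
# "cfg" is assumed to be a D2Go CfgNode with the MODEL.FBNET_V2 options populated.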
class FBNetV2Backbone(Backbone):
"""
Backbone (bottom-up) for FBNet.
Hierarchy:
trunk0:
xif0_0
xif0_1
...
trunk1:
xif1_0
xif1_1
...
...
Output features:
The outputs from each "stage", i.e. trunkX.
"""
def __init__(self, cfg):
super(FBNetV2Backbone, self).__init__()
stages, shape_specs = build_fbnet(
cfg, name="trunk", in_channels=cfg.MODEL.FBNET_V2.STEM_IN_CHANNELS
)
self._trunk_stage_names = []
self._trunk_stages = []
self._out_feature_channels = {}
self._out_feature_strides = {}
for i, (stage, shape_spec) in enumerate(zip(stages, shape_specs)):
name = "trunk{}".format(i)
self.add_module(name, stage)
self._trunk_stage_names.append(name)
self._trunk_stages.append(stage)
self._out_feature_channels[name] = shape_spec.channels
self._out_feature_strides[name] = shape_spec.stride
# returned features are the final output of each stage
self._out_features = self._trunk_stage_names
self._trunk_stage_names = tuple(self._trunk_stage_names)
def __prepare_scriptable__(self):
ret = copy.deepcopy(self)
ret._trunk_stages = nn.ModuleList(ret._trunk_stages)
for k in self._trunk_stage_names:
delattr(ret, k)
return ret
@fbnet_identifier_checker
def _load_from_state_dict(self, *args, **kwargs):
return super()._load_from_state_dict(*args, **kwargs)
# return features for each stage
def forward(self, x):
features = {}
for name, stage in zip(self._trunk_stage_names, self._trunk_stages):
x = stage(x)
features[name] = x
return features
class FBNetV2FPN(FPN):
"""
FPN module for FBNet.
"""
pass
def build_fbnet_backbone(cfg):
return FBNetV2Backbone(cfg)
@BACKBONE_REGISTRY.register()
class FBNetV2C4Backbone(Backbone):
def __init__(self, cfg, _):
super(FBNetV2C4Backbone, self).__init__()
self.body = build_fbnet_backbone(cfg)
self._out_features = self.body._out_features
self._out_feature_strides = self.body._out_feature_strides
self._out_feature_channels = self.body._out_feature_channels
def forward(self, x):
return self.body(x)
@BACKBONE_REGISTRY.register()
def FBNetV2FpnBackbone(cfg, _):
backbone = FBNetV2FPN(
bottom_up=build_fbnet_backbone(cfg),
in_features=cfg.MODEL.FPN.IN_FEATURES,
out_channels=cfg.MODEL.FPN.OUT_CHANNELS,
norm=cfg.MODEL.FPN.NORM,
top_block=LastLevelMaxPool(),
)
return backbone
@BACKBONE_REGISTRY.register()
def FBNetV2RetinaNetBackbone(cfg, _):
bottom_up = build_fbnet_backbone(cfg)
in_channels_p6p7 = bottom_up.output_shape()[cfg.MODEL.FPN.IN_FEATURES[-1]].channels
top_block = LastLevelP6P7(in_channels_p6p7, cfg.MODEL.FPN.OUT_CHANNELS)
top_block.in_feature = cfg.MODEL.FPN.IN_FEATURES[-1]
backbone = FBNetV2FPN(
bottom_up=bottom_up,
in_features=cfg.MODEL.FPN.IN_FEATURES,
out_channels=cfg.MODEL.FPN.OUT_CHANNELS,
norm=cfg.MODEL.FPN.NORM,
top_block=top_block,
)
return backbone
@RPN_HEAD_REGISTRY.register()
class FBNetV2RpnHead(nn.Module):
def __init__(self, cfg, input_shape: List[ShapeSpec]):
super(FBNetV2RpnHead, self).__init__()
in_channels = [x.channels for x in input_shape]
assert len(set(in_channels)) == 1
in_channels = in_channels[0]
anchor_generator = build_anchor_generator(cfg, input_shape)
num_cell_anchors = anchor_generator.num_cell_anchors
box_dim = anchor_generator.box_dim
assert len(set(num_cell_anchors)) == 1
num_cell_anchors = num_cell_anchors[0]
self.rpn_feature, shape_specs = build_fbnet(
cfg, name="rpn", in_channels=in_channels
)
self.rpn_regressor = RPNHeadConvRegressor(
in_channels=shape_specs[-1].channels,
num_anchors=num_cell_anchors,
box_dim=box_dim,
)
def forward(self, x: List[torch.Tensor]):
x = [self.rpn_feature(y) for y in x]
return self.rpn_regressor(x)
@box_head.ROI_BOX_HEAD_REGISTRY.register()
class FBNetV2RoIBoxHead(nn.Module):
def __init__(self, cfg, input_shape: ShapeSpec):
super(FBNetV2RoIBoxHead, self).__init__()
self.roi_box_conv, shape_specs = build_fbnet(
cfg, name="bbox", in_channels=input_shape.channels
)
self._out_channels = shape_specs[-1].channels
self.avgpool = nn.AdaptiveAvgPool2d(1)
def forward(self, x):
x = self.roi_box_conv(x)
if len(x.shape) == 4 and (x.shape[2] > 1 or x.shape[3] > 1):
x = self.avgpool(x)
return x
@property
@torch.jit.unused
def output_shape(self):
return ShapeSpec(channels=self._out_channels)
@keypoint_head.ROI_KEYPOINT_HEAD_REGISTRY.register()
class FBNetV2RoIKeypointHead(keypoint_head.BaseKeypointRCNNHead):
def __init__(self, cfg, input_shape: ShapeSpec):
super(FBNetV2RoIKeypointHead, self).__init__(
cfg=cfg,
input_shape=input_shape,
)
self.feature_extractor, shape_specs = build_fbnet(
cfg, name="kpts", in_channels=input_shape.channels
)
self.predictor = KeypointRCNNPredictor(
in_channels=shape_specs[-1].channels,
num_keypoints=cfg.MODEL.ROI_KEYPOINT_HEAD.NUM_KEYPOINTS,
)
def layers(self, x):
x = self.feature_extractor(x)
x = self.predictor(x)
return x
@keypoint_head.ROI_KEYPOINT_HEAD_REGISTRY.register()
class FBNetV2RoIKeypointHeadKRCNNPredictorNoUpscale(keypoint_head.BaseKeypointRCNNHead):
def __init__(self, cfg, input_shape: ShapeSpec):
super(FBNetV2RoIKeypointHeadKRCNNPredictorNoUpscale, self).__init__(
cfg=cfg,
input_shape=input_shape,
)
self.feature_extractor, shape_specs = build_fbnet(
cfg,
name="kpts",
in_channels=input_shape.channels,
)
self.predictor = KeypointRCNNPredictorNoUpscale(
in_channels=shape_specs[-1].channels,
num_keypoints=cfg.MODEL.ROI_KEYPOINT_HEAD.NUM_KEYPOINTS,
)
def layers(self, x):
x = self.feature_extractor(x)
x = self.predictor(x)
return x
@keypoint_head.ROI_KEYPOINT_HEAD_REGISTRY.register()
class FBNetV2RoIKeypointHeadKPRCNNIRFPredictorNoUpscale(
keypoint_head.BaseKeypointRCNNHead,
):
def __init__(self, cfg, input_shape: ShapeSpec):
super(FBNetV2RoIKeypointHeadKPRCNNIRFPredictorNoUpscale, self).__init__(
cfg=cfg,
input_shape=input_shape,
)
self.feature_extractor, shape_specs = build_fbnet(
cfg,
name="kpts",
in_channels=input_shape.channels,
)
self.predictor = KeypointRCNNIRFPredictorNoUpscale(
cfg,
in_channels=shape_specs[-1].channels,
num_keypoints=cfg.MODEL.ROI_KEYPOINT_HEAD.NUM_KEYPOINTS,
)
def layers(self, x):
x = self.feature_extractor(x)
x = self.predictor(x)
return x
@keypoint_head.ROI_KEYPOINT_HEAD_REGISTRY.register()
class FBNetV2RoIKeypointHeadKPRCNNConvUpsamplePredictorNoUpscale(
keypoint_head.BaseKeypointRCNNHead,
):
def __init__(self, cfg, input_shape: ShapeSpec):
super(
FBNetV2RoIKeypointHeadKPRCNNConvUpsamplePredictorNoUpscale, self
).__init__(
cfg=cfg,
input_shape=input_shape,
)
self.feature_extractor, shape_specs = build_fbnet(
cfg,
name="kpts",
in_channels=input_shape.channels,
)
self.predictor = KeypointRCNNConvUpsamplePredictorNoUpscale(
cfg,
in_channels=shape_specs[-1].channels,
num_keypoints=cfg.MODEL.ROI_KEYPOINT_HEAD.NUM_KEYPOINTS,
)
def layers(self, x):
x = self.feature_extractor(x)
x = self.predictor(x)
return x
@mask_head.ROI_MASK_HEAD_REGISTRY.register()
class FBNetV2RoIMaskHead(mask_head.BaseMaskRCNNHead):
def __init__(self, cfg, input_shape: ShapeSpec):
super(FBNetV2RoIMaskHead, self).__init__(
cfg=cfg,
input_shape=input_shape,
)
self.feature_extractor, shape_specs = build_fbnet(
cfg,
name="mask",
in_channels=input_shape.channels,
)
num_classes = cfg.MODEL.ROI_HEADS.NUM_CLASSES
self.predictor = MaskRCNNConv1x1Predictor(shape_specs[-1].channels, num_classes)
def layers(self, x):
x = self.feature_extractor(x)
x = self.predictor(x)
return x
| d2go-main | d2go/modeling/backbone/fbnet_v2.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from typing import List
import torch
import torch.nn as nn
from detectron2 import layers
from detectron2.utils.tracing import is_fx_tracing
from mobile_cv.arch.fbnet_v2.irf_block import IRFBlock
class RPNHeadConvRegressor(nn.Module):
"""
A simple RPN Head for classification and bbox regression
"""
def __init__(self, in_channels, num_anchors, box_dim=4):
"""
Arguments:
in_channels (int): number of channels of the input feature
num_anchors (int): number of anchors to be predicted
box_dim (int): dimension of bbox
"""
super(RPNHeadConvRegressor, self).__init__()
self.cls_logits = nn.Conv2d(in_channels, num_anchors, kernel_size=1, stride=1)
self.bbox_pred = nn.Conv2d(
in_channels, num_anchors * box_dim, kernel_size=1, stride=1
)
for l in [self.cls_logits, self.bbox_pred]:
torch.nn.init.normal_(l.weight, std=0.01)
torch.nn.init.constant_(l.bias, 0)
def forward(self, x: List[torch.Tensor]):
if not is_fx_tracing():
torch._assert(isinstance(x, (list, tuple)), "Unexpected data type")
logits = [self.cls_logits(y) for y in x]
bbox_reg = [self.bbox_pred(y) for y in x]
return logits, bbox_reg
class MaskRCNNConv1x1Predictor(nn.Module):
def __init__(self, in_channels, out_channels):
super(MaskRCNNConv1x1Predictor, self).__init__()
num_classes = out_channels
num_inputs = in_channels
self.mask_fcn_logits = nn.Conv2d(num_inputs, num_classes, 1, 1, 0)
for name, param in self.named_parameters():
if "bias" in name:
nn.init.constant_(param, 0)
elif "weight" in name:
# Caffe2 implementation uses MSRAFill, which in fact
# corresponds to kaiming_normal_ in PyTorch
nn.init.kaiming_normal_(param, mode="fan_out", nonlinearity="relu")
def forward(self, x):
return self.mask_fcn_logits(x)
class KeypointRCNNPredictor(nn.Module):
def __init__(self, in_channels, num_keypoints):
super(KeypointRCNNPredictor, self).__init__()
input_features = in_channels
deconv_kernel = 4
self.kps_score_lowres = nn.ConvTranspose2d(
input_features,
num_keypoints,
deconv_kernel,
stride=2,
padding=deconv_kernel // 2 - 1,
)
nn.init.kaiming_normal_(
self.kps_score_lowres.weight, mode="fan_out", nonlinearity="relu"
)
nn.init.constant_(self.kps_score_lowres.bias, 0)
self.up_scale = 2
self.out_channels = num_keypoints
def forward(self, x):
x = self.kps_score_lowres(x)
x = layers.interpolate(
x, scale_factor=self.up_scale, mode="bilinear", align_corners=False
)
return x
class KeypointRCNNPredictorNoUpscale(nn.Module):
def __init__(self, in_channels, num_keypoints):
super(KeypointRCNNPredictorNoUpscale, self).__init__()
input_features = in_channels
deconv_kernel = 4
self.kps_score_lowres = nn.ConvTranspose2d(
input_features,
num_keypoints,
deconv_kernel,
stride=2,
padding=deconv_kernel // 2 - 1,
)
nn.init.kaiming_normal_(
self.kps_score_lowres.weight, mode="fan_out", nonlinearity="relu"
)
nn.init.constant_(self.kps_score_lowres.bias, 0)
self.out_channels = num_keypoints
def forward(self, x):
x = self.kps_score_lowres(x)
return x
class KeypointRCNNIRFPredictorNoUpscale(nn.Module):
def __init__(self, cfg, in_channels, num_keypoints):
super(KeypointRCNNIRFPredictorNoUpscale, self).__init__()
input_features = in_channels
self.kps_score_lowres = IRFBlock(
input_features,
num_keypoints,
stride=-2,
expansion=3,
bn_args="none",
dw_skip_bnrelu=True,
)
self.out_channels = num_keypoints
def forward(self, x):
x = self.kps_score_lowres(x)
return x
class KeypointRCNNConvUpsamplePredictorNoUpscale(nn.Module):
def __init__(self, cfg, in_channels, num_keypoints):
super(KeypointRCNNConvUpsamplePredictorNoUpscale, self).__init__()
input_features = in_channels
self.kps_score_lowres = nn.Conv2d(
input_features,
num_keypoints,
kernel_size=3,
stride=1,
padding=1,
)
self.out_channels = num_keypoints
def forward(self, x):
x = layers.interpolate(x, scale_factor=(2, 2), mode="nearest")
x = self.kps_score_lowres(x)
return x
| d2go-main | d2go/modeling/backbone/modules.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from d2go.config import CfgNode as CN
def add_fbnet_default_configs(_C):
"""FBNet options and default values"""
_C.MODEL.FBNET = CN()
_C.MODEL.FBNET.ARCH = "default"
# custom arch
_C.MODEL.FBNET.ARCH_DEF = ""
_C.MODEL.FBNET.BN_TYPE = "bn"
_C.MODEL.FBNET.NUM_GROUPS = 32 # for gn usage only
_C.MODEL.FBNET.SCALE_FACTOR = 1.0
# the output channels will be divisible by WIDTH_DIVISOR
_C.MODEL.FBNET.WIDTH_DIVISOR = 1
_C.MODEL.FBNET.DW_CONV_SKIP_BN = True
_C.MODEL.FBNET.DW_CONV_SKIP_RELU = True
# > 0 scale, == 0 skip, < 0 same dimension
_C.MODEL.FBNET.DET_HEAD_LAST_SCALE = 1.0
_C.MODEL.FBNET.DET_HEAD_BLOCKS = []
# overwrite the stride for the head, 0 to use original value
_C.MODEL.FBNET.DET_HEAD_STRIDE = 0
# > 0 scale, == 0 skip, < 0 same dimension
_C.MODEL.FBNET.KPTS_HEAD_LAST_SCALE = 0.0
_C.MODEL.FBNET.KPTS_HEAD_BLOCKS = []
# overwrite the stride for the head, 0 to use original value
_C.MODEL.FBNET.KPTS_HEAD_STRIDE = 0
# > 0 scale, == 0 skip, < 0 same dimension
_C.MODEL.FBNET.MASK_HEAD_LAST_SCALE = 0.0
_C.MODEL.FBNET.MASK_HEAD_BLOCKS = []
# overwrite the stride for the head, 0 to use original value
_C.MODEL.FBNET.MASK_HEAD_STRIDE = 0
# 0 to use all blocks defined in arch_def
_C.MODEL.FBNET.RPN_HEAD_BLOCKS = 0
_C.MODEL.FBNET.RPN_BN_TYPE = ""
# number of channels input to trunk
_C.MODEL.FBNET.STEM_IN_CHANNELS = 3
def add_fbnet_v2_default_configs(_C):
_C.MODEL.FBNET_V2 = CN()
_C.MODEL.FBNET_V2.ARCH = "default"
_C.MODEL.FBNET_V2.ARCH_DEF = []
# number of channels input to trunk
_C.MODEL.FBNET_V2.STEM_IN_CHANNELS = 3
_C.MODEL.FBNET_V2.SCALE_FACTOR = 1.0
# the output channels will be divisible by WIDTH_DIVISOR
_C.MODEL.FBNET_V2.WIDTH_DIVISOR = 1
# normalization configs
# name of norm such as "bn", "sync_bn", "gn"
_C.MODEL.FBNET_V2.NORM = "bn"
    # for advanced use cases that require extra arguments, pass a list of
    # dicts such as [{"num_groups": 8}, {"momentum": 0.1}] (merged in the given order).
    # Note that a string written in .yaml will be evaluated by yacs, thus this
    # node will become a normal python object.
# https://github.com/rbgirshick/yacs/blob/master/yacs/config.py#L410
_C.MODEL.FBNET_V2.NORM_ARGS = []
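    # Example (added comment, not part of the original file): in a .yaml config this
    # could look like, e.g.
    #   MODEL:
    #     FBNET_V2:
    #       NORM: "gn"
    #       NORM_ARGS: [{"num_groups": 8}]
    # where the list of dicts is merged in order by _get_builder_norm_args().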
_C.MODEL.VT_FPN = CN()
_C.MODEL.VT_FPN.IN_FEATURES = ["res2", "res3", "res4", "res5"]
_C.MODEL.VT_FPN.OUT_CHANNELS = 256
_C.MODEL.VT_FPN.LAYERS = 3
_C.MODEL.VT_FPN.TOKEN_LS = [16, 16, 8, 8]
_C.MODEL.VT_FPN.TOKEN_C = 1024
_C.MODEL.VT_FPN.HEADS = 16
_C.MODEL.VT_FPN.MIN_GROUP_PLANES = 64
_C.MODEL.VT_FPN.NORM = "BN"
_C.MODEL.VT_FPN.POS_HWS = []
_C.MODEL.VT_FPN.POS_N_DOWNSAMPLE = []
def add_bifpn_default_configs(_C):
_C.MODEL.BIFPN = CN()
_C.MODEL.BIFPN.DEPTH_MULTIPLIER = 1
_C.MODEL.BIFPN.SCALE_FACTOR = 1
_C.MODEL.BIFPN.WIDTH_DIVISOR = 8
_C.MODEL.BIFPN.NORM = "bn"
_C.MODEL.BIFPN.NORM_ARGS = []
_C.MODEL.BIFPN.TOP_BLOCK_BEFORE_FPN = False
| d2go-main | d2go/modeling/backbone/fbnet_cfg.py |
from typing import Tuple
import torch
from d2go.quantization.learnable_qat import convert_to_learnable_qconfig
from mobile_cv.common.misc.registry import Registry
TORCH_VERSION: Tuple[int, ...] = tuple(int(x) for x in torch.__version__.split(".")[:2])
if TORCH_VERSION > (1, 10):
from torch.ao.quantization.quantize_fx import convert_fx, prepare_fx, prepare_qat_fx
else:
from torch.quantization.quantize_fx import convert_fx, prepare_fx, prepare_qat_fx
from mobile_cv.common.misc.oss_utils import fb_overwritable
QCONFIG_CREATOR_REGISTRY = Registry("QCONFIG_CREATOR_REGISTRY")
def set_backend_and_create_qconfig(cfg, *, is_train):
"""
Recommended function to create qconfig given D2Go's quantization config.
"""
    # In case we need a different implementation, we can add a new key called
# QUANTIZATION.QCONFIG_CREATOR with "smart" as default value, and use this key
# to toggle between registries.
return QCONFIG_CREATOR_REGISTRY.get("smart")(cfg, is_train=is_train)
def holistic_get_qconfig(backend, is_qat, use_symmetric=False):
"""
Config-less vanilla way to create the QConfig, suitable for explicitly creating qconfig.
"""
if use_symmetric:
if not backend == "qnnpack":
raise ValueError(
f"Only qnnpack supports Symmetric quantization, given: {backend}"
)
if is_qat:
return torch.ao.quantization.default_symmetric_qnnpack_qat_qconfig
else:
return torch.ao.quantization.default_symmetric_qnnpack_qconfig
else:
if is_qat:
return torch.ao.quantization.get_default_qat_qconfig(backend)
else:
return torch.ao.quantization.get_default_qconfig(backend)
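# Hedged usage sketch (added comment, not part of the original file):
#
#   qat_qconfig = holistic_get_qconfig(backend="qnnpack", is_qat=True, use_symmetric=True)
#   ptq_qconfig = holistic_get_qconfig(backend="fbgemm", is_qat=False)
#
# Both return standard torch QConfig objects that can be used as an eager-mode
# module.qconfig or inside an FX-mode qconfig mapping.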
@QCONFIG_CREATOR_REGISTRY.register("smart")
def _smart_set_backend_and_create_qconfig(cfg, *, is_train):
"""
This is the default / "smart" way to create qconfig based on various of configs,
supports:
- learnable QAT
- set symmetric quantization via backend.
"""
backend, options = _smart_parse_extended_backend(cfg.QUANTIZATION.BACKEND)
is_symmetric = options["is_symmetric"]
# Set backend
torch.backends.quantized.engine = backend
qat_method = cfg.QUANTIZATION.QAT.FAKE_QUANT_METHOD
assert qat_method in ["default", "learnable"]
qconfig = holistic_get_qconfig(
backend=backend, is_qat=is_train, use_symmetric=is_symmetric
)
if is_train and qat_method == "learnable":
qconfig = convert_to_learnable_qconfig(qconfig)
return qconfig
def validate_native_backend(backend):
_PYTORCH_NATIVE_BACKENDS = ["fbgemm", "qnnpack"]
if backend not in _PYTORCH_NATIVE_BACKENDS:
raise ValueError(
f"Unrecognized backend: {backend}, PyTorch"
f" supported backends are: {_PYTORCH_NATIVE_BACKENDS}"
)
@fb_overwritable()
def _smart_parse_extended_backend(extended_backend):
"""
D2Go extends the definition of quantization "backend". In addition to PyTorch's
    native backends (i.e. qnnpack and fbgemm), we allow other types of backends so users
can easily express different settings. Here are the supported cases:
1. Symmetric quantization: "qnnpack@symmetric" refers to using QNNPACK with
symmetric QConfig.
"""
backend = extended_backend
# default options
options = {
"is_symmetric": False,
}
if "@symmetric" in backend:
options["is_symmetric"] = True
backend = backend.replace("@symmetric", "", 1)
validate_native_backend(backend)
return backend, options
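# Illustrative examples (added comment, not part of the original file):
#
#   _smart_parse_extended_backend("qnnpack")            -> ("qnnpack", {"is_symmetric": False})
#   _smart_parse_extended_backend("qnnpack@symmetric")  -> ("qnnpack", {"is_symmetric": True})
#   _smart_parse_extended_backend("tensorrt")           -> raises ValueError (not a native backend)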
def smart_decode_backend(extended_backend):
"""
    Since we extend the definition of quantization backend, users shouldn't directly use
    cfg.QUANTIZATION.BACKEND in PyTorch's context; this is the translation function
    if direct use is necessary.
"""
return _smart_parse_extended_backend(extended_backend)[0]
| d2go-main | d2go/quantization/qconfig.py |
d2go-main | d2go/quantization/__init__.py |
|
#!/usr/bin/env python3
import logging
from functools import partial
import torch
import torch.distributed as dist
from d2go.utils.parse_module_params import iterate_module_named_parameters
from torch.ao.quantization._learnable_fake_quantize import _LearnableFakeQuantize
logger = logging.getLogger(__name__)
def mixin_with_subclass(module, mix_class):
"""Create a subclass of type(module) and mix_class while using all the data
from the `module` object
"""
ModuleType = type(module)
class SubClass(mix_class, ModuleType):
def __init__(self, module):
assert isinstance(module, ModuleType)
# initialize the parent by copying the dict directly
self.__dict__ = module.__dict__.copy()
ret = SubClass(module)
return ret
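# Hedged usage sketch (added comment, not part of the original file): mixing a behavior
# class into an existing module instance without re-constructing it.
#
#   class LoggingMixin:
#       def forward(self, x):
#           print("forward called")
#           return super().forward(x)
#
#   conv = torch.nn.Conv2d(3, 8, 3)
#   conv = mixin_with_subclass(conv, LoggingMixin)  # isinstance(conv, nn.Conv2d) remains True
#
# LoggingMixin is a hypothetical example class; setup_qat_get_optimizer_param_groups()
# below uses the same mechanism to attach get_optimizer_param_groups() to a model.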
def _has_module(model, module_type):
for x in model.modules():
if isinstance(x, module_type):
return True
return False
def check_for_learnable_fake_quant_ops(qat_method, model):
"""Make sure learnable observers are used if qat method is `learnable`"""
if qat_method.startswith("learnable"):
if not _has_module(model, _LearnableFakeQuantize):
raise Exception(
"No learnable fake quant is used for learnable quantzation, please use d2go.quantization.learnable_qat.get_learnable_qat_qconfig() to get proper qconfig"
)
def convert_to_learnable_qconfig(qconfig):
"""
Convert a QConfig to its learnable counterpart.
"""
def _update_fused_moving_avg_obs_fake_quantize(keywords):
        # requires setting use_grad_scaling to True; all other parameters are the same
        # as the default setting of FusedMovingAvgObsFakeQuantize (both qnnpack and fbgemm).
assert "use_grad_scaling" not in keywords
keywords["use_grad_scaling"] = True
return keywords
_OVERWRITE_PARAMS = {
        # map from supported FakeQuant type to its new parameters in order to convert
# it to a learnable FakeQuant
torch.ao.quantization.fake_quantize.FusedMovingAvgObsFakeQuantize: _update_fused_moving_avg_obs_fake_quantize
}
def _update_to_learnable(wrapper):
assert isinstance(
wrapper, torch.ao.quantization.observer._PartialWrapper
), wrapper
assert isinstance(wrapper.p, partial), wrapper
keywords_updater = _OVERWRITE_PARAMS[wrapper.p.func]
keywords = keywords_updater(wrapper.p.keywords)
new_p = partial(_LearnableFakeQuantize, *wrapper.p.args, **keywords)
wrapper.p = new_p
return wrapper
activation = _update_to_learnable(qconfig.activation)
weight = _update_to_learnable(qconfig.weight)
return torch.quantization.QConfig(activation=activation, weight=weight)
def get_world_size() -> int:
if not dist.is_available():
return 1
if not dist.is_initialized():
return 1
return dist.get_world_size()
def sync_tensor(data):
world_size = get_world_size()
if world_size > 1:
dist.all_reduce(data, op=dist.ReduceOp.SUM)
data /= world_size
def toggle_lqat_fake_quant(mod, enable):
"""Toggle fake quantization for learnable qat"""
if type(mod) == _LearnableFakeQuantize:
mod.toggle_fake_quant(enable)
# enable/disable fake quantization for learnable qat
enable_lqat_fake_quant = partial(toggle_lqat_fake_quant, enable=True)
disable_lqat_fake_quant = partial(toggle_lqat_fake_quant, enable=False)
def toggle_lqat_static_observer(mod, enable):
"""Toggle static observers for learnable qat"""
if type(mod) == _LearnableFakeQuantize:
mod.toggle_observer_update(enable)
# enable/disable static observer for learnable qat
enable_lqat_static_observer = partial(toggle_lqat_static_observer, enable=True)
disable_lqat_static_observer = partial(toggle_lqat_static_observer, enable=False)
def enable_lqat_learnable_observer(mod):
"""Enable learning observers, will disable static observer updates"""
if type(mod) == _LearnableFakeQuantize:
sync_tensor(mod.scale.data)
sync_tensor(mod.zero_point.data)
mod.toggle_qparam_learning(enabled=True).toggle_observer_update(enabled=False)
def disable_lqat_learnable_observer(mod):
"""Disable learning observers"""
if type(mod) == _LearnableFakeQuantize:
mod.toggle_qparam_learning(enabled=False)
def get_optimizer_param_groups_learnable_qat(model, _):
"""Set the weight decay for scale/zero_point for learnable_fake_quant to 0"""
params = []
for (
_module_name,
module,
module_param_name,
value,
) in iterate_module_named_parameters(model, check_requires_grad=False):
if isinstance(module, _LearnableFakeQuantize):
if module_param_name in ("scale", "zero_point"):
params += [
{
"params": [value],
"weight_decay": 0.0,
}
]
return params
def _is_observer_key(state_dict_key):
observer_keys = ["activation_post_process", "weight_fake_quant"]
return any(x in state_dict_key for x in observer_keys)
def _is_q_state_dict(state_dict):
return any(_is_observer_key(k) for k in state_dict)
class ModelGetOptimizerParamGroupLearnableQATMixin:
def get_optimizer_param_groups(self, opts):
ret = []
if hasattr(super(), "get_optimizer_param_groups"):
ret = super().get_optimizer_param_groups(opts)
ret += get_optimizer_param_groups_learnable_qat(self, opts)
return ret
def setup_qat_get_optimizer_param_groups(model, qat_method):
"""Add a function `get_optimizer_param_groups` to the model so that it could
return proper weight decay for learnable qat
"""
if not qat_method.startswith("learnable"):
return model
assert _is_q_state_dict(model.state_dict())
model = mixin_with_subclass(model, ModelGetOptimizerParamGroupLearnableQATMixin)
assert hasattr(model, "get_optimizer_param_groups")
return model
| d2go-main | d2go/quantization/learnable_qat.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from typing import Tuple
import torch
from mobile_cv.common.misc.oss_utils import fb_overwritable
TORCH_VERSION: Tuple[int, ...] = tuple(int(x) for x in torch.__version__.split(".")[:2])
if TORCH_VERSION > (1, 10):
from torch.ao.quantization.quantize import convert
from torch.ao.quantization.quantize_fx import convert_fx, prepare_fx, prepare_qat_fx
else:
from torch.quantization.quantize import convert
from torch.quantization.quantize_fx import convert_fx, prepare_fx, prepare_qat_fx
@fb_overwritable()
def get_prepare_fx_fn(cfg, is_qat):
return prepare_qat_fx if is_qat else prepare_fx
@fb_overwritable()
def get_convert_fn(cfg, example_inputs=None, qconfig_mapping=None, backend_config=None):
if cfg.QUANTIZATION.EAGER_MODE:
return convert
else:
return convert_fx
| d2go-main | d2go/quantization/fx.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import copy
import logging
import math
from typing import Any, Dict, Tuple
import detectron2.utils.comm as comm
import torch
from d2go.quantization import learnable_qat
from d2go.quantization.fx import get_convert_fn, get_prepare_fx_fn
from d2go.quantization.qconfig import (
set_backend_and_create_qconfig,
smart_decode_backend,
)
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.engine.train_loop import HookBase, SimpleTrainer
from detectron2.utils.file_io import PathManager
from mobile_cv.arch.quantization.observer import update_stat as observer_update_stat
from mobile_cv.arch.utils import fuse_utils
from mobile_cv.common.misc.iter_utils import recursive_iterate
TORCH_VERSION: Tuple[int, ...] = tuple(int(x) for x in torch.__version__.split(".")[:2])
if TORCH_VERSION > (1, 10):
from torch.ao.quantization.quantize import convert
from torch.ao.quantization.quantize_fx import convert_fx, prepare_fx, prepare_qat_fx
else:
from torch.quantization.quantize import convert
from torch.quantization.quantize_fx import convert_fx, prepare_fx, prepare_qat_fx
logger = logging.getLogger(__name__)
_CONVERT_FX_CALLBACK_ATTRIBUTE = "_convert_fx_callback"
_STATE_DICT_KEY = "state_dict"
_OLD_STATE_DICT_KEY = "model"
_OLD_EMA_KEY = "ema_state"
def _is_observer_key(state_dict_key):
observer_keys = ["activation_post_process", "weight_fake_quant"]
return any(x in state_dict_key for x in observer_keys)
# TODO: replace QATCheckpointer with central D2GoCheckpointer which supports customize
# state_dict re-mapping (which includes QAT re-mapping).
class QATCheckpointer(DetectionCheckpointer):
"""
Extend the Checkpointer to support loading (QAT / non-QAT) weight into
(QAT / non-QAT) model.
"""
def __init__(
self,
model,
save_dir="",
*,
load_ckpt_to_gpu=False,
save_to_disk=None,
**checkpointables,
):
super().__init__(
model,
save_dir,
save_to_disk=save_to_disk,
**checkpointables,
)
self.load_ckpt_to_gpu = load_ckpt_to_gpu
@classmethod
def _is_q_state_dict(cls, state_dict):
return any(_is_observer_key(k) for k in state_dict)
    # HACK: temporarily put it here, move to central D2GoCheckpointer later on
def _load_file(self, filename):
# support loading lightning checkpointer
if filename.endswith(".ckpt"):
# assume file is from lightning; no one else seems to use the ".ckpt" extension
with PathManager.open(filename, "rb") as f:
data = self._torch_load(f)
_convert_to_d2(data)
return data
return super()._load_file(filename)
def _torch_load(self, f):
device = (
"cuda:{}".format(torch.cuda.current_device())
if self.load_ckpt_to_gpu
else "cpu"
)
return torch.load(f, map_location=torch.device(device))
def _load_model(self, checkpoint):
model_is_qat = self._is_q_state_dict(self.model.state_dict())
checkpoint_is_qat = self._is_q_state_dict(checkpoint["model"])
if model_is_qat and not checkpoint_is_qat:
logger.info("Loading QAT model with non-QAT checkpoint, ignore observers!")
mapping = getattr(self.model, "_non_qat_to_qat_state_dict_map", {})
# map the key from non-QAT model to QAT model if possible
checkpoint_state_dict = {
mapping.get(k, k): v for k, v in checkpoint["model"].items()
}
checkpoint["model"] = checkpoint_state_dict
incompatible = super()._load_model(checkpoint)
# suppress the missing observer keys warning
# NOTE: for some reason incompatible.missing_keys can have duplicated keys,
# here we replace the entire list rather than calling .remove()
missing_non_qat_keys = [
k for k in incompatible.missing_keys if not _is_observer_key(k)
]
incompatible.missing_keys[:] = missing_non_qat_keys
return incompatible
elif not model_is_qat and checkpoint_is_qat:
raise NotImplementedError()
elif model_is_qat and checkpoint_is_qat:
# TODO: maybe suppress shape mismatch
            # For models trained with QAT and per-channel quant, the initial size of the
# buffers in fake_quant and observer modules does not reflect the size in
# state_dict, which is updated only when convert is called.
return super()._load_model(checkpoint)
else:
return super()._load_model(checkpoint)
def add_quantization_default_configs(_C):
CfgNode = type(_C)
_C.QUANTIZATION = CfgNode()
# Note: EAGER_MODE == False currently represents FX graph mode quantization
_C.QUANTIZATION.EAGER_MODE = True
# Available backends include PyTorch's natively supported backends (i.e. fbgemm and
# qnnpack), plus D2Go-defined backends such as "qnnpack@symmetric".
_C.QUANTIZATION.BACKEND = "fbgemm"
# used to enable metarch set_custom_qscheme (need to implement)
# this is a limited implementation where only str is provided to change options
_C.QUANTIZATION.CUSTOM_QSCHEME = ""
_C.QUANTIZATION.MODULES = []
# Lightning quantization callback name
_C.QUANTIZATION.NAME = ""
_C.QUANTIZATION.ACT_BITS = 8
_C.QUANTIZATION.WEIGHT_BITS = 8
# quantization-aware training
_C.QUANTIZATION.QAT = CfgNode()
_C.QUANTIZATION.QAT.ENABLED = False
# Methods for QAT training, could be "default" or "learnable"
_C.QUANTIZATION.QAT.FAKE_QUANT_METHOD = "default"
# QAT will use more GPU memory, user can change this factor to reduce the batch size
# after fake quant is enabled. Setting it to 0.5 should guarantee no memory increase
    # compared with when QAT is disabled.
_C.QUANTIZATION.QAT.BATCH_SIZE_FACTOR = 1.0
# the iteration number to start QAT, (i.e. enable fake quant). The default value of
# SOLVER.MAX_ITER is 40k and SOLVER.STEPS is (30k,), here we turn on QAT at 35k, so
# the last 5k iterations will run with QAT with decreased learning rate.
_C.QUANTIZATION.QAT.START_ITER = 35000
# the iteration number to enable observer, it's usually set to be the same as
# QUANTIZATION.QAT.START_ITER.
_C.QUANTIZATION.QAT.ENABLE_OBSERVER_ITER = 35000
# the iteration number to enable learnable observer, only used when METHOD == "learnable"
_C.QUANTIZATION.QAT.ENABLE_LEARNABLE_OBSERVER_ITER = 36000
# the iteration number to disable observer, here it's 3k after enabling the fake
# quant, 3k roughly corresponds to 7 out of 90 epochs in classification.
_C.QUANTIZATION.QAT.DISABLE_OBSERVER_ITER = 35000 + 3000
# the iteration number to freeze BN, here it's 3k after enabling the fake quant, 2k
# roughly corresponds to 5 out of 90 epochs for classification.
_C.QUANTIZATION.QAT.FREEZE_BN_ITER = 35000 + 2000
# qat hook will run observers update_stat if it exists
# after update_observer_stats_period iters
_C.QUANTIZATION.QAT.UPDATE_OBSERVER_STATS_PERIODICALLY = False
_C.QUANTIZATION.QAT.UPDATE_OBSERVER_STATS_PERIOD = 1
_C.QUANTIZATION.WEIGHT_OBSERVERS = None
_C.QUANTIZATION.ACTIVATION_OBSERVERS = None
# post-training quantization
_C.QUANTIZATION.PTQ = CfgNode()
_C.QUANTIZATION.PTQ.CALIBRATION_NUM_IMAGES = 16 # NOTE: this is actually iterations
_C.QUANTIZATION.PTQ.CALIBRATION_FORCE_ON_GPU = False
# register deprecated and renamed keys
_C.register_deprecated_key("QUANTIZATION.QAT.LOAD_PRETRAINED")
_C.register_renamed_key("QUANTIZATION.QAT.BACKEND", "QUANTIZATION.BACKEND")
_C.register_deprecated_key("QUANTIZATION.ENABLE_CUSTOM_QSCHEME")
_C.register_deprecated_key("QUANTIZATION.SILICON_QAT")
_C.register_deprecated_key("QUANTIZATION.SILICON_QAT.ENABLED")
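# Illustrative config sketch (added for documentation; the values below are examples,
# not defaults): a typical QAT setup in a D2Go YAML config toggles the keys registered
# above, e.g.:
#
#   QUANTIZATION:
#     BACKEND: "qnnpack@symmetric"
#     EAGER_MODE: False
#     QAT:
#       ENABLED: True
#       START_ITER: 35000
#       ENABLE_OBSERVER_ITER: 35000
#       DISABLE_OBSERVER_ITER: 38000
#       FREEZE_BN_ITER: 37000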
# TODO: model.to(device) might not work for detection meta-arch, this function is the
# workaround, in general, we might need a meta-arch API for this if needed.
def _cast_model_to_device(model, device):
if hasattr(
model, "_cast_model_to_device"
): # we can make this formal by removing "_"
return model._cast_model_to_device(device)
else:
        logger.warning(
            "model.to(device) doesn't guarantee moving the entire model, "
"if customization is needed, please implement _cast_model_to_device "
"for the MetaArch"
)
return model.to(device)
def add_d2_quant_mapping(mappings):
"""HACK: Add d2 specific module mapping for eager model quantization"""
import torch.ao.quantization.quantization_mappings as qm
for k, v in mappings.items():
if k not in qm.get_default_static_quant_module_mappings():
qm.DEFAULT_STATIC_QUANT_MODULE_MAPPINGS[k] = v
if k not in qm.get_default_qat_module_mappings():
qm.DEFAULT_QAT_MODULE_MAPPINGS[k] = v
# The `mock_quantization_type` decorate may not be needed anymore to unify
# detectron2.layers modules and torch.nn modules since Pytorch 1.5. See comments on D23790034.
def mock_quantization_type(quant_func):
import builtins
import functools
from unittest import mock
import detectron2.layers as d2l
type_mapping = {d2l.Linear: torch.nn.Linear}
from d2go.utils.misc import check_version
if check_version(torch, "1.7.2", warning_only=True):
add_d2_quant_mapping(type_mapping)
real_type = builtins.type
def _new_type(obj):
rtype = real_type(obj)
return type_mapping.get(rtype, rtype)
@functools.wraps(quant_func)
def wrapper(cfg, model, *args, **kwargs):
if d2l.Linear == torch.nn.Linear:
            # we do not need the mock when the type is as expected; consider
            # removing the related code
            logger.warning(
                "`detectron2.layers.Linear` is the expected type (torch.nn.Linear), "
"consider removing this code `mock_quantization_type`."
)
return quant_func(cfg, model, *args, **kwargs)
if not cfg.QUANTIZATION.EAGER_MODE:
return quant_func(cfg, model, *args, **kwargs)
# `from_float()` in `torch.nn.quantized.modules.linear.Linear` and
        # `torch.nn.qat.modules.linear` checks if the type of `mod` is torch.Linear,
# hack it to return the expected value
with mock.patch("torch.nn.quantized.modules.linear.type") as mock_type:
with mock.patch("torch.nn.qat.modules.linear.type") as mock_type2:
mock_type.side_effect = _new_type
mock_type2.side_effect = _new_type
return quant_func(cfg, model, *args, **kwargs)
return wrapper
def default_prepare_for_quant(cfg, model):
"""
Default implementation of preparing a model for quantization. This function will
    be called before training if QAT is enabled, or before calibration during PTQ if
the model is not already quantized.
NOTE:
        - This is the simplest implementation; most meta-archs need their own version.
        - For eager mode, the user should make sure the returned model has Quant/DeQuant
          stubs inserted. This can be done by wrapping the model or defining the model with
quant stubs.
- QAT/PTQ can be determined by model.training.
- Currently the input model can be changed inplace since we won't re-use the
input model.
- Currently this API doesn't include the final torch.ao.quantization.prepare(_qat)
call since existing usecases don't have further steps after it.
Args:
model (nn.Module): a non-quantized model.
cfg (CfgNode): config
Return:
nn.Module: a ready model for QAT training or PTQ calibration
"""
assert cfg.QUANTIZATION.EAGER_MODE
qconfig = set_backend_and_create_qconfig(cfg, is_train=model.training)
model = fuse_utils.fuse_model(
model,
is_qat=cfg.QUANTIZATION.QAT.ENABLED,
inplace=True,
)
model.qconfig = qconfig
# TODO(future diff): move the torch.ao.quantization.prepare(...) call
# here, to be consistent with the FX branch
logger.info("Setup the model with qconfig:\n{}".format(qconfig))
return model
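# Illustrative sketch (added for documentation; `_ToyModel` is hypothetical and `cfg` is
# assumed to be a D2Go config with the QUANTIZATION keys populated): a custom eager-mode
# `prepare_for_quant` typically also inserts Quant/DeQuant stubs around the float
# computation, on top of attaching the qconfig.
def _example_custom_prepare_for_quant(cfg):
    from torch.ao.quantization import DeQuantStub, QuantStub
    class _ToyModel(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.quant = QuantStub()
            self.conv = torch.nn.Conv2d(3, 3, 1)
            self.dequant = DeQuantStub()
        def forward(self, x):
            return self.dequant(self.conv(self.quant(x)))
        def prepare_for_quant(self, model_cfg):
            self.qconfig = set_backend_and_create_qconfig(
                model_cfg, is_train=self.training
            )
            return self
    return _ToyModel().prepare_for_quant(cfg)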
def default_custom_prepare_fx(cfg, model, is_qat, example_input=None):
"""
Similar to default_prepare_for_quant, but for FX graph mode.
Args:
example_input (Optional[Any]): optional example_input for model,
if it is not provided we'll use `model.example_input` when example_input
            is required. Note: d2go assumes we always have a single example_input
"""
assert not cfg.QUANTIZATION.EAGER_MODE
qconfig = set_backend_and_create_qconfig(cfg, is_train=is_qat)
qconfig_dict = {"": qconfig}
if example_input is None:
raise NotImplementedError(
"prepare FX requires `example_input`, user should implement this for"
" their own MetaArch."
)
prepare_fn = get_prepare_fx_fn(cfg, is_qat)
model = prepare_fn(
model,
qconfig_mapping=qconfig_dict,
example_inputs=(example_input,),
)
convert_fn = get_convert_fn(cfg, (example_input,))
return model, convert_fn
def prepare_fake_quant_model(cfg, model, is_qat, example_input=None):
"""
Centralized function to prepare fp32 model (D2Go's MetaArch) to fake quant model.
"""
# TODO: create a warning for the direct use of `torch.ao.quantization.get_default_qconfig`
# or `torch.ao.quantization.get_default_qat_qconfig` without calling D2Go's high-level
# `set_backend_and_create_qconfig` API.
if cfg.QUANTIZATION.EAGER_MODE:
if hasattr(model, "prepare_for_quant"):
model = model.prepare_for_quant(cfg)
else:
logger.info(
"Using default implementation for prepare_for_quant (eager mode)"
)
model = default_prepare_for_quant(cfg, model)
# NOTE: eager model needs to call prepare after `prepare_for_quant`
if is_qat:
torch.ao.quantization.prepare_qat(model, inplace=True)
else:
torch.ao.quantization.prepare(model, inplace=True)
else:
# FX graph mode requires the model to be symbolically traceable, swap common
# modules like SyncBN to FX-friendly version.
if not is_qat:
# NOTE: we only do this for PTQ, because we want to keep using unmodified
# model during QAT.
model = fuse_utils.swap_modules(model)
if hasattr(model, "custom_prepare_fx"):
ret = model.custom_prepare_fx(cfg, is_qat, example_input)
if not (isinstance(ret, tuple) and len(ret) == 2):
                raise ValueError(
                    "`custom_prepare_fx` is expected to return a (model, convert_callback) tuple"
)
model, convert_fx_callback = ret
else:
logger.info(
"Using default implementation for custom_prepare_fx (FX graph mode)"
)
model, convert_fx_callback = default_custom_prepare_fx(
cfg, model, is_qat, example_input
)
# HACK: store the convert_callback function as model attribute, which can be
# later accessed to convert fake quant model to quantized model. We'll find a
# better place to store this.
if hasattr(model, _CONVERT_FX_CALLBACK_ATTRIBUTE):
raise AttributeError(
f"{_CONVERT_FX_CALLBACK_ATTRIBUTE} is already set in model: {model}"
)
setattr(model, _CONVERT_FX_CALLBACK_ATTRIBUTE, convert_fx_callback)
return model
def convert_to_quantized_model(cfg, fp32_model):
"""
    Centralized function to convert fake quant model (fp32 operators) to "real"
quantized model (int8 operators).
"""
if cfg.QUANTIZATION.EAGER_MODE:
convert_fn = get_convert_fn(cfg)
int8_model = convert_fn(fp32_model, inplace=False)
else:
# FX graph mode quantization
if not hasattr(fp32_model, _CONVERT_FX_CALLBACK_ATTRIBUTE):
raise AttributeError(
f"Can't find {_CONVERT_FX_CALLBACK_ATTRIBUTE} in model, please check "
f"`prepare_fake_quant_model` has been called: {fp32_model}"
)
convert_fx_callback = getattr(fp32_model, _CONVERT_FX_CALLBACK_ATTRIBUTE)
int8_model = convert_fx_callback(fp32_model)
return int8_model
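# Illustrative end-to-end sketch (added for documentation; `cfg`, `model` and
# `data_loader` are assumed to come from a D2Go runner): the typical PTQ flow first
# builds a fake-quantized model via calibration, then converts it to int8 operators.
def _example_ptq_flow(cfg, model, data_loader):
    fake_quant_model = post_training_quantize(cfg, model, data_loader)
    int8_model = convert_to_quantized_model(cfg, fake_quant_model)
    return int8_model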
@mock_quantization_type
def post_training_quantize(cfg, model, data_loader):
"""Calibrate a model, convert it to a quantized pytorch model"""
model = copy.deepcopy(model)
model.eval()
# TODO: check why some parameters will have gradient
for param in model.parameters():
param.requires_grad = False
example_input = next(iter(data_loader))
model = prepare_fake_quant_model(cfg, model, False, example_input)
logger.info("Prepared the PTQ model for calibration:\n{}".format(model))
# Option for forcing running calibration on GPU, works only when the model supports
# casting both model and inputs.
calibration_force_on_gpu = (
cfg.QUANTIZATION.PTQ.CALIBRATION_FORCE_ON_GPU and torch.cuda.is_available()
)
if calibration_force_on_gpu:
# NOTE: model.to(device) may not handle cases such as normalizer, FPN, only
# do move to GPU if specified.
_cast_model_to_device(model, "cuda")
calibration_iters = cfg.QUANTIZATION.PTQ.CALIBRATION_NUM_IMAGES
for idx, inputs in enumerate(data_loader):
# Setting CALIBRATION_NUM_IMAGES to 0 allows skipping calibration
if idx == calibration_iters:
break
logger.info("Running calibration iter: {}/{}".format(idx, calibration_iters))
if calibration_force_on_gpu:
iters = recursive_iterate(inputs)
for x in iters:
if isinstance(x, torch.Tensor):
iters.send(x.to("cuda"))
inputs = iters.value
with torch.no_grad():
model(inputs)
else:
logger.warning("Can't run enough calibration iterations")
# cast model back to the original device
if calibration_force_on_gpu:
_cast_model_to_device(model, cfg.MODEL.DEVICE)
return model
@mock_quantization_type
def setup_qat_model(
cfg,
model_fp32,
enable_fake_quant: bool = False,
enable_observer: bool = False,
enable_learnable_observer: bool = False,
):
assert cfg.QUANTIZATION.QAT.FAKE_QUANT_METHOD in [
"default",
"learnable",
"learnable_act",
]
if hasattr(model_fp32, "_non_qat_to_qat_state_dict_map"):
raise RuntimeError("The model is already setup to be QAT, cannot setup again!")
device = model_fp32.device
# FIXME: seems that we can remove this
torch.backends.quantized.engine = smart_decode_backend(cfg.QUANTIZATION.BACKEND)
qat_method = cfg.QUANTIZATION.QAT.FAKE_QUANT_METHOD
# prepare for qat may modify the fp32 model directly so we create a copy
model_fp32_state_dict = model_fp32.state_dict()
# prepare model for qat
model = prepare_fake_quant_model(cfg, model_fp32, True)
# make sure the proper qconfig are used in the model
learnable_qat.check_for_learnable_fake_quant_ops(qat_method, model)
# Move newly added observers to the original device
model.to(device)
if not enable_fake_quant:
logger.info("Disabling fake quant ...")
model.apply(torch.ao.quantization.disable_fake_quant)
model.apply(learnable_qat.disable_lqat_fake_quant)
if not enable_observer:
logger.info("Disabling static observer ...")
model.apply(torch.ao.quantization.disable_observer)
model.apply(learnable_qat.disable_lqat_static_observer)
if not enable_learnable_observer and qat_method.startswith("learnable"):
logger.info("Disabling learnable observer ...")
model.apply(learnable_qat.disable_lqat_learnable_observer)
# qat state dict mapper
if not getattr(model, "_non_qat_to_qat_state_dict_map", None):
model = _setup_non_qat_to_qat_state_dict_map(
model_fp32_state_dict, model, is_eager_mode=cfg.QUANTIZATION.EAGER_MODE
)
# qat optimizer group for learnable qat
model = learnable_qat.setup_qat_get_optimizer_param_groups(model, qat_method)
return model
def _setup_non_qat_to_qat_state_dict_map(
model_fp32_state_dict, model_qat, is_eager_mode
):
original_state_dict_shapes = {k: v.shape for k, v in model_fp32_state_dict.items()}
# fuse_model and prepare_qat may change the state_dict of model, keep a map from the
    # original model keys to the QAT keys in order to load weights from a non-QAT model.
new_state_dict_shapes = {k: v.shape for k, v in model_qat.state_dict().items()}
new_state_dict_non_observer_keys = [
k for k in new_state_dict_shapes if not _is_observer_key(k)
]
assert len(new_state_dict_non_observer_keys) == len(original_state_dict_shapes)
if is_eager_mode:
for n_k, o_k in zip(
new_state_dict_non_observer_keys, original_state_dict_shapes
):
assert (
new_state_dict_shapes[n_k] == original_state_dict_shapes[o_k]
), f"QAT model shapes is inconsistent. FP32.{o_k}={original_state_dict_shapes[o_k]} , QAT.{n_k}={new_state_dict_shapes[n_k]}"
        # _non_qat_to_qat_state_dict_map stores the mapping from non-QAT keys to QAT keys
model_qat._non_qat_to_qat_state_dict_map = dict(
zip(original_state_dict_shapes, new_state_dict_non_observer_keys)
)
else:
# in FX, the order of where modules appear in the state_dict may change,
# so we need to match by key
def get_new_bn_key(old_bn_key):
# tries to adjust the key for conv-bn fusion, where
            # root
            #   - conv
            #   - bn
            #
            # becomes
            #
            # root
            #   - conv
            #     - bn
return old_bn_key.replace(".bn.", ".conv.bn.")
model_qat._non_qat_to_qat_state_dict_map = {}
for key in original_state_dict_shapes.keys():
if key in new_state_dict_non_observer_keys:
model_qat._non_qat_to_qat_state_dict_map[key] = key
else:
maybe_new_bn_key = get_new_bn_key(key)
if maybe_new_bn_key in new_state_dict_non_observer_keys:
model_qat._non_qat_to_qat_state_dict_map[key] = maybe_new_bn_key
return model_qat
class QATHook(HookBase):
def __init__(self, cfg, build_data_loader_func=None):
self.cfg = cfg
self.build_data_loader_func = build_data_loader_func
self._applied = {
"enable_fake_quant": False,
"enable_observer": False,
"enable_learnable_observer": False,
"disable_observer": False,
"freeze_bn_stats": False,
}
assert (
cfg.QUANTIZATION.QAT.ENABLE_OBSERVER_ITER
<= cfg.QUANTIZATION.QAT.DISABLE_OBSERVER_ITER
        ), "Can't disable observer before enabling it"
def before_step(self):
cur_iter = self.trainer.iter
model = self.trainer.model
cfg = self.cfg
if (
not self._applied["enable_fake_quant"]
and cur_iter >= cfg.QUANTIZATION.QAT.START_ITER
):
logger.info(
"[QAT] enable fake quant to start QAT, iter = {}".format(cur_iter)
)
model.apply(torch.ao.quantization.enable_fake_quant)
model.apply(learnable_qat.enable_lqat_fake_quant)
self._applied["enable_fake_quant"] = True
_reset_qat_data_loader_if_needed(
self.cfg, self.trainer, self.build_data_loader_func
)
if (
not self._applied["enable_observer"]
and cur_iter >= cfg.QUANTIZATION.QAT.ENABLE_OBSERVER_ITER
and cur_iter < cfg.QUANTIZATION.QAT.DISABLE_OBSERVER_ITER
):
logger.info("[QAT] enable static observer, iter = {}".format(cur_iter))
model.apply(torch.ao.quantization.enable_observer)
model.apply(learnable_qat.enable_lqat_static_observer)
self._applied["enable_observer"] = True
if (
not self._applied["enable_learnable_observer"]
and cur_iter >= cfg.QUANTIZATION.QAT.ENABLE_LEARNABLE_OBSERVER_ITER
):
logger.info(f"[QAT] enabling learnable observer, iter = {cur_iter}")
model.apply(learnable_qat.enable_lqat_learnable_observer)
self._applied["enable_learnable_observer"] = True
if (
not self._applied["disable_observer"]
and cur_iter >= cfg.QUANTIZATION.QAT.DISABLE_OBSERVER_ITER
):
            logger.info(
                "[QAT] disabling observer for subsequent iters, iter = {}".format(cur_iter)
)
model.apply(torch.ao.quantization.disable_observer)
model.apply(learnable_qat.disable_lqat_static_observer)
model.apply(learnable_qat.disable_lqat_learnable_observer)
self._applied["disable_observer"] = True
if (
not self._applied["freeze_bn_stats"]
and cur_iter >= cfg.QUANTIZATION.QAT.FREEZE_BN_ITER
):
            logger.info(
                "[QAT] freezing BN for subsequent iters, iter = {}".format(cur_iter)
)
model.apply(torch.nn.intrinsic.qat.freeze_bn_stats)
self._applied["freeze_bn_stats"] = True
if (
self._applied["enable_fake_quant"]
and not self._applied["disable_observer"]
and cfg.QUANTIZATION.QAT.UPDATE_OBSERVER_STATS_PERIODICALLY
and cur_iter % cfg.QUANTIZATION.QAT.UPDATE_OBSERVER_STATS_PERIOD == 0
):
logger.info(f"[QAT] updating observers, iter = {cur_iter}")
model.apply(observer_update_stat)
def _reset_qat_data_loader_if_needed(cfg, trainer, build_loader_func):
if cfg.QUANTIZATION.QAT.BATCH_SIZE_FACTOR != 1.0:
loader_cfg = cfg.clone()
loader_cfg.defrost()
num_gpus = comm.get_world_size()
old_bs = cfg.SOLVER.IMS_PER_BATCH // num_gpus
new_bs = math.ceil(old_bs * cfg.QUANTIZATION.QAT.BATCH_SIZE_FACTOR)
loader_cfg.SOLVER.IMS_PER_BATCH = new_bs * num_gpus
loader_cfg.freeze()
logger.info(
"[QAT] Rebuild data loader with batch size per GPU: {} -> {}".format(
old_bs, new_bs
)
)
assert isinstance(
trainer, SimpleTrainer
), "Trainer needs to be a subclass of SimpleTrainer to support resetting the dataloader"
trainer.reset_data_loader(lambda: build_loader_func(loader_cfg))
def forward_custom_prepare_fx(root, sub_module_name, orig_ret):
"""Helper function to forward return of `custom_prepare_fx` from sub module"""
new_sub_module, callback = orig_ret
setattr(root, sub_module_name, new_sub_module)
def new_callback(m):
setattr(m, sub_module_name, callback(getattr(m, sub_module_name)))
return m
return root, new_callback
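# Illustrative sketch (added for documentation; `self.backbone` is a hypothetical
# sub-module that implements `custom_prepare_fx`): a parent module can forward the
# prepared sub-module and its convert callback like this:
#
#   def custom_prepare_fx(self, cfg, is_qat, example_input=None):
#       orig_ret = self.backbone.custom_prepare_fx(cfg, is_qat, example_input)
#       return forward_custom_prepare_fx(self, "backbone", orig_ret)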
def _convert_to_d2(lightning_checkpoint: Dict[str, Any]) -> None:
prefix = "model" # based on DefaultTask.model.
    # NOTE: strip the "model." prefix explicitly (str.lstrip strips characters, not a prefix)
    old_keys = [
        x[len(prefix) + 1 :] if x.startswith(prefix + ".") else x
        for x in lightning_checkpoint[_STATE_DICT_KEY]
    ]
for key in old_keys:
if f"{prefix}.{key}" in lightning_checkpoint[_STATE_DICT_KEY]:
lightning_checkpoint[_STATE_DICT_KEY][key] = lightning_checkpoint[
_STATE_DICT_KEY
][f"{prefix}.{key}"]
del lightning_checkpoint[_STATE_DICT_KEY][f"{prefix}.{key}"]
for old, new in zip(
[_STATE_DICT_KEY, "global_step"], [_OLD_STATE_DICT_KEY, "iteration"]
):
lightning_checkpoint[new] = lightning_checkpoint[old]
del lightning_checkpoint[old]
for old, new in zip(
["optimizer_states", "lr_schedulers"], ["optimizer", "scheduler"]
):
if old not in lightning_checkpoint:
continue
lightning_checkpoint[new] = [lightning_checkpoint[old]]
del lightning_checkpoint[old]
    for key in [
        "epoch",
        "pytorch-lightning_version",
"callbacks",
"hparams_name",
"hyper_parameters",
]:
if key in lightning_checkpoint:
del lightning_checkpoint[key]
| d2go-main | d2go/quantization/modeling.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import ast
import builtins
import contextlib
import glob
import hashlib
import logging
import os
import tempfile
import time
import traceback
from collections import defaultdict
from dataclasses import asdict, dataclass
from enum import Enum
from typing import Any, Callable, Dict, List, Optional, Tuple
import pkg_resources
import yaml
from mobile_cv.common.misc.py import dynamic_import, MoreMagicMock
from mobile_cv.common.misc.registry import (
CLASS_OR_FUNCTION_TYPES,
LazyRegisterable,
Registry,
)
logger = logging.getLogger(__name__)
orig_import = builtins.__import__
orig_open = builtins.open
orig__register = Registry._register
_INSIDE_BOOTSTRAP = False
_IS_BOOTSTRAPPED = False
_BOOTSTRAP_PACKAGE = "d2go.registry._bootstrap"
_BOOTSTRAP_CACHE_FILENAME = "registry_bootstrap.v1.yaml"
def _log(lvl: int, msg: str):
_VERBOSE_LEVEL = 0
if _VERBOSE_LEVEL >= lvl:
print(msg)
# Simple version copied from fvcore/iopath
def _get_cache_dir() -> str:
cache_dir = os.path.expanduser("~/.torch/d2go_cache")
try:
os.makedirs(cache_dir, exist_ok=True)
assert os.access(cache_dir, os.R_OK | os.W_OK | os.X_OK)
except (OSError, AssertionError):
tmp_dir = os.path.join(tempfile.gettempdir(), "d2go_cache")
logger.warning(f"{cache_dir} is not accessible! Using {tmp_dir} instead!")
os.makedirs(tmp_dir, exist_ok=True)
cache_dir = tmp_dir
return cache_dir
class _catchtime:
def __enter__(self):
self.time = time.perf_counter()
return self
def __exit__(self, type, value, traceback):
self.time = time.perf_counter() - self.time
def _match(name, module_full_name, match_submodule=False):
if name == module_full_name:
return True
if match_submodule:
if name.startswith(module_full_name + "."):
return True
return False
def _match_any(name, module_full_names, match_submodule=False):
return any(
_match(name, module_full_name, match_submodule=match_submodule)
for module_full_name in module_full_names
)
def _import_mock(name, globals=None, locals=None, fromlist=(), level=0):
use_orig_import = False
# enable some first-party packages
if _match_any(
name,
[
# allow using pdb during patch
"pdb",
"readline",
"linecache",
"reprlib",
"io",
# allow using builtins.__import__
"builtins",
],
):
use_orig_import = True
    # enable some known third-party packages, these packages might have been imported
if _match_any(
name,
[
# "torch",
# "numpy",
# "mobile_cv.arch.fbnet_v2.modeldef_utils",
],
):
use_orig_import = True
# enable modules under d2go.registry
if _match(name, "d2go.registry", match_submodule=True):
use_orig_import = True
if use_orig_import:
# import as normal
return orig_import(name, globals, locals, fromlist=fromlist, level=level)
else:
# return a Mock instead of making a real import
_log(2, f"mock import: {name}; fromlist={fromlist}; level={level}")
m = MoreMagicMock()
return m
def _open_mock(*args, **kwargs):
return MoreMagicMock()
def _register_mock(self, name: Optional[str], obj: Any) -> None:
"""Convert `obj` to LazyRegisterable"""
# Instead of register the (possibly mocked) object which is created under the
# "fake" package _BOOTSTRAP_PACKAGE, register a lazy-object (i.e. a string) pointing
# to its original (possibly un-imported) module.
def _resolve_real_module(module_in_bootstrap_package):
assert module_in_bootstrap_package.startswith(_BOOTSTRAP_PACKAGE + ".")
orig_module = module_in_bootstrap_package[len(_BOOTSTRAP_PACKAGE + ".") :]
return orig_module
if isinstance(obj, MoreMagicMock):
assert obj.mocked_obj_info is not None, obj
if name is None:
name = obj.mocked_obj_info["__name__"]
obj = LazyRegisterable(
module=_resolve_real_module(obj.mocked_obj_info["__module__"]),
name=obj.mocked_obj_info["__qualname__"],
)
elif isinstance(obj, LazyRegisterable):
pass
else:
assert isinstance(obj, CLASS_OR_FUNCTION_TYPES), obj
if name is None:
name = obj.__name__
obj = LazyRegisterable(
module=_resolve_real_module(obj.__module__), name=obj.__qualname__
)
assert isinstance(obj, LazyRegisterable)
# During bootstrap, it's possible that the object is already registered
    # (as non-lazy), because a library may be imported first and then bootstrapped. Simply
# skip the lazy-registration.
if name in self and not isinstance(self[name], LazyRegisterable):
if self[name].__module__ == obj.module and (
obj.name is None or self[name].__name__ == obj.name
):
_log(2, f"{obj} has already registered as {self[name]}, skip...")
return
orig__register(self, name, obj)
@contextlib.contextmanager
def _bootstrap_patch():
global _INSIDE_BOOTSTRAP
builtins.__import__ = _import_mock
builtins.open = _open_mock
Registry._register = _register_mock
_INSIDE_BOOTSTRAP = True
try:
yield
finally:
builtins.__import__ = orig_import
builtins.open = orig_open
Registry._register = orig__register
_INSIDE_BOOTSTRAP = False
def _get_registered_names() -> Dict[str, List[str]]:
"""Return the currently registered names for each registry"""
# NOTE: currently only support D2Go's builtin registry module, which can be extended
# in future.
import d2go.registry.builtin
modules = [
d2go.registry.builtin,
]
registered = {}
for module in modules:
registered_in_module = {
f"{module.__name__}.{name}": obj.get_names()
for name, obj in module.__dict__.items()
if isinstance(obj, Registry)
}
registered.update(registered_in_module)
return registered
class BootstrapStatus(Enum):
CACHED = 0
FULLY_IMPORTED = 1
PARTIALLY_IMPORTED = 2
FAILED = 3
@dataclass
class CachedResult:
sha1: str
registered: Dict[str, str]
status: str # string representation of BootstrapStatus
def _bootstrap_file(
rel_path: str,
catch_exception: bool,
cached_result: Optional[CachedResult] = None,
) -> Tuple[CachedResult, BootstrapStatus]:
# convert relative path to full module name
# eg. ".../d2go/a/b/c.py" -> "d2go.a.b.c"
# eg. ".../d2go/a/b/__init__.py" -> "d2go.a.b"
package_root = os.path.dirname(pkg_resources.resource_filename("d2go", ""))
filename = os.path.join(package_root, rel_path)
assert rel_path.endswith(".py")
module = rel_path[: -len(".py")]
if module.endswith("/__init__"):
module = module[: -len("/__init__")]
module = module.replace("/", ".")
exec_globals = {
"__file__": filename,
# execute in a "fake" package to minimize potential side effect
"__name__": "{}.{}".format(_BOOTSTRAP_PACKAGE, module),
}
with _catchtime() as t:
with open(filename) as f:
content = f.read()
file_hash = hashlib.sha1(content.encode("utf-8")).hexdigest()
if cached_result is not None and file_hash == cached_result.sha1:
_log(
2,
f"Hash {file_hash} matches, lazy registering cached registerables ...",
)
registerables = cached_result.registered
for registry_module_dot_name, names_to_register in registerables.items():
registry = dynamic_import(registry_module_dot_name)
for name in names_to_register:
# we only store the registered name in the cache, here we know the
# module of bootstrapped file, which should be sufficient.
registry.register(name, LazyRegisterable(module=module))
return cached_result, BootstrapStatus.CACHED
tree = ast.parse(content)
# HACK: convert multiple inheritance to single inheritance, this is needed
# because current implementation of MoreMagicMock can't handle this well.
# eg. `class MyClass(MyMixin, nn.Module)` -> `class MyClass(MyMixin)`
def _truncate_multiple_inheritance(ast_tree):
for stmt in ast_tree.body:
if isinstance(stmt, ast.ClassDef):
if len(stmt.bases) > 1:
stmt.bases = stmt.bases[:1]
stmt.keywords.clear()
_truncate_multiple_inheritance(stmt)
_truncate_multiple_inheritance(tree)
_log(2, f"Parsing AST takes {t.time} sec")
prev_registered = _get_registered_names()
with _catchtime() as t:
try:
with _bootstrap_patch():
exec(compile(tree, filename, "exec"), exec_globals) # noqa
status = BootstrapStatus.FULLY_IMPORTED
except _BootstrapBreakException:
status = BootstrapStatus.PARTIALLY_IMPORTED
except Exception as e:
if catch_exception:
_log(
1,
"Encountered the following error during bootstrap:"
+ "".join(traceback.format_exception(type(e), e, e.__traceback__)),
)
else:
raise e
status = BootstrapStatus.FAILED
_log(2, f"Execute file takes {t.time} sec")
# compare and get the newly registered
cur_registered = _get_registered_names()
assert set(cur_registered.keys()) == set(prev_registered.keys())
newly_registered = {
k: sorted(set(cur_registered[k]) - set(prev_registered[k]))
for k in sorted(cur_registered.keys())
}
newly_registered = {k: v for k, v in newly_registered.items() if len(v) > 0}
result = CachedResult(
sha1=file_hash,
registered=newly_registered,
status=status.name,
)
return result, status
class _BootstrapBreakException(Exception):
pass
def break_bootstrap():
"""
In case the file can't be perfectly executed by `_bootstrap_file`, users can call
this function to break the process. Because the remaining content in the file will
    be skipped, avoid using registration statements after calling this function.
"""
if _INSIDE_BOOTSTRAP:
        # raise a special exception which will be caught later
raise _BootstrapBreakException()
# non-op outside of bootstrap
return
def lazy_on_bootstrap(f: Callable) -> Callable:
"""
A decorator to mark a function as "lazy" during bootstrap, such that the decorated
function will skip the execution and immediately return a MagicMock object during
the bootstrap (the decorator is a non-op outside of bootstrap). This can be used to
hide un-executable code (usually related to import-time computation) during the
bootstrap.
For registration related import-time computation, please consider using the
`LazyRegisterable` since it will also save time for the normal import.
"""
def wrapped(*args, **kwargs):
if _INSIDE_BOOTSTRAP:
return MoreMagicMock()
else:
return f(*args, **kwargs)
return wrapped
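# Illustrative usage sketch (added for documentation; `load_large_lookup_table` is
# hypothetical): decorate import-time computation so that it is skipped (mocked) while
# registries are being bootstrapped, but still runs during a normal import.
#
#   @lazy_on_bootstrap
#   def _expensive_default():
#       return load_large_lookup_table()  # runs only outside of bootstrap
#
#   DEFAULT_TABLE = _expensive_default()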
def _load_cached_results(filename: str) -> Dict[str, CachedResult]:
with open(filename) as f:
content = f.read()
loaded = yaml.safe_load(content)
assert isinstance(loaded, dict), f"Wrong format: {content}"
results = {
filename: CachedResult(**result_dic) for filename, result_dic in loaded.items()
}
return results
def _dump_cached_results(cached_results: Dict[str, CachedResult], filename: str):
results_dict = {
filename: asdict(result_dic) for filename, result_dic in cached_results.items()
}
dumped = yaml.safe_dump(results_dict)
with open(filename, "w") as f:
f.write(dumped)
def bootstrap_registries(enable_cache: bool = True, catch_exception: bool = True):
"""
Bootstrap all registries so that all objects are effectively registered.
This function will "import" all the files from certain locations (eg. d2go package)
and look for a set of known registries (eg. d2go's builtin registries). The "import"
should not have any side effect, which is achieved by mocking builtin.__import__.
"""
global _IS_BOOTSTRAPPED
if _IS_BOOTSTRAPPED:
logger.warning("Registries are already bootstrapped, skipped!")
return
if _INSIDE_BOOTSTRAP:
_log(1, "calling bootstrap_registries() inside bootstrap process, skip ...")
return
start = time.perf_counter()
# load cached bootstrap results if exist
cached_bootstrap_results: Dict[str, CachedResult] = {}
if enable_cache:
filename = os.path.join(_get_cache_dir(), _BOOTSTRAP_CACHE_FILENAME)
if os.path.isfile(filename):
logger.info(f"Loading bootstrap cache at {filename} ...")
cached_bootstrap_results = _load_cached_results(filename)
else:
logger.info(
f"Can't find the bootstrap cache at {filename}, start from scratch"
)
# locate all the files under d2go package
# NOTE: we may extend to support user-defined locations if necessary
d2go_root = pkg_resources.resource_filename("d2go", "")
logger.info(f"Start bootstrapping for d2go_root: {d2go_root} ...")
all_files = glob.glob(f"{d2go_root}/**/*.py", recursive=True)
all_files = [os.path.relpath(x, os.path.dirname(d2go_root)) for x in all_files]
new_bootstrap_results: Dict[str, CachedResult] = {}
files_per_status = defaultdict(list)
time_per_file = {}
for filename in all_files:
_log(1, f"bootstrap for file: {filename}")
cached_result = cached_bootstrap_results.get(filename, None)
with _catchtime() as t:
result, status = _bootstrap_file(filename, catch_exception, cached_result)
new_bootstrap_results[filename] = result
files_per_status[status].append(filename)
time_per_file[filename] = t.time
end = time.perf_counter()
duration = end - start
status_breakdown = ", ".join(
[f"{len(files_per_status[status])} {status.name}" for status in BootstrapStatus]
)
logger.info(
f"Finished bootstrapping for {len(all_files)} files ({status_breakdown})"
f" in {duration:.2f} seconds."
)
exception_files = [
filename
for filename, result in new_bootstrap_results.items()
if result.status == BootstrapStatus.FAILED.name
]
if len(exception_files) > 0:
logger.warning(
"Found exception for the following {} files (either during this bootstrap"
" run or from previous cached result), registration inside those files"
" might not work!\n{}".format(
len(exception_files),
"\n".join(exception_files),
)
)
# Log slowest Top-N files
TOP_N = 100
_log(2, f"Top-{TOP_N} slowest files during bootstrap:")
all_time = [(os.path.relpath(k, d2go_root), v) for k, v in time_per_file.items()]
for x in sorted(all_time, key=lambda x: x[1])[-TOP_N:]:
_log(2, x)
if enable_cache:
filename = os.path.join(_get_cache_dir(), _BOOTSTRAP_CACHE_FILENAME)
logger.info(f"Writing updated bootstrap results to {filename} ...")
_dump_cached_results(new_bootstrap_results, filename)
_IS_BOOTSTRAPPED = True
| d2go-main | d2go/registry/bootstrap.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
| d2go-main | d2go/registry/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from mobile_cv.common.misc.registry import Registry
"""
This file contains all D2Go's builtin registries with global scope.
- These registries can be treated as "static". There'll be a bootstrap process that happens
  at the beginning of the program to make it work as if the registrations happened
  at compile time (like C++). In other words, the objects are guaranteed to be
  registered to those builtin registries without the user importing their code.
- Since the namespace is global, the registered name has to be unique across all projects.
"""
DEMO_REGISTRY = Registry("DEMO")
# Registry for config updater
CONFIG_UPDATER_REGISTRY = Registry("CONFIG_UPDATER")
# Registry for meta-arch, registered nn.Module should follow D2Go's meta-arch API
META_ARCH_REGISTRY = Registry("META_ARCH")
# Modeling hook registry
MODELING_HOOK_REGISTRY = Registry("MODELING_HOOK")
MODELING_HOOK_REGISTRY.__doc__ = """
Registry for modeling hook.
The registered object will be called with `obj(cfg)`
and expected to return a `ModelingHook` object.
"""
# Distillation algorithms
DISTILLATION_ALGORITHM_REGISTRY = Registry("DISTILLATION_ALGORITHM")
# Distillation helper to allow user customization
DISTILLATION_HELPER_REGISTRY = Registry("DISTILLATION_HELPER")
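# Illustrative usage sketch (added for documentation; `MyMetaArch` is hypothetical):
# objects are usually registered at definition time via the decorator form, and the
# bootstrap process makes the name visible without importing the defining file.
#
#   from d2go.registry.builtin import META_ARCH_REGISTRY
#
#   @META_ARCH_REGISTRY.register()
#   class MyMetaArch(torch.nn.Module):
#       ...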
| d2go-main | d2go/registry/builtin.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from d2go.evaluation.prediction_count_evaluation import PredictionCountEvaluator
__all__ = [
"PredictionCountEvaluator",
]
# Populating registreis
# @fb-only: from d2go.evaluation import fb as _fb # noqa
| d2go-main | d2go/evaluation/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from typing import Dict, TypeVar, Union
T = TypeVar("T")
# "accuracy" in D2Go is defined by a 4-level dictionary in the order of:
# model_tag -> dataset -> task -> metrics
AccuracyDict = Dict[str, Dict[str, Dict[str, Dict[str, T]]]]
# "metric" in D2Go is a nested dictionary, which may have arbitrary levels.
MetricsDict = Union[Dict[str, "MetricsDict"], T]
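# Illustrative example (added for documentation; the names below are hypothetical): an
# AccuracyDict[float] is keyed as model_tag -> dataset -> task -> metric name, e.g.
#
#   accuracy: AccuracyDict[float] = {
#       "model_final": {"coco_2017_val": {"bbox": {"AP50": 0.65}}}
#   }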
| d2go-main | d2go/evaluation/api.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import heapq
import itertools
import logging
from contextlib import contextmanager
from detectron2.data import MetadataCatalog
from detectron2.evaluation import DatasetEvaluator, SemSegEvaluator
from detectron2.utils.comm import all_gather, synchronize
logger = logging.getLogger(__name__)
class MultiSemSegEvaluator(DatasetEvaluator):
"""
Evaluate multiple results for the same target. SemSegEvaluator requires the
outputs of model to be like:
[
{"sem_seg": Tensor},
]
    This evaluator allows evaluating multiple predictions; it may take outputs like:
[
{
"prediction_1": {"sem_seg": Tensor},
"prediction_2": {"sem_seg": Tensor},
}
]
"""
_DUMMY_KEY_PREFIX = "dummy_eval"
def __init__(self, dataset_name, *args, distributed, output_dir=None, **kwargs):
self._distributed = distributed
self._output_dir = output_dir
self.evaluators = {}
self.dataset_name = dataset_name
self.init_args = args
self.init_kwargs = kwargs
def _get_evaluator(self, key, superclass_name=None):
if key in self.evaluators:
return self.evaluators[key]
def create_evaluator_and_reset(dataset_name):
logger.info(
"Create an instance of SemSegEvaluator for {} on dataset {} ...".format(
key, dataset_name
)
)
evaluator = SemSegEvaluator(
dataset_name,
*self.init_args,
**self.init_kwargs,
distributed=self._distributed,
output_dir=self._output_dir,
)
evaluator.reset()
return evaluator
if superclass_name is None:
self.evaluators[key] = create_evaluator_and_reset(self.dataset_name)
else:
# NOTE: create temporary single-super-class dataset and use standard
# evaluator for the dataset
metadata = MetadataCatalog.get(self.dataset_name)
tmp_dataset_name = "__AUTOGEN__{}@{}".format(
self.dataset_name, superclass_name
)
from d2go.data.fb.semantic_seg import register_sem_seg
if tmp_dataset_name not in MetadataCatalog:
if superclass_name in metadata.pseudo_gt_classes:
mask_dir = metadata.pseudo_gt_mask_dir
else:
mask_dir = metadata.mask_dir
register_sem_seg(
tmp_dataset_name,
metadata=metadata.mcs_metadata[superclass_name],
image_root=metadata.image_root,
sem_seg_root=metadata.sem_seg_root,
instances_json=metadata.json_file,
mask_dir=mask_dir.format(superclass_name),
)
self.evaluators[key] = create_evaluator_and_reset(tmp_dataset_name)
return self.evaluators[key]
def reset(self):
for evaluator in self.evaluators.values():
evaluator.reset()
def process(self, inputs, outputs):
if "sem_seg" in outputs[0].keys():
# normal eval is compatible with SemSegEvaluator
self._get_evaluator("sem_seg").process(inputs, outputs)
else:
# only the file_name of inputs is needed for SemSegEvaluator
inputs_ = [{"file_name": inp["file_name"]} for inp in inputs]
for frame_name in outputs[0].keys():
if isinstance(outputs[0]["detect"]["sem_seg"], dict): # multi-class
for superclass_name in outputs[0]["detect"]["sem_seg"]:
outputs_ = []
for outp in outputs:
x = outp[frame_name]
x = {"sem_seg": x["sem_seg"][superclass_name]}
outputs_.append(x)
self._get_evaluator(
"sem_seg-{}-{}".format(frame_name, superclass_name),
superclass_name=superclass_name,
).process(inputs_, outputs_)
else:
# convert the output to SemSegEvaluator's format
outputs_ = [outp[frame_name] for outp in outputs]
self._get_evaluator("sem_seg-{}".format(frame_name)).process(
inputs_, outputs_
)
def evaluate(self):
results = {}
        # The evaluation will sometimes get stuck if the following code is not used.
        # `SemSegEvaluator` does synchronization between processes when computing
        # the metrics. In some cases the number of self.evaluators will not be the
        # same between processes and the code will get stuck in synchronization.
        # For example, when evaluating 10 images on 8 GPUs, only 5 GPUs
        # will be used for evaluation, each with 2 images; the remaining 3 GPUs will have
        # zero self.evaluators as they are constructed on-the-fly when calling
        # self.process().
# We create additional evaluators so that all processes have the same size
# of evaluators so that the synchronization will not get stuck.
evaluator_size = len(self.evaluators)
synchronize()
evaluator_size_list = all_gather(evaluator_size)
max_evaluator_size = max(evaluator_size_list)
if evaluator_size < max_evaluator_size:
# create additional evaluators so that all processes have the same
# size of evaluators
metadata = MetadataCatalog.get(self.dataset_name)
mcs_metadata = metadata.get("mcs_metadata")
for idx in range(max_evaluator_size - evaluator_size):
dummy_key = f"{self._DUMMY_KEY_PREFIX}_{idx}"
assert dummy_key not in self.evaluators
if mcs_metadata:
for k in mcs_metadata:
self._get_evaluator(dummy_key, superclass_name=k).reset()
else:
self._get_evaluator(dummy_key).reset()
for name, evaluator in self.evaluators.items():
result = evaluator.evaluate()
# NOTE: .evaluate() returns None for non-main process
if result is not None:
results[name] = result["sem_seg"]
return results
class MultiSemSegVidEvaluator(MultiSemSegEvaluator):
"""
Evaluate semantic segmentation results for video clips. MultiSemSegVidEvaluator
requires the outputs of model to be like:
[
{"file_names": Tensor},
]
"""
def process(self, inputs, outputs):
assert "file_names" in inputs[0]
inputs_ = []
for batch_id in range(len(inputs)):
for frame_i in range(len(inputs[batch_id]["file_names"])):
inputs_.append({"file_name": inputs[batch_id]["file_names"][frame_i]})
for name in outputs[0].keys():
# convert the output to SemSegEvaluator's format
outputs_ = [outp[name] for outp in outputs]
self.evaluators["sem_seg_{}".format(name)].process(inputs_, outputs_)
@contextmanager
def all_logging_disabled(highest_level=logging.CRITICAL):
"""
A context manager that will prevent any logging messages
triggered during the body from being processed.
:param highest_level: the maximum logging level in use.
This would only need to be changed if a custom level greater than CRITICAL
is defined.
"""
# two kind-of hacks here:
# * can't get the highest logging level in effect => delegate to the user
# * can't get the current module-level override => use an undocumented
# (but non-private!) interface
previous_level = logging.root.manager.disable
logging.disable(highest_level)
try:
yield
finally:
logging.disable(previous_level)
class PerImageEvaluator(object):
def __init__(
self,
evaluator,
callback,
distributed=True,
playback_criterion=None,
playback_limit=0,
):
self._evaluator = evaluator
self._evaluator._distributed = False
self._evaluator._output_dir = None
self._distributed = distributed
self.callback = callback
self.results_per_image = []
# record the N most interesting results for playback
self.playback_heap = []
self.playback_criterion = playback_criterion
self.playback_limit = playback_limit
def reset(self):
self._evaluator.reset()
def process(self, inputs, outputs):
self._evaluator.process(inputs, outputs)
assert len(inputs) == 1
with all_logging_disabled():
result = self._evaluator.evaluate()
self.results_per_image.append((inputs[0], result))
if self.playback_criterion:
score = self.playback_criterion(result)
heapq.heappush(self.playback_heap, (score, inputs[0], outputs[0], result))
if len(self.playback_heap) > self.playback_limit:
heapq.heappop(self.playback_heap)
self._evaluator.reset()
def evaluate(self):
if self._distributed:
synchronize()
results_per_image = all_gather(self.results_per_image)
self.results_per_image = list(itertools.chain(*results_per_image))
playback_heap = all_gather(self.playback_heap)
playback_heap = list(itertools.chain(*playback_heap))
            # each GPU has its local N minimums; sort and take the global minimums
playback_heap = sorted(playback_heap, key=lambda x: x[0])
self.playback_heap = playback_heap[: self.playback_limit]
self.callback(self)
return {}
| d2go-main | d2go/evaluation/sem_seg_evaluation.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import itertools
import logging
from collections import OrderedDict
import detectron2.utils.comm as comm
import numpy as np
from detectron2.evaluation import DatasetEvaluator
logger = logging.getLogger(__name__)
class PredictionCountEvaluator(DatasetEvaluator):
"""
Custom Detectron2 evaluator class to simply count the number of predictions
e.g. on a dataset of hard negatives where there are no annotations, and
summarize results into interpretable metrics.
See class pattern from detectron2.evaluation.evaluator.py, especially
:func:`inference_on_dataset` to see how this class will be called.
"""
def __init__(self, distributed: bool = True):
self._distributed = distributed
self.prediction_counts = []
self.confidence_scores = []
def reset(self):
self.prediction_counts = []
self.confidence_scores = []
def process(self, inputs, outputs):
"""
Params:
input: the input that's used to call the model.
            output: the return value of `model(input)`
"""
# outputs format:
# [{
# "instances": Instances(
# num_instances=88,
# fields=[scores = tensor([list of len num_instances])]
# ), ...
# },
# ... other dicts
# ]
for output_dict in outputs:
instances = output_dict["instances"]
self.prediction_counts.append(len(instances))
self.confidence_scores.extend(instances.get("scores").tolist())
def evaluate(self):
"""
Returns:
In detectron2.tools.train_net.py, following format expected:
dict:
* key: the name of the task (e.g., bbox)
* value: a dict of {metric name: score}, e.g.: {"AP50": 80}
"""
if self._distributed:
comm.synchronize()
prediction_counts = comm.gather(self.prediction_counts, dst=0)
prediction_counts = list(itertools.chain(*prediction_counts))
confidence_scores = comm.gather(self.confidence_scores, dst=0)
confidence_scores = list(itertools.chain(*confidence_scores))
if not comm.is_main_process():
return {}
else:
prediction_counts = self.prediction_counts
confidence_scores = self.confidence_scores
mpi = np.mean(prediction_counts)
mcp = np.mean(confidence_scores)
output_metrics = OrderedDict(
{
"false_positives": {
"predictions_per_image": mpi,
"confidence_per_prediction": mcp,
}
}
)
logger.info(f"mean predictions per image: {mpi}")
logger.info(f"mean confidence per prediction: {mcp}")
return output_metrics
| d2go-main | d2go/evaluation/prediction_count_evaluation.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import logging
import os
from collections import abc
from typing import Any, Iterable, List, Union
import torch
from detectron2.evaluation import (
DatasetEvaluator,
DatasetEvaluators,
inference_on_dataset as inference_on_dataset_d2,
)
from detectron2.utils import comm
from detectron2.utils.file_io import PathManager
logger = logging.getLogger(__name__)
def DatasetEvaluators_has_finished_process(self):
ret = True
for x in self._evaluators:
if hasattr(x, "has_finished_process"):
ret &= x.has_finished_process()
else:
ret &= False
return ret
# patch evaluators defined in d2
DatasetEvaluators.has_finished_process = DatasetEvaluators_has_finished_process
def inference_on_dataset(
model: torch.nn.Module,
data_loader: Iterable,
evaluator: Union[DatasetEvaluator, List[DatasetEvaluator], None],
):
"""
    A drop-in replacement for d2's inference_on_dataset to run inference on datasets.
    It supports customization for checkpointing via:
    * has_finished_process(self) -> bool: returns True if `self.process()` can be skipped
"""
if evaluator is None:
return inference_on_dataset_d2(model, data_loader, evaluator)
if isinstance(evaluator, abc.MutableSequence):
evaluator = DatasetEvaluators(evaluator)
if not (
hasattr(evaluator, "has_finished_process") and evaluator.has_finished_process()
):
return inference_on_dataset_d2(model, data_loader, evaluator)
evaluator.reset()
results = evaluator.evaluate()
if results is None:
results = {}
return results
class ResultCache(object):
    def __init__(self, cache_dir: str):
        """A utility class to handle saving/loading cached data across processes"""
self.cache_str = cache_dir
@property
def cache_file(self):
if self.cache_str is None:
return None
return os.path.join(self.cache_str, f"_result_cache_.{comm.get_rank()}.pkl")
def has_cache(self):
return PathManager.isfile(self.cache_file)
def load(self, gather: bool = False):
"""
Load cache results.
        gather (bool): gather cache results across ranks into a list
"""
if self.cache_file is None or not PathManager.exists(self.cache_file):
ret = None
else:
with PathManager.open(self.cache_file, "rb") as fp:
ret = torch.load(fp)
logger.info(f"Loaded from checkpoint {self.cache_file}")
if gather:
ret = comm.all_gather(ret)
return ret
def save(self, data: Any):
if self.cache_file is None:
return
PathManager.mkdirs(os.path.dirname(self.cache_file))
with PathManager.open(self.cache_file, "wb") as fp:
torch.save(data, fp)
logger.info(f"Saved checkpoint to {self.cache_file}")
| d2go-main | d2go/evaluation/evaluator.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
API for exporting a pytorch model to a predictor. The predictor contains model(s) in
deployable format and predefined functions as glue code. The exported predictor should
generate the same output as the original pytorch model. (See predictor/api.py for details
of the predictor.)
This API defines customizable methods for the pytorch model:
    prepare_for_export (required by the default export_predictor): returns a
        PredictorExportConfig which describes how to export the predictor.
NOTE:
    1: There's a difference between predictor type and model type. Model type
        refers to a predefined deployable format such as caffe2 or torchscript(_int8),
        while the predictor type can be anything that "export_predictor" can
        recognize.
    2: The standard model exporting methods are provided by the library code; they're
        meant to be modularized and can be used by a customized export_predictor as well.
"""
import json
import logging
import os
from typing import Iterable
import torch.nn as nn
from d2go.config import CfgNode
from d2go.export.api import ModelExportMethod, ModelExportMethodRegistry
from d2go.quantization.modeling import (
convert_to_quantized_model,
post_training_quantize,
)
from detectron2.utils.file_io import PathManager
from mobile_cv.arch.utils import fuse_utils
from mobile_cv.predictor.api import ModelInfo, PredictorInfo
logger = logging.getLogger(__name__)
def is_predictor_quantized(predictor_type: str) -> bool:
return "int8" in predictor_type
def convert_model(
cfg: CfgNode,
pytorch_model: nn.Module,
predictor_type: str,
data_loader: Iterable,
):
"""Converts pytorch model to pytorch model (fuse for fp32, fake quantize for int8)"""
return (
convert_quantized_model(cfg, pytorch_model, data_loader)
if is_predictor_quantized(predictor_type)
else _convert_fp_model(cfg, pytorch_model, data_loader)
)
def convert_quantized_model(
cfg: CfgNode, pytorch_model: nn.Module, data_loader: Iterable
) -> nn.Module:
if not cfg.QUANTIZATION.QAT.ENABLED:
# For PTQ, converts pytorch model to fake-quantized pytorch model. For QAT, the
# built pytorch model is already fake-quantized.
logger.info(
"The model is not quantized during training, running post"
" training quantization ..."
)
pytorch_model = post_training_quantize(cfg, pytorch_model, data_loader)
# only check bn exists in ptq as qat still has bn inside fused ops
if fuse_utils.check_bn_exist(pytorch_model):
logger.warn("Post training quantized model has bn inside fused ops")
logger.info(f"Converting quantized model {cfg.QUANTIZATION.BACKEND}...")
# convert the fake-quantized model to int8 model
pytorch_model = convert_to_quantized_model(cfg, pytorch_model)
logger.info(f"Quantized Model:\n{pytorch_model}")
return pytorch_model
def _convert_fp_model(
cfg: CfgNode, pytorch_model: nn.Module, data_loader: Iterable
) -> nn.Module:
"""Converts floating point predictor"""
pytorch_model = fuse_utils.fuse_model(pytorch_model)
logger.info(f"Fused Model:\n{pytorch_model}")
if fuse_utils.count_bn_exist(pytorch_model) > 0:
logger.warning("BN existed in pytorch model after fusing.")
return pytorch_model
def convert_and_export_predictor(
cfg,
pytorch_model,
predictor_type,
output_dir,
data_loader,
):
"""
Entry point for convert and export model. This involves two steps:
- convert: converting the given `pytorch_model` to another format, currently
mainly for quantizing the model.
- export: exporting the converted `pytorch_model` to predictor. This step
should not alter the behaviour of model.
"""
pytorch_model = convert_model(cfg, pytorch_model, predictor_type, data_loader)
return export_predictor(cfg, pytorch_model, predictor_type, output_dir, data_loader)
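# Illustrative sketch (not part of the original file): a typical call site, assuming a
# config `cfg`, a trained `pytorch_model`, and a calibration `data_loader` already exist;
# "torchscript_int8" is just one example predictor type.
def _example_convert_and_export(cfg, pytorch_model, data_loader, output_dir):
    predictor_path = convert_and_export_predictor(
        cfg, pytorch_model, "torchscript_int8", output_dir, data_loader
    )
    return predictor_path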
def export_predictor(cfg, pytorch_model, predictor_type, output_dir, data_loader):
"""
    Interface for exporting a pytorch model to a predictor of a given type. This function
    can be overridden to achieve a customized exporting procedure, e.g. using non-default
    optimization passes, composing traced models, etc.
Args:
cfg (CfgNode): the config
pytorch_model (nn.Module): a pytorch model, mostly also a meta-arch
predictor_type (str): a string which specifies the type of predictor, note that
the definition of type is interpreted by "export_predictor", the default
implementation uses the deployable model format (eg. caffe2_fp32,
torchscript_int8) as predictor type.
output_dir (str): the parent directory where the predictor will be saved
data_loader: data loader for the pytorch model
Returns:
predictor_path (str): the directory of exported predictor, a sub-directory of
"output_dir"
"""
return default_export_predictor(
cfg, pytorch_model, predictor_type, output_dir, data_loader
)
def _export_single_model(
predictor_path,
model,
input_args,
save_path,
model_export_method,
model_export_kwargs,
):
assert isinstance(model, nn.Module), model
# model_export_method either inherits ModelExportMethod or is a key in the registry
model_export_method_str = None
if isinstance(model_export_method, str):
model_export_method_str = model_export_method
model_export_method = ModelExportMethodRegistry.get(model_export_method)
assert issubclass(model_export_method, ModelExportMethod), model_export_method
logger.info(f"Using model export method: {model_export_method}")
load_kwargs = model_export_method.export(
model=model,
input_args=input_args,
save_path=save_path,
export_method=model_export_method_str,
**model_export_kwargs,
)
assert isinstance(load_kwargs, dict)
model_rel_path = os.path.relpath(save_path, predictor_path)
return ModelInfo(
path=model_rel_path,
export_method=f"{model_export_method.__module__}.{model_export_method.__qualname__}",
load_kwargs=load_kwargs,
)
def default_export_predictor(
cfg, pytorch_model, predictor_type, output_dir, data_loader
):
# The default implementation acts based on the PredictorExportConfig returned by
# calling "prepare_for_export". It'll export all sub models in standard way
# according to the "predictor_type".
assert hasattr(pytorch_model, "prepare_for_export"), pytorch_model
inputs = next(iter(data_loader))
export_config = pytorch_model.prepare_for_export(cfg, inputs, predictor_type)
model_inputs = (
export_config.data_generator(inputs)
if export_config.data_generator is not None
else (inputs,)
)
predictor_path = os.path.join(output_dir, predictor_type)
PathManager.mkdirs(predictor_path)
predictor_init_kwargs = {
"preprocess_info": export_config.preprocess_info,
"postprocess_info": export_config.postprocess_info,
"run_func_info": export_config.run_func_info,
}
if isinstance(export_config.model, dict):
models_info = {}
for name, model in export_config.model.items():
save_path = os.path.join(predictor_path, name)
model_info = _export_single_model(
predictor_path=predictor_path,
model=model,
input_args=model_inputs[name] if model_inputs is not None else None,
save_path=save_path,
model_export_method=(
predictor_type
if export_config.model_export_method is None
else export_config.model_export_method[name]
),
model_export_kwargs=(
{}
if export_config.model_export_kwargs is None
else export_config.model_export_kwargs[name]
),
)
models_info[name] = model_info
predictor_init_kwargs["models"] = models_info
else:
save_path = predictor_path # for single model exported files are put under `predictor_path` together with predictor_info.json
model_info = _export_single_model(
predictor_path=predictor_path,
model=export_config.model,
input_args=model_inputs,
save_path=save_path,
model_export_method=export_config.model_export_method or predictor_type,
model_export_kwargs=export_config.model_export_kwargs or {},
)
predictor_init_kwargs["model"] = model_info
# assemble predictor
predictor_info = PredictorInfo(**predictor_init_kwargs)
with PathManager.open(
os.path.join(predictor_path, "predictor_info.json"), "w"
) as f:
json.dump(predictor_info.to_dict(), f, indent=4)
return predictor_path
| d2go-main | d2go/export/exporter.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# Populating registreis
from d2go.export import torchscript as _torchscript # noqa
| d2go-main | d2go/export/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import json
import sys
from abc import ABC, abstractmethod
from typing import Callable, Dict, NamedTuple, Optional, Union
import torch.nn as nn
from mobile_cv.common.misc.file_utils import make_temp_directory
from mobile_cv.common.misc.registry import Registry
from mobile_cv.predictor.api import FuncInfo
from mobile_cv.predictor.builtin_functions import (
IdentityPostprocess,
IdentityPreprocess,
NaiveRunFunc,
)
if sys.version_info >= (3, 8):
from typing import final
else:
    # If the `final` decorator is not available (older Python versions), fall back to a
    # dummy implementation that does nothing.
def final(func):
return func
class PredictorExportConfig(NamedTuple):
"""
Storing information for exporting a predictor.
Args:
model (any nested iterable structure of nn.Module): the model(s) to be exported
(via tracing/onnx or scripting). This can be sub-model(s) when the predictor
consists of multiple models in deployable format, and/or pre/post processing
is excluded due to requirement of tracing or hardware incompatibility.
data_generator (Callable): a function to generate all data needed for tracing,
such that data = data_generator(x), the returned data has the same nested
structure as model. The data for each model will be treated as positional
arguments, i.e. model(*data).
model_export_kwargs (Dict): additional kwargs when exporting each sub-model, it
            follows the same nested structure as the model, and may contain information
such as scriptable.
preprocess_info (FuncInfo): info for predictor's preprocess
postprocess_info (FuncInfo): info for predictor's postprocess
run_func_info (FuncInfo): info for predictor's run_fun
"""
model: Union[nn.Module, Dict[str, nn.Module]]
data_generator: Optional[Callable] = None
model_export_method: Optional[Union[str, Dict[str, str]]] = None
model_export_kwargs: Optional[Union[Dict, Dict[str, Dict]]] = None
preprocess_info: FuncInfo = FuncInfo.gen_func_info(IdentityPreprocess, params={})
postprocess_info: FuncInfo = FuncInfo.gen_func_info(IdentityPostprocess, params={})
run_func_info: FuncInfo = FuncInfo.gen_func_info(NaiveRunFunc, params={})
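# Illustrative sketch (not part of the original file): a meta-arch's `prepare_for_export`
# typically builds a PredictorExportConfig similar to the one below; the identity data
# generator shown here is an assumption chosen only to demonstrate the expected shapes.
def _example_prepare_for_export(self, cfg, inputs, predictor_type):
    return PredictorExportConfig(
        model=self,  # or a deploy-friendly wrapper around `self`
        data_generator=lambda x: (x,),  # data is passed positionally: model(*data)
    )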
class ModelExportMethod(ABC):
"""
Base class for "model export method". Each model export method can export a pytorch
model to a certain deployable format, such as torchscript or caffe2. It consists
with `export` and `load` methods.
"""
@classmethod
@abstractmethod
def export(cls, model, input_args, save_path, export_method, **export_kwargs):
"""
Export the model to deployable format.
Args:
model (nn.Module): a pytorch model to export
input_args (Tuple[Any]): inputs of model, called as model(*input_args)
save_path (str): directory where the model will be exported
export_method (str): string name for the export method
export_kwargs (Dict): additional parameters for exporting model defined
by each model export method.
Return:
load_kwargs (Dict): additional information (besides save_path) needed in
order to load the exported model. This needs to be JSON serializable.
"""
pass
@classmethod
@abstractmethod
def load(cls, save_path, **load_kwargs):
"""
Load the exported model back for inference.
Args:
save_path (str): directory where the model is stored.
            load_kwargs (Dict): additional information to load the exported model.
Returns:
model (nn.Module): a nn.Module (often time a wrapper for non torchscript
types like caffe2), it works the same as the original pytorch model,
i.e. getting the same output when called as model(*input_args)
"""
pass
@classmethod
@final
def test_export_and_load(
cls, model, input_args, export_method, export_kwargs, output_checker
):
"""
Illustrate the life-cycle of export and load, used for testing.
"""
with make_temp_directory("test_export_and_load") as save_path:
            # run the original model
assert isinstance(model, nn.Module), model
assert isinstance(input_args, (list, tuple)), input_args
original_output = model(*input_args)
# export the model
model.eval() # TODO: decide where eval() should be called
load_kwargs = cls.export(
model, input_args, save_path, export_method, **export_kwargs
)
# sanity check for load_kwargs
assert isinstance(load_kwargs, dict), load_kwargs
assert json.dumps(load_kwargs), load_kwargs
# loaded model back
loaded_model = cls.load(save_path, **load_kwargs)
# run the loaded model
assert isinstance(loaded_model, nn.Module), loaded_model
new_output = loaded_model(*input_args)
# compare outputs
output_checker(new_output, original_output)
ModelExportMethodRegistry = Registry("ModelExportMethod", allow_override=True)
| d2go-main | d2go/export/api.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import contextlib
import logging
import os
from typing import Any, AnyStr, Dict, List, NamedTuple, Optional, Set, Tuple
import torch
from d2go.export.api import ModelExportMethod, ModelExportMethodRegistry
from detectron2.config.instantiate import dump_dataclass, instantiate
from detectron2.export import dump_torchscript_IR
from detectron2.export.flatten import flatten_to_tuple, TracingAdapter
from detectron2.export.torchscript_patch import patch_builtin_len
from detectron2.utils.file_io import PathManager
from mobile_cv.common.misc.file_utils import make_temp_directory
from mobile_cv.common.misc.iter_utils import recursive_iterate
from torch import nn
from torch.utils.bundled_inputs import augment_model_with_bundled_inputs
from torch.utils.mobile_optimizer import MobileOptimizerType, optimize_for_mobile
logger = logging.getLogger(__name__)
TORCHSCRIPT_FILENAME_KEY: str = "torchscript_filename"
DEFAULT_JIT_MODE = "trace"
class MobileOptimizationConfig(NamedTuple):
# optimize_for_mobile
optimization_blocklist: Set[MobileOptimizerType] = None
preserved_methods: List[AnyStr] = None
backend: str = "CPU"
torchscript_filename: str = "mobile_optimized.ptl"
def export_optimize_and_save_torchscript(
model: nn.Module,
inputs: Optional[Tuple[Any]],
output_path: str,
*,
jit_mode: Optional[str] = DEFAULT_JIT_MODE,
torchscript_filename: str = "model.jit",
mobile_optimization: Optional[MobileOptimizationConfig] = None,
_extra_files: Optional[Dict[str, bytes]] = None,
) -> str:
"""
The primary function for exporting PyTorch model to TorchScript.
Args:
model (nn.Module): the model to export. When given a ScriptModule, skip the export
and only optimize and save model.
inputs (tuple or None): input arguments of model, can be called as model(*inputs).
Will not be used when scripting the model.
output_path (str): directory that the model will be saved.
jit_mode (str): trace/script or None if the model is already a ScriptModule.
torchscript_filename (str): the filename of non-mobile-optimized model.
mobile_optimization (MobileOptimizationConfig): when provided, the mobile optimization
will be applied.
_extra_files (Dict[str, bytes]): when provided, extra files will be saved.
Returns:
        (str): filename of the final model, whether optimized or not.
"""
logger.info("Export, optimize and saving TorchScript to {} ...".format(output_path))
PathManager.mkdirs(output_path)
if _extra_files is None:
_extra_files = {}
    if isinstance(model, torch.jit.ScriptModule):
        if jit_mode is not None:
            logger.info("The input model is already a ScriptModule, skip the jit step")
        # use the provided ScriptModule as-is; otherwise `script_model` would be undefined below
        script_model = model
elif jit_mode == "trace":
logger.info("Tracing the model ...")
with torch.no_grad():
script_model = torch.jit.trace(model, inputs)
elif jit_mode == "script":
logger.info("Scripting the model ...")
script_model = torch.jit.script(model)
else:
raise ValueError("Unsupported jit_mode: {}".format(jit_mode))
with make_temp_directory("export_optimize_and_save_torchscript") as tmp_dir:
@contextlib.contextmanager
def _synced_local_file(rel_path):
remote_file = os.path.join(output_path, rel_path)
local_file = os.path.join(tmp_dir, rel_path)
yield local_file
PathManager.copy_from_local(local_file, remote_file, overwrite=True)
with _synced_local_file(torchscript_filename) as model_file:
logger.info(f"Saving torchscript model to: {torchscript_filename}")
torch.jit.save(script_model, model_file, _extra_files=_extra_files)
dump_torchscript_IR(script_model, os.path.join(output_path, "torchscript_IR"))
data_filename = "data.pth"
with _synced_local_file(data_filename) as data_file:
logger.info(f"Saving example data to: {data_filename}")
torch.save(inputs, data_file)
if mobile_optimization is not None:
logger.info("Applying optimize_for_mobile ...")
liteopt_model = optimize_for_mobile(
script_model,
optimization_blocklist=mobile_optimization.optimization_blocklist,
preserved_methods=mobile_optimization.preserved_methods,
backend=mobile_optimization.backend,
)
torchscript_filename = mobile_optimization.torchscript_filename
with _synced_local_file(torchscript_filename) as lite_path:
logger.info(f"Saving mobile optimized model to: {torchscript_filename}")
liteopt_model._save_for_lite_interpreter(
lite_path, _extra_files=_extra_files
)
op_names = torch.jit.export_opnames(liteopt_model)
logger.info(
"Operator names from lite interpreter:\n{}".format("\n".join(op_names))
)
logger.info("Applying augment_model_with_bundled_inputs ...")
# make all tensors zero-like to save storage
iters = recursive_iterate(inputs)
for x in iters:
if isinstance(x, torch.Tensor):
iters.send(torch.zeros_like(x).contiguous())
inputs = iters.value
augment_model_with_bundled_inputs(liteopt_model, [inputs])
            # For non-cpu backends (e.g. Metal, Vulkan) the bundled inputs need to be
            # converted with `.to(<device>)` in order to predict successfully.
            # This is a temporary bypass until PT Edge supports automatic backend
            # conversion in the bundled inputs interface, or we can auto-add an input
            # tensor conversion op to Metal and Vulkan models.
target_backend = mobile_optimization.backend.lower()
if target_backend == "cpu":
# Sanity check by running
logger.info("Running sanity check for the mobile optimized model ...")
liteopt_model(*liteopt_model.get_all_bundled_inputs()[0])
name, ext = os.path.splitext(torchscript_filename)
input_bundled_path = name + "_bundled" + ext
with _synced_local_file(input_bundled_path) as lite_path:
logger.info(f"Saving input bundled model to: {input_bundled_path}")
liteopt_model._save_for_lite_interpreter(lite_path)
return torchscript_filename
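# Illustrative sketch (not part of the original file): tracing and mobile-optimizing a
# toy module; the Conv2d module and input shape are assumptions used only to show the
# call signature.
def _example_export_torchscript(output_dir):
    model = torch.nn.Conv2d(3, 8, kernel_size=3).eval()
    inputs = (torch.zeros(1, 3, 32, 32),)
    return export_optimize_and_save_torchscript(
        model,
        inputs,
        output_dir,
        jit_mode="trace",
        mobile_optimization=MobileOptimizationConfig(),  # defaults to the CPU backend
    )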
# For backward compatibility, TODO: remove this function.
def trace_and_save_torchscript(
model: nn.Module,
inputs: Optional[Tuple[Any]],
output_path: str,
torchscript_filename: str = "model.jit",
mobile_optimization: Optional[MobileOptimizationConfig] = None,
_extra_files: Optional[Dict[str, bytes]] = None,
):
return export_optimize_and_save_torchscript(
model,
inputs,
output_path,
jit_mode="trace",
torchscript_filename=torchscript_filename,
mobile_optimization=mobile_optimization,
_extra_files=_extra_files,
)
class TorchscriptWrapper(nn.Module):
""" """
def __init__(self, module, int8_backend=None):
super().__init__()
self.module = module
self.int8_backend = int8_backend
def forward(self, *args, **kwargs):
# TODO: set int8 backend accordingly if needed
return self.module(*args, **kwargs)
def get_wrapped_models(self):
return self.module
def load_torchscript(model_path):
extra_files = {}
# NOTE: may support loading extra_file specified by model_info
# extra_files["predictor_info.json"] = ""
with PathManager.open(model_path, "rb") as f:
ts = torch.jit.load(f, _extra_files=extra_files)
return TorchscriptWrapper(ts)
def _is_data_flattened_tensors(data):
if isinstance(data, torch.Tensor):
return True
if isinstance(data, (tuple, list)):
if all(isinstance(x, torch.Tensor) for x in data):
return True
return False
def tracing_adapter_wrap_export(old_f):
def new_f(cls, model, input_args, save_path, export_method, **export_kwargs):
force_disable_tracing_adapter = export_kwargs.pop(
"force_disable_tracing_adapter", False
)
is_trace_mode = export_kwargs.get("jit_mode", "trace") == "trace"
if force_disable_tracing_adapter or not is_trace_mode:
logger.info("Not trace mode, export normally")
return old_f(
cls, model, input_args, save_path, export_method, **export_kwargs
)
if _is_data_flattened_tensors(input_args):
logger.info("Dry run the model to check if TracingAdapter is needed ...")
outputs = model(*input_args)
if _is_data_flattened_tensors(outputs):
logger.info(
"Both inputs and outputs are flattened tensors, export the model as is."
)
load_kwargs = old_f(
cls, model, input_args, save_path, export_method, **export_kwargs
)
assert "tracing_adapted" not in load_kwargs
load_kwargs.update({"tracing_adapted": False})
return load_kwargs
else:
logger.info(
"The outputs are not flattened tensors, can't trace normally."
)
else:
logger.info("The inputs are not flattened tensors, can't trace normally.")
logger.warning(
"Wrap the model with TracingAdapter to handle non-flattened inputs/outputs,"
" please be aware that the exported model will have different input/output data structure."
)
adapter = TracingAdapter(model, input_args)
load_kwargs = old_f(
cls,
adapter,
adapter.flattened_inputs,
save_path,
export_method,
**export_kwargs,
)
inputs_schema = dump_dataclass(adapter.inputs_schema)
outputs_schema = dump_dataclass(adapter.outputs_schema)
assert "tracing_adapted" not in load_kwargs
assert "inputs_schema" not in load_kwargs
assert "outputs_schema" not in load_kwargs
load_kwargs.update(
{
"tracing_adapted": True,
"inputs_schema": inputs_schema,
"outputs_schema": outputs_schema,
}
)
return load_kwargs
return new_f
class TracingAdapterModelWrapper(nn.Module):
def __init__(self, traced_model, inputs_schema, outputs_schema):
super().__init__()
self.traced_model = traced_model
self.inputs_schema = inputs_schema
self.outputs_schema = outputs_schema
def forward(self, *input_args):
flattened_inputs, _ = flatten_to_tuple(input_args)
flattened_outputs = self.traced_model(*flattened_inputs)
return self.outputs_schema(flattened_outputs)
def get_wrapped_models(self):
return self.traced_model
def tracing_adapter_wrap_load(old_f):
def new_f(cls, save_path, **load_kwargs):
tracing_adapted = load_kwargs.pop("tracing_adapted", False)
if not tracing_adapted:
logger.info("The model is not tracing adapted, load it normally.")
return old_f(cls, save_path, **load_kwargs)
logger.info(
"The model is tracing adapted, load the schema and wrap the model for inference."
)
assert "inputs_schema" in load_kwargs, load_kwargs.keys()
assert "outputs_schema" in load_kwargs, load_kwargs.keys()
inputs_schema = instantiate(load_kwargs.pop("inputs_schema"))
outputs_schema = instantiate(load_kwargs.pop("outputs_schema"))
traced_model = old_f(cls, save_path, **load_kwargs)
return TracingAdapterModelWrapper(traced_model, inputs_schema, outputs_schema)
return new_f
def update_export_kwargs_from_export_method(old_f):
"""
    Provides a convenient way of updating export_kwargs by adding trigger words in
    `export_method`. For example, instead of setting `mobile_optimization` in the
    model_export_kwargs of the PredictorExportConfig, the user can simply put the `_mobile`
    trigger word in the --predictor-type (which will then be forwarded as `export_method`
    in most cases) to enable mobile optimization.
Please note that there's a finite set of allowed "export_method" values,
and an error will be raised if the string cannot be fully parsed.
The recognized values generally follow a pattern of:
"torchscript[_mobile][_int8][-vulkan | -metal][@scripting | @tracing]"
Some examples (not comprehensive because flag words' order can be swapped):
"torchscript"
"torchscript_mobile"
"torchscript_mobile-metal"
"torchscript_mobile-vulkan"
"torchscript_mobile_int8"
"torchscript@scripting"
"torchscript_int8@scripting"
"torchscript_mobile@scripting"
"torchscript_mobile-metal@scripting"
"torchscript_mobile-vulkan@scripting"
"torchscript_mobile_int8@scripting"
"torchscript@tracing"
"torchscript_int8@tracing"
"torchscript_mobile@tracing"
"torchscript_mobile-metal@tracing"
"torchscript_mobile-vulkan@tracing"
"torchscript_mobile_int8@tracing"
"""
def new_f(cls, model, input_args, save_path, export_method, **export_kwargs):
if export_method is not None:
assert isinstance(export_method, str)
original_export_method = export_method
if "_mobile" in export_method:
if "mobile_optimization" in export_kwargs:
logger.warning(
"`mobile_optimization` is already specified, keep using it"
)
else:
# Infer a MobileOptimizationConfig if none was provided
# "CPU" backend default. If found appropriate suffix, update the backend
if "-metal" in export_method:
mobile_opt_config = MobileOptimizationConfig(backend="metal")
export_method = export_method.replace("-metal", "", 1)
elif "-vulkan" in export_method:
mobile_opt_config = MobileOptimizationConfig(backend="vulkan")
export_method = export_method.replace("-vulkan", "", 1)
else:
mobile_opt_config = MobileOptimizationConfig()
export_kwargs["mobile_optimization"] = mobile_opt_config
export_method = export_method.replace("_mobile", "", 1)
if "@scripting" in export_method:
jit_mode = export_kwargs.get("jit_mode", None)
if jit_mode and jit_mode != "script":
logger.warning(
"`jit_mode` is already specified as {}, overwrite it to `script`"
" since @scripting appears in export_method".format(jit_mode)
)
export_kwargs["jit_mode"] = "script"
export_method = export_method.replace("@scripting", "", 1)
if "@tracing" in export_method:
jit_mode = export_kwargs.get("jit_mode", None)
if jit_mode and jit_mode != "trace":
logger.warning(
"`jit_mode` is already specified as {}, overwrite it to `trace`"
" since @tracing appears in export_method".format(jit_mode)
)
export_kwargs["jit_mode"] = "trace"
export_method = export_method.replace("@tracing", "", 1)
if "_int8" in export_method:
export_method = export_method.replace("_int8", "", 1)
if export_method != "torchscript":
logger.warning(
"Suspcious export_method after removing triggering words,"
" original export_method: {}, remaining: {}".format(
original_export_method, export_method
)
)
return old_f(cls, model, input_args, save_path, export_method, **export_kwargs)
return new_f
class DefaultTorchscriptExport(ModelExportMethod):
@classmethod
@update_export_kwargs_from_export_method
def export(
cls,
model: nn.Module,
input_args: Tuple[Tuple[torch.Tensor]],
save_path: str,
export_method: Optional[str],
**export_kwargs,
):
expected_arguments = {
"jit_mode",
"torchscript_filename",
"mobile_optimization",
"_extra_files",
}
filtered_kwargs = {
k: v for k, v in export_kwargs.items() if k in expected_arguments
}
torchscript_filename = export_optimize_and_save_torchscript(
model, input_args, save_path, **filtered_kwargs
)
return {TORCHSCRIPT_FILENAME_KEY: torchscript_filename}
@classmethod
def load(cls, save_path, *, torchscript_filename="model.jit"):
model_path = os.path.join(save_path, torchscript_filename)
return load_torchscript(model_path)
@ModelExportMethodRegistry.register("torchscript")
@ModelExportMethodRegistry.register("torchscript_int8")
@ModelExportMethodRegistry.register("torchscript_mobile")
@ModelExportMethodRegistry.register("torchscript_mobile-metal")
@ModelExportMethodRegistry.register("torchscript_mobile-vulkan")
@ModelExportMethodRegistry.register("torchscript_mobile_int8")
@ModelExportMethodRegistry.register("torchscript@scripting")
@ModelExportMethodRegistry.register("torchscript_int8@scripting")
@ModelExportMethodRegistry.register("torchscript_mobile@scripting")
@ModelExportMethodRegistry.register("torchscript_mobile-metal@scripting")
@ModelExportMethodRegistry.register("torchscript_mobile-vulkan@scripting")
@ModelExportMethodRegistry.register("torchscript_mobile_int8@scripting")
@ModelExportMethodRegistry.register("torchscript@tracing")
@ModelExportMethodRegistry.register("torchscript_int8@tracing")
@ModelExportMethodRegistry.register("torchscript_mobile@tracing")
@ModelExportMethodRegistry.register("torchscript_mobile-metal@tracing")
@ModelExportMethodRegistry.register("torchscript_mobile-vulkan@tracing")
@ModelExportMethodRegistry.register("torchscript_mobile_int8@tracing")
class TracingAdaptedTorchscriptExport(DefaultTorchscriptExport):
@classmethod
@update_export_kwargs_from_export_method
@tracing_adapter_wrap_export
def export(cls, model, input_args, save_path, export_method, **export_kwargs):
with patch_builtin_len():
return super().export(
model, input_args, save_path, export_method, **export_kwargs
)
@classmethod
@tracing_adapter_wrap_load
def load(cls, save_path, **load_kwargs):
return super().load(save_path, **load_kwargs)
| d2go-main | d2go/export/torchscript.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import itertools
import logging
import operator
from collections import defaultdict, OrderedDict
from typing import Dict
import torch
from d2go.config import CfgNode
from d2go.data.dataset_mappers.build import build_dataset_mapper
from d2go.data.utils import ClipLengthGroupedDataset
from detectron2.data import (
build_batch_data_loader,
build_detection_train_loader,
get_detection_dataset_dicts,
)
from detectron2.data.build import worker_init_reset_seed
from detectron2.data.common import DatasetFromList, MapDataset
from detectron2.data.dataset_mapper import DatasetMapper
from detectron2.data.samplers import RepeatFactorTrainingSampler
from detectron2.utils.comm import get_world_size
from mobile_cv.common.misc.oss_utils import fb_overwritable
from tabulate import tabulate
logger = logging.getLogger(__name__)
def add_weighted_training_sampler_default_configs(cfg: CfgNode):
"""
The CfgNode under cfg.DATASETS.TRAIN_REPEAT_FACTOR should be a list of
tuples (dataset_name, scalar-repeat-factor) specifying upsampled frequencies
for each dataset when using RepeatFactorTrainingSampler. An example looks like:
DATASETS:
TRAIN:
- "train_1"
- "train_2"
- "small_train_3"
TEST: ...
TRAIN_REPEAT_FACTOR:
- ["small_train_3", 2.5]
"""
cfg.DATASETS.TRAIN_REPEAT_FACTOR = []
def add_random_subset_training_sampler_default_configs(cfg: CfgNode):
"""
Add default cfg.DATALOADER.RANDOM_SUBSET_RATIO for RandomSubsetTrainingSampler
The CfgNode under cfg.DATALOADER.RANDOM_SUBSET_RATIO should be a float > 0 and <= 1
"""
cfg.DATALOADER.RANDOM_SUBSET_RATIO = 1.0
def get_train_datasets_repeat_factors(cfg: CfgNode) -> Dict[str, float]:
repeat_factors = cfg.DATASETS.TRAIN_REPEAT_FACTOR
assert all(len(tup) == 2 for tup in repeat_factors)
name_to_weight = defaultdict(lambda: 1, dict(repeat_factors))
# The sampling weights map should only contain datasets in train config
unrecognized = set(name_to_weight.keys()) - set(cfg.DATASETS.TRAIN)
assert not unrecognized, f"unrecognized datasets: {unrecognized}"
logger.info(f"Found repeat factors: {list(name_to_weight.items())}")
# pyre-fixme[7]: Expected `Dict[str, float]` but got `DefaultDict[typing.Any, int]`.
return name_to_weight
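# Illustrative sketch (not part of the original file): with the example config shown in
# the docstring above, the per-dataset weights resolve roughly as below; the dataset
# names are hypothetical.
def _example_repeat_factor_lookup():
    name_to_weight = defaultdict(lambda: 1, {"small_train_3": 2.5})
    # datasets not listed in DATASETS.TRAIN_REPEAT_FACTOR fall back to a weight of 1
    return [name_to_weight[name] for name in ("train_1", "train_2", "small_train_3")]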
def get_sampling_probability_table(
dataset_sizes: Dict[str, int], dataset_repeat_factors: Dict[str, float]
) -> str:
total_sum = sum(
dataset_repeat_factors.get(dsname, 1.0) * size
for dsname, size in dataset_sizes.items()
)
sample_prob_data = [
(
dsname,
size,
dataset_repeat_factors.get(dsname, 1.0),
(dataset_repeat_factors.get(dsname, 1.0) * size) * 100 / total_sum,
)
for dsname, size in dataset_sizes.items()
]
headers = ["Dataset", "Samples", "Repeat factor", "Sample Prob (%)"]
table = tabulate(sample_prob_data, headers=headers, tablefmt="pipe")
return table
def build_weighted_detection_train_loader(
cfg: CfgNode, mapper=None, enable_category_balance=False
):
dataset_repeat_factors = get_train_datasets_repeat_factors(cfg)
# OrderedDict to guarantee order of values() consistent with repeat factors
dataset_name_to_dicts = OrderedDict(
{
name: get_detection_dataset_dicts(
[name],
filter_empty=cfg.DATALOADER.FILTER_EMPTY_ANNOTATIONS,
min_keypoints=cfg.MODEL.ROI_KEYPOINT_HEAD.MIN_KEYPOINTS_PER_IMAGE
if cfg.MODEL.KEYPOINT_ON
else 0,
proposal_files=cfg.DATASETS.PROPOSAL_FILES_TRAIN
if cfg.MODEL.LOAD_PROPOSALS
else None,
)
for name in cfg.DATASETS.TRAIN
}
)
# Repeat factor for every sample in the dataset
repeat_factors = [
[dataset_repeat_factors[dsname]] * len(dataset_name_to_dicts[dsname])
for dsname in cfg.DATASETS.TRAIN
]
sampling_prob_table = get_sampling_probability_table(
{dsname: len(dataset_name_to_dicts[dsname]) for dsname in cfg.DATASETS.TRAIN},
dataset_repeat_factors,
)
logger.info("Dataset TRAIN sampling probability: \n" + sampling_prob_table)
repeat_factors = list(itertools.chain.from_iterable(repeat_factors))
dataset_dicts = dataset_name_to_dicts.values()
dataset_dicts = list(itertools.chain.from_iterable(dataset_dicts))
dataset = DatasetFromList(dataset_dicts, copy=False)
if mapper is None:
mapper = DatasetMapper(cfg, True)
dataset = MapDataset(dataset, mapper)
repeat_factors = torch.tensor(repeat_factors)
if enable_category_balance:
"""
1. Calculate repeat factors using category frequency for each dataset and then merge them.
        2. Element-wise multiplying the dataset frequency repeat factors with
            the category frequency repeat factors gives the final repeat factors.
"""
category_repeat_factors = [
RepeatFactorTrainingSampler.repeat_factors_from_category_frequency(
dataset_dict, cfg.DATALOADER.REPEAT_THRESHOLD
)
for dataset_dict in dataset_name_to_dicts.values()
]
# flatten the category repeat factors from all datasets
category_repeat_factors = list(
itertools.chain.from_iterable(category_repeat_factors)
)
category_repeat_factors = torch.tensor(category_repeat_factors)
repeat_factors = torch.mul(category_repeat_factors, repeat_factors)
repeat_factors = repeat_factors / torch.min(repeat_factors)
logger.info(
"Using WeightedCategoryTrainingSampler with repeat_factors={}".format(
cfg.DATASETS.TRAIN_REPEAT_FACTOR
)
)
else:
logger.info(
"Using WeightedTrainingSampler with repeat_factors={}".format(
cfg.DATASETS.TRAIN_REPEAT_FACTOR
)
)
sampler = RepeatFactorTrainingSampler(repeat_factors)
return build_batch_data_loader(
dataset,
sampler,
cfg.SOLVER.IMS_PER_BATCH,
aspect_ratio_grouping=cfg.DATALOADER.ASPECT_RATIO_GROUPING,
num_workers=cfg.DATALOADER.NUM_WORKERS,
)
def build_clip_grouping_data_loader(dataset, sampler, total_batch_size, num_workers=0):
"""
Build a batched dataloader for training with video clips.
Args:
dataset (torch.utils.data.Dataset): map-style PyTorch dataset. Can be indexed.
sampler (torch.utils.data.sampler.Sampler): a sampler that produces indices
total_batch_size (int): total batch size across GPUs.
num_workers (int): number of parallel data loading workers
Returns:
iterable[list]. Length of each list is the batch size of the current
GPU. Each element in the list comes from the dataset.
"""
world_size = get_world_size()
assert (
total_batch_size > 0 and total_batch_size % world_size == 0
), "Total batch size ({}) must be divisible by the number of gpus ({}).".format(
total_batch_size, world_size
)
batch_size = total_batch_size // world_size
data_loader = torch.utils.data.DataLoader(
dataset,
sampler=sampler,
num_workers=num_workers,
batch_sampler=None,
collate_fn=operator.itemgetter(0), # don't batch, but yield individual elements
worker_init_fn=worker_init_reset_seed,
) # yield individual mapped dict
return ClipLengthGroupedDataset(data_loader, batch_size)
@fb_overwritable()
def build_mapped_train_loader(cfg, mapper):
if cfg.DATALOADER.SAMPLER_TRAIN == "WeightedTrainingSampler":
# balancing only datasets frequencies
data_loader = build_weighted_detection_train_loader(cfg, mapper=mapper)
elif cfg.DATALOADER.SAMPLER_TRAIN == "WeightedCategoryTrainingSampler":
# balancing both datasets and its categories
data_loader = build_weighted_detection_train_loader(
cfg, mapper=mapper, enable_category_balance=True
)
else:
data_loader = build_detection_train_loader(cfg, mapper=mapper)
return data_loader
def build_d2go_train_loader(cfg, mapper=None):
"""
    Build the dataloader for training in D2Go. This is the main entry point; customizations
    are done by using the Registry.
This interface is currently experimental.
"""
logger.info("Building D2Go's train loader ...")
# TODO: disallow passing mapper and use registry for all mapper registering
mapper = mapper or build_dataset_mapper(cfg, is_train=True)
logger.info("Using dataset mapper:\n{}".format(mapper))
data_loader = build_mapped_train_loader(cfg, mapper)
# TODO: decide if move vis_wrapper inside this interface
return data_loader
| d2go-main | d2go/data/build.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from d2go.config import CfgNode as CN
def add_d2go_data_default_configs(_C):
_C.D2GO_DATA = CN()
# Config for "detectron2go.data.extended_coco.extended_coco_load"
_C.D2GO_DATA.DATASETS = CN()
# List of class names to use when loading the data, this applies to train
# and test separately. Default value means using all classes, otherwise it'll create
# new json file containing only given categories.
_C.D2GO_DATA.DATASETS.TRAIN_CATEGORIES = ()
_C.D2GO_DATA.DATASETS.TEST_CATEGORIES = ()
# Register a list of COCO datasets in config
# The following specifies additional coco data to inject. The required is the
# name (NAMES), image root (IM_DIRS), coco json file (JSON_FILES) while keypoint
# metadata (KEYPOINT_METADATA) is optional. The keypoint metadata name provided
# here is used to lookup the metadata specified within the KEYPOINT_METADATA
# metadata registry specified in "data/keypoint_metadata_registry.py". For adding
# new use cases, simply register new metadata to that registry.
_C.D2GO_DATA.DATASETS.COCO_INJECTION = CN()
_C.D2GO_DATA.DATASETS.COCO_INJECTION.NAMES = []
_C.D2GO_DATA.DATASETS.COCO_INJECTION.IM_DIRS = []
_C.D2GO_DATA.DATASETS.COCO_INJECTION.JSON_FILES = []
_C.D2GO_DATA.DATASETS.COCO_INJECTION.KEYPOINT_METADATA = []
_C.D2GO_DATA.DATASETS.COCO_INJECTION.REGISTER_FUNCTION = "_register_extended_coco"
# On-the-fly register a list of datasets located under detectron2go/datasets
# by specifying the filename (without .py).
_C.D2GO_DATA.DATASETS.DYNAMIC_DATASETS = []
# Config for caching the dataset annotations on local disk
_C.D2GO_DATA.DATASETS.DISK_CACHE = CN()
_C.D2GO_DATA.DATASETS.DISK_CACHE.ENABLED = False
# TODO: potentially add this config
# # List of extra keys in annotation, the item will be forwarded by
# # extended_coco_load.
# _C.D2GO_DATA.DATASETS.ANNOTATION_FIELDS_TO_FORWARD = ()
# Config for D2GoDatasetMapper
_C.D2GO_DATA.MAPPER = CN()
# dataset mapper name
_C.D2GO_DATA.MAPPER.NAME = "D2GoDatasetMapper"
    # When enabled, an image item from the json dataset doesn't need to have width/height;
    # they will be backfilled once the image is loaded. This may cause issues when
    # width/height is actually used by extended_coco_load, e.g. grouping
    # by aspect ratio.
_C.D2GO_DATA.MAPPER.BACKFILL_SIZE = False
_C.D2GO_DATA.MAPPER.RETRY = 3
_C.D2GO_DATA.MAPPER.CATCH_EXCEPTION = True
_C.D2GO_DATA.AUG_OPS = CN()
# List of transforms that are represented by string. Each string starts with
# a registered name in TRANSFORM_OP_REGISTRY, optionally followed by a string
# argument (separated by "::") which can be used for initializing the
# transform object. See build_transform_gen for the detail.
# Some examples are:
# example 1: RandomFlipOp
# example 2: RandomFlipOp::{}
# example 3: RandomFlipOp::{"prob":0.5}
# example 4: RandomBrightnessOp::{"intensity_min":1.0, "intensity_max":2.0}
# NOTE: search "example repr:" in fbcode for examples.
_C.D2GO_DATA.AUG_OPS.TRAIN = ["ResizeShortestEdgeOp", "RandomFlipOp"]
_C.D2GO_DATA.AUG_OPS.TEST = ["ResizeShortestEdgeOp"]
_C.D2GO_DATA.TEST = CN()
    # Evaluate on the first specified number of images for each dataset during
    # testing; the default value 0 means using all images.
# NOTE: See maybe_subsample_n_images for details.
_C.D2GO_DATA.TEST.MAX_IMAGES = 0
_C.D2GO_DATA.TEST.SUBSET_SAMPLING = "frontmost" # one of {"frontmost", "random"}
return _C
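# Illustrative sketch (not part of the original file): overriding the augmentation ops
# from code using the string format described above; the RandomBrightnessOp arguments are
# example values only.
def _example_override_aug_ops(cfg):
    cfg.D2GO_DATA.AUG_OPS.TRAIN = [
        "ResizeShortestEdgeOp",
        'RandomBrightnessOp::{"intensity_min": 1.0, "intensity_max": 2.0}',
    ]
    return cfg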
| d2go-main | d2go/data/config.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import atexit
import logging
import pickle
import shutil
import uuid
import numpy as np
from detectron2.utils import comm
from detectron2.utils.logger import log_every_n_seconds
logger = logging.getLogger(__name__)
# NOTE: Use a unique ROOT_CACHE_DIR for each run. During the run, each instance of data
# loader will create a `cache_dir` under ROOT_CACHE_DIR. When the DL instance is GC-ed,
# the `cache_dir` will be removed by __del__; when the run is finished or interrupted,
# atexit.register will be triggered to remove the ROOT_CACHE_DIR to make sure there are no
# leftovers. Regarding DDP, although each GPU process has its own random value for
# ROOT_CACHE_DIR, each GPU process uses the same `cache_dir` broadcasted from the local
# master rank, which is then inherited by each data loader worker; this makes sure that
# `cache_dir` is in sync between all GPUs and DL workers on the same node.
ROOT_CACHE_DIR = "/tmp/DatasetFromList_cache_" + uuid.uuid4().hex[:8]
def _local_master_gather(func, check_equal=False):
if comm.get_local_rank() == 0:
x = func()
assert x is not None
else:
x = None
x_all = comm.all_gather(x)
x_local_master = [x for x in x_all if x is not None]
if check_equal:
master = x_local_master[0]
assert all(x == master for x in x_local_master), x_local_master
return x_local_master
class DiskCachedList(object):
"""
Wrap a list, the underlying storage is off-loaded to disk to save RAM usage.
"""
def __init__(self, lst, strategy="batched_static"):
"""
Args:
lst (list): a list which contains elements to produce.
strategy (str): strategy of using diskcache, supported strategies:
- native: saving each item individually.
- batched_static: group N items together, where N is calculated from
the average item size.
"""
self._lst = lst
self._diskcache_strategy = strategy
def _serialize(data):
buffer = pickle.dumps(data, protocol=-1)
return np.frombuffer(buffer, dtype=np.uint8)
logger.info(
"Serializing {} elements to byte tensors ...".format(len(self._lst))
)
self._lst = [_serialize(x) for x in self._lst]
total_size = sum(len(x) for x in self._lst)
# TODO: only enabling DiskCachedDataset for large enough dataset
logger.info(
"Serialized dataset takes {:.2f} MiB".format(total_size / 1024**2)
)
self._initialize_diskcache()
def _initialize_diskcache(self):
from mobile_cv.common.misc.local_cache import LocalCache
cache_dir = "{}/{}".format(ROOT_CACHE_DIR, uuid.uuid4().hex[:8])
cache_dir = comm.all_gather(cache_dir)[0] # use same cache_dir
logger.info("Creating diskcache database in: {}".format(cache_dir))
self._cache = LocalCache(cache_dir=cache_dir, num_shards=8)
# self._cache.cache.clear(retry=True) # seems faster if index exists
if comm.get_local_rank() == 0:
if self._diskcache_strategy == "naive":
for i, item in enumerate(self._lst):
ret = self._write_to_local_db((i, item))
assert ret, "Error writing index {} to local db".format(i)
pct = 100.0 * i / len(self._lst)
self._log_progress(pct)
            # NOTE: each item might be small in size (hundreds of bytes),
            # writing millions of them can take a pretty long time (hours)
            # because of frequent disk access. One solution is grouping a batch
            # of items into a larger blob.
elif self._diskcache_strategy == "batched_static":
TARGET_BYTES = 50 * 1024
average_bytes = np.average(
[
self._lst[int(x)].size
for x in np.linspace(0, len(self._lst) - 1, 1000)
]
)
self._chuck_size = max(1, int(TARGET_BYTES / average_bytes))
logger.info(
"Average data size: {} bytes; target chuck data size {} KiB;"
" {} items per chuck; {} chucks in total".format(
average_bytes,
TARGET_BYTES / 1024,
self._chuck_size,
int(len(self._lst) / self._chuck_size),
)
)
for i in range(0, len(self._lst), self._chuck_size):
chunk = self._lst[i : i + self._chuck_size]
chunk_i = int(i / self._chuck_size)
ret = self._write_to_local_db((chunk_i, chunk))
assert ret, "Error writing index {} to local db".format(chunk_i)
pct = 100.0 * i / len(self._lst)
self._log_progress(pct)
            # NOTE: instead of using a fixed chunk size, items can be grouped dynamically
elif self._diskcache_strategy == "batched_dynamic":
raise NotImplementedError()
else:
raise NotImplementedError(self._diskcache_strategy)
comm.synchronize()
logger.info(
"Finished writing to local disk, db size: {:.2f} MiB".format(
self._cache.cache.volume() / 1024**2
)
)
# Optional sync for some strategies
if self._diskcache_strategy == "batched_static":
# propagate chuck size and make sure all local rank 0 uses the same value
self._chuck_size = _local_master_gather(
lambda: self._chuck_size, check_equal=True
)[0]
logger.info("Gathered chuck size: {}".format(self._chuck_size))
# free the memory of self._lst
self._size = _local_master_gather(lambda: len(self._lst), check_equal=True)[0]
logger.info("Gathered list size: {}".format(self._size))
del self._lst
def _write_to_local_db(self, task):
index, record = task
db_path = str(index)
# suc = self._cache.load(lambda path, x: x, db_path, record)
# record = BytesIO(np.random.bytes(np.random.randint(70000, 90000)))
suc = self._cache.cache.set(db_path, record, retry=True)
return suc
def _log_progress(self, percentage):
log_every_n_seconds(
logging.INFO,
"({:.2f}%) Wrote {} elements to local disk cache, db size: {:.2f} MiB".format(
percentage,
len(self._cache.cache),
self._cache.cache.volume() / 1024**2,
),
n=10,
)
def __len__(self):
if self._diskcache_strategy == "batched_static":
return self._size
else:
raise NotImplementedError()
def __getitem__(self, idx):
if self._diskcache_strategy == "naive":
bytes = memoryview(self._cache.cache[str(idx)])
return pickle.loads(bytes)
elif self._diskcache_strategy == "batched_static":
chunk_i, residual = divmod(idx, self._chuck_size)
chunk = self._cache.cache[str(chunk_i)]
bytes = memoryview(chunk[residual])
return pickle.loads(bytes)
else:
raise NotImplementedError()
@property
def cache_dir(self):
"""return the current cache dirs of DiskCachedDatasetFromList instance"""
return self._cache.cache_dir
@staticmethod
@atexit.register
def _clean_up_root_cache_dir():
        # in case the program exits unexpectedly, clean all the cache dirs created by
        # this session.
if comm.get_local_rank() == 0:
_clean_up_cache_dir(ROOT_CACHE_DIR)
def __del__(self):
        # when data loaders are GC-ed, remove the cache dir. This is needed to avoid
        # wasting disk space in case multiple data loaders are used, e.g. running
        # evaluations on multiple datasets during training.
if comm.get_local_rank() == 0:
_clean_up_cache_dir(self.cache_dir)
def _clean_up_cache_dir(cache_dir):
print("Cleaning up cache dir: {}".format(cache_dir))
shutil.rmtree(
cache_dir,
onerror=lambda func, path, ex: print(
"Catch error when removing {}; func: {}; exc_info: {}".format(
path, func, ex
)
),
)
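# Illustrative sketch (not part of the original file): wrapping a list of dataset dicts so
# that the serialized items live on local disk instead of RAM; the toy records below are a
# hypothetical stand-in for real dataset annotations.
def _example_disk_cached_list():
    records = [{"file_name": "img_{}.jpg".format(i), "image_id": i} for i in range(100)]
    cached = DiskCachedList(records, strategy="batched_static")
    return len(cached), cached[0]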
| d2go-main | d2go/data/disk_cache.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import copy
import json
import logging
import os
import shlex
import subprocess
from collections import defaultdict
from typing import Callable, Dict, List, Optional
import detectron2.utils.comm as comm
from d2go.data.cache_util import _cache_json_file
from detectron2.data import MetadataCatalog
from detectron2.structures import BoxMode
from detectron2.utils.file_io import PathManager
from pycocotools.coco import COCO
logger = logging.getLogger(__name__)
class InMemoryCOCO(COCO):
def __init__(self, loaded_json):
"""
In this in-memory version of COCO we don't load json from the file,
but direclty use a loaded_json instead. This approach improves
both robustness and efficiency, as when we convert from other formats
to COCO format, we don't need to save and re-load the json again.
"""
# load dataset
self.dataset = loaded_json
self.anns = {}
self.cats = {}
self.imgs = {}
self.imgToAnns, self.catToImgs = defaultdict(list), defaultdict(list)
self.createIndex()
def extract_archive_file(archive_fn: str, im_dir: str):
if not PathManager.exists(im_dir) or not PathManager.ls(im_dir):
# Dataset is not deployed. Deploy it.
archive_fns = archive_fn
# A dataset may be composed of several tgz files, or only one.
# If one, make it into a list to make the code later more general
if not isinstance(archive_fns, list):
archive_fns = [archive_fns]
logger.info(
"Extracting datasets {} to local machine at {}".format(archive_fns, im_dir)
)
if not PathManager.exists(im_dir):
PathManager.mkdirs(im_dir)
for archive_fn in archive_fns:
# Extract the tgz file directly into the target directory,
# without precopy.
# Note that the tgz file contains a root directory that
# we do not want, hence the strip-components=1
commandUnpack = (
"tar -mxzf {src_file} -C {tgt_dir} " "--strip-components=1"
).format(src_file=archive_fn, tgt_dir=im_dir)
assert not subprocess.call(shlex.split(commandUnpack)), "Failed to unpack"
logger.info("Extracted {}".format(archive_fn))
COCOTEXT_DATASET_CONVERSION_STATUS = {}
def save_converted_json(target_json, convert_coco_dict):
if target_json in COCOTEXT_DATASET_CONVERSION_STATUS:
return
PathManager.mkdirs(os.path.dirname(target_json))
if comm.get_local_rank() == 0:
with PathManager.open(target_json, "w") as f:
json.dump(convert_coco_dict, f)
comm.synchronize()
COCOTEXT_DATASET_CONVERSION_STATUS[target_json] = True
def convert_coco_text_to_coco_detection_json(
source_json: str,
target_json: str,
set_type: Optional[str] = None,
min_img_size: int = 100,
text_cat_id: int = 1,
) -> Dict:
"""
This function converts a COCOText style JSON to a COCODetection style
JSON.
For COCOText see: https://vision.cornell.edu/se3/coco-text-2/
For COCODetection see: http://cocodataset.org/#overview
"""
with PathManager.open(source_json, "r") as f:
coco_text_json = json.load(f)
coco_text_json["annotations"] = list(coco_text_json["anns"].values())
coco_text_json["images"] = list(coco_text_json["imgs"].values())
if set_type is not None:
# COCO Text style JSONs often mix test, train, and val sets.
# We need to make sure we only use the data type we want.
coco_text_json["images"] = [
x for x in coco_text_json["images"] if x["set"] == set_type
]
coco_text_json["categories"] = [{"name": "text", "id": text_cat_id}]
del coco_text_json["cats"]
del coco_text_json["imgs"]
del coco_text_json["anns"]
for ann in coco_text_json["annotations"]:
ann["category_id"] = text_cat_id
ann["iscrowd"] = 0
# Don't evaluate the model on illegible words
if set_type == "val" and ann["legibility"] != "legible":
ann["ignore"] = True
# Some datasets seem to have extremely small images which break downstream
# operations. If min_img_size is set, we can remove these.
coco_text_json["images"] = [
x
for x in coco_text_json["images"]
if x["height"] >= min_img_size and x["width"] >= min_img_size
]
# Remap image_ids if necessary
if isinstance(coco_text_json["images"][0]["id"], str):
image_id_remap = {
x["id"]: id_no for (id_no, x) in enumerate(coco_text_json["images"])
}
for x in coco_text_json["images"]:
x["id"] = image_id_remap[x["id"]]
for x in coco_text_json["annotations"]:
if x["image_id"] in image_id_remap:
x["image_id"] = image_id_remap[x["image_id"]]
save_converted_json(target_json, coco_text_json)
return coco_text_json
def valid_bbox(bbox_xywh: List[int], img_w: int, img_h: int) -> bool:
if (
bbox_xywh is None
or not len(bbox_xywh) == 4
or (bbox_xywh[3] == 0 or bbox_xywh[2] == 0)
or not (0 <= bbox_xywh[0] <= img_w - bbox_xywh[2])
or not (0 <= bbox_xywh[1] <= img_h - bbox_xywh[3])
):
return False
return True
def valid_bbox_rotated(bbox_xywha: List[int], img_w: int, img_h: int) -> bool:
if (
bbox_xywha is None
or (bbox_xywha[3] == 0 or bbox_xywha[2] == 0)
or not (
0.4 * bbox_xywha[2] <= bbox_xywha[0] <= img_w - bbox_xywha[2] * 0.4
) # using 0.4*h and 0.4*w to give some leeway for rotation but still remove huge bboxes for training stability
or not (0.4 * bbox_xywha[3] <= bbox_xywha[1] <= img_h - bbox_xywha[3] * 0.4)
):
return False
return True
def convert_coco_annotations(
anno_dict_list: List[Dict],
record: Dict,
remapped_id: Dict,
error_report: Dict,
filter_invalid_bbox: Optional[bool] = True,
):
"""
    Converts annotations from COCO format to the internal format while applying
    some filtering.
"""
converted_annotations = []
for anno in anno_dict_list:
# Check that the image_id in this annotation is the same. This fails
# only when the data parsing logic or the annotation file is buggy.
assert anno["image_id"] == record["image_id"]
assert anno.get("ignore", 0) == 0
# Copy fields that do not need additional conversion
fields_to_copy = [
"iscrowd",
"bbox",
"bbox_mode",
"keypoints",
"category_id",
"extras",
"point_coords",
"point_labels",
"associations",
"file_name",
]
# NOTE: maybe use MetadataCatalog for this
obj = {field: anno[field] for field in fields_to_copy if field in anno}
# Filter out bad annotations where category do not match
if obj.get("category_id", None) not in remapped_id:
continue
# Bounding boxes: convert and filter out bad bounding box annotations
bbox_object = obj.get("bbox", None)
if bbox_object:
if "bbox_mode" in obj:
bbox_object = BoxMode.convert(
bbox_object, obj["bbox_mode"], BoxMode.XYWH_ABS
)
else:
# Assume default box mode is always (x, y, w h)
error_report["without_bbox_mode"].cnt += 1
obj["bbox_mode"] = (
BoxMode.XYWHA_ABS if len(obj["bbox"]) == 5 else BoxMode.XYWH_ABS
)
if obj["bbox_mode"] != BoxMode.XYWHA_ABS: # for horizontal bboxes
if (
filter_invalid_bbox
and record.get("width")
and record.get("height")
and not valid_bbox(bbox_object, record["width"], record["height"])
):
error_report["without_valid_bounding_box"].cnt += 1
continue
else: # for rotated bboxes in XYWHA format
if (
filter_invalid_bbox
and record.get("width")
and record.get("height")
and not valid_bbox_rotated(
bbox_object, record["width"], record["height"]
)
):
error_report["without_valid_bounding_box"].cnt += 1
continue
# Segmentation: filter and add segmentation
segm = anno.get("segmentation", None)
if segm: # either list[list[float]] or dict(RLE)
if not isinstance(segm, dict):
# filter out invalid polygons (< 3 points)
segm = [poly for poly in segm if len(poly) % 2 == 0 and len(poly) >= 6]
if len(segm) == 0:
error_report["without_valid_segmentation"].cnt += 1
continue # ignore this instance
obj["segmentation"] = segm
# Remap ids
obj["category_id"] = remapped_id[obj["category_id"]]
converted_annotations.append(obj)
return converted_annotations
# Error entry class for reporting coco conversion issues
class ErrorEntry:
def __init__(self, error_name, msg, cnt=0):
self.error_name = error_name
self.cnt = cnt
self.msg = msg
def __repr__(self):
return f"{self.msg} for {self.error_name}, count = {self.cnt}"
def print_conversion_report(ann_error_report, image_error_report, ex_warning_fn):
# Report image errors
report_str = ""
for error_key in image_error_report:
if image_error_report[error_key].cnt > 0:
report_str += f"\t{image_error_report[error_key]}\n"
if error_key == "ignore_image_root" and ex_warning_fn:
report_str += f"\texample file name {ex_warning_fn}\n"
# Report annotation errors
for error_key in ann_error_report:
if ann_error_report[error_key].cnt > 0:
report_str += f"\t{ann_error_report[error_key]}\n"
if len(report_str):
logger.warning(f"Conversion issues:\n{report_str}")
def _assign_annotations_to_record(
record: Dict, converted_anns: List[Dict], all_cat_names: Optional[List[str]]
) -> None:
record["annotations"] = converted_anns
if converted_anns and all(
[ann.get("file_name", "").endswith(".png") for ann in converted_anns]
):
if len(converted_anns) == 1:
record["sem_seg_file_name"] = converted_anns[0]["file_name"]
return
assert (
all_cat_names
), f"all_cat_names needs to be specified for MCS dataset: {converted_anns}"
record["multi_sem_seg_file_names"] = {
all_cat_names[ann["category_id"]]: ann["file_name"]
for ann in converted_anns
}
def _process_associations(
record: Dict, converted_anns: List[Dict], _post_process_: Optional[Callable]
) -> None:
post_process_dict = {"_post_process_": _post_process_} if _post_process_ else {}
record.update(post_process_dict)
if "associations" not in record or "associations" not in converted_anns[0]:
return
assert (
len(converted_anns) == 1
), "Only one annotation expected when associated frames exist!"
for key, associated_ann in converted_anns[0]["associations"].items():
if key not in record["associations"]:
continue
record["associations"][key] = {
"file_name": record["associations"][key],
"sem_seg_file_name": associated_ann,
}
record["associations"][key].update(post_process_dict)
# Following D23593142 to save memory
record["associations"] = list(record["associations"])
def convert_to_dict_list(
image_root: str,
remapped_id: Dict,
imgs: List[Dict],
anns: List[Dict],
dataset_name: Optional[str] = None,
all_cat_names: Optional[List[str]] = None,
image_direct_copy_keys: Optional[List[str]] = None,
filter_invalid_bbox: Optional[bool] = True,
filter_empty_annotations: Optional[bool] = True,
_post_process_: Optional[Callable] = None,
) -> List[Dict]:
ann_error_report = {
name: ErrorEntry(name, msg, 0)
for name, msg in [
("without_valid_segmentation", "Instance filtered"),
("without_valid_bounding_box", "Instance filtered"),
("without_bbox_mode", "Warning"),
]
}
image_error_report = {
name: ErrorEntry(name, msg, 0)
for name, msg in [
("ignore_image_root", f"Image root ignored {image_root}"),
(
"no_annotations",
"Image filtered" if filter_empty_annotations else "Warning",
),
]
}
ex_warning_fn = None
default_record = {"dataset_name": dataset_name} if dataset_name else {}
converted_dict_list = []
for (img_dict, anno_dict_list) in zip(imgs, anns):
record = copy.deepcopy(default_record)
        # NOTE: besides using a (relative) path in the "file_name" field to
        # represent the image resource, "extended coco" also supports using a
        # uri that represents an image as a single string, e.g.
        # "everstore_handle://xxx".
if "://" not in img_dict["file_name"]:
record["file_name"] = os.path.join(image_root, img_dict["file_name"])
else:
if image_root is not None:
image_error_report["ignore_image_root"].cnt += 1
ex_warning_fn = (
ex_warning_fn if ex_warning_fn else img_dict["file_name"]
)
record["file_name"] = img_dict["file_name"]
# Setup image info and id
if "height" in img_dict or "width" in img_dict:
record["height"] = img_dict["height"]
record["width"] = img_dict["width"]
record["image_id"] = img_dict["id"]
# Convert annotation for dataset_dict
converted_anns = convert_coco_annotations(
anno_dict_list,
record,
remapped_id,
ann_error_report,
filter_invalid_bbox=filter_invalid_bbox,
)
if len(converted_anns) == 0:
image_error_report["no_annotations"].cnt += 1
if filter_empty_annotations:
continue
_assign_annotations_to_record(record, converted_anns, all_cat_names)
if "associations" in img_dict:
record["associations"] = img_dict["associations"]
_process_associations(record, converted_anns, _post_process_)
# Copy keys if additionally asked
if image_direct_copy_keys:
for c_key in image_direct_copy_keys:
assert c_key in img_dict, f"{c_key} not in coco image entry annotation"
record[c_key] = img_dict[c_key]
converted_dict_list.append(record)
print_conversion_report(ann_error_report, image_error_report, ex_warning_fn)
assert len(converted_dict_list) != 0, (
f"Loaded zero entries from {dataset_name}. \n"
f" Size of inputs (imgs={len(imgs)}, anns={len(anns)})\n"
f" Image issues ({image_error_report})\n"
f" Instance issues ({ann_error_report})\n"
)
return converted_dict_list
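# NOTE (illustrative, not from the original source): each record returned by
# convert_to_dict_list follows the "Detectron2 Dataset" dict format; which fields
# are present depends on the input json, but a typical entry looks roughly like:
#   {
#       "file_name": "<image_root>/000000000001.jpg",  # or a uri like "everstore_handle://xxx"
#       "height": 480,
#       "width": 640,
#       "image_id": 1,
#       "annotations": [
#           {"bbox": [...], "bbox_mode": ..., "category_id": 0, "segmentation": [...]},
#       ],
#   }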
def coco_text_load(
coco_json_file: str,
image_root: str,
source_json_file: Optional[str] = None,
dataset_name: Optional[str] = None,
archive_file: Optional[str] = None,
) -> List[Dict]:
if archive_file is not None:
if comm.get_local_rank() == 0:
extract_archive_file(archive_file, image_root)
comm.synchronize()
if source_json_file is not None:
# Need to convert to coco detection format
loaded_json = convert_coco_text_to_coco_detection_json(
source_json_file, coco_json_file
)
return extended_coco_load(coco_json_file, image_root, dataset_name, loaded_json)
return extended_coco_load(
coco_json_file, image_root, dataset_name, loaded_json=None
)
def extended_coco_load(
json_file: str,
image_root: str,
dataset_name: Optional[str] = None,
loaded_json: Optional[str] = None,
    image_direct_copy_keys: Optional[List[str]] = None,
filter_invalid_bbox: Optional[bool] = True,
filter_empty_annotations: Optional[bool] = True,
_post_process_: Optional[Callable] = None,
) -> List[Dict]:
"""
Load a json file with COCO's annotation format.
Currently only supports instance segmentation annotations.
Args:
json_file (str): full path to the json file in COCO annotation format.
image_root (str): the directory where the images in this json file exists.
dataset_name (str): the name of the dataset (e.g., "coco", "cityscapes").
If provided, this function will also put "thing_classes" into
the metadata associated with this dataset.
loaded_json (str): optional loaded json content, used in InMemoryCOCO to
avoid loading from json_file again.
Returns:
list[dict]: a list of dicts in "Detectron2 Dataset" format. (See DATASETS.md)
Notes:
1. This function does not read the image files.
The results do not have the "image" field.
        2. When `dataset_name=='coco'`, this function will translate COCO's
           non-contiguous category ids to contiguous ids in [0, 80).
"""
json_file = _cache_json_file(json_file)
if loaded_json is None:
coco_api = COCO(json_file)
else:
coco_api = InMemoryCOCO(loaded_json)
associations = coco_api.dataset.get("associations", {})
# Collect classes and remap them starting from 0
all_cat_ids = coco_api.getCatIds()
all_cats = coco_api.loadCats(all_cat_ids)
all_cat_names = [c["name"] for c in sorted(all_cats, key=lambda x: x["id"])]
# Setup id remapping
remapped_id = {}
for cat_id, cat in zip(all_cat_ids, all_cats):
remapped_id[cat_id] = all_cat_names.index(cat["name"])
# Register dataset in metadata catalog
if dataset_name is not None:
# overwrite attrs
meta_dict = MetadataCatalog.get(dataset_name).as_dict()
meta_dict["thing_classes"] = all_cat_names
meta_dict["thing_dataset_id_to_contiguous_id"] = remapped_id
# update MetadataCatalog (cannot change inplace, have to remove)
MetadataCatalog.remove(dataset_name)
MetadataCatalog.get(dataset_name).set(**meta_dict)
# assert the change
assert MetadataCatalog.get(dataset_name).thing_classes == all_cat_names
# Sort indices for reproducible results
img_ids = sorted(coco_api.imgs.keys())
imgs = coco_api.loadImgs(img_ids)
anns = [coco_api.imgToAnns[img_id] for img_id in img_ids]
logger.info("Loaded {} images from {}".format(len(imgs), json_file))
for img in imgs:
association = associations.get(img["file_name"], {})
if association:
img["associations"] = association
# Return the coco converted to record list
return convert_to_dict_list(
image_root,
remapped_id,
imgs,
anns,
dataset_name=dataset_name,
all_cat_names=all_cat_names,
image_direct_copy_keys=image_direct_copy_keys,
filter_invalid_bbox=filter_invalid_bbox,
filter_empty_annotations=filter_empty_annotations,
_post_process_=_post_process_,
)
if __name__ == "__main__":
"""
Test the COCO json dataset loader.
Usage:
        python -m d2go.data.extended_coco \
path/to/json path/to/image_root dataset_name
"""
import sys
import cv2
from detectron2.utils.logger import setup_logger
from detectron2.utils.visualizer import Visualizer
logger = setup_logger(name=__name__)
meta = MetadataCatalog.get(sys.argv[3])
    dicts = extended_coco_load(sys.argv[1], sys.argv[2], sys.argv[3])
logger.info("Done loading {} samples.".format(len(dicts)))
    os.makedirs("coco-data-vis", exist_ok=True)
    for d in dicts:
img = cv2.imread(d["file_name"])[:, :, ::-1]
visualizer = Visualizer(img, metadata=meta)
vis = visualizer.draw_dataset_dict(d)
fpath = os.path.join("coco-data-vis", os.path.basename(d["file_name"]))
vis.save(fpath)
| d2go-main | d2go/data/extended_coco.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import os
from mobile_cv.common.misc.oss_utils import fb_overwritable
@fb_overwritable()
def _cache_json_file(json_file):
# TODO: entirely rely on PathManager for caching
json_file = os.fspath(json_file)
return json_file
| d2go-main | d2go/data/cache_util.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import logging
import os
from d2go.data.extended_coco import _cache_json_file
from detectron2.data import MetadataCatalog
from detectron2.structures import BoxMode
from fvcore.common.timer import Timer
"""
This file contains functions to parse LVIS-format annotations into dicts in the
"Detectron2 format".
"""
logger = logging.getLogger(__name__)
def extended_lvis_load(json_file, image_root, dataset_name=None):
"""
Load a json file in LVIS's annotation format.
Args:
json_file (str): full path to the LVIS json annotation file.
image_root (str): the directory where the images in this json file exists.
dataset_name (str): the name of the dataset (e.g., "lvis_v0.5_train").
If provided, this function will put "thing_classes" into the metadata
associated with this dataset.
Returns:
list[dict]: a list of dicts in "Detectron2 Dataset" format. (See DATASETS.md)
Notes:
1. This function does not read the image files.
The results do not have the "image" field.
"""
from lvis import LVIS
json_file = _cache_json_file(json_file)
timer = Timer()
lvis_api = LVIS(json_file)
if timer.seconds() > 1:
logger.info(
"Loading {} takes {:.2f} seconds.".format(json_file, timer.seconds())
)
# sort indices for reproducible results
img_ids = sorted(list(lvis_api.imgs.keys()))
# imgs is a list of dicts, each looks something like:
# {'license': 4,
# 'url': 'http://farm6.staticflickr.com/5454/9413846304_881d5e5c3b_z.jpg',
# 'file_name': 'COCO_val2014_000000001268.jpg',
# 'height': 427,
# 'width': 640,
# 'date_captured': '2013-11-17 05:57:24',
# 'id': 1268}
imgs = lvis_api.load_imgs(img_ids)
# anns is a list[list[dict]], where each dict is an annotation
# record for an object. The inner list enumerates the objects in an image
# and the outer list enumerates over images. Example of anns[0]:
# [{'segmentation': [[192.81,
# 247.09,
# ...
# 219.03,
# 249.06]],
# 'area': 1035.749,
# 'image_id': 1268,
# 'bbox': [192.81, 224.8, 74.73, 33.43],
# 'category_id': 16,
# 'id': 42986},
# ...]
anns = [lvis_api.img_ann_map[img_id] for img_id in img_ids]
# Sanity check that each annotation has a unique id
ann_ids = [ann["id"] for anns_per_image in anns for ann in anns_per_image]
assert len(set(ann_ids)) == len(
ann_ids
), "Annotation ids in '{}' are not unique".format(json_file)
imgs_anns = list(zip(imgs, anns))
logger.info(
"Loaded {} images in the LVIS format from {}".format(len(imgs_anns), json_file)
)
dataset_dicts = []
count_ignore_image_root_warning = 0
for (img_dict, anno_dict_list) in imgs_anns:
record = {}
if "://" not in img_dict["file_name"]:
file_name = img_dict["file_name"]
if img_dict["file_name"].startswith("COCO"):
                # Convert from the COCO 2014 file naming convention of
# COCO_[train/val/test]2014_000000000000.jpg to the 2017 naming
# convention of 000000000000.jpg (LVIS v1 will fix this naming issue)
file_name = file_name[-16:]
record["file_name"] = os.path.join(image_root, file_name)
else:
if image_root is not None:
count_ignore_image_root_warning += 1
if count_ignore_image_root_warning == 1:
logger.warning(
(
"Found '://' in file_name: {}, ignore image_root: {}"
"(logged once per dataset)."
).format(img_dict["file_name"], image_root)
)
record["file_name"] = img_dict["file_name"]
record["height"] = img_dict["height"]
record["width"] = img_dict["width"]
record["not_exhaustive_category_ids"] = img_dict.get(
"not_exhaustive_category_ids", []
)
record["neg_category_ids"] = img_dict.get("neg_category_ids", [])
image_id = record["image_id"] = img_dict["id"]
objs = []
for anno in anno_dict_list:
# Check that the image_id in this annotation is the same as
# the image_id we're looking at.
# Fails only when the data parsing logic or the annotation file is buggy.
assert anno["image_id"] == image_id
obj = {"bbox": anno["bbox"], "bbox_mode": BoxMode.XYWH_ABS}
obj["category_id"] = (
anno["category_id"] - 1
) # Convert 1-indexed to 0-indexed
segm = anno["segmentation"]
# filter out invalid polygons (< 3 points)
valid_segm = [
poly for poly in segm if len(poly) % 2 == 0 and len(poly) >= 6
]
assert len(segm) == len(
valid_segm
), "Annotation contains an invalid polygon with < 3 points"
assert len(segm) > 0
obj["segmentation"] = segm
objs.append(obj)
record["annotations"] = objs
dataset_dicts.append(record)
if dataset_name:
meta = MetadataCatalog.get(dataset_name)
meta.thing_classes = get_extended_lvis_instances_meta(lvis_api)["thing_classes"]
return dataset_dicts
def get_extended_lvis_instances_meta(lvis_api):
cat_ids = lvis_api.get_cat_ids()
categories = lvis_api.load_cats(cat_ids)
assert min(cat_ids) == 1 and max(cat_ids) == len(
cat_ids
), "Category ids are not in [1, #categories], as expected"
extended_lvis_categories = [k for k in sorted(categories, key=lambda x: x["id"])]
thing_classes = [k["name"] for k in extended_lvis_categories]
meta = {"thing_classes": thing_classes}
return meta
if __name__ == "__main__":
"""
Test the LVIS json dataset loader.
Usage:
        python -m d2go.data.extended_lvis \
path/to/json path/to/image_root dataset_name vis_limit
"""
import sys
import detectron2.data.datasets # noqa # add pre-defined metadata
import numpy as np
from detectron2.utils.logger import setup_logger
from detectron2.utils.visualizer import Visualizer
from PIL import Image
logger = setup_logger(name=__name__)
meta = MetadataCatalog.get(sys.argv[3])
dicts = extended_lvis_load(sys.argv[1], sys.argv[2], sys.argv[3])
logger.info("Done loading {} samples.".format(len(dicts)))
dirname = "lvis-data-vis"
os.makedirs(dirname, exist_ok=True)
for d in dicts[: int(sys.argv[4])]:
img = np.array(Image.open(d["file_name"]))
visualizer = Visualizer(img, metadata=meta)
vis = visualizer.draw_dataset_dict(d)
fpath = os.path.join(dirname, os.path.basename(d["file_name"]))
vis.save(fpath)
| d2go-main | d2go/data/extended_lvis.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import functools
import importlib
import logging
import os
from collections import namedtuple
from d2go.data.extended_coco import coco_text_load, extended_coco_load
from d2go.data.extended_lvis import extended_lvis_load
from d2go.data.keypoint_metadata_registry import get_keypoint_metadata
from d2go.utils.helper import get_dir_path
from detectron2.data import DatasetCatalog, MetadataCatalog
from detectron2.utils.registry import Registry
from mobile_cv.common.misc.oss_utils import fb_overwritable
logger = logging.getLogger(__name__)
D2GO_DATASETS_BASE_MODULE = "d2go.datasets"
IM_DIR = "image_directory"
ANN_FN = "annotation_file"
LOAD_KWARGS = "load_kwargs"
COCO_REGISTER_FUNCTION_REGISTRY = Registry("COCO_REGISTER_FUNCTION_REGISTRY")
COCO_REGISTER_FUNCTION_REGISTRY.__doc__ = "Registry for COCO dataset register functions"
InjectedCocoEntry = namedtuple("InjectedCocoEntry", ["func", "split_dict"])
INJECTED_COCO_DATASETS_LUT = {}
def get_coco_register_function(cfg):
name = cfg.D2GO_DATA.DATASETS.COCO_INJECTION.REGISTER_FUNCTION
return COCO_REGISTER_FUNCTION_REGISTRY.get(name)
def _import_dataset(module_name):
return importlib.import_module(
"{}.{}".format(D2GO_DATASETS_BASE_MODULE, module_name)
)
@COCO_REGISTER_FUNCTION_REGISTRY.register()
def _register_extended_coco(dataset_name, split_dict):
json_file = split_dict[ANN_FN]
image_root = split_dict[IM_DIR]
load_kwargs = split_dict.get(LOAD_KWARGS, {})
# 1. register a function which returns dicts
load_coco_json_func = functools.partial(
extended_coco_load,
json_file=json_file,
image_root=image_root,
dataset_name=dataset_name,
**load_kwargs,
)
DatasetCatalog.register(dataset_name, load_coco_json_func)
# 2. Optionally, add metadata about this split,
# since they might be useful in evaluation, visualization or logging
evaluator_type = split_dict.get("evaluator_type", "coco")
meta_data = split_dict.get("meta_data", {})
MetadataCatalog.get(dataset_name).set(
evaluator_type=evaluator_type,
json_file=json_file,
image_root=image_root,
**meta_data,
)
_add_additional_extended_coco_metadata(dataset_name)
@fb_overwritable()
def _add_additional_extended_coco_metadata(dataset_name):
pass
def _register_extended_lvis(dataset_name, split_dict):
json_file = split_dict[ANN_FN]
image_root = split_dict[IM_DIR]
# 1. register a function which returns dicts
load_lvis_json_func = functools.partial(
extended_lvis_load,
json_file=json_file,
image_root=image_root,
dataset_name=dataset_name,
)
DatasetCatalog.register(dataset_name, load_lvis_json_func)
# 2. Optionally, add metadata about this split,
# since they might be useful in evaluation, visualization or logging
evaluator_type = split_dict.get("evaluator_type", "lvis")
MetadataCatalog.get(dataset_name).set(
evaluator_type=evaluator_type, json_file=json_file, image_root=image_root
)
def _register_coco_text(dataset_name, split_dict):
source_json_file = split_dict[ANN_FN]
coco_json_file = "/tmp/{}.json".format(dataset_name)
ARCHIVE_FN = "archive_file"
# 1. register a function which returns dicts
DatasetCatalog.register(
dataset_name,
functools.partial(
coco_text_load,
coco_json_file=coco_json_file,
image_root=split_dict[IM_DIR],
source_json_file=source_json_file,
dataset_name=dataset_name,
archive_file=split_dict.get(ARCHIVE_FN, None),
),
)
# 2. Optionally, add metadata about this split,
# since they might be useful in evaluation, visualization or logging
evaluator_type = split_dict.get("evaluator_type", "coco")
MetadataCatalog.get(dataset_name).set(
json_file=coco_json_file,
image_root=split_dict[IM_DIR],
evaluator_type=evaluator_type,
)
def inject_coco_datasets(cfg):
names = cfg.D2GO_DATA.DATASETS.COCO_INJECTION.NAMES
im_dirs = cfg.D2GO_DATA.DATASETS.COCO_INJECTION.IM_DIRS
json_files = cfg.D2GO_DATA.DATASETS.COCO_INJECTION.JSON_FILES
metadata_type = cfg.D2GO_DATA.DATASETS.COCO_INJECTION.KEYPOINT_METADATA
_register_func = get_coco_register_function(cfg)
assert len(names) == len(im_dirs) == len(json_files)
for ds_index, (name, im_dir, json_file) in enumerate(
zip(names, im_dirs, json_files)
):
split_dict = {IM_DIR: im_dir, ANN_FN: json_file}
if len(metadata_type) != 0:
split_dict["meta_data"] = get_keypoint_metadata(metadata_type[ds_index])
logger.info("Inject coco dataset {}: {}".format(name, split_dict))
_register_func(name, split_dict)
INJECTED_COCO_DATASETS_LUT[name] = InjectedCocoEntry(
func=_register_func, split_dict=split_dict
)
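# Illustrative sketch (not part of the original source): the config entries that
# inject_coco_datasets reads. It assumes `cfg` is a D2Go default config (so the
# COCO_INJECTION keys exist); the dataset name and paths are hypothetical
# placeholders, normally supplied via the yaml config rather than in code.
def _example_inject_coco_datasets(cfg):
    cfg.D2GO_DATA.DATASETS.COCO_INJECTION.NAMES = ["my_injected_coco_train"]
    cfg.D2GO_DATA.DATASETS.COCO_INJECTION.IM_DIRS = ["/data/my_coco/images"]
    cfg.D2GO_DATA.DATASETS.COCO_INJECTION.JSON_FILES = ["/data/my_coco/train.json"]
    inject_coco_datasets(cfg)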
def register_dataset_split(dataset_name, split_dict):
"""
Register a dataset to detectron2's DatasetCatalog and MetadataCatalog.
"""
_DATASET_TYPE_LOAD_FUNC_MAP = {
"COCODataset": _register_extended_coco,
"COCOText": _register_coco_text,
"COCOTextDataset": _register_coco_text,
"LVISDataset": _register_extended_lvis,
}
factory = split_dict.get("DS_TYPE", "COCODataset")
_DATASET_TYPE_LOAD_FUNC_MAP[factory](
dataset_name=dataset_name, split_dict=split_dict
)
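# Illustrative sketch (not part of the original source): a minimal split_dict
# accepted by register_dataset_split; the dataset name and file paths below are
# hypothetical placeholders.
def _example_register_dataset_split():
    example_split = {
        "DS_TYPE": "COCODataset",  # default dataset type when the key is omitted
        IM_DIR: "/data/my_coco/images",  # hypothetical image directory
        ANN_FN: "/data/my_coco/train.json",  # hypothetical COCO-format json
    }
    register_dataset_split("my_coco_train", example_split)
    # Once registered, DatasetCatalog.get("my_coco_train") lazily loads and
    # returns the list of dataset dicts.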
def register_json_datasets():
json_dataset_names = [
os.path.splitext(filename)[0]
for filename in os.listdir(
get_dir_path(D2GO_DATASETS_BASE_MODULE.replace(".", "/"))
)
if filename.startswith("json_dataset_")
]
json_dataset_names = [
x
for x in json_dataset_names
if x
not in [
"json_dataset_lvis",
"json_dataset_oculus_external",
"json_dataset_people_ai_foot_tracking",
]
]
# load all splits from json datasets
all_splits = {}
for dataset in json_dataset_names:
module = _import_dataset(dataset)
assert (
len(set(all_splits).intersection(set(module.DATASETS))) == 0
), "Name confliction when loading {}".format(dataset)
all_splits.update(module.DATASETS)
# register all splits
for split_name in all_splits:
split_dict = all_splits[split_name]
register_dataset_split(split_name, split_dict)
def register_builtin_datasets():
builtin_dataset_names = [
os.path.splitext(filename)[0]
for filename in os.listdir(
get_dir_path(D2GO_DATASETS_BASE_MODULE.replace(".", "/"))
)
if filename.startswith("builtin_dataset_")
]
for dataset in builtin_dataset_names:
_import_dataset(dataset)
def register_dynamic_datasets(cfg):
for dataset in cfg.D2GO_DATA.DATASETS.DYNAMIC_DATASETS:
assert dataset.startswith("dynamic_dataset_")
_import_dataset(dataset)
| d2go-main | d2go/data/datasets.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
| d2go-main | d2go/data/__init__.py |