python_code | repo_name | file_path
---|---|---
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import pathlib
from typing import Any, Dict, List, Optional, Union
if __name__ == "__main__" and __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from .container import Container
class Maintainer(abc.ABC):
@abc.abstractmethod
def triton_container(
self, command: str, image: str, devices: List, volumes: Dict, environment: Dict, log_file: Union[pathlib.Path, str]
) -> Container:
"""
Return a Triton container
Args:
command: Triton Server command to be executed
image: Container image
devices: List of device ids that have to be available in the container
volumes: Volumes mapping
environment: Environment variables to set in the container
log_file: File path where server logs have to be saved
Returns:
Container object
"""
pass
@abc.abstractmethod
def build_image(
self,
*,
image_file_path: pathlib.Path,
image_name: str,
workdir_path: Optional[pathlib.Path] = None,
build_args: Optional[Dict[str, Any]] = None,
) -> None:
pass
| DeepLearningExamples-master | PyTorch/Forecasting/TFT/triton/runner/maintainer/maintainer.py |
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class ContainerNotStarted(Exception):
pass
| DeepLearningExamples-master | PyTorch/Forecasting/TFT/triton/runner/maintainer/exceptions.py |
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| DeepLearningExamples-master | PyTorch/Forecasting/TFT/triton/runner/maintainer/docker/__init__.py |
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import pathlib
import docker
from docker.models.containers import ExecResult
if __name__ == "__main__" and __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from ..container import Container
class DockerContainer(Container):
def __init__(self, name: str):
super().__init__(name)
self._container = None
self._docker_client = docker.from_env()
self._docker_api_client = docker.APIClient()
@abc.abstractmethod
def start(self):
"""
Start container
"""
pass
@abc.abstractmethod
def stop(self):
"""
Stop container
"""
@abc.abstractmethod
def run(self, command: str) -> ExecResult:
"""
Run command inside container
Args:
command: command to execute
Returns:
ExecResult
"""
pass
| DeepLearningExamples-master | PyTorch/Forecasting/TFT/triton/runner/maintainer/docker/container.py |
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pathlib
from typing import Any, Dict, List, Optional, Union
import docker
if __name__ == "__main__" and __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from ...logger import LOGGER
from ..maintainer import Maintainer
from .container import DockerContainer
from .containers import TritonServerContainer
class DockerMaintainer(Maintainer):
def triton_container(
self, command: str, image: str, devices: List, volumes: Dict, environment: Dict, log_file: Union[pathlib.Path, str]
) -> DockerContainer:
"""
Return a Triton container
Args:
command: Triton Server command to be executed
image: Container image
devices: List of device ids that have to be available in the container
volumes: Volumes mapping
environment: Environment variables to set in the container
log_file: File path where server logs have to be saved
Returns:
DockerContainer object
"""
return TritonServerContainer(
name="triton-server",
command=command,
image=image,
devices=devices,
volumes=volumes,
environment=environment,
log_file=log_file,
)
def build_image(
self,
*,
image_file_path: pathlib.Path,
image_name: str,
workdir_path: Optional[pathlib.Path] = None,
build_args: Optional[Dict[str, Any]] = None,
) -> None:
workdir_path = workdir_path or image_file_path.parent
build_args = build_args or {}
LOGGER.info(f"Building {image_name} docker image.")
LOGGER.debug(f" Using workdir: {workdir_path}")
LOGGER.debug(f" Dockerfile: {image_file_path}")
LOGGER.debug(f" Build args: {build_args}")
build_logs = list()
try:
docker_client = docker.from_env()
_, build_logs = docker_client.images.build(
path=workdir_path.resolve().as_posix(),
dockerfile=image_file_path.resolve().as_posix(),
tag=image_name,
buildargs=build_args,
network_mode="host",
rm=True,
)
except docker.errors.BuildError as e:
build_logs = e.build_log
raise e
finally:
for chunk in build_logs:
log = chunk.get("stream")
if log:
LOGGER.debug(log.rstrip())
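# A minimal usage sketch (illustrative assumptions only: the Dockerfile path, image
# name, volume mapping and device ids below are not part of this module):
#
#     maintainer = DockerMaintainer()
#     maintainer.build_image(
#         image_file_path=pathlib.Path("Dockerfile.triton"),
#         image_name="tft-triton:latest",
#     )
#     container = maintainer.triton_container(
#         command="tritonserver --model-repository=/models",
#         image="tft-triton:latest",
#         devices=["0"],
#         volumes={"/host/models": {"bind": "/models", "mode": "ro"}},
#         environment={},
#         log_file="triton.log",
#     )
#     container.start()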
| DeepLearningExamples-master | PyTorch/Forecasting/TFT/triton/runner/maintainer/docker/maintainer.py |
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .triton_server_container import TritonServerContainer
| DeepLearningExamples-master | PyTorch/Forecasting/TFT/triton/runner/maintainer/docker/containers/__init__.py |
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import pathlib
from threading import Thread
from typing import Dict, Generator, Union
from docker.models.containers import ExecResult
from docker.types import DeviceRequest, Ulimit
if __name__ == "__main__" and __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from ....logger import LOGGER
from ...exceptions import ContainerNotStarted
from ..container import DockerContainer
class TritonServerContainer(DockerContainer):
def __init__(
self,
name: str,
command: str,
image: str,
volumes: Dict,
devices: Union[list, int],
environment: Dict,
log_file: Union[pathlib.Path, str],
network: str = "host",
shm_size: str = "1G",
):
"""
Initialize Triton Server Container
Args:
name: Container name
command: Triton Server command to exec on container start
image: Docker Image
volumes: Volumes to mount inside container
devices: Devices that have to be visible in the container
environment: Environment variables
log_file: Path where logs should be saved
network: Network mode
shm_size: Shared memory size
"""
super().__init__(name)
self._image = image
self._command = command
self._volumes = volumes
self._devices = devices
self._environment = environment
self._network = network
self._shm_size = shm_size
self._triton_exec = None
self._logging_thread = None
self._log_file_path = pathlib.Path(log_file)
def start(self) -> None:
"""
Start Triton Server Container
"""
devices = [
DeviceRequest(capabilities=[["gpu"]], device_ids=self._devices),
]
LOGGER.info(f"Triton environment: {json.dumps(self._environment, indent=4)}")
LOGGER.info(f"Starting Triton container {self.name}.")
self._container = self._docker_client.containers.run(
image=self._image,
name=self.name,
device_requests=devices,
detach=True,
tty=True,
shm_size=self._shm_size,
ulimits=[
Ulimit(name="memlock", soft=-1, hard=-1),
Ulimit(name="stack", soft=67108864, hard=67108864),
],
volumes=self._volumes,
environment=self._environment,
network_mode=self._network,
auto_remove=True,
ipc_mode="host",
)
LOGGER.info(f"Triton command:")
LOGGER.info(f" {self._command}")
LOGGER.info(f"Starting Triton Server {self.name}.")
self._triton_exec = self._docker_api_client.exec_create(
container=self._container.id,
cmd=self._command,
)
stream_generator = self._docker_api_client.exec_start(exec_id=self._triton_exec["Id"], stream=True)
self._logging_thread = Thread(target=TritonServerContainer._logging, args=(self, stream_generator), daemon=True)
self._logging_thread.start()
def stop(self) -> None:
"""
Stop Triton Server Container and save logs to file
"""
if self._container is not None:
triton_result = self._docker_api_client.exec_inspect(self._triton_exec["Id"])
if triton_result.get("ExitCode") not in (0, None):
LOGGER.info(
f"Triton Inference Server instance {self.name} failed. Exit code: {triton_result.get('ExitCode')}"
)
LOGGER.info(f"Stopping triton server {self.name}.")
self._container.stop()
self._container = None
self._docker_client.close()
self._docker_api_client.close()
def run(self, command: str) -> ExecResult:
"""
Run command in container
Args:
command: Command to execute
Returns:
ExecResult
"""
if not self._container:
raise ContainerNotStarted("Triton Server Container is not running. Use .start() first.")
return self._container.exec_run(command)
def _logging(self, generator: Generator) -> None:
"""Triton logging thread for Triton Inference Server
Args:
generator (string generator): Triton log stream.
"""
with open(self._log_file_path, mode="w") as file:
try:
while True:
log = next(generator)
txt = log.decode("utf-8")
file.write(txt)
except StopIteration:
LOGGER.info(f"Saving Triton Inference Server {self.name} logs in {self._log_file_path}.")
| DeepLearningExamples-master | PyTorch/Forecasting/TFT/triton/runner/maintainer/docker/containers/triton_server_container.py |
| DeepLearningExamples-master | PyTorch/Detection/SSD/__init__.py |
# Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
from argparse import ArgumentParser
import torch
import numpy as np
from torch.optim.lr_scheduler import MultiStepLR
import torch.utils.data.distributed
from ssd.model import SSD300, ResNet, Loss
from ssd.utils import dboxes300_coco, Encoder
from ssd.logger import Logger, BenchLogger
from ssd.evaluate import evaluate
from ssd.train import train_loop, tencent_trick, load_checkpoint, benchmark_train_loop, benchmark_inference_loop
from ssd.data import get_train_loader, get_val_dataset, get_val_dataloader, get_coco_ground_truth
import dllogger as DLLogger
# Apex imports
try:
from apex.parallel import DistributedDataParallel as DDP
except ImportError:
raise ImportError("Please install APEX from https://github.com/nvidia/apex")
def generate_mean_std(args):
mean_val = [0.485, 0.456, 0.406]
std_val = [0.229, 0.224, 0.225]
mean = torch.tensor(mean_val).cuda()
std = torch.tensor(std_val).cuda()
view = [1, len(mean_val), 1, 1]
mean = mean.view(*view)
std = std.view(*view)
return mean, std
def make_parser():
parser = ArgumentParser(description="Train Single Shot MultiBox Detector"
" on COCO")
parser.add_argument('--data', '-d', type=str, default='/coco', required=True,
help='path to test and training data files')
parser.add_argument('--epochs', '-e', type=int, default=65,
help='number of epochs for training')
parser.add_argument('--batch-size', '--bs', type=int, default=32,
help='number of examples for each iteration')
parser.add_argument('--eval-batch-size', '--ebs', type=int, default=32,
help='number of examples for each evaluation iteration')
parser.add_argument('--no-cuda', action='store_true',
help='do not use CUDA/GPU even if available')
parser.add_argument('--seed', '-s', type=int,
help='manually set random seed for torch')
parser.add_argument('--checkpoint', type=str, default=None,
help='path to model checkpoint file')
parser.add_argument('--torchvision-weights-version', type=str, default="IMAGENET1K_V2",
choices=['IMAGENET1K_V1', 'IMAGENET1K_V2', 'DEFAULT'],
help='The torchvision weights version to use when --checkpoint is not specified')
parser.add_argument('--save', type=str, default=None,
help='save model checkpoints in the specified directory')
parser.add_argument('--mode', type=str, default='training',
choices=['training', 'evaluation', 'benchmark-training', 'benchmark-inference'])
parser.add_argument('--evaluation', nargs='*', type=int, default=[21, 31, 37, 42, 48, 53, 59, 64],
help='epochs at which to evaluate')
parser.add_argument('--multistep', nargs='*', type=int, default=[43, 54],
help='epochs at which to decay learning rate')
# Hyperparameters
parser.add_argument('--learning-rate', '--lr', type=float, default=2.6e-3,
help='learning rate')
parser.add_argument('--momentum', '-m', type=float, default=0.9,
help='momentum argument for SGD optimizer')
parser.add_argument('--weight-decay', '--wd', type=float, default=0.0005,
help='weight decay argument for SGD optimizer')
parser.add_argument('--warmup', type=int, default=None)
parser.add_argument('--benchmark-iterations', type=int, default=20, metavar='N',
help='Run N iterations while benchmarking (ignored in training and evaluation modes)')
parser.add_argument('--benchmark-warmup', type=int, default=20, metavar='N',
help='Number of warmup iterations for benchmarking')
parser.add_argument('--backbone', type=str, default='resnet50',
choices=['resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152'])
parser.add_argument('--backbone-path', type=str, default=None,
help='Path to checkpointed backbone. It should match the'
' backbone model declared with the --backbone argument.'
' When it is not provided, pretrained model from torchvision'
' will be downloaded.')
parser.add_argument('--num-workers', type=int, default=8)
parser.add_argument("--amp", dest='amp', action="store_true",
help="Enable Automatic Mixed Precision (AMP).")
parser.add_argument("--no-amp", dest='amp', action="store_false",
help="Disable Automatic Mixed Precision (AMP).")
parser.set_defaults(amp=True)
parser.add_argument("--allow-tf32", dest='allow_tf32', action="store_true",
help="Allow TF32 computations on supported GPUs.")
parser.add_argument("--no-allow-tf32", dest='allow_tf32', action="store_false",
help="Disable TF32 computations.")
parser.set_defaults(allow_tf32=True)
parser.add_argument('--data-layout', default="channels_last", choices=['channels_first', 'channels_last'],
help="Model data layout. It's recommended to use channels_first with --no-amp")
parser.add_argument('--log-interval', type=int, default=20,
help='Logging interval.')
parser.add_argument('--json-summary', type=str, default=None,
help='If provided, the json summary will be written to '
'the specified file.')
# Distributed
parser.add_argument('--local_rank', default=os.getenv('LOCAL_RANK',0), type=int,
help='Used for multi-process training. Can either be manually set ' +
'or automatically set by using \'python -m multiproc\'.')
return parser
def train(train_loop_func, logger, args):
# Use CUDA unless it was explicitly disabled with --no-cuda
use_cuda = not args.no_cuda
# Setup multi-GPU if necessary
args.distributed = False
if 'WORLD_SIZE' in os.environ:
args.distributed = int(os.environ['WORLD_SIZE']) > 1
if args.distributed:
torch.cuda.set_device(args.local_rank)
torch.distributed.init_process_group(backend='nccl', init_method='env://')
args.N_gpu = torch.distributed.get_world_size()
else:
args.N_gpu = 1
if args.seed is None:
args.seed = np.random.randint(1e4)
if args.distributed:
args.seed = (args.seed + torch.distributed.get_rank()) % 2**32
print("Using seed = {}".format(args.seed))
torch.manual_seed(args.seed)
np.random.seed(seed=args.seed)
# Setup data, defaults
dboxes = dboxes300_coco()
encoder = Encoder(dboxes)
cocoGt = get_coco_ground_truth(args)
train_loader = get_train_loader(args, args.seed - 2**31)
val_dataset = get_val_dataset(args)
val_dataloader = get_val_dataloader(val_dataset, args)
ssd300 = SSD300(backbone=ResNet(backbone=args.backbone,
backbone_path=args.backbone_path,
weights=args.torchvision_weights_version))
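# Linear learning-rate scaling: scale the base LR by the number of GPUs and by the
# per-GPU batch size relative to the reference batch size of 32.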
args.learning_rate = args.learning_rate * args.N_gpu * (args.batch_size / 32)
start_epoch = 0
iteration = 0
loss_func = Loss(dboxes)
if use_cuda:
ssd300.cuda()
loss_func.cuda()
optimizer = torch.optim.SGD(tencent_trick(ssd300), lr=args.learning_rate,
momentum=args.momentum, weight_decay=args.weight_decay)
scheduler = MultiStepLR(optimizer=optimizer, milestones=args.multistep, gamma=0.1)
if args.distributed:
ssd300 = DDP(ssd300)
if args.checkpoint is not None:
if os.path.isfile(args.checkpoint):
load_checkpoint(ssd300.module if args.distributed else ssd300, args.checkpoint)
checkpoint = torch.load(args.checkpoint,
map_location=lambda storage, loc: storage.cuda(torch.cuda.current_device()))
start_epoch = checkpoint['epoch']
iteration = checkpoint['iteration']
scheduler.load_state_dict(checkpoint['scheduler'])
optimizer.load_state_dict(checkpoint['optimizer'])
else:
print('Provided checkpoint is not path to a file')
return
inv_map = {v: k for k, v in val_dataset.label_map.items()}
total_time = 0
if args.mode == 'evaluation':
acc = evaluate(ssd300, val_dataloader, cocoGt, encoder, inv_map, args)
if args.local_rank == 0:
print('Model precision {} mAP'.format(acc))
return
scaler = torch.cuda.amp.GradScaler(enabled=args.amp)
mean, std = generate_mean_std(args)
for epoch in range(start_epoch, args.epochs):
start_epoch_time = time.time()
iteration = train_loop_func(ssd300, loss_func, scaler,
epoch, optimizer, train_loader, val_dataloader, encoder, iteration,
logger, args, mean, std)
if args.mode in ["training", "benchmark-training"]:
scheduler.step()
end_epoch_time = time.time() - start_epoch_time
total_time += end_epoch_time
if args.local_rank == 0:
logger.update_epoch_time(epoch, end_epoch_time)
if epoch in args.evaluation:
acc = evaluate(ssd300, val_dataloader, cocoGt, encoder, inv_map, args)
if args.local_rank == 0:
logger.update_epoch(epoch, acc)
if args.save and args.local_rank == 0:
print("saving model...")
obj = {'epoch': epoch + 1,
'iteration': iteration,
'optimizer': optimizer.state_dict(),
'scheduler': scheduler.state_dict(),
'label_map': val_dataset.label_info}
if args.distributed:
obj['model'] = ssd300.module.state_dict()
else:
obj['model'] = ssd300.state_dict()
os.makedirs(args.save, exist_ok=True)
save_path = os.path.join(args.save, f'epoch_{epoch}.pt')
torch.save(obj, save_path)
logger.log('model path', save_path)
train_loader.reset()
DLLogger.log((), { 'total time': total_time })
logger.log_summary()
def log_params(logger, args):
logger.log_params({
"dataset path": args.data,
"epochs": args.epochs,
"batch size": args.batch_size,
"eval batch size": args.eval_batch_size,
"no cuda": args.no_cuda,
"seed": args.seed,
"checkpoint path": args.checkpoint,
"mode": args.mode,
"eval on epochs": args.evaluation,
"lr decay epochs": args.multistep,
"learning rate": args.learning_rate,
"momentum": args.momentum,
"weight decay": args.weight_decay,
"lr warmup": args.warmup,
"backbone": args.backbone,
"backbone path": args.backbone_path,
"num workers": args.num_workers,
"AMP": args.amp,
"precision": 'amp' if args.amp else 'fp32',
})
if __name__ == "__main__":
parser = make_parser()
args = parser.parse_args()
args.local_rank = int(os.environ.get('LOCAL_RANK', args.local_rank))
if args.local_rank == 0:
os.makedirs('./models', exist_ok=True)
torch.backends.cuda.matmul.allow_tf32 = args.allow_tf32
torch.backends.cudnn.allow_tf32 = args.allow_tf32
torch.backends.cudnn.benchmark = True
# write the json summary only from the rank-0 process
args.json_summary = args.json_summary if args.local_rank == 0 else None
if args.mode == 'benchmark-training':
train_loop_func = benchmark_train_loop
logger = BenchLogger('Training benchmark', log_interval=args.log_interval,
json_output=args.json_summary)
args.epochs = 1
elif args.mode == 'benchmark-inference':
train_loop_func = benchmark_inference_loop
logger = BenchLogger('Inference benchmark', log_interval=args.log_interval,
json_output=args.json_summary)
args.epochs = 1
else:
train_loop_func = train_loop
logger = Logger('Training logger', log_interval=args.log_interval,
json_output=args.json_summary)
log_params(logger, args)
train(train_loop_func, logger, args)
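# Example invocations (illustrative; dataset path and GPU count are assumptions):
#   single GPU: python main.py --data /coco --save ./models
#   multi GPU:  torchrun --nproc_per_node=8 main.py --data /coco --save ./models
#               (torchrun sets LOCAL_RANK and WORLD_SIZE, which are read above)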
| DeepLearningExamples-master | PyTorch/Detection/SSD/main.py |
# Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ctypes
import time
import logging
import numpy as np
import torch
# DALI imports
import nvidia.dali as dali
from nvidia.dali.pipeline import Pipeline
from nvidia.dali.types import to_numpy_type
class COCOPipeline(Pipeline):
def __init__(self, batch_size, file_root, annotations_file, default_boxes,
device_id, num_shards,
output_fp16=False, output_nhwc=False, pad_output=False,
num_threads=1, seed=15):
super(COCOPipeline, self).__init__(batch_size=batch_size,
device_id=device_id,
num_threads=num_threads,
seed=seed)
if torch.distributed.is_initialized():
shard_id = torch.distributed.get_rank()
else:
shard_id = 0
# Data loader and image decoder
self.input = dali.ops.readers.COCO(file_root=file_root,
annotations_file=annotations_file,
shard_id=shard_id,
num_shards=num_shards,
ratio=True,
ltrb=True,
shuffle_after_epoch=True,
skip_empty=True)
self.decode_slice = dali.ops.decoders.ImageSlice(device="cpu",
output_type=dali.types.RGB)
# Augmentation techniques
## Random crop
self.crop = dali.ops.RandomBBoxCrop(device="cpu",
aspect_ratio=[0.5, 2.0],
thresholds=[0, 0.1, 0.3, 0.5, 0.7, 0.9],
scaling=[0.3, 1.0],
bbox_layout="xyXY",
allow_no_crop=True,
num_attempts=1)
## Color twist
self.hsv = dali.ops.Hsv(device="gpu",
dtype=dali.types.FLOAT) # use float to avoid clipping and quantizing the intermediate result
self.bc = dali.ops.BrightnessContrast(device="gpu",
contrast_center=128, # input is in the [0, 255] range
dtype=dali.types.UINT8)
## Cropping and normalization
dtype = dali.types.FLOAT16 if output_fp16 else dali.types.FLOAT
output_layout = dali.types.NHWC if output_nhwc else dali.types.NCHW
self.normalize = dali.ops.CropMirrorNormalize(
device="gpu",
crop=(300, 300),
mean=[0.0, 0.0, 0.0],
std=[255.0, 255.0, 255.0],
mirror=0,
dtype=dtype,
output_layout=output_layout,
pad_output=pad_output)
## Flipping
self.flip = dali.ops.Flip(device="cpu")
self.bbflip = dali.ops.BbFlip(device="cpu", ltrb=True)
# Resize
self.resize = dali.ops.Resize(device="cpu",
resize_x=300,
resize_y=300)
# Random variables
self.rng1 = dali.ops.random.Uniform(range=[0.5, 1.5])
self.rng2 = dali.ops.random.Uniform(range=[0.875, 1.125])
self.rng3 = dali.ops.random.Uniform(range=[-0.5, 0.5])
self.flip_coin = dali.ops.random.CoinFlip(probability=0.5)
# bbox encoder
self.anchors = default_boxes(order='ltrb').cpu().numpy().flatten().tolist()
self.box_encoder = dali.ops.BoxEncoder(device="cpu",
criteria=0.5,
anchors=self.anchors)
def define_graph(self):
saturation = self.rng1()
contrast = self.rng1()
brightness = self.rng2()
hue = self.rng3()
coin_rnd = self.flip_coin()
inputs, bboxes, labels = self.input(name="Reader")
crop_begin, crop_size, bboxes, labels = self.crop(bboxes, labels)
images = self.decode_slice(inputs, crop_begin, crop_size)
images = self.flip(images, horizontal=coin_rnd)
bboxes = self.bbflip(bboxes, horizontal=coin_rnd)
images = self.resize(images)
images = images.gpu()
images = self.hsv(images, hue=hue, saturation=saturation)
images = self.bc(images, brightness=brightness, contrast=contrast)
images = self.normalize(images)
bboxes, labels = self.box_encoder(bboxes, labels)
# bboxes and images and labels on GPU
return (images, bboxes.gpu(), labels.gpu())
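# A minimal construction sketch (argument values are illustrative assumptions; in this
# repository the pipeline is normally created by the data-loading helpers, and
# dboxes300_coco comes from ssd.utils):
#
#     pipeline = COCOPipeline(batch_size=32,
#                             file_root="/coco/train2017",
#                             annotations_file="/coco/annotations/instances_train2017.json",
#                             default_boxes=dboxes300_coco(),
#                             device_id=0,
#                             num_shards=1,
#                             num_threads=4)
#     train_loader = DALICOCOIterator([pipeline], size=epoch_size)  # iterator defined below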
to_torch_type = {
np.float32 : torch.float32,
np.float64 : torch.float64,
np.float16 : torch.float16,
np.uint8 : torch.uint8,
np.int8 : torch.int8,
np.int16 : torch.int16,
np.int32 : torch.int32,
np.int64 : torch.int64
}
def feed_ndarray(dali_tensor, arr):
"""
Copy contents of DALI tensor to pyTorch's Tensor.
Parameters
----------
`dali_tensor` : nvidia.dali.backend.TensorCPU or nvidia.dali.backend.TensorGPU
Tensor from which to copy
`arr` : torch.Tensor
Destination of the copy
"""
assert dali_tensor.shape() == list(arr.size()), \
("Shapes do not match: DALI tensor has size {0}"
", but PyTorch Tensor has size {1}".format(dali_tensor.shape(), list(arr.size())))
# turn the raw pointer (int) into a C void pointer
c_type_pointer = ctypes.c_void_p(arr.data_ptr())
dali_tensor.copy_to_external(c_type_pointer)
return arr
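# Usage sketch (illustrative): allocate a destination tensor that matches the DALI
# tensor's shape and dtype, then copy into it.
#
#     dali_tensor = pipeline_outputs[0].as_tensor()
#     dst = torch.empty(dali_tensor.shape(),
#                       dtype=to_torch_type[to_numpy_type(dali_tensor.dtype)],
#                       device=torch.device('cuda', pipeline.device_id))
#     feed_ndarray(dali_tensor, dst)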
class DALICOCOIterator(object):
"""
COCO DALI iterator for pyTorch.
Parameters
----------
pipelines : list of nvidia.dali.pipeline.Pipeline
List of pipelines to use
size : int
Epoch size.
"""
def __init__(self, pipelines, size):
if not isinstance(pipelines, list):
pipelines = [pipelines]
self._num_gpus = len(pipelines)
assert pipelines is not None, "Number of provided pipelines has to be at least 1"
self.batch_size = pipelines[0].max_batch_size
self._size = size
self._pipes = pipelines
# Build all pipelines
for p in self._pipes:
p.build()
# Use double-buffering of data batches
self._data_batches = [[None, None, None, None] for i in range(self._num_gpus)]
self._counter = 0
self._current_data_batch = 0
self.output_map = ["image", "bboxes", "labels"]
# We need data about the batches (like shape information),
# so we need to run a single batch as part of setup to get that info
self._first_batch = None
self._first_batch = self.next()
def __next__(self):
if self._first_batch is not None:
batch = self._first_batch
self._first_batch = None
return batch
if self._counter > self._size:
raise StopIteration
# Gather outputs
outputs = []
for p in self._pipes:
p._prefetch()
for p in self._pipes:
outputs.append(p.share_outputs())
for i in range(self._num_gpus):
dev_id = self._pipes[i].device_id
out_images = []
bboxes = []
labels = []
# segregate outputs into image/labels/bboxes entries
for j, out in enumerate(outputs[i]):
if self.output_map[j] == "image":
out_images.append(out)
elif self.output_map[j] == "bboxes":
bboxes.append(out)
elif self.output_map[j] == "labels":
labels.append(out)
# Change DALI TensorLists into Tensors
images = [x.as_tensor() for x in out_images]
images_shape = [x.shape() for x in images]
# Prepare bboxes shapes
bboxes_shape = []
for j in range(len(bboxes)):
bboxes_shape.append([])
for k in range(len(bboxes[j])):
bboxes_shape[j].append(bboxes[j][k].shape())
# Prepare labels shapes and offsets
labels_shape = []
bbox_offsets = []
torch.cuda.synchronize()
for j in range(len(labels)):
labels_shape.append([])
bbox_offsets.append([0])
for k in range(len(labels[j])):
lshape = labels[j][k].shape()
bbox_offsets[j].append(bbox_offsets[j][k] + lshape[0])
labels_shape[j].append(lshape)
# We always need to allocate new memory as bboxes and labels vary in shape
images_torch_type = to_torch_type[to_numpy_type(images[0].dtype)]
bboxes_torch_type = to_torch_type[to_numpy_type(bboxes[0][0].dtype)]
labels_torch_type = to_torch_type[to_numpy_type(labels[0][0].dtype)]
torch_gpu_device = torch.device('cuda', dev_id)
torch_cpu_device = torch.device('cpu')
pyt_images = [torch.zeros(shape, dtype=images_torch_type, device=torch_gpu_device) for shape in images_shape]
pyt_bboxes = [[torch.zeros(shape, dtype=bboxes_torch_type, device=torch_gpu_device) for shape in shape_list] for shape_list in bboxes_shape]
pyt_labels = [[torch.zeros(shape, dtype=labels_torch_type, device=torch_gpu_device) for shape in shape_list] for shape_list in labels_shape]
pyt_offsets = [torch.zeros(len(offset), dtype=torch.int32, device=torch_cpu_device) for offset in bbox_offsets]
self._data_batches[i][self._current_data_batch] = (pyt_images, pyt_bboxes, pyt_labels, pyt_offsets)
# Copy data from DALI Tensors to torch tensors
for j, i_arr in enumerate(images):
feed_ndarray(i_arr, pyt_images[j])
for j, b_list in enumerate(bboxes):
for k in range(len(b_list)):
if (pyt_bboxes[j][k].shape[0] != 0):
feed_ndarray(b_list[k], pyt_bboxes[j][k])
pyt_bboxes[j] = torch.cat(pyt_bboxes[j])
for j, l_list in enumerate(labels):
for k in range(len(l_list)):
if (pyt_labels[j][k].shape[0] != 0):
feed_ndarray(l_list[k], pyt_labels[j][k])
pyt_labels[j] = torch.cat(pyt_labels[j])
for j in range(len(pyt_offsets)):
pyt_offsets[j] = torch.IntTensor(bbox_offsets[j])
for p in self._pipes:
p.release_outputs()
p.schedule_run()
copy_db_index = self._current_data_batch
# Change index for double buffering
self._current_data_batch = (self._current_data_batch + 1) % 2
self._counter += self._num_gpus * self.batch_size
return [db[copy_db_index] for db in self._data_batches]
def next(self):
"""
Returns the next batch of data.
"""
return self.__next__()
def __iter__(self):
return self
def reset(self):
"""
Resets the iterator after the full epoch.
DALI iterators do not support resetting before the end of the epoch
and will ignore such request.
"""
if self._counter > self._size:
self._counter = self._counter % self._size
else:
logging.warning("DALI iterator does not support resetting while epoch is not finished. Ignoring...")
| DeepLearningExamples-master | PyTorch/Detection/SSD/ssd/coco_pipeline.py |
from .entrypoints import nvidia_ssd, nvidia_ssd_processing_utils
| DeepLearningExamples-master | PyTorch/Detection/SSD/ssd/__init__.py |
# Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import numpy as np
import dllogger as DLLogger
class EpochMeter:
def __init__(self, name):
self.name = name
self.data = []
def update(self, epoch, val):
self.data.append((epoch, val))
class IterationMeter:
def __init__(self, name):
self.name = name
self.data = []
def update(self, epoch, iteration, val):
self.data.append((epoch, iteration, val))
class IterationAverageMeter:
def __init__(self, name):
self.name = name
self.data = []
self.n = 0
self.sum = 0
def update_iter(self, val):
if math.isfinite(val): # sometimes the loss is inf
self.n += 1
self.sum += 0 if math.isinf(val) else val
def update_epoch(self, epoch):
self.data.append((epoch, self.sum / self.n))
self.n = 0
self.sum = 0
class Logger:
def __init__(self, name, json_output=None, log_interval=20):
self.name = name
self.train_loss_logger = IterationAverageMeter("Training loss")
self.train_epoch_time_logger = EpochMeter("Training 1 epoch time")
self.val_acc_logger = EpochMeter("Validation accuracy")
self.log_interval = log_interval
backends = [ DLLogger.StdOutBackend(DLLogger.Verbosity.DEFAULT) ]
if json_output:
backends.append(DLLogger.JSONStreamBackend(DLLogger.Verbosity.VERBOSE, json_output))
DLLogger.init(backends)
DLLogger.metadata("mAP", {"unit": None})
self.epoch = 0
self.train_iter = 0
self.summary = {}
def step(self):
return (
self.epoch,
self.train_iter,
)
def log_params(self, data):
DLLogger.log("PARAMETER", data)
DLLogger.flush()
def log(self, key, value):
DLLogger.log(self.step(), { key: value })
DLLogger.flush()
def add_to_summary(self, data):
for key, value in data.items():
self.summary[key] = value
def log_summary(self):
DLLogger.log((), self.summary)
def update_iter(self, epoch, iteration, loss):
self.epoch = epoch
self.train_iter = iteration
self.train_loss_logger.update_iter(loss)
if iteration % self.log_interval == 0:
self.log('loss', loss)
def update_epoch(self, epoch, acc):
self.epoch = epoch
self.train_loss_logger.update_epoch(epoch)
self.val_acc_logger.update(epoch, acc)
data = { 'mAP': acc }
self.add_to_summary(data)
DLLogger.log((self.epoch,), data)
def update_epoch_time(self, epoch, time):
self.epoch = epoch
self.train_epoch_time_logger.update(epoch, time)
DLLogger.log((self.epoch,), { 'time': time })
def print_results(self):
return self.train_loss_logger.data, self.val_acc_logger.data, self.train_epoch_time_logger
class BenchmarkMeter:
def __init__(self, name):
self.name = name
self.data = []
self.total_images = 0
self.total_time = 0
self.avr_images_per_second = 0
def update(self, bs, time):
self.total_images += bs
self.total_time += time
self.avr_images_per_second = self.total_images / self.total_time
self.data.append(bs / time)
class BenchLogger(Logger):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.images_per_ses = BenchmarkMeter(self.name)
DLLogger.metadata("avg_img/sec", {"unit": "images/s"})
DLLogger.metadata("med_img/sec", {"unit": "images/s"})
DLLogger.metadata("min_img/sec", {"unit": "images/s"})
DLLogger.metadata("max_img/sec", {"unit": "images/s"})
def update(self, bs, time):
self.images_per_ses.update(bs, time)
def print_result(self):
total_bs = self.images_per_ses.total_images
total_time = self.images_per_ses.total_time
avr = self.images_per_ses.avr_images_per_second
data = np.array(self.images_per_ses.data)
med = np.median(data)
DLLogger.log((), {
'avg_img/sec': avr,
'med_img/sec': np.median(data),
'min_img/sec': np.min(data),
'max_img/sec': np.max(data),
})
print("Done benchmarking. Total images: {}\ttotal time: {:.3f}\tAverage images/sec: {:.3f}\tMedian images/sec: {:.3f}".format(
total_bs,
total_time,
avr,
med
))
return med
| DeepLearningExamples-master | PyTorch/Detection/SSD/ssd/logger.py |
# Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
from torchvision.models.resnet import resnet18, resnet34, resnet50, resnet101, resnet152
class ResNet(nn.Module):
def __init__(self, backbone='resnet50', backbone_path=None, weights="IMAGENET1K_V1"):
super().__init__()
if backbone == 'resnet18':
backbone = resnet18(weights=None if backbone_path else weights)
self.out_channels = [256, 512, 512, 256, 256, 128]
elif backbone == 'resnet34':
backbone = resnet34(weights=None if backbone_path else weights)
self.out_channels = [256, 512, 512, 256, 256, 256]
elif backbone == 'resnet50':
backbone = resnet50(weights=None if backbone_path else weights)
self.out_channels = [1024, 512, 512, 256, 256, 256]
elif backbone == 'resnet101':
backbone = resnet101(weights=None if backbone_path else weights)
self.out_channels = [1024, 512, 512, 256, 256, 256]
else: # backbone == 'resnet152':
backbone = resnet152(weights=None if backbone_path else weights)
self.out_channels = [1024, 512, 512, 256, 256, 256]
if backbone_path:
backbone.load_state_dict(torch.load(backbone_path))
self.feature_extractor = nn.Sequential(*list(backbone.children())[:7])
conv4_block1 = self.feature_extractor[-1][0]
conv4_block1.conv1.stride = (1, 1)
conv4_block1.conv2.stride = (1, 1)
conv4_block1.downsample[0].stride = (1, 1)
def forward(self, x):
x = self.feature_extractor(x)
return x
class SSD300(nn.Module):
def __init__(self, backbone=ResNet('resnet50')):
super().__init__()
self.feature_extractor = backbone
self.label_num = 81 # number of COCO classes
self._build_additional_features(self.feature_extractor.out_channels)
self.num_defaults = [4, 6, 6, 6, 4, 4]
self.loc = []
self.conf = []
for nd, oc in zip(self.num_defaults, self.feature_extractor.out_channels):
self.loc.append(nn.Conv2d(oc, nd * 4, kernel_size=3, padding=1))
self.conf.append(nn.Conv2d(oc, nd * self.label_num, kernel_size=3, padding=1))
self.loc = nn.ModuleList(self.loc)
self.conf = nn.ModuleList(self.conf)
self._init_weights()
def _build_additional_features(self, input_size):
self.additional_blocks = []
for i, (input_size, output_size, channels) in enumerate(zip(input_size[:-1], input_size[1:], [256, 256, 128, 128, 128])):
if i < 3:
layer = nn.Sequential(
nn.Conv2d(input_size, channels, kernel_size=1, bias=False),
nn.BatchNorm2d(channels),
nn.ReLU(inplace=True),
nn.Conv2d(channels, output_size, kernel_size=3, padding=1, stride=2, bias=False),
nn.BatchNorm2d(output_size),
nn.ReLU(inplace=True),
)
else:
layer = nn.Sequential(
nn.Conv2d(input_size, channels, kernel_size=1, bias=False),
nn.BatchNorm2d(channels),
nn.ReLU(inplace=True),
nn.Conv2d(channels, output_size, kernel_size=3, bias=False),
nn.BatchNorm2d(output_size),
nn.ReLU(inplace=True),
)
self.additional_blocks.append(layer)
self.additional_blocks = nn.ModuleList(self.additional_blocks)
def _init_weights(self):
layers = [*self.additional_blocks, *self.loc, *self.conf]
for layer in layers:
for param in layer.parameters():
if param.dim() > 1: nn.init.xavier_uniform_(param)
# Shape the classifier to the view of bboxes
def bbox_view(self, src, loc, conf):
ret = []
for s, l, c in zip(src, loc, conf):
ret.append((l(s).reshape(s.size(0), 4, -1), c(s).reshape(s.size(0), self.label_num, -1)))
locs, confs = list(zip(*ret))
locs, confs = torch.cat(locs, 2).contiguous(), torch.cat(confs, 2).contiguous()
return locs, confs
def forward(self, x):
x = self.feature_extractor(x)
detection_feed = [x]
for l in self.additional_blocks:
x = l(x)
detection_feed.append(x)
# Feature Map 38x38x4, 19x19x6, 10x10x6, 5x5x6, 3x3x4, 1x1x4
locs, confs = self.bbox_view(detection_feed, self.loc, self.conf)
# For SSD 300, shall return nbatch x 8732 x {nlabels, nlocs} results
return locs, confs
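# Shape sanity check (illustrative sketch): for a batch of N 300x300 images the network
# returns locs of shape [N, 4, 8732] and confs of shape [N, 81, 8732].
#
#     model = SSD300(backbone=ResNet('resnet50'))
#     locs, confs = model(torch.randn(2, 3, 300, 300))
#     assert locs.shape == (2, 4, 8732) and confs.shape == (2, 81, 8732)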
class Loss(nn.Module):
"""
Implements the loss as the sum of the following:
1. Confidence Loss: All labels, with hard negative mining
2. Localization Loss: Only on positive labels
Suppose input dboxes has the shape 8732x4
"""
def __init__(self, dboxes):
super(Loss, self).__init__()
self.scale_xy = 1.0/dboxes.scale_xy
self.scale_wh = 1.0/dboxes.scale_wh
self.sl1_loss = nn.SmoothL1Loss(reduction='none')
self.dboxes = nn.Parameter(dboxes(order="xywh").transpose(0, 1).unsqueeze(dim = 0),
requires_grad=False)
# The two scale factors are from the following link
# http://jany.st/post/2017-11-05-single-shot-detector-ssd-from-scratch-in-tensorflow.html
self.con_loss = nn.CrossEntropyLoss(reduction='none')
def _loc_vec(self, loc):
"""
Generate Location Vectors
"""
gxy = self.scale_xy*(loc[:, :2, :] - self.dboxes[:, :2, :])/self.dboxes[:, 2:, ]
gwh = self.scale_wh*(loc[:, 2:, :]/self.dboxes[:, 2:, :]).log()
return torch.cat((gxy, gwh), dim=1).contiguous()
def forward(self, ploc, plabel, gloc, glabel):
"""
ploc, plabel: Nx4x8732, Nxlabel_numx8732
predicted location and labels
gloc, glabel: Nx4x8732, Nx8732
ground truth location and labels
"""
mask = glabel > 0
pos_num = mask.sum(dim=1)
vec_gd = self._loc_vec(gloc)
# sum on four coordinates, and mask
sl1 = self.sl1_loss(ploc, vec_gd).sum(dim=1)
sl1 = (mask.float()*sl1).sum(dim=1)
# hard negative mining
con = self.con_loss(plabel, glabel)
# positive matches will never be selected
con_neg = con.clone()
con_neg[mask] = 0
_, con_idx = con_neg.sort(dim=1, descending=True)
_, con_rank = con_idx.sort(dim=1)
# number of negative three times positive
neg_num = torch.clamp(3*pos_num, max=mask.size(1)).unsqueeze(-1)
neg_mask = con_rank < neg_num
#print(con.shape, mask.shape, neg_mask.shape)
closs = (con*((mask + neg_mask).float())).sum(dim=1)
# avoid no object detected
total_loss = sl1 + closs
num_mask = (pos_num > 0).float()
pos_num = pos_num.float().clamp(min=1e-6)
ret = (total_loss*num_mask/pos_num).mean(dim=0)
return ret
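# Shape sketch for the loss (illustrative; dboxes300_coco lives in ssd.utils):
# predictions and targets share the 8732-anchor layout produced by SSD300 and
# Encoder.encode.
#
#     loss_fn = Loss(dboxes300_coco())
#     ploc, plabel = torch.randn(2, 4, 8732), torch.randn(2, 81, 8732)
#     gloc, glabel = torch.rand(2, 4, 8732), torch.randint(0, 81, (2, 8732))
#     loss = loss_fn(ploc, plabel, gloc, glabel)  # scalar tensor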
| DeepLearningExamples-master | PyTorch/Detection/SSD/ssd/model.py |
# Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torchvision.transforms as transforms
import torch.utils.data as data
from PIL import Image
import os
import numpy as np
import random
import itertools
import torch.nn.functional as F
import json
import time
import bz2
import pickle
from math import sqrt
# This function is from https://github.com/kuangliu/pytorch-ssd.
def calc_iou_tensor(box1, box2):
""" Calculation of IoU based on two boxes tensor,
Reference to https://github.com/kuangliu/pytorch-src
input:
box1 (N, 4)
box2 (M, 4)
output:
IoU (N, M)
"""
N = box1.size(0)
M = box2.size(0)
be1 = box1.unsqueeze(1).expand(-1, M, -1)
be2 = box2.unsqueeze(0).expand(N, -1, -1)
# Left Top & Right Bottom
lt = torch.max(be1[:,:,:2], be2[:,:,:2])
#mask1 = (be1[:,:, 0] < be2[:,:, 0]) ^ (be1[:,:, 1] < be2[:,:, 1])
#mask1 = ~mask1
rb = torch.min(be1[:,:,2:], be2[:,:,2:])
#mask2 = (be1[:,:, 2] < be2[:,:, 2]) ^ (be1[:,:, 3] < be2[:,:, 3])
#mask2 = ~mask2
delta = rb - lt
delta[delta < 0] = 0
intersect = delta[:,:,0]*delta[:,:,1]
#*mask1.float()*mask2.float()
delta1 = be1[:,:,2:] - be1[:,:,:2]
area1 = delta1[:,:,0]*delta1[:,:,1]
delta2 = be2[:,:,2:] - be2[:,:,:2]
area2 = delta2[:,:,0]*delta2[:,:,1]
iou = intersect/(area1 + area2 - intersect)
return iou
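# Worked example: two unit boxes (ltrb) overlapping by half share an intersection of
# 0.5 and a union of 1.5, so IoU = 1/3.
#
#     a = torch.tensor([[0.0, 0.0, 1.0, 1.0]])
#     b = torch.tensor([[0.5, 0.0, 1.5, 1.0]])
#     calc_iou_tensor(a, b)  # tensor([[0.3333]])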
# This function is from https://github.com/kuangliu/pytorch-ssd.
class Encoder(object):
"""
Inspired by https://github.com/kuangliu/pytorch-src
Transform between (bboxes, labels) <-> SSD output
dboxes: default boxes in size 8732 x 4,
encoder: input ltrb format, output xywh format
decoder: input xywh format, output ltrb format
encode:
input : bboxes_in (Tensor nboxes x 4), labels_in (Tensor nboxes)
output : bboxes_out (Tensor 8732 x 4), labels_out (Tensor 8732)
criteria : IoU threshold of bboxes
decode:
input : bboxes_in (Tensor 8732 x 4), scores_in (Tensor 8732 x nitems)
output : bboxes_out (Tensor nboxes x 4), labels_out (Tensor nboxes)
criteria : IoU threshold of bboxes
max_output : maximum number of output bboxes
"""
def __init__(self, dboxes):
self.dboxes = dboxes(order="ltrb")
self.dboxes_xywh = dboxes(order="xywh").unsqueeze(dim=0)
self.nboxes = self.dboxes.size(0)
self.scale_xy = dboxes.scale_xy
self.scale_wh = dboxes.scale_wh
def encode(self, bboxes_in, labels_in, criteria = 0.5):
ious = calc_iou_tensor(bboxes_in, self.dboxes)
best_dbox_ious, best_dbox_idx = ious.max(dim=0)
best_bbox_ious, best_bbox_idx = ious.max(dim=1)
# set best ious 2.0
best_dbox_ious.index_fill_(0, best_bbox_idx, 2.0)
idx = torch.arange(0, best_bbox_idx.size(0), dtype=torch.int64)
best_dbox_idx[best_bbox_idx[idx]] = idx
# filter IoU > 0.5
masks = best_dbox_ious > criteria
labels_out = torch.zeros(self.nboxes, dtype=torch.long)
labels_out[masks] = labels_in[best_dbox_idx[masks]]
bboxes_out = self.dboxes.clone()
bboxes_out[masks, :] = bboxes_in[best_dbox_idx[masks], :]
# Transform format to xywh format
x, y, w, h = 0.5*(bboxes_out[:, 0] + bboxes_out[:, 2]), \
0.5*(bboxes_out[:, 1] + bboxes_out[:, 3]), \
-bboxes_out[:, 0] + bboxes_out[:, 2], \
-bboxes_out[:, 1] + bboxes_out[:, 3]
bboxes_out[:, 0] = x
bboxes_out[:, 1] = y
bboxes_out[:, 2] = w
bboxes_out[:, 3] = h
return bboxes_out, labels_out
def scale_back_batch(self, bboxes_in, scores_in):
"""
Rescale boxes and transform them from xywh to ltrb format.
Expected input shapes: N x 4 x num_bbox and N x label_num x num_bbox
"""
if bboxes_in.device == torch.device("cpu"):
self.dboxes = self.dboxes.cpu()
self.dboxes_xywh = self.dboxes_xywh.cpu()
else:
self.dboxes = self.dboxes.cuda()
self.dboxes_xywh = self.dboxes_xywh.cuda()
bboxes_in = bboxes_in.permute(0, 2, 1)
scores_in = scores_in.permute(0, 2, 1)
bboxes_in[:, :, :2] = self.scale_xy*bboxes_in[:, :, :2]
bboxes_in[:, :, 2:] = self.scale_wh*bboxes_in[:, :, 2:]
bboxes_in[:, :, :2] = bboxes_in[:, :, :2]*self.dboxes_xywh[:, :, 2:] + self.dboxes_xywh[:, :, :2]
bboxes_in[:, :, 2:] = bboxes_in[:, :, 2:].exp()*self.dboxes_xywh[:, :, 2:]
# Transform format to ltrb
l, t, r, b = bboxes_in[:, :, 0] - 0.5*bboxes_in[:, :, 2],\
bboxes_in[:, :, 1] - 0.5*bboxes_in[:, :, 3],\
bboxes_in[:, :, 0] + 0.5*bboxes_in[:, :, 2],\
bboxes_in[:, :, 1] + 0.5*bboxes_in[:, :, 3]
bboxes_in[:, :, 0] = l
bboxes_in[:, :, 1] = t
bboxes_in[:, :, 2] = r
bboxes_in[:, :, 3] = b
return bboxes_in, F.softmax(scores_in, dim=-1)
def decode_batch(self, bboxes_in, scores_in, criteria = 0.45, max_output=200):
bboxes, probs = self.scale_back_batch(bboxes_in, scores_in)
output = []
for bbox, prob in zip(bboxes.split(1, 0), probs.split(1, 0)):
bbox = bbox.squeeze(0)
prob = prob.squeeze(0)
output.append(self.decode_single(bbox, prob, criteria, max_output))
return output
# perform non-maximum suppression
def decode_single(self, bboxes_in, scores_in, criteria, max_output, max_num=200):
# Reference to https://github.com/amdegroot/ssd.pytorch
bboxes_out = []
scores_out = []
labels_out = []
for i, score in enumerate(scores_in.split(1, 1)):
# skip background
# print(score[score>0.90])
if i == 0: continue
# print(i)
score = score.squeeze(1)
mask = score > 0.05
bboxes, score = bboxes_in[mask, :], score[mask]
if score.size(0) == 0: continue
score_sorted, score_idx_sorted = score.sort(dim=0)
# select max_output indices
score_idx_sorted = score_idx_sorted[-max_num:]
candidates = []
#maxdata, maxloc = scores_in.sort()
while score_idx_sorted.numel() > 0:
idx = score_idx_sorted[-1].item()
bboxes_sorted = bboxes[score_idx_sorted, :]
bboxes_idx = bboxes[idx, :].unsqueeze(dim=0)
iou_sorted = calc_iou_tensor(bboxes_sorted, bboxes_idx).squeeze()
# we only need iou < criteria
score_idx_sorted = score_idx_sorted[iou_sorted < criteria]
candidates.append(idx)
bboxes_out.append(bboxes[candidates, :])
scores_out.append(score[candidates])
labels_out.extend([i]*len(candidates))
if not bboxes_out:
return [torch.tensor([]) for _ in range(3)]
bboxes_out, labels_out, scores_out = torch.cat(bboxes_out, dim=0), \
torch.tensor(labels_out, dtype=torch.long), \
torch.cat(scores_out, dim=0)
_, max_ids = scores_out.sort(dim=0)
max_ids = max_ids[-max_output:].to("cpu")
return bboxes_out[max_ids, :], labels_out[max_ids], scores_out[max_ids]
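# Round-trip sketch (illustrative): encode ground-truth boxes onto the 8732 default
# boxes to build training targets, and decode raw network output back to detections.
#
#     encoder = Encoder(dboxes300_coco())  # dboxes300_coco is defined below
#     gloc, glabel = encoder.encode(bboxes_ltrb, labels_in)          # 8732 x 4, 8732
#     detections = encoder.decode_batch(ploc, plabel, criteria=0.45, max_output=200)
#     # detections[i] -> (bboxes_ltrb, labels, scores) for image i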
class DefaultBoxes(object):
def __init__(self, fig_size, feat_size, steps, scales, aspect_ratios, \
scale_xy=0.1, scale_wh=0.2):
self.feat_size = feat_size
self.fig_size = fig_size
self.scale_xy_ = scale_xy
self.scale_wh_ = scale_wh
# According to https://github.com/weiliu89/caffe
# Calculation method slightly different from paper
self.steps = steps
self.scales = scales
fk = fig_size/np.array(steps)
self.aspect_ratios = aspect_ratios
self.default_boxes = []
# size of feature and number of feature
for idx, sfeat in enumerate(self.feat_size):
sk1 = scales[idx]/fig_size
sk2 = scales[idx+1]/fig_size
sk3 = sqrt(sk1*sk2)
all_sizes = [(sk1, sk1), (sk3, sk3)]
for alpha in aspect_ratios[idx]:
w, h = sk1*sqrt(alpha), sk1/sqrt(alpha)
all_sizes.append((w, h))
all_sizes.append((h, w))
for w, h in all_sizes:
for i, j in itertools.product(range(sfeat), repeat=2):
cx, cy = (j+0.5)/fk[idx], (i+0.5)/fk[idx]
self.default_boxes.append((cx, cy, w, h))
self.dboxes = torch.tensor(self.default_boxes, dtype=torch.float)
self.dboxes.clamp_(min=0, max=1)
# For IoU calculation
self.dboxes_ltrb = self.dboxes.clone()
self.dboxes_ltrb[:, 0] = self.dboxes[:, 0] - 0.5 * self.dboxes[:, 2]
self.dboxes_ltrb[:, 1] = self.dboxes[:, 1] - 0.5 * self.dboxes[:, 3]
self.dboxes_ltrb[:, 2] = self.dboxes[:, 0] + 0.5 * self.dboxes[:, 2]
self.dboxes_ltrb[:, 3] = self.dboxes[:, 1] + 0.5 * self.dboxes[:, 3]
@property
def scale_xy(self):
return self.scale_xy_
@property
def scale_wh(self):
return self.scale_wh_
def __call__(self, order="ltrb"):
if order == "ltrb": return self.dboxes_ltrb
if order == "xywh": return self.dboxes
def dboxes300_coco():
figsize = 300
feat_size = [38, 19, 10, 5, 3, 1]
steps = [8, 16, 32, 64, 100, 300]
# use the scales here: https://github.com/amdegroot/ssd.pytorch/blob/master/data/config.py
scales = [21, 45, 99, 153, 207, 261, 315]
aspect_ratios = [[2], [2, 3], [2, 3], [2, 3], [2], [2]]
dboxes = DefaultBoxes(figsize, feat_size, steps, scales, aspect_ratios)
return dboxes
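# The configuration above yields the canonical 8732 SSD300 default boxes:
# 38*38*4 + 19*19*6 + 10*10*6 + 5*5*6 + 3*3*4 + 1*1*4 = 8732.
#
#     dboxes = dboxes300_coco()
#     assert dboxes(order="ltrb").shape == (8732, 4)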
# This function is from https://github.com/chauhan-utk/ssd.DomainAdaptation.
class SSDCropping(object):
""" Cropping for SSD, according to original paper
Choose among the following 3 conditions:
1. Preserve the original image
2. Random crop minimum IoU is among 0.1, 0.3, 0.5, 0.7, 0.9
3. Random crop
Reference to https://github.com/chauhan-utk/src.DomainAdaptation
"""
def __init__(self):
self.sample_options = (
# Do nothing
None,
# min IoU, max IoU
(0.1, None),
(0.3, None),
(0.5, None),
(0.7, None),
(0.9, None),
# no IoU requirements
(None, None),
)
def __call__(self, img, img_size, bboxes, labels):
# Keep sampling until a valid crop (or the original image) is returned
while True:
mode = random.choice(self.sample_options)
if mode is None:
return img, img_size, bboxes, labels
htot, wtot = img_size
min_iou, max_iou = mode
min_iou = float("-inf") if min_iou is None else min_iou
max_iou = float("+inf") if max_iou is None else max_iou
# Try to find a candidate crop (the reference implementation uses up to 50 attempts)
for _ in range(1):
# size of each sampled patch is in [0.1, 1]; 0.3*0.3 approx. 0.1
w = random.uniform(0.3 , 1.0)
h = random.uniform(0.3 , 1.0)
if w/h < 0.5 or w/h > 2:
continue
# left 0 ~ wtot - w, top 0 ~ htot - h
left = random.uniform(0, 1.0 - w)
top = random.uniform(0, 1.0 - h)
right = left + w
bottom = top + h
ious = calc_iou_tensor(bboxes, torch.tensor([[left, top, right, bottom]]))
# tailor all the bboxes and return
if not ((ious > min_iou) & (ious < max_iou)).all():
continue
# discard any bboxes whose center not in the cropped image
xc = 0.5*(bboxes[:, 0] + bboxes[:, 2])
yc = 0.5*(bboxes[:, 1] + bboxes[:, 3])
masks = (xc > left) & (xc < right) & (yc > top) & (yc < bottom)
# if no such boxes, continue searching again
if not masks.any():
continue
bboxes[bboxes[:, 0] < left, 0] = left
bboxes[bboxes[:, 1] < top, 1] = top
bboxes[bboxes[:, 2] > right, 2] = right
bboxes[bboxes[:, 3] > bottom, 3] = bottom
bboxes = bboxes[masks, :]
labels = labels[masks]
left_idx = int(left*wtot)
top_idx = int(top*htot)
right_idx = int(right*wtot)
bottom_idx = int(bottom*htot)
img = img.crop((left_idx, top_idx, right_idx, bottom_idx))
bboxes[:, 0] = (bboxes[:, 0] - left)/w
bboxes[:, 1] = (bboxes[:, 1] - top)/h
bboxes[:, 2] = (bboxes[:, 2] - left)/w
bboxes[:, 3] = (bboxes[:, 3] - top)/h
htot = bottom_idx - top_idx
wtot = right_idx - left_idx
return img, (htot, wtot), bboxes, labels
class RandomHorizontalFlip(object):
def __init__(self, p=0.5):
self.p = p
def __call__(self, image, bboxes):
if random.random() < self.p:
bboxes[:, 0], bboxes[:, 2] = 1.0 - bboxes[:, 2], 1.0 - bboxes[:, 0]
return image.transpose(Image.FLIP_LEFT_RIGHT), bboxes
return image, bboxes
# Do data augmentation
class SSDTransformer(object):
""" SSD Data Augumentation, according to original paper
Composed by several steps:
Cropping
Resize
Flipping
Jittering
"""
def __init__(self, dboxes, size = (300, 300), val=False):
# define vgg16 mean
self.size = size
self.val = val
self.dboxes_ = dboxes #DefaultBoxes300()
self.encoder = Encoder(self.dboxes_)
self.crop = SSDCropping()
self.img_trans = transforms.Compose([
transforms.Resize(self.size),
transforms.ColorJitter(brightness=0.125, contrast=0.5,
saturation=0.5, hue=0.05
),
transforms.ToTensor()
])
self.hflip = RandomHorizontalFlip()
# All Pytorch Tensor will be normalized
# https://discuss.pytorch.org/t/how-to-preprocess-input-for-pre-trained-networks/683
self.normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
self.trans_val = transforms.Compose([
transforms.Resize(self.size),
transforms.ToTensor(),
#ToTensor(),
self.normalize,])
@property
def dboxes(self):
return self.dboxes_
def __call__(self, img, img_size, bbox=None, label=None, max_num=200):
#img = torch.tensor(img)
if self.val:
bbox_out = torch.zeros(max_num, 4)
label_out = torch.zeros(max_num, dtype=torch.long)
bbox_out[:bbox.size(0), :] = bbox
label_out[:label.size(0)] = label
return self.trans_val(img), img_size, bbox_out, label_out
img, img_size, bbox, label = self.crop(img, img_size, bbox, label)
img, bbox = self.hflip(img, bbox)
img = self.img_trans(img).contiguous()
img = self.normalize(img)
bbox, label = self.encoder.encode(bbox, label)
return img, img_size, bbox, label
# Implement a data reader for the COCO dataset
class COCODetection(data.Dataset):
def __init__(self, img_folder, annotate_file, transform=None):
self.img_folder = img_folder
self.annotate_file = annotate_file
# Start processing annotation
with open(annotate_file) as fin:
self.data = json.load(fin)
self.images = {}
self.label_map = {}
self.label_info = {}
start_time = time.time()
# 0 stands for the background
cnt = 0
self.label_info[cnt] = "background"
for cat in self.data["categories"]:
cnt += 1
self.label_map[cat["id"]] = cnt
self.label_info[cnt] = cat["name"]
# build an index of the images
for img in self.data["images"]:
img_id = img["id"]
img_name = img["file_name"]
img_size = (img["height"],img["width"])
if img_id in self.images: raise Exception("duplicated image record")
self.images[img_id] = (img_name, img_size, [])
# read bboxes
for bboxes in self.data["annotations"]:
img_id = bboxes["image_id"]
category_id = bboxes["category_id"]
bbox = bboxes["bbox"]
bbox_label = self.label_map[bboxes["category_id"]]
self.images[img_id][2].append((bbox, bbox_label))
for k, v in list(self.images.items()):
if len(v[2]) == 0:
self.images.pop(k)
self.img_keys = list(self.images.keys())
self.transform = transform
@property
def labelnum(self):
return len(self.label_info)
@staticmethod
def load(pklfile):
with bz2.open(pklfile, "rb") as fin:
ret = pickle.load(fin)
return ret
def save(self, pklfile):
with bz2.open(pklfile, "wb") as fout:
pickle.dump(self, fout)
def __len__(self):
return len(self.images)
def __getitem__(self, idx):
img_id = self.img_keys[idx]
img_data = self.images[img_id]
fn = img_data[0]
img_path = os.path.join(self.img_folder, fn)
img = Image.open(img_path).convert("RGB")
htot, wtot = img_data[1]
bbox_sizes = []
bbox_labels = []
#for (xc, yc, w, h), bbox_label in img_data[2]:
for (l,t,w,h), bbox_label in img_data[2]:
r = l + w
b = t + h
#l, t, r, b = xc - 0.5*w, yc - 0.5*h, xc + 0.5*w, yc + 0.5*h
bbox_size = (l/wtot, t/htot, r/wtot, b/htot)
bbox_sizes.append(bbox_size)
bbox_labels.append(bbox_label)
bbox_sizes = torch.tensor(bbox_sizes)
bbox_labels = torch.tensor(bbox_labels)
if self.transform is not None:
img, (htot, wtot), bbox_sizes, bbox_labels = \
self.transform(img, (htot, wtot), bbox_sizes, bbox_labels)
else:
pass
return img, img_id, (htot, wtot), bbox_sizes, bbox_labels
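# Hedged sketch of reading one validation sample; the COCO 2017 paths below are
# assumptions about the dataset layout, not something this module guarantees.
def _example_coco_detection(data_root="/coco"):
    dboxes = dboxes300_coco()
    trans = SSDTransformer(dboxes, (300, 300), val=True)
    annotate = os.path.join(data_root, "annotations/instances_val2017.json")
    dataset = COCODetection(os.path.join(data_root, "val2017"), annotate, trans)
    img, img_id, (htot, wtot), bbox_sizes, bbox_labels = dataset[0]
    # img: 3x300x300 tensor; bbox_sizes: [200, 4] zero-padded ltrb; bbox_labels: [200]
    return img, img_id, (htot, wtot), bbox_sizes, bbox_labels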
def draw_patches(img, bboxes, labels, order="xywh", label_map={}):
import matplotlib.pyplot as plt
import matplotlib.patches as patches
# Suppose bboxes in fractional coordinate:
# cx, cy, w, h
# img = img.numpy()
img = np.array(img)
labels = np.array(labels)
bboxes = bboxes.numpy()
if label_map:
labels = [label_map.get(l) for l in labels]
if order == "ltrb":
xmin, ymin, xmax, ymax = bboxes[:, 0], bboxes[:, 1], bboxes[:, 2], bboxes[:, 3]
cx, cy, w, h = (xmin + xmax)/2, (ymin + ymax)/2, xmax - xmin, ymax - ymin
else:
cx, cy, w, h = bboxes[:, 0], bboxes[:, 1], bboxes[:, 2], bboxes[:, 3]
htot, wtot,_ = img.shape
cx *= wtot
cy *= htot
w *= wtot
h *= htot
bboxes = zip(cx, cy, w, h)
plt.imshow(img)
ax = plt.gca()
for (cx, cy, w, h), label in zip(bboxes, labels):
if label == "background": continue
ax.add_patch(patches.Rectangle((cx-0.5*w, cy-0.5*h),
w, h, fill=False, color="r"))
bbox_props = dict(boxstyle="round", fc="y", ec="0.5", alpha=0.3)
ax.text(cx-0.5*w, cy-0.5*h, label, ha="center", va="center", size=15, bbox=bbox_props)
plt.show()
| DeepLearningExamples-master | PyTorch/Detection/SSD/ssd/utils.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import torch
import sys
import urllib.request
# from https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/SpeechSynthesis/Tacotron2/inference.py
def checkpoint_from_distributed(state_dict):
"""
Checks whether checkpoint was generated by DistributedDataParallel. DDP
wraps model in additional "module.", it needs to be unwrapped for single
GPU inference.
:param state_dict: model's state dict
"""
ret = False
for key, _ in state_dict.items():
if key.find('module.') != -1:
ret = True
break
return ret
# from https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/SpeechSynthesis/Tacotron2/inference.py
def unwrap_distributed(state_dict):
"""
Unwraps model from DistributedDataParallel.
DDP wraps model in additional "module.", it needs to be removed for single
GPU inference.
:param state_dict: model's state dict
"""
new_state_dict = {}
for key, value in state_dict.items():
new_key = key.replace('module.1.', '')
new_key = new_key.replace('module.', '')
new_state_dict[new_key] = value
return new_state_dict
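# Small sketch of the two helpers above on a synthetic state dict; the key names
# are made up to mirror what DDP / Sequential wrapping typically produces.
def _example_unwrap():
    state_dict = {"module.1.conv.weight": 0, "module.fc.bias": 1}
    if checkpoint_from_distributed(state_dict):
        state_dict = unwrap_distributed(state_dict)
    assert sorted(state_dict) == ["conv.weight", "fc.bias"]
    return state_dict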
def _download_checkpoint(checkpoint, force_reload):
model_dir = os.path.join(torch.hub._get_torch_home(), 'checkpoints')
if not os.path.exists(model_dir):
os.makedirs(model_dir)
ckpt_file = os.path.join(model_dir, os.path.basename(checkpoint))
if not os.path.exists(ckpt_file) or force_reload:
sys.stderr.write('Downloading checkpoint from {}\n'.format(checkpoint))
urllib.request.urlretrieve(checkpoint, ckpt_file)
return ckpt_file
def nvidia_ssd_processing_utils():
import numpy as np
import skimage
from skimage import io, transform
from .utils import dboxes300_coco, Encoder
class Processing:
@staticmethod
def load_image(image_path):
"""Code from Loading_Pretrained_Models.ipynb - a Caffe2 tutorial"""
img = skimage.img_as_float(io.imread(image_path))
if len(img.shape) == 2:
img = np.array([img, img, img]).swapaxes(0, 2)
return img
@staticmethod
def rescale(img, input_height, input_width):
"""Code from Loading_Pretrained_Models.ipynb - a Caffe2 tutorial"""
aspect = img.shape[1] / float(img.shape[0])
if (aspect > 1):
# landscape orientation - wide image
res = int(aspect * input_height)
imgScaled = transform.resize(img, (input_width, res))
if (aspect < 1):
# portrait orientation - tall image
res = int(input_width / aspect)
imgScaled = transform.resize(img, (res, input_height))
if (aspect == 1):
imgScaled = transform.resize(img, (input_width, input_height))
return imgScaled
@staticmethod
def crop_center(img, cropx, cropy):
"""Code from Loading_Pretrained_Models.ipynb - a Caffe2 tutorial"""
y, x, c = img.shape
startx = x // 2 - (cropx // 2)
starty = y // 2 - (cropy // 2)
return img[starty:starty + cropy, startx:startx + cropx]
@staticmethod
def normalize(img, mean=128, std=128):
img = (img * 256 - mean) / std
return img
@staticmethod
def prepare_tensor(inputs, fp16=False):
NHWC = np.array(inputs)
NCHW = np.swapaxes(np.swapaxes(NHWC, 1, 3), 2, 3)
tensor = torch.from_numpy(NCHW)
tensor = tensor.contiguous()
tensor = tensor.cuda()
tensor = tensor.float()
if fp16:
tensor = tensor.half()
return tensor
@staticmethod
def prepare_input(img_uri):
img = Processing.load_image(img_uri)
img = Processing.rescale(img, 300, 300)
img = Processing.crop_center(img, 300, 300)
img = Processing.normalize(img)
return img
@staticmethod
def decode_results(predictions):
dboxes = dboxes300_coco()
encoder = Encoder(dboxes)
ploc, plabel = [val.float() for val in predictions]
results = encoder.decode_batch(ploc, plabel, criteria=0.5, max_output=20)
return [[pred.detach().cpu().numpy() for pred in detections] for detections in results]
@staticmethod
def pick_best(detections, threshold=0.3):
bboxes, classes, confidences = detections
best = np.argwhere(confidences > threshold)[:, 0]
return [pred[best] for pred in detections]
@staticmethod
def get_coco_object_dictionary():
import os
file_with_coco_names = "category_names.txt"
if not os.path.exists(file_with_coco_names):
print("Downloading COCO annotations.")
import urllib
import zipfile
import json
import shutil
urllib.request.urlretrieve("http://images.cocodataset.org/annotations/annotations_trainval2017.zip", "cocoanno.zip")
with zipfile.ZipFile("cocoanno.zip", "r") as f:
f.extractall()
print("Downloading finished.")
with open("annotations/instances_val2017.json", 'r') as COCO:
js = json.loads(COCO.read())
class_names = [category['name'] for category in js['categories']]
open("category_names.txt", 'w').writelines([c+"\n" for c in class_names])
os.remove("cocoanno.zip")
shutil.rmtree("annotations")
else:
class_names = open("category_names.txt").readlines()
class_names = [c.strip() for c in class_names]
return class_names
return Processing()
def nvidia_ssd(pretrained=True, **kwargs):
"""Constructs an SSD300 model.
For detailed information on model input and output, training recipes, inference and performance
visit: github.com/NVIDIA/DeepLearningExamples and/or ngc.nvidia.com
Args:
pretrained (bool, True): If True, returns a model pretrained on COCO dataset.
model_math (str, 'fp32'): returns a model in given precision ('fp32' or 'fp16')
"""
from . import model as ssd
fp16 = "model_math" in kwargs and kwargs["model_math"] == "fp16"
force_reload = "force_reload" in kwargs and kwargs["force_reload"]
m = ssd.SSD300()
if fp16:
m = m.half()
def batchnorm_to_float(module):
"""Converts batch norm to FP32"""
if isinstance(module, torch.nn.modules.batchnorm._BatchNorm):
module.float()
for child in module.children():
batchnorm_to_float(child)
return module
m = batchnorm_to_float(m)
if pretrained:
checkpoint = 'https://api.ngc.nvidia.com/v2/models/nvidia/ssd_pyt_ckpt_amp/versions/20.06.0/files/nvidia_ssdpyt_amp_200703.pt'
ckpt_file = _download_checkpoint(checkpoint, force_reload)
ckpt = torch.load(ckpt_file)
ckpt = ckpt['model']
if checkpoint_from_distributed(ckpt):
ckpt = unwrap_distributed(ckpt)
m.load_state_dict(ckpt)
return m
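# Hedged end-to-end sketch combining the two hub entrypoints above; the image URL is
# illustrative, and network access plus a CUDA device are assumed.
def _example_entrypoints():
    ssd = nvidia_ssd(pretrained=True, model_math="fp32").cuda().eval()
    utils = nvidia_ssd_processing_utils()
    uri = "http://images.cocodataset.org/val2017/000000397133.jpg"
    tensor = utils.prepare_tensor([utils.prepare_input(uri)], fp16=False)
    with torch.no_grad():
        detections = ssd(tensor)
    results = utils.decode_results(detections)
    best = [utils.pick_best(d, threshold=0.4) for d in results]
    return best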
| DeepLearningExamples-master | PyTorch/Detection/SSD/ssd/entrypoints.py |
# Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from torch.autograd import Variable
import torch
import time
from apex import amp
def train_loop(model, loss_func, scaler, epoch, optim, train_dataloader, val_dataloader, encoder, iteration, logger, args, mean, std):
for nbatch, data in enumerate(train_dataloader):
img = data[0][0][0]
bbox = data[0][1][0]
label = data[0][2][0]
label = label.type(torch.cuda.LongTensor)
bbox_offsets = data[0][3][0]
bbox_offsets = bbox_offsets.cuda()
img.sub_(mean).div_(std)
if not args.no_cuda:
img = img.cuda()
bbox = bbox.cuda()
label = label.cuda()
bbox_offsets = bbox_offsets.cuda()
N = img.shape[0]
if bbox_offsets[-1].item() == 0:
print("No labels in batch")
continue
# output is ([N*8732, 4], [N*8732]); we need [N, 8732, 4] and [N, 8732] respectively
M = bbox.shape[0] // N
bbox = bbox.view(N, M, 4)
label = label.view(N, M)
with torch.cuda.amp.autocast(enabled=args.amp):
if args.data_layout == 'channels_last':
img = img.to(memory_format=torch.channels_last)
ploc, plabel = model(img)
ploc, plabel = ploc.float(), plabel.float()
trans_bbox = bbox.transpose(1, 2).contiguous().cuda()
gloc = Variable(trans_bbox, requires_grad=False)
glabel = Variable(label, requires_grad=False)
loss = loss_func(ploc, plabel, gloc, glabel)
if args.warmup is not None:
warmup(optim, args.warmup, iteration, args.learning_rate)
scaler.scale(loss).backward()
scaler.step(optim)
scaler.update()
optim.zero_grad()
if args.local_rank == 0:
logger.update_iter(epoch, iteration, loss.item())
iteration += 1
return iteration
def benchmark_train_loop(model, loss_func, scaler, epoch, optim, train_dataloader, val_dataloader, encoder, iteration, logger, args, mean, std):
start_time = None
# tensor for results
result = torch.zeros((1,)).cuda()
for nbatch, data in enumerate(loop(train_dataloader)):
if nbatch >= args.benchmark_warmup:
torch.cuda.synchronize()
start_time = time.time()
img = data[0][0][0]
bbox = data[0][1][0]
label = data[0][2][0]
label = label.type(torch.cuda.LongTensor)
bbox_offsets = data[0][3][0]
bbox_offsets = bbox_offsets.cuda()
img.sub_(mean).div_(std)
if not args.no_cuda:
img = img.cuda()
bbox = bbox.cuda()
label = label.cuda()
bbox_offsets = bbox_offsets.cuda()
N = img.shape[0]
if bbox_offsets[-1].item() == 0:
print("No labels in batch")
continue
# output is ([N*8732, 4], [N*8732]); we need [N, 8732, 4] and [N, 8732] respectively
M = bbox.shape[0] // N
bbox = bbox.view(N, M, 4)
label = label.view(N, M)
with torch.cuda.amp.autocast(enabled=args.amp):
if args.data_layout == 'channels_last':
img = img.to(memory_format=torch.channels_last)
ploc, plabel = model(img)
ploc, plabel = ploc.float(), plabel.float()
trans_bbox = bbox.transpose(1, 2).contiguous().cuda()
gloc = Variable(trans_bbox, requires_grad=False)
glabel = Variable(label, requires_grad=False)
loss = loss_func(ploc, plabel, gloc, glabel)
if args.warmup is not None:
warmup(optim, args.warmup, iteration, args.learning_rate)
scaler.scale(loss).backward()
scaler.step(optim)
scaler.update()
optim.zero_grad()
if nbatch >= args.benchmark_warmup + args.benchmark_iterations:
break
if nbatch >= args.benchmark_warmup:
torch.cuda.synchronize()
logger.update(args.batch_size*args.N_gpu, time.time() - start_time)
result.data[0] = logger.print_result()
if args.N_gpu > 1:
torch.distributed.reduce(result, 0)
if args.local_rank == 0:
print('Training performance = {} FPS'.format(float(result.data[0])))
def loop(dataloader, reset=True):
while True:
for data in dataloader:
yield data
if reset:
dataloader.reset()
def benchmark_inference_loop(model, loss_func, scaler, epoch, optim, train_dataloader, val_dataloader, encoder, iteration, logger, args, mean, std):
assert args.N_gpu == 1, 'Inference benchmark only on 1 gpu'
model.eval()
val_datas = loop(val_dataloader, False)
for i in range(args.benchmark_warmup + args.benchmark_iterations):
torch.cuda.synchronize()
start_time = time.time()
data = next(val_datas)
img = data[0]
with torch.no_grad():
if not args.no_cuda:
img = img.cuda()
img.sub_(mean).div_(std)
with torch.cuda.amp.autocast(enabled=args.amp):
_ = model(img)
torch.cuda.synchronize()
end_time = time.time()
if i >= args.benchmark_warmup:
logger.update(args.eval_batch_size, end_time - start_time)
logger.print_result()
def warmup(optim, warmup_iters, iteration, base_lr):
if iteration < warmup_iters:
new_lr = 1. * base_lr / warmup_iters * iteration
for param_group in optim.param_groups:
param_group['lr'] = new_lr
def load_checkpoint(model, checkpoint):
"""
Load model from checkpoint.
"""
print("loading model checkpoint", checkpoint)
od = torch.load(checkpoint)
# remove the leading 'N.' prefix left in checkpoints saved from the DDP wrapper
saved_model = od["model"]
model.load_state_dict(saved_model)
def tencent_trick(model):
"""
Divide parameters into 2 groups.
First group is BNs and all biases.
Second group is the remaining model's parameters.
Weight decay will be disabled in first group (aka tencent trick).
"""
decay, no_decay = [], []
for name, param in model.named_parameters():
if not param.requires_grad:
continue # frozen weights
if len(param.shape) == 1 or name.endswith(".bias"):
no_decay.append(param)
else:
decay.append(param)
return [{'params': no_decay, 'weight_decay': 0.0},
{'params': decay}]
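# Sketch of wiring tencent_trick into the optimizer; the hyperparameter values are
# placeholders, not a tuned recipe.
def _example_optimizer(model, learning_rate=2.6e-3, momentum=0.9, weight_decay=0.0005):
    param_groups = tencent_trick(model)  # no weight decay on BN parameters and biases
    return torch.optim.SGD(param_groups, lr=learning_rate,
                           momentum=momentum, weight_decay=weight_decay)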
| DeepLearningExamples-master | PyTorch/Detection/SSD/ssd/train.py |
# Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import time
import numpy as np
from contextlib import redirect_stdout
import io
from pycocotools.cocoeval import COCOeval
def evaluate(model, coco, cocoGt, encoder, inv_map, args):
if args.distributed:
N_gpu = torch.distributed.get_world_size()
else:
N_gpu = 1
model.eval()
if not args.no_cuda:
model.cuda()
ret = []
start = time.time()
# for idx, image_id in enumerate(coco.img_keys):
for nbatch, (img, img_id, img_size, _, _) in enumerate(coco):
print("Parsing batch: {}/{}".format(nbatch, len(coco)), end='\r')
with torch.no_grad():
inp = img.cuda()
with torch.cuda.amp.autocast(enabled=args.amp):
# Get predictions
ploc, plabel = model(inp)
ploc, plabel = ploc.float(), plabel.float()
# Handle the batch of predictions produced
# This is slow, but consistent with old implementation.
for idx in range(ploc.shape[0]):
# ease-of-use for specific predictions
ploc_i = ploc[idx, :, :].unsqueeze(0)
plabel_i = plabel[idx, :, :].unsqueeze(0)
try:
result = encoder.decode_batch(ploc_i, plabel_i, 0.50, 200)[0]
except Exception as e:
print("Skipping idx {}, failed to decode with message {}, Skipping.".format(idx, e))
continue
htot, wtot = img_size[0][idx].item(), img_size[1][idx].item()
loc, label, prob = [r.cpu().numpy() for r in result]
for loc_, label_, prob_ in zip(loc, label, prob):
ret.append([img_id[idx], loc_[0] * wtot, \
loc_[1] * htot,
(loc_[2] - loc_[0]) * wtot,
(loc_[3] - loc_[1]) * htot,
prob_,
inv_map[label_]])
# Now we have all predictions from this rank, gather them all together
# if necessary
ret = np.array(ret).astype(np.float32)
# Multi-GPU eval
if args.distributed:
# NCCL backend means we can only operate on GPU tensors
ret_copy = torch.tensor(ret).cuda()
# Everyone exchanges the size of their results
ret_sizes = [torch.tensor(0).cuda() for _ in range(N_gpu)]
torch.cuda.synchronize()
torch.distributed.all_gather(ret_sizes, torch.tensor(ret_copy.shape[0]).cuda())
torch.cuda.synchronize()
# Get the maximum results size, as all tensors must be the same shape for
# the all_gather call we need to make
max_size = 0
sizes = []
for s in ret_sizes:
max_size = max(max_size, s.item())
sizes.append(s.item())
# Need to pad my output to max_size in order to use in all_gather
ret_pad = torch.cat([ret_copy, torch.zeros(max_size - ret_copy.shape[0], 7, dtype=torch.float32).cuda()])
# allocate storage for results from all other processes
other_ret = [torch.zeros(max_size, 7, dtype=torch.float32).cuda() for i in range(N_gpu)]
# Everyone exchanges (padded) results
torch.cuda.synchronize()
torch.distributed.all_gather(other_ret, ret_pad)
torch.cuda.synchronize()
# Now need to reconstruct the _actual_ results from the padded set using slices.
cat_tensors = []
for i in range(N_gpu):
cat_tensors.append(other_ret[i][:sizes[i]][:])
final_results = torch.cat(cat_tensors).cpu().numpy()
else:
# Otherwise full results are just our results
final_results = ret
if args.local_rank == 0:
print("")
print("Predicting Ended, total time: {:.2f} s".format(time.time() - start))
cocoDt = cocoGt.loadRes(final_results, use_ext=True)
E = COCOeval(cocoGt, cocoDt, iouType='bbox', use_ext=True)
E.evaluate()
E.accumulate()
if args.local_rank == 0:
E.summarize()
print("Current AP: {:.5f}".format(E.stats[0]))
else:
# fix for cocoeval indiscriminate prints
with redirect_stdout(io.StringIO()):
E.summarize()
# put the model back in training mode
model.train()
return E.stats[0]  # Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ]
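# Isolated sketch of the padded all_gather pattern used above: each rank pads its
# result tensor to the global maximum row count, exchanges it, then slices the
# padding back off. An initialized NCCL process group and CUDA tensors are assumed.
def _gather_variable_rows(local_rows):
    world_size = torch.distributed.get_world_size()
    sizes = [torch.tensor(0).cuda() for _ in range(world_size)]
    torch.distributed.all_gather(sizes, torch.tensor(local_rows.shape[0]).cuda())
    sizes = [s.item() for s in sizes]
    max_size = max(sizes)
    pad = torch.zeros(max_size - local_rows.shape[0], local_rows.shape[1],
                      dtype=local_rows.dtype, device=local_rows.device)
    padded = torch.cat([local_rows, pad])
    gathered = [torch.zeros_like(padded) for _ in range(world_size)]
    torch.distributed.all_gather(gathered, padded)
    return torch.cat([g[:n] for g, n in zip(gathered, sizes)])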
| DeepLearningExamples-master | PyTorch/Detection/SSD/ssd/evaluate.py |
# Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import torch
from torch.utils.data import DataLoader
from ssd.utils import dboxes300_coco, COCODetection
from ssd.utils import SSDTransformer
from pycocotools.coco import COCO
#DALI import
from ssd.coco_pipeline import COCOPipeline, DALICOCOIterator
def get_train_loader(args, local_seed):
train_annotate = os.path.join(args.data, "annotations/instances_train2017.json")
train_coco_root = os.path.join(args.data, "train2017")
train_pipe = COCOPipeline(batch_size=args.batch_size,
file_root=train_coco_root,
annotations_file=train_annotate,
default_boxes=dboxes300_coco(),
device_id=args.local_rank,
num_shards=args.N_gpu,
output_fp16=args.amp,
output_nhwc=False,
pad_output=False,
num_threads=args.num_workers, seed=local_seed)
train_pipe.build()
test_run = train_pipe.schedule_run(), train_pipe.share_outputs(), train_pipe.release_outputs()
train_loader = DALICOCOIterator(train_pipe, 118287 / args.N_gpu)
return train_loader
def get_val_dataset(args):
dboxes = dboxes300_coco()
val_trans = SSDTransformer(dboxes, (300, 300), val=True)
val_annotate = os.path.join(args.data, "annotations/instances_val2017.json")
val_coco_root = os.path.join(args.data, "val2017")
val_coco = COCODetection(val_coco_root, val_annotate, val_trans)
return val_coco
def get_val_dataloader(dataset, args):
if args.distributed:
val_sampler = torch.utils.data.distributed.DistributedSampler(dataset)
else:
val_sampler = None
val_dataloader = DataLoader(dataset,
batch_size=args.eval_batch_size,
shuffle=False, # Note: distributed sampler is shuffled :(
sampler=val_sampler,
num_workers=args.num_workers)
return val_dataloader
def get_coco_ground_truth(args):
val_annotate = os.path.join(args.data, "annotations/instances_val2017.json")
cocoGt = COCO(annotation_file=val_annotate, use_ext=True)
return cocoGt
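# Hedged sketch of building the validation pipeline from the helpers above; args is a
# stand-in namespace carrying only the fields these helpers actually read, and the
# data path is an assumption.
def _example_val_pipeline():
    from argparse import Namespace
    args = Namespace(data="/coco", eval_batch_size=32, num_workers=4, distributed=False)
    cocoGt = get_coco_ground_truth(args)
    val_dataset = get_val_dataset(args)
    val_dataloader = get_val_dataloader(val_dataset, args)
    return cocoGt, val_dataset, val_dataloader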
| DeepLearningExamples-master | PyTorch/Detection/SSD/ssd/data.py |
# Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import numpy as np
from apex.fp16_utils import network_to_half
from dle.inference import prepare_input
from ssd.model import SSD300, ResNet
from ssd.utils import dboxes300_coco, Encoder
def load_checkpoint(model, model_file):
cp = torch.load(model_file)['model']
model.load_state_dict(cp)
def build_predictor(model_file, backbone='resnet50'):
ssd300 = SSD300(backbone=ResNet(backbone=backbone))
load_checkpoint(ssd300, model_file)
return ssd300
def prepare_model(checkpoint_path):
ssd300 = build_predictor(checkpoint_path)
ssd300 = ssd300.cuda()
ssd300 = network_to_half(ssd300)
ssd300 = ssd300.eval()
return ssd300
def prepare_tensor(inputs):
NHWC = np.array(inputs)
NCHW = np.swapaxes(np.swapaxes(NHWC, 2, 3), 1, 2)
tensor = torch.from_numpy(NCHW)
tensor = tensor.cuda()
tensor = tensor.half()
return tensor
def decode_results(predictions):
dboxes = dboxes300_coco()
encoder = Encoder(dboxes)
ploc, plabel = [val.float() for val in predictions]
results = encoder.decode_batch(ploc, plabel, criteria=0.5, max_output=20)
return [[pred.detach().cpu().numpy() for pred in detections] for detections in results]
def pick_best(detections, threshold):
    bboxes, classes, confidences = detections
    best = np.argwhere(confidences > threshold).squeeze(axis=1)
    return [pred[best] for pred in detections]
def main(checkpoint_path, imgs):
inputs = [prepare_input(uri) for uri in imgs]
tensor = prepare_tensor(inputs)
ssd300 = prepare_model(checkpoint_path)
predictions = ssd300(tensor)
results = decode_results(predictions)
best_results = [pick_best(detections, threshold=0.3) for detections in results]
return best_results
if __name__ == '__main__':
best_results = main(
checkpoint_path='/checkpoints/SSD300v1.1.pt',
imgs=[ 'http://images.cocodataset.org/val2017/000000397133.jpg',
'http://images.cocodataset.org/val2017/000000037777.jpg',
'http://images.cocodataset.org/val2017/000000252219.jpg',
]
)
print(best_results)
| DeepLearningExamples-master | PyTorch/Detection/SSD/examples/SSD300_inference.py |
# Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import skimage
def load_image(image_path):
"""Code from Loading_Pretrained_Models.ipynb - a Caffe2 tutorial"""
mean, std = 128, 128
img = skimage.img_as_float(skimage.io.imread(image_path))
if len(img.shape) == 2:
img = np.array([img, img, img]).swapaxes(0,2)
return img
def rescale(img, input_height, input_width):
"""Code from Loading_Pretrained_Models.ipynb - a Caffe2 tutorial"""
aspect = img.shape[1]/float(img.shape[0])
if(aspect>1):
# landscape orientation - wide image
res = int(aspect * input_height)
imgScaled = skimage.transform.resize(img, (input_width, res))
if(aspect<1):
# portrait orientation - tall image
res = int(input_width/aspect)
imgScaled = skimage.transform.resize(img, (res, input_height))
if(aspect == 1):
imgScaled = skimage.transform.resize(img, (input_width, input_height))
return imgScaled
def crop_center(img,cropx,cropy):
"""Code from Loading_Pretrained_Models.ipynb - a Caffe2 tutorial"""
y,x,c = img.shape
startx = x//2-(cropx//2)
starty = y//2-(cropy//2)
return img[starty:starty+cropy,startx:startx+cropx]
def normalize(img, mean=128, std=128):
img = (img * 256 - mean) / std
return img
def prepare_input(img_uri):
img = load_image(img_uri)
img = rescale(img, 300, 300)
img = crop_center(img, 300, 300)
img = normalize(img)
return img
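# Minimal sketch chaining the helpers above into an NCHW batch; the image URL is only
# an example, and torch is imported locally because this module does not depend on it.
def _example_batch(uris=("http://images.cocodataset.org/val2017/000000397133.jpg",)):
    import torch
    batch = np.array([prepare_input(uri) for uri in uris])  # NHWC float
    batch = np.swapaxes(np.swapaxes(batch, 2, 3), 1, 2)     # NHWC -> NCHW
    return torch.from_numpy(batch).float()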
| DeepLearningExamples-master | PyTorch/Detection/SSD/dle/inference.py |
#!/usr/bin/env python
""" COCO validation script
Hacked together by Ross Wightman (https://github.com/rwightman)
"""
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import json
import time
import logging
import torch
import torch.nn.parallel
from torch.nn.parallel import DistributedDataParallel as DDP
import ctypes
import dllogger
from effdet.factory import create_model
from effdet.evaluator import COCOEvaluator
from utils.utils import setup_dllogger
from data import create_loader, CocoDetection
from utils.utils import AverageMeter, setup_default_logging
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
from tabulate import tabulate
import numpy as np
import itertools
torch.backends.cudnn.benchmark = True
_libcudart = ctypes.CDLL('libcudart.so')
def add_bool_arg(parser, name, default=False, help=''): # FIXME move to utils
dest_name = name.replace('-', '_')
group = parser.add_mutually_exclusive_group(required=False)
group.add_argument('--' + name, dest=dest_name, action='store_true', help=help)
group.add_argument('--no-' + name, dest=dest_name, action='store_false', help=help)
parser.set_defaults(**{dest_name: default})
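# Small sketch of what add_bool_arg produces: a paired --name / --no-name flag writing
# to a single destination; the flag name here is illustrative.
def _example_bool_arg():
    p = argparse.ArgumentParser()
    add_bool_arg(p, 'use-ema-weights', default=False, help='toggle EMA weights')
    assert p.parse_args(['--use-ema-weights']).use_ema_weights is True
    assert p.parse_args(['--no-use-ema-weights']).use_ema_weights is False
    assert p.parse_args([]).use_ema_weights is False
    return p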
parser = argparse.ArgumentParser(description='PyTorch ImageNet Validation')
parser.add_argument('data', metavar='DIR',
help='path to dataset')
parser.add_argument('--waymo', action='store_true', default=False,
help='Evaluate on the Waymo dataset instead of COCO. Default: False (COCO dataset)')
parser.add_argument('--anno', default='val2017',
help='mscoco annotation set (one of val2017, train2017, test-dev2017)')
parser.add_argument('--model', '-m', metavar='MODEL', default='tf_efficientdet_d1',
help='model architecture (default: tf_efficientdet_d1)')
add_bool_arg(parser, 'redundant-bias', default=None,
help='override model config for redundant bias layers')
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
help='number of data loading workers (default: 4)')
parser.add_argument('-b', '--batch-size', default=128, type=int,
metavar='N', help='mini-batch size (default: 128)')
parser.add_argument('--input_size', type=int, default=None, metavar='PCT',
help='Image size (default: None) if this is not set default model image size is taken')
parser.add_argument('--num_classes', type=int, default=None, metavar='PCT',
help='Number of classes the model needs to be trained for (default: None)')
parser.add_argument('--amp', action='store_true', default=False,
help='use NVIDIA amp for mixed precision training')
parser.add_argument('--interpolation', default='bilinear', type=str, metavar='NAME',
help='Image resize interpolation type (overrides model)')
parser.add_argument('--fill-color', default='mean', type=str, metavar='NAME',
help='Image augmentation fill (background) color ("mean" or int)')
parser.add_argument('--log-freq', default=10, type=int,
metavar='N', help='batch logging frequency (default: 10)')
parser.add_argument('--checkpoint', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('--pretrained', dest='pretrained', action='store_true',
help='use pre-trained model')
parser.add_argument('--num-gpu', type=int, default=1,
help='Number of GPUS to use')
parser.add_argument('--no-prefetcher', action='store_true', default=False,
help='disable fast prefetcher')
parser.add_argument('--pin-mem', action='store_true', default=False,
help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.')
parser.add_argument("--memory-format", type=str, default="nchw", choices=["nchw", "nhwc"],
help="memory layout, nchw or nhwc")
parser.add_argument('--use-ema', dest='use_ema', action='store_true',
help='use ema version of weights if present')
parser.add_argument('--inference', dest='inference', action='store_true',
help='If set, run inference only; otherwise run evaluation.')
parser.add_argument('--use-soft-nms', dest='use_soft_nms', action='store_true', default=False,
help='use softnms instead of default nms for eval')
parser.add_argument('--torchscript', dest='torchscript', action='store_true',
help='convert model torchscript for inference')
parser.add_argument('--results', default='./results.json', type=str, metavar='FILENAME',
help='JSON filename for evaluation results')
parser.add_argument('--dllogger-file', default='log.json', type=str, metavar='PATH',
help='File name of dllogger json file (default: log.json, current dir)')
parser.add_argument("--local_rank", default=os.getenv('LOCAL_RANK', 0), type=int)
parser.add_argument('--seed', type=int, default=42, metavar='S',
help='random seed (default: 42)')
parser.add_argument('--waymo-val', default=None, type=str,
help='Path to waymo validation images relative to data (default: "None")')
parser.add_argument('--waymo-val-annotation', default=None, type=str,
help='Absolute Path to waymo validation annotation (default: "None")')
def validate(args):
setup_dllogger(0, filename=args.dllogger_file)
dllogger.metadata('total_inference_time', {'unit': 's'})
dllogger.metadata('inference_throughput', {'unit': 'images/s'})
dllogger.metadata('inference_time', {'unit': 's'})
dllogger.metadata('map', {'unit': None})
dllogger.metadata('total_eval_time', {'unit': 's'})
if args.checkpoint != '':
args.pretrained = True
args.prefetcher = not args.no_prefetcher
if args.waymo:
assert args.waymo_val is not None
memory_format = (
torch.channels_last if args.memory_format == "nhwc" else torch.contiguous_format
)
args.distributed = False
if 'WORLD_SIZE' in os.environ:
args.distributed = int(os.environ['WORLD_SIZE']) > 1
args.device = 'cuda:0'
args.world_size = 1
args.rank = 0 # global rank
if args.distributed:
torch.cuda.manual_seed_all(args.seed)
args.device = 'cuda:%d' % args.local_rank
torch.cuda.set_device(args.local_rank)
torch.distributed.init_process_group(backend='nccl', init_method='env://')
args.world_size = torch.distributed.get_world_size()
args.rank = torch.distributed.get_rank()
# Set device limit on the current device
# cudaLimitMaxL2FetchGranularity = 0x05
pValue = ctypes.cast((ctypes.c_int*1)(), ctypes.POINTER(ctypes.c_int))
_libcudart.cudaDeviceSetLimit(ctypes.c_int(0x05), ctypes.c_int(128))
_libcudart.cudaDeviceGetLimit(pValue, ctypes.c_int(0x05))
assert pValue.contents.value == 128
assert args.rank >= 0
# create model
bench = create_model(
args.model,
input_size=args.input_size,
num_classes=args.num_classes,
bench_task='predict',
pretrained=args.pretrained,
redundant_bias=args.redundant_bias,
checkpoint_path=args.checkpoint,
checkpoint_ema=args.use_ema,
soft_nms=args.use_soft_nms,
strict_load=False
)
input_size = bench.config.image_size
data_config = bench.config
param_count = sum([m.numel() for m in bench.parameters()])
print('Model %s created, param count: %d' % (args.model, param_count))
bench = bench.cuda().to(memory_format=memory_format)
if args.distributed > 1:
raise ValueError("Evaluation is supported only on single GPU. args.num_gpu must be 1")
bench = DDP(bench, device_ids=[args.device]) # torch.nn.DataParallel(bench, device_ids=list(range(args.num_gpu)))
if args.waymo:
annotation_path = args.waymo_val_annotation
image_dir = args.waymo_val
else:
if 'test' in args.anno:
annotation_path = os.path.join(args.data, 'annotations', f'image_info_{args.anno}.json')
image_dir = 'test2017'
else:
annotation_path = os.path.join(args.data, 'annotations', f'instances_{args.anno}.json')
image_dir = args.anno
dataset = CocoDetection(os.path.join(args.data, image_dir), annotation_path, data_config)
evaluator = COCOEvaluator(dataset.coco, distributed=args.distributed, waymo=args.waymo)
loader = create_loader(
dataset,
input_size=input_size,
batch_size=args.batch_size,
use_prefetcher=args.prefetcher,
interpolation=args.interpolation,
fill_color=args.fill_color,
num_workers=args.workers,
distributed=args.distributed,
pin_mem=args.pin_mem,
memory_format=memory_format)
img_ids = []
results = []
dllogger_metric = {}
bench.eval()
batch_time = AverageMeter()
throughput = AverageMeter()
torch.cuda.synchronize()
end = time.time()
total_time_start = time.time()
with torch.no_grad():
for i, (input, target) in enumerate(loader):
with torch.cuda.amp.autocast(enabled=args.amp):
output = bench(input, target['img_scale'], target['img_size'])
torch.cuda.synchronize()
batch_time.update(time.time() - end)
throughput.update(input.size(0) / batch_time.val)
evaluator.add_predictions(output, target)
torch.cuda.synchronize()
# measure elapsed time
if i == 9:
batch_time.reset()
throughput.reset()
if args.rank == 0 and i % args.log_freq == 0:
print(
'Test: [{0:>4d}/{1}] '
'Time: {batch_time.val:.3f}s ({batch_time.avg:.3f}s, {rate_avg:>7.2f}/s) '
.format(
i, len(loader), batch_time=batch_time,
rate_avg=input.size(0) / batch_time.avg,
)
)
end = time.time()
torch.cuda.synchronize()
dllogger_metric['total_inference_time'] = time.time() - total_time_start
dllogger_metric['inference_throughput'] = throughput.avg
dllogger_metric['inference_time'] = 1000 / throughput.avg
total_time_start = time.time()
mean_ap = 0.
if not args.inference:
if 'test' not in args.anno:
mean_ap = evaluator.evaluate()
else:
evaluator.save_predictions(args.results)
torch.cuda.synchronize()
dllogger_metric['map'] = mean_ap
dllogger_metric['total_eval_time'] = time.time() - total_time_start
else:
evaluator.save_predictions(args.results)
if not args.distributed or args.rank == 0:
dllogger.log(step=(), data=dllogger_metric, verbosity=0)
return results
def main():
args = parser.parse_args()
validate(args)
if __name__ == '__main__':
main()
| DeepLearningExamples-master | PyTorch/Detection/Efficientdet/validate.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import glob
from os import path
import torch
from setuptools import setup, find_packages
from codecs import open
from torch.utils.cpp_extension import CUDA_HOME
from torch.utils.cpp_extension import CppExtension
from torch.utils.cpp_extension import CUDAExtension
here = path.abspath(path.dirname(__file__))
def get_extensions():
this_dir = os.path.dirname(os.path.abspath(__file__))
extensions_dir = os.path.join(this_dir, "effdet", "csrc", "nms")
main_file = glob.glob(os.path.join(extensions_dir, "*.cpp"))
source_cpu = glob.glob(os.path.join(extensions_dir, "cpu", "*.cpp"))
source_cuda = glob.glob(os.path.join(extensions_dir, "cuda", "*.cu"))
sources = main_file + source_cpu
extension = CppExtension
extra_compile_args = {"cxx": []}
define_macros = []
if CUDA_HOME is not None:
extension = CUDAExtension
sources += source_cuda
define_macros += [("WITH_CUDA", None)]
extra_compile_args["nvcc"] = [
"-DCUDA_HAS_FP16=1",
"-D__CUDA_NO_HALF_OPERATORS__",
"-D__CUDA_NO_HALF_CONVERSIONS__",
"-D__CUDA_NO_HALF2_OPERATORS__",
]
sources = [os.path.join(extensions_dir, s) for s in sources]
include_dirs = [extensions_dir]
ext_modules = [
extension(
"effdet_ext._C",
sources,
include_dirs=include_dirs,
define_macros=define_macros,
extra_compile_args=extra_compile_args,
),
CUDAExtension('focal_loss_cuda', [
'effdet/csrc/focal_loss/focal_loss_cuda.cpp',
'effdet/csrc/focal_loss/focal_loss_cuda_kernel.cu',
],
extra_compile_args={
'cxx': ['-O3', ],
'nvcc':['-O3', '-lineinfo', '-res-usage', '--use_fast_math', '--ftz=false']
})
]
return ext_modules
setup(
name='effdet',
version="0.4.1",
description='EfficientDet for PyTorch',
packages=find_packages(exclude=['data']),
ext_modules=get_extensions(),
cmdclass={"build_ext": torch.utils.cpp_extension.BuildExtension},
)
| DeepLearningExamples-master | PyTorch/Detection/Efficientdet/setup.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import subprocess
import os
import socket
from argparse import ArgumentParser, REMAINDER
import torch
def parse_args():
"""
Helper function parsing the command line options
@retval ArgumentParser
"""
parser = ArgumentParser(description="PyTorch distributed training launch "
"helper utilty that will spawn up "
"multiple distributed processes")
# Optional arguments for the launch helper
parser.add_argument("--nnodes", type=int, default=1,
help="The number of nodes to use for distributed "
"training")
parser.add_argument("--node_rank", type=int, default=0,
help="The rank of the node for multi-node distributed "
"training")
parser.add_argument("--nproc_per_node", type=int, default=1,
help="The number of processes to launch on each node, "
"for GPU training, this is recommended to be set "
"to the number of GPUs in your system so that "
"each process can be bound to a single GPU.")
parser.add_argument("--master_addr", default="127.0.0.1", type=str,
help="Master node (rank 0)'s address, should be either "
"the IP address or the hostname of node 0, for "
"single node multi-proc training, the "
"--master_addr can simply be 127.0.0.1")
parser.add_argument("--master_port", default=29500, type=int,
help="Master node (rank 0)'s free port that needs to "
"be used for communciation during distributed "
"training")
parser.add_argument('--no_hyperthreads', action='store_true',
help='Flag to disable binding to hyperthreads')
parser.add_argument('--no_membind', action='store_true',
help='Flag to disable memory binding')
# positional
parser.add_argument("training_script", type=str,
help="The full path to the single GPU training "
"program/script to be launched in parallel, "
"followed by all the arguments for the "
"training script")
# rest from the training program
parser.add_argument('training_script_args', nargs=REMAINDER)
return parser.parse_args()
def get_cpu_info(lscpu_out):
num_sockets = num_nodes = cores_per_socket = None
for line in lscpu_out.split('\n'):
if "Socket(s)" in line:
num_sockets = int(line.split(':')[1])
if "NUMA node(s)" in line:
num_nodes = int(line.split(':')[1])
if "Core(s) per socket" in line:
cores_per_socket = int(line.split(':')[1])
return num_sockets, num_nodes, cores_per_socket
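# Sketch exercising get_cpu_info on a canned lscpu snippet; the numbers are made up
# and only show which lines the parser looks for.
def _example_cpu_info():
    sample = "Core(s) per socket: 16\nSocket(s): 2\nNUMA node(s): 2\n"
    num_sockets, num_nodes, cores_per_socket = get_cpu_info(sample)
    assert (num_sockets, num_nodes, cores_per_socket) == (2, 2, 16)
    return num_sockets, num_nodes, cores_per_socket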
def main():
args = parse_args()
res = subprocess.run(["nvidia-smi", "-i", "0", "--query-gpu=count",
"--format=csv,noheader,nounits"], stdout=subprocess.PIPE)
num_gpus = int(res.stdout.decode('utf-8'))
assert args.nproc_per_node <= num_gpus, "ERROR: launch {} processes, but " \
"there are only {} gpus available".format(args.nproc_per_node, num_gpus)
res = subprocess.run(["lscpu"], stdout=subprocess.PIPE)
lscpu_out = res.stdout.decode('utf-8')
num_sockets, num_nodes, cores_per_socket = get_cpu_info(lscpu_out)
print("num_sockets = {} num_nodes={} cores_per_socket={}".format(
num_sockets, num_nodes, cores_per_socket))
assert all([num_sockets, num_nodes, cores_per_socket]), \
"ERROR: failed to parsing CPU info"
cores_per_node = (num_sockets * cores_per_socket) // num_nodes
if num_gpus > 1:
gpus_per_node = num_gpus // num_nodes
else:
gpus_per_node = 1
cores_per_gpu = cores_per_node // gpus_per_node
# world size in terms of number of processes
dist_world_size = args.nproc_per_node * args.nnodes
# set PyTorch distributed related environmental variables
current_env = os.environ.copy()
current_env["MASTER_ADDR"] = args.master_addr
current_env["MASTER_PORT"] = str(args.master_port)
current_env["WORLD_SIZE"] = str(dist_world_size)
processes = []
for local_rank in range(0, args.nproc_per_node):
# each process's rank
dist_rank = args.nproc_per_node * args.node_rank + local_rank
current_env["RANK"] = str(dist_rank)
local_node = local_rank // gpus_per_node
# form numactl binding command
cpu_ranges = [
local_rank * cores_per_gpu,
(local_rank + 1) * cores_per_gpu - 1,
local_rank * cores_per_gpu + (cores_per_gpu * gpus_per_node * num_nodes),
(local_rank + 1) * cores_per_gpu + (cores_per_gpu * gpus_per_node * num_nodes) - 1
]
numactlargs = []
if args.no_hyperthreads:
numactlargs += [ "--physcpubind={}-{}".format(*cpu_ranges[0:2]) ]
else:
numactlargs += [ "--physcpubind={}-{},{}-{}".format(*cpu_ranges) ]
if not args.no_membind:
numactlargs += [ "--membind={}".format(local_node) ]
# spawn the processes
cmd = [ "/usr/bin/numactl" ] \
+ numactlargs \
+ [ sys.executable,
"-u",
args.training_script,
"--local_rank={}".format(local_rank)
] \
+ args.training_script_args
print(cmd)
process = subprocess.Popen(cmd, env=current_env)
processes.append(process)
for process in processes:
process.wait()
if __name__ == "__main__":
main()
| DeepLearningExamples-master | PyTorch/Detection/Efficientdet/bind_launch.py |
#!/usr/bin/env python
""" EfficientDet Training Script
This script was started from an early version of the PyTorch ImageNet example
(https://github.com/pytorch/examples/tree/master/imagenet)
NVIDIA CUDA specific speedups adopted from NVIDIA Apex examples
(https://github.com/NVIDIA/apex/tree/master/examples/imagenet)
Hacked together by Ross Wightman (https://github.com/rwightman)
"""
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import time
import yaml
import os
from datetime import datetime
import ctypes
import numpy as np
import random
import copy
import torch
import torchvision.utils
from torch.nn.parallel import DistributedDataParallel as DDP
import dllogger
from effdet.factory import create_model
from effdet.evaluator import COCOEvaluator
from effdet.bench import unwrap_bench
from data import create_loader, CocoDetection
from utils.gpu_affinity import set_affinity
from utils.utils import *
from utils.optimizers import create_optimizer, clip_grad_norm_2
from utils.scheduler import create_scheduler
from utils.model_ema import ModelEma
torch.backends.cudnn.benchmark = True
_libcudart = ctypes.CDLL('libcudart.so')
# The first arg parser parses out only the --config argument, this argument is used to
# load a yaml file containing key-values that override the defaults for the main parser below
config_parser = parser = argparse.ArgumentParser(description='Training Config', add_help=False)
parser.add_argument('-c', '--config', default='', type=str, metavar='FILE',
help='YAML config file specifying default arguments')
def add_bool_arg(parser, name, default=False, help=''): # FIXME move to utils
dest_name = name.replace('-', '_')
group = parser.add_mutually_exclusive_group(required=False)
group.add_argument('--' + name, dest=dest_name, action='store_true', help=help)
group.add_argument('--no-' + name, dest=dest_name, action='store_false', help=help)
parser.set_defaults(**{dest_name: default})
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
# Dataset / Model parameters
parser.add_argument('data', metavar='DIR',
help='path to dataset')
parser.add_argument('--model', default='tf_efficientdet_d1', type=str, metavar='MODEL',
help='Name of model to train (default: "tf_efficientdet_d1")')
add_bool_arg(parser, 'redundant-bias', default=None,
help='override model config for redundant bias')
parser.set_defaults(redundant_bias=None)
parser.add_argument('--pretrained', action='store_true', default=False,
help='Start with pretrained version of specified network (if avail)')
parser.add_argument('--pretrained-backbone-path', default='', type=str, metavar='PATH',
help='Start from pretrained backbone weights.')
parser.add_argument('--initial-checkpoint', default='', type=str, metavar='PATH',
help='Initialize model from this checkpoint (default: none)')
parser.add_argument('--resume', action='store_true', default=False,
help='Resume full model and optimizer state from checkpoint (default: False)')
parser.add_argument('--no-resume-opt', action='store_true', default=False,
help='prevent resume of optimizer state when resuming model')
parser.add_argument('--interpolation', default='', type=str, metavar='NAME',
help='Image resize interpolation type (overrides model)')
parser.add_argument('--fill-color', default='0', type=str, metavar='NAME',
help='Image augmentation fill (background) color ("mean" or int)')
parser.add_argument('-b', '--batch-size', type=int, default=32, metavar='N',
help='input batch size for training (default: 32)')
parser.add_argument('-vb', '--validation-batch-size-multiplier', type=int, default=1, metavar='N',
help='ratio of validation batch size to training batch size (default: 1)')
parser.add_argument('--input_size', type=int, default=None, metavar='PCT',
help='Image size (default: None) if this is not set default model image size is taken')
parser.add_argument('--drop', type=float, default=0.0, metavar='PCT',
help='Dropout rate (default: 0.)')
parser.add_argument('--clip-grad', type=float, default=10.0, metavar='NORM',
help='Clip gradient norm (default: 10.0)')
# Optimizer parameters
parser.add_argument('--opt', default='momentum', type=str, metavar='OPTIMIZER',
help='Optimizer (default: "momentum")')
parser.add_argument('--opt-eps', default=1e-3, type=float, metavar='EPSILON',
help='Optimizer Epsilon (default: 1e-3)')
parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
help='SGD momentum (default: 0.9)')
parser.add_argument('--weight-decay', type=float, default=4e-5,
help='weight decay (default: 0.00004)')
# Learning rate schedule parameters
parser.add_argument('--sched', default='cosine', type=str, metavar='SCHEDULER',
help='LR scheduler (default: "cosine")')
parser.add_argument('--lr', type=float, default=0.01, metavar='LR',
help='learning rate (default: 0.01)')
parser.add_argument('--lr-noise', type=float, nargs='+', default=None, metavar='pct, pct',
help='learning rate noise on/off epoch percentages')
parser.add_argument('--lr-noise-pct', type=float, default=0.67, metavar='PERCENT',
help='learning rate noise limit percent (default: 0.67)')
parser.add_argument('--lr-noise-std', type=float, default=1.0, metavar='STDDEV',
help='learning rate noise std-dev (default: 1.0)')
parser.add_argument('--lr-cycle-mul', type=float, default=1.0, metavar='MULT',
help='learning rate cycle len multiplier (default: 1.0)')
parser.add_argument('--lr-cycle-limit', type=int, default=1, metavar='N',
help='learning rate cycle limit')
parser.add_argument('--warmup-lr', type=float, default=0.0001, metavar='LR',
help='warmup learning rate (default: 0.0001)')
parser.add_argument('--min-lr', type=float, default=1e-5, metavar='LR',
help='lower lr bound for cyclic schedulers that hit 0 (1e-5)')
parser.add_argument('--epochs', type=int, default=300, metavar='N',
help='number of epochs to train (default: 300)')
parser.add_argument('--start-epoch', default=None, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('--decay-epochs', type=float, default=30, metavar='N',
help='epoch interval to decay LR')
parser.add_argument('--warmup-epochs', type=int, default=5, metavar='N',
help='epochs to warmup LR, if scheduler supports')
parser.add_argument('--cooldown-epochs', type=int, default=10, metavar='N',
help='epochs to cooldown LR at min_lr, after cyclic schedule ends')
parser.add_argument('--patience-epochs', type=int, default=10, metavar='N',
help='patience epochs for Plateau LR scheduler (default: 10)')
parser.add_argument('--decay-rate', '--dr', type=float, default=0.1, metavar='RATE',
help='LR decay rate (default: 0.1)')
# Augmentation parameters
parser.add_argument('--mixup', type=float, default=0.0,
help='mixup alpha, mixup enabled if > 0. (default: 0.)')
parser.add_argument('--mixup-off-epoch', default=0, type=int, metavar='N',
help='turn off mixup after this epoch, disabled if 0 (default: 0)')
parser.add_argument('--smoothing', type=float, default=0.0,
help='label smoothing (default: 0.0)')
parser.add_argument('--train-interpolation', type=str, default='random',
help='Training interpolation (random, bilinear, bicubic default: "random")')
parser.add_argument('--sync-bn', action='store_true',
help='Enable NVIDIA Apex or Torch synchronized BatchNorm.')
parser.add_argument('--dist-bn', type=str, default='',
help='Distribute BatchNorm stats between nodes after each epoch ("broadcast", "reduce", or "")')
# Model Exponential Moving Average
parser.add_argument('--model-ema', action='store_true', default=False,
help='Enable tracking moving average of model weights')
parser.add_argument('--model-ema-decay', type=float, default=0.9998,
help='decay factor for model weights moving average (default: 0.9998)')
# Misc
parser.add_argument('--dist-group-size', type=int, default=0,
help='Group size for sync-bn')
parser.add_argument('--seed', type=int, default=42, metavar='S',
help='random seed (default: 42)')
parser.add_argument('--log-interval', type=int, default=50, metavar='N',
help='how many batches to wait before logging training status')
parser.add_argument('--eval-after', type=int, default=0, metavar='N',
help='Start evaluating after eval-after epochs')
parser.add_argument('--benchmark', action='store_true', default=False,
help='Turn this on when measuring performance')
parser.add_argument('--benchmark-steps', type=int, default=0, metavar='N',
help='Run training for this number of steps for performance measurement')
parser.add_argument('--dllogger-file', default='log.json', type=str, metavar='PATH',
help='File name of dllogger json file (default: log.json, current dir)')
parser.add_argument('--save-checkpoint-interval', type=int, default=10, metavar='N',
help='Save checkpoints after so many epochs')
parser.add_argument('-j', '--workers', type=int, default=4, metavar='N',
help='how many training processes to use (default: 4)')
parser.add_argument('--amp', action='store_true', default=False,
help='use NVIDIA amp for mixed precision training')
parser.add_argument('--no-pin-mem', dest='pin_mem', action='store_false',
help='Disable pin CPU memory in DataLoader.')
parser.add_argument('--no-prefetcher', action='store_true', default=False,
help='disable fast prefetcher')
parser.add_argument('--output', default='', type=str, metavar='PATH',
help='path to output folder (default: none, current dir)')
parser.add_argument('--eval-metric', default='map', type=str, metavar='EVAL_METRIC',
help='Best metric (default: "map")')
parser.add_argument("--local_rank", default=os.getenv('LOCAL_RANK', 0), type=int)
parser.add_argument("--memory-format", type=str, default="nchw", choices=["nchw", "nhwc"],
help="memory layout, nchw or nhwc")
parser.add_argument("--fused-focal-loss", action='store_true',
help="Use fused focal loss for better performance.")
# Waymo
parser.add_argument('--waymo', action='store_true', default=False,
help='Train on Waymo dataset or COCO dataset. Default: False (COCO dataset)')
parser.add_argument('--num_classes', type=int, default=None, metavar='PCT',
help='Number of classes the model needs to be trained for (default: None)')
parser.add_argument('--remove-weights', nargs='*', default=[],
help='Remove these weights from the state dict before loading checkpoint (use case can be not loading heads)')
parser.add_argument('--freeze-layers', nargs='*', default=[],
help='Freeze these layers')
parser.add_argument('--waymo-train-annotation', default=None, type=str,
help='Absolute Path to waymo training annotation (default: "None")')
parser.add_argument('--waymo-val-annotation', default=None, type=str,
help='Absolute Path to waymo validation annotation (default: "None")')
parser.add_argument('--waymo-train', default=None, type=str,
help='Path to waymo training relative to waymo data (default: "None")')
parser.add_argument('--waymo-val', default=None, type=str,
help='Path to waymo validation relative to waymo data (default: "None")')
def _parse_args():
# Do we have a config file to parse?
args_config, remaining = config_parser.parse_known_args()
if args_config.config:
with open(args_config.config, 'r') as f:
cfg = yaml.safe_load(f)
parser.set_defaults(**cfg)
# The main arg parser parses the rest of the args, the usual
# defaults will have been overridden if config file specified.
args = parser.parse_args(remaining)
# Cache the args as a text string to save them in the output dir later
args_text = yaml.safe_dump(args.__dict__, default_flow_style=False)
return args, args_text
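# Sketch of the two-stage parsing above: keys from a YAML config become new argparse
# defaults, while explicit CLI flags still win. The temp file path, the config keys,
# and the argv values are illustrative only.
def _example_config_override(cfg_path="/tmp/effdet_cfg.yaml"):
    import sys
    with open(cfg_path, "w") as f:
        yaml.safe_dump({"lr": 0.06, "epochs": 100}, f)
    argv_backup = sys.argv
    sys.argv = ["train.py", "-c", cfg_path, "/data", "--lr", "0.09"]
    try:
        args, _ = _parse_args()
    finally:
        sys.argv = argv_backup
    assert args.epochs == 100   # taken from the YAML default
    assert args.lr == 0.09      # CLI flag overrides the YAML value
    return args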
def get_outdirectory(path, *paths):
outdir = os.path.join(path, *paths)
if not os.path.exists(outdir):
os.makedirs(outdir, exist_ok=True)
return outdir
def main():
setup_default_logging() ## TODO(sugh) replace
args, args_text = _parse_args()
set_affinity(args.local_rank)
random.seed(args.seed)
torch.manual_seed(args.seed)
np.random.seed(args.seed)
args.prefetcher = not args.no_prefetcher
args.distributed = False
if 'WORLD_SIZE' in os.environ:
args.distributed = int(os.environ['WORLD_SIZE']) > 1
args.device = 'cuda:0'
args.world_size = 1
args.rank = 0 # global rank
if args.distributed:
torch.cuda.manual_seed_all(args.seed)
args.device = 'cuda:%d' % args.local_rank
torch.cuda.set_device(args.local_rank)
torch.distributed.init_process_group(backend='nccl', init_method='env://')
args.world_size = torch.distributed.get_world_size()
args.rank = torch.distributed.get_rank()
# Set device limit on the current device
# cudaLimitMaxL2FetchGranularity = 0x05
pValue = ctypes.cast((ctypes.c_int*1)(), ctypes.POINTER(ctypes.c_int))
_libcudart.cudaDeviceSetLimit(ctypes.c_int(0x05), ctypes.c_int(128))
_libcudart.cudaDeviceGetLimit(pValue, ctypes.c_int(0x05))
assert pValue.contents.value == 128
assert args.rank >= 0
setup_dllogger(args.rank, filename=args.dllogger_file)
dllogger.metadata('eval_batch_time', {'unit': 's'})
dllogger.metadata('train_batch_time', {'unit': 's'})
dllogger.metadata('eval_throughput', {'unit': 'images/s'})
    dllogger.metadata('train_throughput', {'unit': 'images/s'})
dllogger.metadata('eval_loss', {'unit': None})
dllogger.metadata('train_loss', {'unit': None})
dllogger.metadata('map', {'unit': None})
if args.distributed:
logging.info('Training in distributed mode with multiple processes, 1 GPU per process. Process %d, total %d.'
% (args.rank, args.world_size))
else:
logging.info('Training with a single process on 1 GPU.')
if args.waymo:
        if (args.waymo_train is not None and args.waymo_val is None) or (args.waymo_train is None and args.waymo_val is not None):
            raise Exception("waymo_train and waymo_val must be set together (only one was provided)")
memory_format = (
torch.channels_last if args.memory_format == "nhwc" else torch.contiguous_format
)
model = create_model(
args.model,
input_size=args.input_size,
num_classes=args.num_classes,
bench_task='train',
pretrained=args.pretrained,
pretrained_backbone_path=args.pretrained_backbone_path,
redundant_bias=args.redundant_bias,
checkpoint_path=args.initial_checkpoint,
label_smoothing=args.smoothing,
fused_focal_loss=args.fused_focal_loss,
remove_params=args.remove_weights,
freeze_layers=args.freeze_layers,
strict_load=False
)
# FIXME decide which args to keep and overlay on config / pass to backbone
# num_classes=args.num_classes,
input_size = model.config.image_size
data_config = model.config
print("Input size to be passed to dataloaders: {}".format(input_size))
print("Image size used in model: {}".format(model.config.image_size))
if args.rank == 0:
dllogger.log(step='PARAMETER', data={'model_name':args.model, 'param_count': sum([m.numel() for m in model.parameters()])})
model = model.cuda().to(memory_format=memory_format)
# # optionally resume from a checkpoint
if args.distributed:
if args.sync_bn:
try:
model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
if args.local_rank == 0:
logging.info(
'Converted model to use Synchronized BatchNorm. WARNING: You may have issues if using '
'zero initialized BN layers (enabled by default for ResNets) while sync-bn enabled.')
except Exception as e:
logging.error('Failed to enable Synchronized BatchNorm. Install Apex or Torch >= 1.1')
optimizer = create_optimizer(args, model)
scaler = torch.cuda.amp.GradScaler(enabled=args.amp)
resume_state = {}
resume_epoch = None
output_base = args.output if args.output else './output'
resume_checkpoint_path = get_latest_checkpoint(os.path.join(output_base, 'train'))
if args.resume and resume_checkpoint_path is not None:
print("Trying to load checkpoint from {}".format(resume_checkpoint_path))
resume_state, resume_epoch = resume_checkpoint(unwrap_bench(model), resume_checkpoint_path)
if resume_epoch is not None:
print("Resume training from {} epoch".format(resume_epoch))
if resume_state and not args.no_resume_opt:
if 'optimizer' in resume_state:
if args.local_rank == 0:
logging.info('Restoring Optimizer state from checkpoint')
optimizer.load_state_dict(resume_state['optimizer'])
if args.amp and 'scaler' in resume_state:
if args.local_rank == 0:
logging.info('Restoring NVIDIA AMP state from checkpoint')
scaler.load_state_dict(resume_state['scaler'])
del resume_state
model_ema = None
if args.model_ema:
# Important to create EMA model after cuda(), DP wrapper, and AMP but before SyncBN and DDP wrapper
if args.resume and resume_checkpoint_path is not None:
resume_path = resume_checkpoint_path
else:
resume_path = ''
model_ema = ModelEma(
model,
decay=args.model_ema_decay,
resume=resume_path)
if args.distributed:
if args.local_rank == 0:
logging.info("Using torch DistributedDataParallel. Install NVIDIA Apex for Apex DDP.")
model = DDP(model, device_ids=[args.device]) # can use device str in Torch >= 1.1
# NOTE: EMA model does not need to be wrapped by DDP
lr_scheduler, num_epochs = create_scheduler(args, optimizer)
start_epoch = 0
if args.start_epoch is not None:
# a specified start_epoch will always override the resume epoch
start_epoch = args.start_epoch
elif resume_epoch is not None:
start_epoch = resume_epoch
if lr_scheduler is not None and start_epoch > 0:
lr_scheduler.step(start_epoch)
if args.local_rank == 0:
dllogger.log(step="PARAMETER", data={'Scheduled_epochs': num_epochs}, verbosity=0)
# Benchmark will always override every other setting.
if args.benchmark:
start_epoch = 0
num_epochs = args.epochs
if args.waymo:
train_annotation_path = args.waymo_train_annotation
train_image_dir = args.waymo_train
else:
train_anno_set = 'train2017'
train_annotation_path = os.path.join(args.data, 'annotations', f'instances_{train_anno_set}.json')
train_image_dir = train_anno_set
dataset_train = CocoDetection(os.path.join(args.data, train_image_dir), train_annotation_path, data_config)
loader_train = create_loader(
dataset_train,
input_size=input_size,
batch_size=args.batch_size,
is_training=True,
use_prefetcher=args.prefetcher,
interpolation=args.train_interpolation,
num_workers=args.workers,
distributed=args.distributed,
pin_mem=args.pin_mem,
memory_format=memory_format
)
loader_train_iter = iter(loader_train)
    steps_per_epoch = int(np.ceil(len(dataset_train) / (args.world_size * args.batch_size)))
if args.waymo:
val_annotation_path = args.waymo_val_annotation
val_image_dir = args.waymo_val
else:
val_anno_set = 'val2017'
val_annotation_path = os.path.join(args.data, 'annotations', f'instances_{val_anno_set}.json')
val_image_dir = val_anno_set
dataset_eval = CocoDetection(os.path.join(args.data, val_image_dir), val_annotation_path, data_config)
loader_eval = create_loader(
dataset_eval,
input_size=input_size,
batch_size=args.validation_batch_size_multiplier * args.batch_size,
is_training=False,
use_prefetcher=args.prefetcher,
interpolation=args.interpolation,
num_workers=args.workers,
distributed=args.distributed,
pin_mem=args.pin_mem,
memory_format=memory_format
)
evaluator = COCOEvaluator(dataset_eval.coco, distributed=args.distributed, waymo=args.waymo)
eval_metric = args.eval_metric
eval_metrics = None
train_metrics = {}
best_metric = -1
is_best = False
best_epoch = None
saver = None
output_dir = ''
if args.rank == 0:
output_base = args.output if args.output else './output'
output_dir = get_outdirectory(output_base, 'train')
decreasing = True if eval_metric == 'loss' else False
saver = CheckpointSaver(checkpoint_dir=output_dir)
with open(os.path.join(output_dir, 'args.yaml'), 'w') as f:
f.write(args_text)
try:
for epoch in range(start_epoch, num_epochs):
if args.distributed:
loader_train.sampler.set_epoch(epoch)
train_metrics = train_epoch(
epoch, steps_per_epoch, model, loader_train_iter, optimizer, args,
lr_scheduler=lr_scheduler, output_dir=output_dir, use_amp=args.amp, scaler=scaler, model_ema=model_ema)
if args.distributed and args.dist_bn in ('broadcast', 'reduce'):
if args.local_rank == 0:
logging.info("Distributing BatchNorm running means and vars")
distribute_bn(model, args.world_size, args.dist_bn == 'reduce')
# the overhead of evaluating with coco style datasets is fairly high, so just ema or non, not both
if model_ema is not None:
if args.distributed and args.dist_bn in ('broadcast', 'reduce'):
distribute_bn(model_ema, args.world_size, args.dist_bn == 'reduce')
if epoch >= args.eval_after:
eval_metrics = validate(model_ema.ema, loader_eval, args, evaluator, epoch, log_suffix=' (EMA)')
else:
eval_metrics = validate(model, loader_eval, args, evaluator, epoch)
lr_scheduler.step(epoch + 1)
if saver is not None and args.rank == 0 and epoch % args.save_checkpoint_interval == 0:
if eval_metrics is not None:
# save proper checkpoint with eval metric
                    is_best = eval_metrics[eval_metric] > best_metric
                    if is_best:
                        best_metric = eval_metrics[eval_metric]
                        best_epoch = epoch
else:
is_best = False
best_metric = 0
saver.save_checkpoint(model, optimizer, epoch, model_ema=model_ema, metric=best_metric, is_best=is_best)
except KeyboardInterrupt:
dllogger.flush()
torch.cuda.empty_cache()
if best_metric > 0:
train_metrics.update({'best_map': best_metric, 'best_epoch': best_epoch})
if eval_metrics is not None:
train_metrics.update(eval_metrics)
dllogger.log(step=(), data=train_metrics, verbosity=0)
def train_epoch(
epoch, steps_per_epoch, model, loader_iter, optimizer, args,
lr_scheduler=None, output_dir='', use_amp=False, scaler=None, model_ema=None):
batch_time_m = AverageMeter()
data_time_m = AverageMeter()
losses_m = AverageMeter()
throughput_m = AverageMeter()
model.train()
torch.cuda.synchronize()
end = time.time()
last_idx = steps_per_epoch - 1
num_updates = epoch * steps_per_epoch
for batch_idx in range(steps_per_epoch):
input, target = next(loader_iter)
last_batch = batch_idx == last_idx
torch.cuda.synchronize()
data_time_m.update(time.time() - end)
with torch.cuda.amp.autocast(enabled=use_amp):
output = model(input, target)
loss = output['loss']
if not args.distributed:
losses_m.update(loss.item(), input.size(0))
scaler.scale(loss).backward()
if args.clip_grad > 0:
scaler.unscale_(optimizer)
torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=args.clip_grad)
scaler.step(optimizer)
scaler.update()
for p in model.parameters():
p.grad = None
torch.cuda.synchronize()
if model_ema is not None:
model_ema.update(model)
num_updates += 1
if batch_idx == 10:
batch_time_m.reset()
throughput_m.reset()
batch_time_m.update(time.time() - end)
throughput_m.update(float(input.size(0) * args.world_size / batch_time_m.val))
if last_batch or (batch_idx+1) % args.log_interval == 0:
lrl = [param_group['lr'] for param_group in optimizer.param_groups]
lr = sum(lrl) / len(lrl)
if args.distributed:
reduced_loss = reduce_tensor(loss.data, args.world_size)
losses_m.update(reduced_loss.item(), input.size(0))
if args.rank == 0:
dllogger_data = {'train_batch_time': batch_time_m.avg,
'train_loss': losses_m.avg,
'throughput': throughput_m.avg,
'lr': lr,
'train_data_time': data_time_m.avg}
dllogger.log(step=(epoch, steps_per_epoch, batch_idx), data=dllogger_data, verbosity=0)
if lr_scheduler is not None:
lr_scheduler.step_update(num_updates=num_updates, metric=losses_m.avg)
torch.cuda.synchronize()
end = time.time()
if args.benchmark:
if batch_idx >= args.benchmark_steps:
break
# end for
if hasattr(optimizer, 'sync_lookahead'):
optimizer.sync_lookahead()
    metrics = {'train_loss': losses_m.avg, 'train_batch_time': batch_time_m.avg, 'train_throughput': throughput_m.avg}
dllogger.log(step=(epoch,), data=metrics, verbosity=0)
return metrics
def validate(model, loader, args, evaluator=None, epoch=0, log_suffix=''):
batch_time_m = AverageMeter()
losses_m = AverageMeter()
throughput_m = AverageMeter()
model.eval()
torch.cuda.synchronize()
end = time.time()
last_idx = len(loader) - 1
with torch.no_grad():
for batch_idx, (input, target) in enumerate(loader):
last_batch = batch_idx == last_idx
with torch.cuda.amp.autocast(enabled=args.amp):
output = model(input, target)
loss = output['loss']
if evaluator is not None:
evaluator.add_predictions(output['detections'], target)
if args.distributed:
reduced_loss = reduce_tensor(loss.data, args.world_size)
else:
reduced_loss = loss.data
torch.cuda.synchronize()
losses_m.update(reduced_loss.item(), input.size(0))
batch_time_m.update(time.time() - end)
throughput_m.update(float(input.size(0) * args.world_size / batch_time_m.val))
end = time.time()
if args.rank == 0 and (last_batch or batch_idx % args.log_interval == 0):
log_name = 'Test' + log_suffix
dllogger_data = {'eval_batch_time': batch_time_m.val, 'eval_loss': losses_m.val}
dllogger.log(step=(epoch, last_idx, batch_idx), data=dllogger_data, verbosity=0)
metrics = {'eval_batch_time': batch_time_m.avg, 'eval_throughput': throughput_m.avg, 'eval_loss': losses_m.avg}
if evaluator is not None:
metrics['map'] = evaluator.evaluate()
if args.rank == 0:
dllogger.log(step=(epoch,), data=metrics, verbosity=0)
return metrics
if __name__ == '__main__':
torch.cuda.empty_cache()
main()
| DeepLearningExamples-master | PyTorch/Detection/Efficientdet/train.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from efficientnet import EfficientNet, efficientnet_configs
def test_feature_type(net, images):
output, features = net(images, features_only=True)
print("[ ... Test Type ... ] Type of output {} features {}".format(type(output), type(features)))
def test_feature_dimensions(net, images):
output, features = net(images, features_only=True)
print("[ ... Test dimension ... ] Dim of output {} features {}".format(output.size(), len(features)))
for i, x in enumerate(features):
print("[ ... Test dimension ... ] Index {} features size {}".format(i, features[i].size()))
def test_feature_info(net, images):
feature_info = net.feature_info
for i, f in enumerate(feature_info):
print("[ ... Test Feature Info ... ] Index {} features info {}".format(i, f))
def main():
global_config = efficientnet_configs['fanout']
net = EfficientNet(width_coeff=1, depth_coeff=1, dropout=0.2, num_classes=1000, global_config=global_config, out_indices=[2,3,4])
images = torch.rand((2, 3, 512, 512))
test_feature_type(net, images)
test_feature_dimensions(net, images)
test_feature_info(net, images)
print("Model Layer Names")
for n, m in net.named_modules():
print(n)
if __name__ == '__main__':
main() | DeepLearningExamples-master | PyTorch/Detection/Efficientdet/effdet/efficientnet_test.py |
__version__ = '0.1.4'
| DeepLearningExamples-master | PyTorch/Detection/Efficientdet/effdet/version.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License. | DeepLearningExamples-master | PyTorch/Detection/Efficientdet/effdet/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .model import EfficientDet
from .bench import DetBenchTrain, DetBenchPredict
from .config import get_efficientdet_config
from utils.utils import load_checkpoint, freeze_layers_fn
def create_model(
model_name, input_size=None, num_classes=None, bench_task='', pretrained=False, checkpoint_path='', checkpoint_ema=False, **kwargs):
config = get_efficientdet_config(model_name)
if num_classes is not None:
config.num_classes = num_classes
if input_size is not None:
config.image_size = input_size
pretrained_backbone_path = kwargs.pop('pretrained_backbone_path', '')
if pretrained or checkpoint_path:
pretrained_backbone_path = '' # no point in loading backbone weights
strict_load = kwargs.pop('strict_load', True)
redundant_bias = kwargs.pop('redundant_bias', None)
if redundant_bias is not None:
# override config if set to something
config.redundant_bias = redundant_bias
soft_nms = kwargs.pop('soft_nms', False)
config.label_smoothing = kwargs.pop('label_smoothing', 0.1)
remove_params = kwargs.pop('remove_params', [])
freeze_layers = kwargs.pop('freeze_layers', [])
config.fused_focal_loss = kwargs.pop('fused_focal_loss', False)
model = EfficientDet(config, pretrained_backbone_path=pretrained_backbone_path, **kwargs)
# FIXME handle different head classes / anchors and re-init of necessary layers w/ pretrained load
if checkpoint_path:
load_checkpoint(model, checkpoint_path, use_ema=checkpoint_ema, strict=strict_load, remove_params=remove_params)
if len(freeze_layers) > 0:
freeze_layers_fn(model, freeze_layers=freeze_layers)
# wrap model in task specific bench if set
if bench_task == 'train':
model = DetBenchTrain(model, config)
elif bench_task == 'predict':
model = DetBenchPredict(model, config, soft_nms)
return model | DeepLearningExamples-master | PyTorch/Detection/Efficientdet/effdet/factory.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from collections import namedtuple
import torch
from torch import nn
BlockParameters = namedtuple('BlockParameters',
['kernel_size', 'stride', 'num_repeat', 'in_channels', 'out_channels', 'expand_ratio'])
GlobalParameters = namedtuple('GlobalParameters',
['squeeze_excitation_ratio', 'batchnorm_momentum', 'batchnorm_epsilon',
'stochastic_depth_survival_prob', 'feature_channels', "weights_init_mode"])
efficientnet_configs = {
"fanin": GlobalParameters(
squeeze_excitation_ratio=0.25,
batchnorm_momentum=1-0.99, # batchnorm momentum definition is different in pytorch and original paper
batchnorm_epsilon=1e-3,
stochastic_depth_survival_prob=0.8,
feature_channels=1280,
weights_init_mode="fan_in"
),
"fanout": GlobalParameters(
squeeze_excitation_ratio=0.25,
batchnorm_momentum=1-0.99,
batchnorm_epsilon=1e-3,
stochastic_depth_survival_prob=0.8,
feature_channels=1280,
weights_init_mode="fan_out"
),
}
BASE_EFFICIENTNET_BLOCKS_CONFIG = [
BlockParameters(kernel_size=3, stride=1, num_repeat=1, in_channels=32, out_channels=16, expand_ratio=1),
BlockParameters(kernel_size=3, stride=2, num_repeat=2, in_channels=16, out_channels=24, expand_ratio=6),
BlockParameters(kernel_size=5, stride=2, num_repeat=2, in_channels=24, out_channels=40, expand_ratio=6),
BlockParameters(kernel_size=3, stride=2, num_repeat=3, in_channels=40, out_channels=80, expand_ratio=6),
BlockParameters(kernel_size=5, stride=1, num_repeat=3, in_channels=80, out_channels=112, expand_ratio=6),
BlockParameters(kernel_size=5, stride=2, num_repeat=4, in_channels=112, out_channels=192, expand_ratio=6),
BlockParameters(kernel_size=3, stride=1, num_repeat=1, in_channels=192, out_channels=320, expand_ratio=6)
]
def _scale_width(num_channels, width_coeff, divisor=8):
num_channels *= width_coeff
# Rounding should not go down by more than 10%
rounded_num_channels = max(divisor, int(num_channels + divisor / 2) // divisor * divisor)
if rounded_num_channels < 0.9 * num_channels:
rounded_num_channels += divisor
return rounded_num_channels
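# Worked example of _scale_width with the default divisor of 8: scaling 16 channels by a width
# coefficient of 1.12 gives 17.92, which rounds to the nearest multiple of 8, i.e. 16; since
# 16 < 0.9 * 17.92 the result is bumped up to 24 to keep the reduction under 10%. Scaling
# 40 channels by 1.05 gives 42.0, which rounds to 40 and is accepted as-is.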
def scaled_efficientnet_config(width_coeff, depth_coeff):
config = [
block._replace(
num_repeat=int(math.ceil(block.num_repeat * depth_coeff)),
in_channels=_scale_width(block.in_channels, width_coeff),
out_channels=_scale_width(block.out_channels, width_coeff),
)
for block in BASE_EFFICIENTNET_BLOCKS_CONFIG
]
return config
class SqueezeAndExcitation(nn.Module):
def __init__(self, in_channels, squeeze, activation):
super(SqueezeAndExcitation, self).__init__()
self.squeeze = nn.Linear(in_channels, squeeze)
self.expand = nn.Linear(squeeze, in_channels)
self.activation = activation
self.sigmoid = nn.Sigmoid()
def forward(self, x):
out = torch.mean(x.view(x.size(0), x.size(1), -1), 2)
out = self.squeeze(out)
out = self.activation(out)
out = self.expand(out)
out = self.sigmoid(out)
out = out.unsqueeze(2).unsqueeze(3)
return out
# Since torch.nn.SiLU is not supported in ONNX,
# it is required to use this implementation in exported model (15-20% more GPU memory is needed)
class MemoryInefficientSiLU(nn.Module):
def __init__(self, *args, **kwargs):
super(MemoryInefficientSiLU, self).__init__()
def forward(self, x):
return x * torch.sigmoid(x)
class ConvBN(nn.Sequential):
def __init__(self, kernel_size, stride, in_channels, out_channels, activation,
bn_momentum, bn_epsilon, groups=1):
layers = [
nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, stride=stride,
groups=groups, bias=False, padding=int((kernel_size - 1) / 2)),
nn.BatchNorm2d(out_channels, momentum=bn_momentum, eps=bn_epsilon),
]
if activation is not None:
layers.append(activation)
super(ConvBN, self).__init__(*layers)
class MBConvBlock(nn.Module):
def __init__(self, block_config, global_config, survival_prob, activation):
super(MBConvBlock, self).__init__()
self.in_channels = block_config.in_channels
self.out_channels = block_config.out_channels
self.hidden_dim = self.in_channels * block_config.expand_ratio
self.squeeze_dim = max(1, int(self.in_channels * global_config.squeeze_excitation_ratio))
self.kernel_size = block_config.kernel_size
self.stride = block_config.stride
self.stochastic_depth_survival_prob = survival_prob
bn_momentum = global_config.batchnorm_momentum
bn_epsilon = global_config.batchnorm_epsilon
if self.in_channels != self.hidden_dim:
self.expand_conv = ConvBN(1, 1, self.in_channels, self.hidden_dim, activation(),
bn_momentum=bn_momentum, bn_epsilon=bn_epsilon)
self.squeeze_and_excitation = SqueezeAndExcitation(self.hidden_dim, self.squeeze_dim, activation())
self.depthwise_conv = ConvBN(self.kernel_size, self.stride, self.hidden_dim, self.hidden_dim, activation(),
groups=self.hidden_dim, bn_momentum=bn_momentum, bn_epsilon=bn_epsilon)
self.project_conv = ConvBN(1, 1, self.hidden_dim, self.out_channels,
activation=None, bn_momentum=bn_momentum, bn_epsilon=bn_epsilon)
def _drop_connections(self, x, synchronized=False):
if not self.training:
return x
random_mask = torch.rand([x.shape[0], 1, 1, 1], dtype=x.dtype, device=x.device)
if synchronized:
torch.distributed.broadcast(random_mask, 0)
random_mask = (self.stochastic_depth_survival_prob + random_mask).floor()
scaled_x = x / self.stochastic_depth_survival_prob
return scaled_x * random_mask
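    # Stochastic depth, illustrated: with survival_prob = 0.8 each sample's residual branch is
    # zeroed with probability 0.2 during training, and surviving samples are scaled by
    # 1 / 0.8 = 1.25 so the expected activation matches evaluation, where the branch is always kept.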
def forward(self, inputs):
x = inputs
if self.in_channels != self.hidden_dim:
x = self.expand_conv(x)
x = self.depthwise_conv(x)
x = x * self.squeeze_and_excitation(x)
x = self.project_conv(x)
if self.stride == 1 and self.in_channels == self.out_channels:
if self.stochastic_depth_survival_prob != 1:
x = self._drop_connections(x)
x = x + inputs
return x
class EfficientNet(nn.Module):
def __init__(self, width_coeff, depth_coeff, dropout, num_classes, global_config, features_only=True, out_indices=None, onnx_exportable=False):
super(EfficientNet, self).__init__()
self.features_only = features_only
self.efficientnet_blocks_config = scaled_efficientnet_config(width_coeff, depth_coeff)
self.global_config = global_config
self.in_channels = 3
self.feature_channels = _scale_width(self.global_config.feature_channels, width_coeff)
self.activation = torch.nn.SiLU if not onnx_exportable else MemoryInefficientSiLU
self.input_conv = ConvBN(3, 2, self.in_channels, self.efficientnet_blocks_config[0].in_channels,
activation=self.activation(),
bn_momentum=self.global_config.batchnorm_momentum,
bn_epsilon=self.global_config.batchnorm_epsilon)
self.feature_info = []
self.mbconv_blocks = nn.Sequential(*self.mbconv_blocks_generator())
if not self.features_only:
self.features_conv = ConvBN(1, 1, self.efficientnet_blocks_config[-1].out_channels, self.feature_channels,
activation=self.activation(),
bn_momentum=self.global_config.batchnorm_momentum,
bn_epsilon=self.global_config.batchnorm_epsilon)
self.avg_pooling = nn.AdaptiveAvgPool2d(1)
self.dropout = nn.Dropout(dropout)
self.fc = nn.Linear(self.feature_channels, num_classes)
if out_indices is not None:
self.feature_info = [v for i, v in enumerate(self.feature_info) if i in out_indices]
def mbconv_blocks_generator(self):
num_blocks = sum([block_config.num_repeat for block_config in self.efficientnet_blocks_config])
drop_rate = 1.0 - self.global_config.stochastic_depth_survival_prob
idx = 0
current_stride = 2
prev_block_config = None
for config_idx, block_config in enumerate(self.efficientnet_blocks_config):
for i in range(block_config.num_repeat):
# Conditions for feature extraction
if config_idx == len(self.efficientnet_blocks_config)-1 and i == block_config.num_repeat-1:
self.feature_info.append(dict(block_idx=idx, reduction=current_stride, num_chs=block_config.out_channels))
elif prev_block_config is not None and block_config.stride > 1:
self.feature_info.append(dict(block_idx=idx-1, reduction=current_stride, num_chs=prev_block_config.out_channels))
# Calculating the current stride
if block_config.stride > 1:
current_stride = current_stride * block_config.stride
survival_prob = 1.0 - drop_rate * float(idx) / num_blocks
yield MBConvBlock(block_config, self.global_config,
survival_prob=survival_prob, activation=self.activation)
idx += 1
prev_block_config = block_config
block_config = block_config._replace(in_channels=block_config.out_channels, stride=1)
def forward(self, inputs):
x = inputs
x = self.input_conv(x)
features = []
extraction_idx = 0
for i, b in enumerate(self.mbconv_blocks):
x = b(x)
if i == self.feature_info[extraction_idx]['block_idx']:
features.append(x)
extraction_idx += 1
return x, features
| DeepLearningExamples-master | PyTorch/Detection/Efficientdet/effdet/efficientnet.py |
""" PyTorch EfficientDet support benches
Hacked together by Ross Wightman
"""
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
from utils.model_ema import ModelEma
from .anchors import Anchors, AnchorLabeler, generate_detections, MAX_DETECTION_POINTS
from .loss import DetectionLoss
def _post_process(config, cls_outputs, box_outputs):
"""Selects top-k predictions.
Post-proc code adapted from Tensorflow version at: https://github.com/google/automl/tree/master/efficientdet
and optimized for PyTorch.
Args:
config: a parameter dictionary that includes `min_level`, `max_level`, `batch_size`, and `num_classes`.
        cls_outputs: an OrderedDict with keys representing levels and values
            representing logits in [batch_size, height, width, num_anchors].
        box_outputs: an OrderedDict with keys representing levels and values
            representing box regression targets in [batch_size, height, width, num_anchors * 4].
"""
batch_size = cls_outputs[0].shape[0]
if config.fused_focal_loss:
batch_size, channels, _, _ = cls_outputs[0].shape
padded_classes = (config.num_classes + 7) // 8 * 8
anchors = channels // padded_classes
_cls_outputs_all = []
for level in range(config.num_levels):
_, _, height, width = cls_outputs[level].shape
_cls_output = cls_outputs[level].permute(0, 2, 3, 1)
_cls_output = _cls_output.view(batch_size, height, width, anchors, padded_classes)
_cls_output = _cls_output[..., :config.num_classes]
_cls_output = _cls_output.reshape([batch_size, -1, config.num_classes])
_cls_outputs_all.append(_cls_output)
cls_outputs_all = torch.cat(_cls_outputs_all, 1)
else:
cls_outputs_all = torch.cat([
cls_outputs[level].permute(0, 2, 3, 1).reshape([batch_size, -1, config.num_classes])
for level in range(config.num_levels)], 1)
box_outputs_all = torch.cat([
box_outputs[level].permute(0, 2, 3, 1).reshape([batch_size, -1, 4])
for level in range(config.num_levels)], 1)
_, cls_topk_indices_all = torch.topk(cls_outputs_all.reshape(batch_size, -1), dim=1, k=MAX_DETECTION_POINTS, sorted=False)
indices_all = cls_topk_indices_all // config.num_classes
classes_all = cls_topk_indices_all % config.num_classes
box_outputs_all_after_topk = torch.gather(
box_outputs_all, 1, indices_all.unsqueeze(2).expand(-1, -1, 4))
cls_outputs_all_after_topk = torch.gather(
cls_outputs_all, 1, indices_all.unsqueeze(2).expand(-1, -1, config.num_classes))
cls_outputs_all_after_topk = torch.gather(
cls_outputs_all_after_topk, 2, classes_all.unsqueeze(2))
return cls_outputs_all_after_topk, box_outputs_all_after_topk, indices_all, classes_all
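# Worked example of the top-k gather above: cls_outputs_all is reshaped to
# [batch, total_anchors * num_classes] before torch.topk, so a selected flat index i encodes
# both the anchor and the class of a candidate: i // num_classes recovers the anchor index
# (used to gather the matching box regression output) and i % num_classes recovers the class
# id. With, say, 90 classes, flat index 1803 maps to anchor 20, class 3.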
def _batch_detection(batch_size: int, class_out, box_out, anchor_boxes, indices, classes, img_scale, img_size, soft_nms: bool = False):
batch_detections = []
# FIXME we may be able to do this as a batch with some tensor reshaping/indexing, PR welcome
for i in range(batch_size):
detections = generate_detections(
class_out[i], box_out[i], anchor_boxes, indices[i], classes[i], img_scale[i], img_size[i], soft_nms=soft_nms)
batch_detections.append(detections)
return torch.stack(batch_detections, dim=0)
class DetBenchPredict(nn.Module):
def __init__(self, model, config, soft_nms=False):
super(DetBenchPredict, self).__init__()
self.config = config
self.model = model
self.soft_nms = soft_nms
self.anchors = Anchors(
config.min_level, config.max_level,
config.num_scales, config.aspect_ratios,
config.anchor_scale, config.image_size)
def forward(self, x, img_scales, img_size):
class_out, box_out = self.model(x)
class_out, box_out, indices, classes = _post_process(self.config, class_out, box_out)
return _batch_detection(
x.shape[0], class_out, box_out, self.anchors.boxes, indices, classes, img_scales, img_size, self.soft_nms)
class DetBenchTrain(nn.Module):
def __init__(self, model, config):
super(DetBenchTrain, self).__init__()
self.config = config
self.model = model
self.anchors = Anchors(
config.min_level, config.max_level,
config.num_scales, config.aspect_ratios,
config.anchor_scale, config.image_size)
self.loss_fn = DetectionLoss(self.config)
def forward(self, x, target):
class_out, box_out = self.model(x)
loss, class_loss, box_loss = self.loss_fn(class_out, box_out, target, target['num_positives'])
output = dict(loss=loss, class_loss=class_loss, box_loss=box_loss)
if not self.training:
# if eval mode, output detections for evaluation
class_out, box_out, indices, classes = _post_process(self.config, class_out, box_out)
output['detections'] = _batch_detection(
x.shape[0], class_out, box_out, self.anchors.boxes, indices, classes,
target['img_scale'], target['img_size'])
return output
def unwrap_bench(model):
# Unwrap a model in support bench so that various other fns can access the weights and attribs of the
# underlying model directly
if isinstance(model, ModelEma): # unwrap ModelEma
return unwrap_bench(model.ema)
elif hasattr(model, 'module'): # unwrap DDP
return unwrap_bench(model.module)
elif hasattr(model, 'model'): # unwrap Bench -> model
return unwrap_bench(model.model)
else:
return model
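# Example: unwrap_bench(DetBenchTrain(model, config)) returns the inner EfficientDet instance,
# and the same call on a ModelEma- or DDP-wrapped bench peels off every wrapper, since each
# recursion step strips one .ema / .module / .model level.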
| DeepLearningExamples-master | PyTorch/Detection/Efficientdet/effdet/bench.py |
""" PyTorch EfficientDet model
Based on official Tensorflow version at: https://github.com/google/automl/tree/master/efficientdet
Paper: https://arxiv.org/abs/1911.09070
Hacked together by Ross Wightman
"""
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
import logging
import math
from collections import OrderedDict
from typing import List, Callable
from .layers import create_conv2d, drop_path, create_pool2d, Swish, get_act_layer
from .config import get_fpn_config, get_backbone_config
from .efficientnet import EfficientNet, efficientnet_configs
_DEBUG = False
_ACT_LAYER = Swish
class SequentialAppend(nn.Sequential):
def __init__(self, *args):
super(SequentialAppend, self).__init__(*args)
def forward(self, x: List[torch.Tensor]):
for module in self:
x.append(module(x))
return x
class SequentialAppendLast(nn.Sequential):
def __init__(self, *args):
super(SequentialAppendLast, self).__init__(*args)
def forward(self, x: List[torch.Tensor]):
for module in self:
x.append(module(x[-1]))
return x
class ConvBnAct2d(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1, dilation=1, padding='', bias=False,
norm_layer=nn.BatchNorm2d, norm_kwargs=None, act_layer=_ACT_LAYER):
super(ConvBnAct2d, self).__init__()
norm_kwargs = norm_kwargs or {}
self.conv = create_conv2d(
in_channels, out_channels, kernel_size, stride=stride, dilation=dilation, padding=padding, bias=bias)
self.bn = None if norm_layer is None else norm_layer(out_channels, **norm_kwargs) # here
self.act = None if act_layer is None else act_layer(inplace=True)
def forward(self, x):
x = self.conv(x)
if self.bn is not None:
x = self.bn(x)
if self.act is not None:
x = self.act(x)
return x
class SeparableConv2d(nn.Module):
""" Separable Conv
"""
def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, dilation=1, padding='', bias=False,
channel_multiplier=1.0, pw_kernel_size=1, act_layer=_ACT_LAYER,
norm_layer=nn.BatchNorm2d, norm_kwargs=None):
super(SeparableConv2d, self).__init__()
norm_kwargs = norm_kwargs or {}
self.conv_dw = create_conv2d(
in_channels, int(in_channels * channel_multiplier), kernel_size,
stride=stride, dilation=dilation, padding=padding, depthwise=True)
self.conv_pw = create_conv2d(
int(in_channels * channel_multiplier), out_channels, pw_kernel_size, padding=padding, bias=bias)
self.bn = None if norm_layer is None else norm_layer(out_channels, **norm_kwargs) # Here
self.act = None if act_layer is None else act_layer(inplace=True)
def forward(self, x):
x = self.conv_dw(x)
x = self.conv_pw(x)
if self.bn is not None:
x = self.bn(x)
if self.act is not None:
x = self.act(x)
return x
class ResampleFeatureMap(nn.Sequential):
def __init__(self, in_channels, out_channels, reduction_ratio=1., pad_type='', pooling_type='max',
norm_layer=nn.BatchNorm2d, norm_kwargs=None, apply_bn=False, conv_after_downsample=False,
redundant_bias=False):
super(ResampleFeatureMap, self).__init__()
pooling_type = pooling_type or 'max'
self.in_channels = in_channels
self.out_channels = out_channels
self.reduction_ratio = reduction_ratio
self.conv_after_downsample = conv_after_downsample
conv = None
if in_channels != out_channels:
conv = ConvBnAct2d(
in_channels, out_channels, kernel_size=1, padding=pad_type,
norm_layer=norm_layer if apply_bn else None, norm_kwargs=norm_kwargs,
bias=not apply_bn or redundant_bias, act_layer=None)
if reduction_ratio > 1:
stride_size = int(reduction_ratio)
if conv is not None and not self.conv_after_downsample:
self.add_module('conv', conv)
self.add_module(
'downsample',
create_pool2d(
pooling_type, kernel_size=stride_size + 1, stride=stride_size, padding=pad_type))
if conv is not None and self.conv_after_downsample:
self.add_module('conv', conv)
else:
if conv is not None:
self.add_module('conv', conv)
if reduction_ratio < 1:
scale = int(1 // reduction_ratio)
self.add_module('upsample', nn.UpsamplingNearest2d(scale_factor=scale))
# def forward(self, x):
# # here for debugging only
# assert x.shape[1] == self.in_channels
# if self.reduction_ratio > 1:
# if hasattr(self, 'conv') and not self.conv_after_downsample:
# x = self.conv(x)
# x = self.downsample(x)
# if hasattr(self, 'conv') and self.conv_after_downsample:
# x = self.conv(x)
# else:
# if hasattr(self, 'conv'):
# x = self.conv(x)
# if self.reduction_ratio < 1:
# x = self.upsample(x)
# return x
class FpnCombine(nn.Module):
def __init__(self, feature_info, fpn_config, fpn_channels, inputs_offsets, target_reduction, pad_type='',
pooling_type='max', norm_layer=nn.BatchNorm2d, norm_kwargs=None, apply_bn_for_resampling=False,
conv_after_downsample=False, redundant_bias=False, weight_method='attn'):
super(FpnCombine, self).__init__()
self.inputs_offsets = inputs_offsets
self.weight_method = weight_method
self.resample = nn.ModuleDict()
for idx, offset in enumerate(inputs_offsets):
in_channels = fpn_channels
if offset < len(feature_info):
in_channels = feature_info[offset]['num_chs']
input_reduction = feature_info[offset]['reduction']
else:
node_idx = offset - len(feature_info)
input_reduction = fpn_config.nodes[node_idx]['reduction']
reduction_ratio = target_reduction / input_reduction
self.resample[str(offset)] = ResampleFeatureMap(
in_channels, fpn_channels, reduction_ratio=reduction_ratio, pad_type=pad_type,
pooling_type=pooling_type, norm_layer=norm_layer, norm_kwargs=norm_kwargs,
apply_bn=apply_bn_for_resampling, conv_after_downsample=conv_after_downsample,
redundant_bias=redundant_bias)
if weight_method == 'attn' or weight_method == 'fastattn':
# WSM
self.edge_weights = nn.Parameter(torch.ones(len(inputs_offsets)), requires_grad=True)
else:
self.edge_weights = None
def forward(self, x):
dtype = x[0].dtype
nodes = []
for offset in self.inputs_offsets:
input_node = x[offset]
input_node = self.resample[str(offset)](input_node)
nodes.append(input_node)
if self.weight_method == 'attn':
normalized_weights = torch.softmax(self.edge_weights.type(dtype), dim=0)
x = torch.stack(nodes, dim=-1) * normalized_weights
elif self.weight_method == 'fastattn':
edge_weights = nn.functional.relu(self.edge_weights.type(dtype))
weights_sum = torch.sum(edge_weights)
x = torch.stack(
[(nodes[i] * edge_weights[i]) / (weights_sum + 0.0001) for i in range(len(nodes))], dim=-1)
elif self.weight_method == 'sum':
x = torch.stack(nodes, dim=-1)
else:
raise ValueError('unknown weight_method {}'.format(self.weight_method))
x = torch.sum(x, dim=-1)
return x
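# The 'fastattn' branch above implements the fast normalized fusion from the EfficientDet
# paper: out = sum_i(relu(w_i) * node_i) / (sum_j relu(w_j) + 0.0001), a softmax-free
# weighting; 'attn' applies a true softmax over the edge weights and 'sum' weights all
# inputs equally.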
class BiFpnLayer(nn.Module):
def __init__(self, feature_info, fpn_config, fpn_channels, num_levels=5, pad_type='',
pooling_type='max', norm_layer=nn.BatchNorm2d, norm_kwargs=None, act_layer=_ACT_LAYER,
apply_bn_for_resampling=False, conv_after_downsample=True, conv_bn_relu_pattern=False,
separable_conv=True, redundant_bias=False):
super(BiFpnLayer, self).__init__()
self.fpn_config = fpn_config
self.num_levels = num_levels
self.conv_bn_relu_pattern = False
self.feature_info = []
self.fnode = SequentialAppend()
for i, fnode_cfg in enumerate(fpn_config.nodes):
logging.debug('fnode {} : {}'.format(i, fnode_cfg))
fnode_layers = OrderedDict()
# combine features
reduction = fnode_cfg['reduction']
fnode_layers['combine'] = FpnCombine(
feature_info, fpn_config, fpn_channels, fnode_cfg['inputs_offsets'], target_reduction=reduction,
pad_type=pad_type, pooling_type=pooling_type, norm_layer=norm_layer, norm_kwargs=norm_kwargs,
apply_bn_for_resampling=apply_bn_for_resampling, conv_after_downsample=conv_after_downsample,
redundant_bias=redundant_bias, weight_method=fpn_config.weight_method)
self.feature_info.append(dict(num_chs=fpn_channels, reduction=reduction))
# after combine ops
after_combine = OrderedDict()
if not conv_bn_relu_pattern:
after_combine['act'] = act_layer(inplace=True)
conv_bias = redundant_bias
conv_act = None
else:
conv_bias = False
conv_act = act_layer
conv_kwargs = dict(
in_channels=fpn_channels, out_channels=fpn_channels, kernel_size=3, padding=pad_type,
bias=conv_bias, norm_layer=norm_layer, norm_kwargs=norm_kwargs, act_layer=conv_act)
after_combine['conv'] = SeparableConv2d(**conv_kwargs) if separable_conv else ConvBnAct2d(**conv_kwargs)
fnode_layers['after_combine'] = nn.Sequential(after_combine)
self.fnode.add_module(str(i), nn.Sequential(fnode_layers))
self.feature_info = self.feature_info[-num_levels::]
def forward(self, x):
x = self.fnode(x)
return x[-self.num_levels::]
class BiFpn(nn.Module):
def __init__(self, config, feature_info, norm_layer=nn.BatchNorm2d, norm_kwargs=None, act_layer=_ACT_LAYER):
super(BiFpn, self).__init__()
self.config = config
fpn_config = config.fpn_config or get_fpn_config(config.fpn_name)
self.resample = SequentialAppendLast()
for level in range(config.num_levels):
if level < len(feature_info):
in_chs = feature_info[level]['num_chs']
reduction = feature_info[level]['reduction']
else:
# Adds a coarser level by downsampling the last feature map
reduction_ratio = 2
self.resample.add_module(str(level), ResampleFeatureMap(
in_channels=in_chs,
out_channels=config.fpn_channels,
pad_type=config.pad_type,
pooling_type=config.pooling_type,
norm_layer=norm_layer,
norm_kwargs=norm_kwargs,
reduction_ratio=reduction_ratio,
apply_bn=config.apply_bn_for_resampling,
conv_after_downsample=config.conv_after_downsample,
redundant_bias=config.redundant_bias,
))
in_chs = config.fpn_channels
reduction = int(reduction * reduction_ratio)
feature_info.append(dict(num_chs=in_chs, reduction=reduction))
self.cell = nn.Sequential()
for rep in range(config.fpn_cell_repeats):
logging.debug('building cell {}'.format(rep))
fpn_layer = BiFpnLayer(
feature_info=feature_info,
fpn_config=fpn_config,
fpn_channels=config.fpn_channels,
num_levels=config.num_levels,
pad_type=config.pad_type,
pooling_type=config.pooling_type,
norm_layer=norm_layer,
norm_kwargs=norm_kwargs,
act_layer=act_layer,
separable_conv=config.separable_conv,
apply_bn_for_resampling=config.apply_bn_for_resampling,
conv_after_downsample=config.conv_after_downsample,
conv_bn_relu_pattern=config.conv_bn_relu_pattern,
redundant_bias=config.redundant_bias,
)
self.cell.add_module(str(rep), fpn_layer)
feature_info = fpn_layer.feature_info
def forward(self, x):
assert len(self.resample) == self.config.num_levels - len(x)
x = self.resample(x)
x = self.cell(x)
return x
class HeadNet(nn.Module):
def __init__(self, config, num_outputs, norm_layer=nn.BatchNorm2d, norm_kwargs=None, act_layer=_ACT_LAYER, predict_nhwc=False):
super(HeadNet, self).__init__()
norm_kwargs = norm_kwargs or {}
self.config = config
self.predict_nhwc = predict_nhwc
num_anchors = len(config.aspect_ratios) * config.num_scales
self.conv_rep = nn.ModuleList()
self.bn_rep = nn.ModuleList()
conv_kwargs = dict(
in_channels=config.fpn_channels, out_channels=config.fpn_channels, kernel_size=3,
padding=self.config.pad_type, bias=config.redundant_bias, act_layer=None, norm_layer=None)
for i in range(config.box_class_repeats):
conv = SeparableConv2d(**conv_kwargs) if config.separable_conv else ConvBnAct2d(**conv_kwargs)
self.conv_rep.append(conv)
bn_levels = []
for _ in range(config.num_levels):
bn_seq = nn.Sequential()
bn_seq.add_module('bn', norm_layer(config.fpn_channels, **norm_kwargs)) # Here
bn_levels.append(bn_seq)
self.bn_rep.append(nn.ModuleList(bn_levels))
self.act = act_layer(inplace=True)
predict_kwargs = dict(
in_channels=config.fpn_channels, out_channels=num_outputs * num_anchors, kernel_size=3,
padding=self.config.pad_type, bias=True, norm_layer=None, act_layer=None)
if config.separable_conv:
self.predict = SeparableConv2d(**predict_kwargs)
else:
self.predict = ConvBnAct2d(**predict_kwargs)
if self.predict_nhwc:
self.predict = self.predict.to(memory_format=torch.channels_last)
def forward(self, x):
outputs = []
for level in range(self.config.num_levels):
x_level = x[level]
for i in range(self.config.box_class_repeats):
x_level_ident = x_level
x_level = self.conv_rep[i](x_level)
x_level = self.bn_rep[i][level](x_level)
x_level = self.act(x_level)
if i > 0 and self.config.fpn_drop_path_rate:
x_level = drop_path(x_level, self.config.fpn_drop_path_rate, self.training)
x_level += x_level_ident
if self.predict_nhwc:
x_level = x_level.contiguous(memory_format=torch.channels_last)
outputs.append(self.predict(x_level))
return outputs
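# Note on HeadNet: the 3x3 conv towers in self.conv_rep are shared across all pyramid levels,
# while self.bn_rep keeps a separate BatchNorm per (repeat, level) pair, so only the
# normalization statistics are level-specific, as in the official EfficientDet head.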
def _init_weight(m, n='', ):
""" Weight initialization as per Tensorflow official implementations.
"""
def _fan_in_out(w, groups=1):
dimensions = w.dim()
if dimensions < 2:
raise ValueError("Fan in and fan out can not be computed for tensor with fewer than 2 dimensions")
num_input_fmaps = w.size(1)
num_output_fmaps = w.size(0)
receptive_field_size = 1
if w.dim() > 2:
receptive_field_size = w[0][0].numel()
fan_in = num_input_fmaps * receptive_field_size
fan_out = num_output_fmaps * receptive_field_size
fan_out //= groups
return fan_in, fan_out
def _glorot_uniform(w, gain=1, groups=1):
fan_in, fan_out = _fan_in_out(w, groups)
gain /= max(1., (fan_in + fan_out) / 2.) # fan avg
limit = math.sqrt(3.0 * gain)
w.data.uniform_(-limit, limit)
def _variance_scaling(w, gain=1, groups=1):
fan_in, fan_out = _fan_in_out(w, groups)
# gain /= max(1., fan_in) # fan in
gain /= max(1., (fan_in + fan_out) / 2.) # fan
# should it be normal or trunc normal? using normal for now since no good trunc in PT
# constant taken from scipy.stats.truncnorm.std(a=-2, b=2, loc=0., scale=1.)
# std = math.sqrt(gain) / .87962566103423978
# w.data.trunc_normal(std=std)
std = math.sqrt(gain)
w.data.normal_(std=std)
if isinstance(m, SeparableConv2d):
if 'box_net' in n or 'class_net' in n:
_variance_scaling(m.conv_dw.weight, groups=m.conv_dw.groups)
_variance_scaling(m.conv_pw.weight)
if m.conv_pw.bias is not None:
if 'class_net.predict' in n:
m.conv_pw.bias.data.fill_(-math.log((1 - 0.01) / 0.01))
else:
m.conv_pw.bias.data.zero_()
else:
_glorot_uniform(m.conv_dw.weight, groups=m.conv_dw.groups)
_glorot_uniform(m.conv_pw.weight)
if m.conv_pw.bias is not None:
m.conv_pw.bias.data.zero_()
elif isinstance(m, ConvBnAct2d):
if 'box_net' in n or 'class_net' in n:
m.conv.weight.data.normal_(std=.01)
if m.conv.bias is not None:
if 'class_net.predict' in n:
m.conv.bias.data.fill_(-math.log((1 - 0.01) / 0.01))
else:
m.conv.bias.data.zero_()
else:
_glorot_uniform(m.conv.weight)
if m.conv.bias is not None:
m.conv.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
# looks like all bn init the same?
m.weight.data.fill_(1.0)
m.bias.data.zero_()
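# Note on the class_net.predict bias init above: filling the bias with -log((1 - 0.01) / 0.01)
# makes the initial predicted foreground probability sigmoid(-log(0.99 / 0.01)) = 0.01, the
# focal-loss prior that keeps the classification loss from being swamped by easy negatives
# early in training.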
def _init_weight_alt(m, n='', ):
""" Weight initialization alternative, based on EfficientNet bacbkone init w/ class bias addition
NOTE: this will likely be removed after some experimentation
"""
if isinstance(m, nn.Conv2d):
fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
fan_out //= m.groups
m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
if m.bias is not None:
if 'class_net.predict' in n:
m.bias.data.fill_(-math.log((1 - 0.01) / 0.01))
else:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1.0)
m.bias.data.zero_()
class EfficientDet(nn.Module):
def __init__(self, config, norm_kwargs=None, pretrained_backbone_path='', alternate_init=False):
super(EfficientDet, self).__init__()
norm_kwargs = norm_kwargs or dict(eps=.001, momentum=.01)
## Replacing backbone
global_config = efficientnet_configs['fanout']
backbone_config = get_backbone_config(config.backbone_name)
self.backbone = EfficientNet(width_coeff=backbone_config['width_coeff'], depth_coeff=backbone_config['depth_coeff'], \
dropout=backbone_config['dropout'], num_classes=1000, global_config=global_config, features_only=True, out_indices=[2,3,4])
feature_info = self.backbone.feature_info
if pretrained_backbone_path != '':
ckpt_state_dict = torch.load(pretrained_backbone_path, map_location=lambda storage, loc: storage)
print("Backbone being loaded from checkpoint {}".format(pretrained_backbone_path))
self.backbone.load_state_dict(ckpt_state_dict, strict=False)
del ckpt_state_dict
# Pad to multiple of 8 for better performance
if config.fused_focal_loss:
num_classes = (config.num_classes + 7) // 8 * 8
else:
num_classes = config.num_classes
# TODO: predict_nhwc=config.fused_focal_loss for class_net
act_layer = get_act_layer(config.act_type)
self.fpn = BiFpn(config, feature_info, norm_kwargs=norm_kwargs, act_layer=act_layer)
self.class_net = HeadNet(config, num_outputs=num_classes, norm_kwargs=norm_kwargs,
act_layer=act_layer)
self.box_net = HeadNet(config, num_outputs=4, norm_kwargs=norm_kwargs, act_layer=act_layer)
for n, m in self.named_modules():
if 'backbone' not in n:
if alternate_init:
_init_weight_alt(m, n)
else:
_init_weight(m, n)
def forward(self, x):
_, x = self.backbone(x)
x = self.fpn(x)
x_class = self.class_net(x)
x_box = self.box_net(x)
return x_class, x_box
| DeepLearningExamples-master | PyTorch/Detection/Efficientdet/effdet/model.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import focal_loss_cuda
class FocalLoss(torch.autograd.Function):
@staticmethod
def forward(ctx, cls_output, cls_targets_at_level, num_positives_sum,
num_real_classes, alpha, gamma, label_smoothing=0.0):
loss, partial_grad = focal_loss_cuda.forward(cls_output,
cls_targets_at_level,
num_positives_sum,
num_real_classes,
alpha, gamma,
label_smoothing)
ctx.save_for_backward(partial_grad, num_positives_sum)
return loss
@staticmethod
def backward(ctx, grad_loss):
partial_grad, num_positives_sum = ctx.saved_tensors
# The backward kernel is actually in-place to save memory space,
# partial_grad and grad_input are the same tensor.
grad_input = focal_loss_cuda.backward(grad_loss, partial_grad,
num_positives_sum)
return grad_input, None, None, None, None, None, None
| DeepLearningExamples-master | PyTorch/Detection/Efficientdet/effdet/focal_loss.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import Optional, List, Dict
from .focal_loss import FocalLoss
opt_focal_loss = FocalLoss.apply
def focal_loss(logits, targets, alpha: float, gamma: float, normalizer):
"""Compute the focal loss between `logits` and the golden `target` values.
Focal loss = -(1-pt)^gamma * log(pt)
where pt is the probability of being classified to the true class.
Args:
logits: A float32 tensor of size [batch, height_in, width_in, num_predictions].
targets: A float32 tensor of size [batch, height_in, width_in, num_predictions].
alpha: A float32 scalar multiplying alpha to the loss from positive examples
and (1-alpha) to the loss from negative examples.
gamma: A float32 scalar modulating loss from hard and easy examples.
normalizer: A float32 scalar normalizes the total loss from all examples.
Returns:
loss: A float32 scalar representing normalized total loss.
"""
positive_label_mask = targets == 1.0
cross_entropy = F.binary_cross_entropy_with_logits(logits, targets.to(logits.dtype), reduction='none')
# Below are comments/derivations for computing modulator.
    # For brevity, let x = logits, z = targets, r = gamma, and p_t = sigmoid(x)
# for positive samples and 1 - sigmoid(x) for negative examples.
#
# The modulator, defined as (1 - P_t)^r, is a critical part in focal loss
# computation. For r > 0, it puts more weights on hard examples, and less
# weights on easier ones. However if it is directly computed as (1 - P_t)^r,
# its back-propagation is not stable when r < 1. The implementation here
# resolves the issue.
#
# For positive samples (labels being 1),
# (1 - p_t)^r
# = (1 - sigmoid(x))^r
# = (1 - (1 / (1 + exp(-x))))^r
# = (exp(-x) / (1 + exp(-x)))^r
# = exp(log((exp(-x) / (1 + exp(-x)))^r))
# = exp(r * log(exp(-x)) - r * log(1 + exp(-x)))
# = exp(- r * x - r * log(1 + exp(-x)))
#
# For negative samples (labels being 0),
# (1 - p_t)^r
# = (sigmoid(x))^r
# = (1 / (1 + exp(-x)))^r
# = exp(log((1 / (1 + exp(-x)))^r))
# = exp(-r * log(1 + exp(-x)))
#
# Therefore one unified form for positive (z = 1) and negative (z = 0)
# samples is:
# (1 - p_t)^r = exp(-r * z * x - r * log(1 + exp(-x))).
neg_logits = -1.0 * logits
modulator = torch.exp(gamma * targets * neg_logits - gamma * torch.log1p(torch.exp(neg_logits)))
loss = modulator * cross_entropy
weighted_loss = torch.where(positive_label_mask, alpha * loss, (1.0 - alpha) * loss)
weighted_loss /= normalizer
return weighted_loss
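# Numerical check of the modulator derivation above: for a positive target (z = 1) with
# logit x = 0 and gamma = 2, the direct form gives (1 - sigmoid(0))^2 = 0.25 and the
# stabilized form gives exp(-2 * 1 * 0 - 2 * log(1 + exp(0))) = exp(-2 * log(2)) = 0.25.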
def new_focal_loss(logits: torch.Tensor, targets: torch.Tensor, alpha: float, gamma: float, normalizer, label_smoothing: float = 0.1):
"""Compute the focal loss between `logits` and the golden `target` values.
'New' is not the best descriptor, but this focal loss impl matches recent versions of
the official Tensorflow impl of EfficientDet. It has support for label smoothing, however
it is a bit slower, doesn't jit optimize well, and uses more memory.
Focal loss = -(1-pt)^gamma * log(pt)
where pt is the probability of being classified to the true class.
Args:
logits: A float32 tensor of size [batch, height_in, width_in, num_predictions].
targets: A float32 tensor of size [batch, height_in, width_in, num_predictions].
alpha: A float32 scalar multiplying alpha to the loss from positive examples
and (1-alpha) to the loss from negative examples.
gamma: A float32 scalar modulating loss from hard and easy examples.
normalizer: Divide loss by this value.
label_smoothing: Float in [0, 1]. If > `0` then smooth the labels.
Returns:
loss: A float32 scalar representing normalized total loss.
"""
# compute focal loss multipliers before label smoothing, such that it will not blow up the loss.
pred_prob = logits.sigmoid()
targets = targets.to(logits.dtype)
onem_targets = 1. - targets
p_t = (targets * pred_prob) + (onem_targets * (1. - pred_prob))
alpha_factor = targets * alpha + onem_targets * (1. - alpha)
modulating_factor = (1. - p_t) ** gamma
# apply label smoothing for cross_entropy for each entry.
if label_smoothing > 0.:
targets = targets * (1. - label_smoothing) + .5 * label_smoothing
ce = F.binary_cross_entropy_with_logits(logits, targets, reduction='none')
# compute the final loss and return
return (1 / normalizer) * alpha_factor * modulating_factor * ce
def huber_loss(
input: torch.Tensor, target: torch.Tensor, delta: float = 1., weights: Optional[torch.Tensor] = None, size_average: bool = True):
"""
"""
err = input - target
abs_err = err.abs()
quadratic = torch.clamp(abs_err, max=delta)
linear = abs_err - quadratic
loss = 0.5 * quadratic.pow(2) + delta * linear
if weights is not None:
loss *= weights
return loss.mean() if size_average else loss.sum()
def smooth_l1_loss(
input: torch.Tensor, target: torch.Tensor, beta: float = 1. / 9, weights: Optional[torch.Tensor] = None, size_average: bool = True):
"""
very similar to the smooth_l1_loss from pytorch, but with the extra beta parameter
"""
if beta < 1e-5:
# if beta == 0, then torch.where will result in nan gradients when
# the chain rule is applied due to pytorch implementation details
# (the False branch "0.5 * n ** 2 / 0" has an incoming gradient of
# zeros, rather than "no gradient"). To avoid this issue, we define
# small values of beta to be exactly l1 loss.
loss = torch.abs(input - target)
else:
err = torch.abs(input - target)
loss = torch.where(err < beta, 0.5 * err.pow(2) / beta, err - 0.5 * beta)
if weights is not None:
loss *= weights
return loss.mean() if size_average else loss.sum()
def _classification_loss(cls_outputs: torch.Tensor, cls_targets: torch.Tensor, num_positives: float, alpha: float = 0.25, gamma: float = 2.0, label_smoothing: float = 0.1):
"""Computes classification loss."""
normalizer = num_positives
classification_loss = new_focal_loss(cls_outputs, cls_targets, alpha, gamma, normalizer, label_smoothing)
return classification_loss
def _box_loss(box_outputs, box_targets, num_positives, delta: float = 0.1):
"""Computes box regression loss."""
# delta is typically around the mean value of regression target.
    # For instance, the regression targets of a 512x512 input with 6 anchors on the
    # P3-P7 pyramid are about [0.1, 0.1, 0.2, 0.2].
normalizer = num_positives * 4.0
mask = box_targets != 0.0
box_loss = huber_loss(box_outputs, box_targets, weights=mask, delta=delta, size_average=False)
box_loss /= normalizer
return box_loss
def one_hot(x, num_classes: int):
    # NOTE: PyTorch one-hot does not handle negative entries (no hot) the way Tensorflow does, so mask them out
x_non_neg = (x >= 0).to(x.dtype)
onehot = torch.zeros(x.shape + (num_classes,), device=x.device, dtype=x.dtype)
onehot.scatter_(-1, (x * x_non_neg).unsqueeze(-1), 1)
return onehot * x_non_neg.unsqueeze(-1)
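# Hedged example (not part of the upstream file): negative entries (used for background and
# ignore labels elsewhere in this file) produce all-zero one-hot rows instead of raising.
def _one_hot_example():
    labels = torch.tensor([2, 0, -1, -2])
    return one_hot(labels, num_classes=4)  # last two rows are all zeros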
class DetectionLoss(nn.Module):
def __init__(self, config):
super(DetectionLoss, self).__init__()
self.config = config
self.num_classes = config.num_classes
self.alpha = config.alpha
self.gamma = config.gamma
self.delta = config.delta
self.label_smoothing = config.label_smoothing
self.box_loss_weight = config.box_loss_weight
self.fused_focal_loss = config.fused_focal_loss
def forward(
self, cls_outputs: List[torch.Tensor], box_outputs: List[torch.Tensor],
targets: Dict[str, torch.Tensor], num_positives: torch.Tensor):
"""Computes total detection loss.
Computes total detection loss including box and class loss from all levels.
Args:
cls_outputs: a List with values representing logits in [batch_size, height, width, num_anchors].
at each feature level (index)
box_outputs: a List with values representing box regression targets in
[batch_size, height, width, num_anchors * 4] at each feature level (index)
            targets: a dictionary holding per-level groundtruth class targets
                ('cls_targets_<level>') and box targets ('box_targets_<level>').
            num_positives: number of positive groundtruth anchors per image.
        Returns:
            total_loss: a float tensor representing the total loss reduced from class and box losses across all levels.
            cls_loss: a float tensor representing total class loss.
            box_loss: a float tensor representing total box regression loss.
"""
# Sum all positives in a batch for normalization and avoid zero
# num_positives_sum, which would lead to inf loss during training
num_positives_sum = num_positives.sum() + 1.0
levels = len(cls_outputs)
cls_losses = []
box_losses = []
for l in range(levels):
cls_key = 'cls_targets_%d' % (l+1)
bbox_key = 'box_targets_%d' % (l+1)
cls_targets_at_level = targets[cls_key].cuda()
box_targets_at_level = targets[bbox_key].cuda()
if self.fused_focal_loss:
cls_output = cls_outputs[l].permute(0, 2, 3, 1).contiguous()
bs, height, width, anchor = cls_targets_at_level.shape
cls_output = cls_output.view(bs, height, width, anchor, -1)
cls_loss = opt_focal_loss(cls_output,
cls_targets_at_level,
num_positives_sum,
self.num_classes, self.alpha, self.gamma, self.label_smoothing)
cls_losses.append(cls_loss)
else:
# Onehot encoding for classification labels.
cls_targets_at_level_oh = one_hot(cls_targets_at_level, self.num_classes)
bs, height, width, _, _ = cls_targets_at_level_oh.shape
cls_targets_at_level_oh = cls_targets_at_level_oh.view(bs, height, width, -1)
cls_loss = _classification_loss(
cls_outputs[l].permute(0, 2, 3, 1),
cls_targets_at_level_oh,
num_positives_sum,
alpha=self.alpha, gamma=self.gamma, label_smoothing=self.label_smoothing)
cls_loss = cls_loss.view(bs, height, width, -1, self.num_classes)
cls_loss *= (cls_targets_at_level != -2).unsqueeze(-1).float()
cls_losses.append(cls_loss.sum())
box_losses.append(_box_loss(
box_outputs[l].permute(0, 2, 3, 1),
box_targets_at_level,
num_positives_sum,
delta=self.delta))
# Sum per level losses to total loss.
cls_loss = torch.sum(torch.stack(cls_losses, dim=-1), dim=-1)
box_loss = torch.sum(torch.stack(box_losses, dim=-1), dim=-1)
total_loss = cls_loss + self.box_loss_weight * box_loss
return total_loss, cls_loss, box_loss
| DeepLearningExamples-master | PyTorch/Detection/Efficientdet/effdet/loss.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import logging
import numpy as np
import pickle
import torch
import torch.distributed as dist
_LOCAL_PROCESS_GROUP = None
"""
A torch process group which only includes processes that are on the same machine as the current process.
This variable is set when processes are spawned by `launch()` in "engine/launch.py".
"""
def get_world_size() -> int:
if not dist.is_available():
return 1
if not dist.is_initialized():
return 1
return dist.get_world_size()
def get_rank() -> int:
if not dist.is_available():
return 0
if not dist.is_initialized():
return 0
return dist.get_rank()
def get_local_rank() -> int:
"""
Returns:
The rank of the current process within the local (per-machine) process group.
"""
if not dist.is_available():
return 0
if not dist.is_initialized():
return 0
assert _LOCAL_PROCESS_GROUP is not None
return dist.get_rank(group=_LOCAL_PROCESS_GROUP)
def get_local_size() -> int:
"""
Returns:
The size of the per-machine process group,
i.e. the number of processes per machine.
"""
if not dist.is_available():
return 1
if not dist.is_initialized():
return 1
return dist.get_world_size(group=_LOCAL_PROCESS_GROUP)
def is_main_process() -> bool:
return get_rank() == 0
def synchronize():
"""
Helper function to synchronize (barrier) among all processes when
using distributed training
"""
if not dist.is_available():
return
if not dist.is_initialized():
return
world_size = dist.get_world_size()
if world_size == 1:
return
dist.barrier()
@functools.lru_cache()
def _get_global_gloo_group():
"""
    Return a process group based on the gloo backend, containing all the ranks.
The result is cached.
"""
if dist.get_backend() == "nccl":
return dist.new_group(backend="gloo")
else:
return dist.group.WORLD
def _serialize_to_tensor(data, group):
backend = dist.get_backend(group)
assert backend in ["gloo", "nccl"]
device = torch.device("cpu" if backend == "gloo" else "cuda")
buffer = pickle.dumps(data)
if len(buffer) > 1024 ** 3:
logger = logging.getLogger(__name__)
logger.warning(
"Rank {} trying to all-gather {:.2f} GB of data on device {}".format(
get_rank(), len(buffer) / (1024 ** 3), device
)
)
storage = torch.ByteStorage.from_buffer(buffer)
tensor = torch.ByteTensor(storage).to(device=device)
return tensor
def _pad_to_largest_tensor(tensor, group):
"""
Returns:
list[int]: size of the tensor, on each rank
Tensor: padded tensor that has the max size
"""
world_size = dist.get_world_size(group=group)
assert (
world_size >= 1
), "comm.gather/all_gather must be called from ranks within the given group!"
local_size = torch.tensor([tensor.numel()], dtype=torch.int64, device=tensor.device)
size_list = [
torch.zeros([1], dtype=torch.int64, device=tensor.device) for _ in range(world_size)
]
dist.all_gather(size_list, local_size, group=group)
size_list = [int(size.item()) for size in size_list]
max_size = max(size_list)
# we pad the tensor because torch all_gather does not support
# gathering tensors of different shapes
if local_size != max_size:
padding = torch.zeros((max_size - local_size,), dtype=torch.uint8, device=tensor.device)
tensor = torch.cat((tensor, padding), dim=0)
return size_list, tensor
def all_gather(data, group=None):
"""
Run all_gather on arbitrary picklable data (not necessarily tensors).
Args:
data: any picklable object
group: a torch process group. By default, will use a group which
contains all ranks on gloo backend.
Returns:
list[data]: list of data gathered from each rank
"""
if get_world_size() == 1:
return [data]
if group is None:
group = _get_global_gloo_group()
if dist.get_world_size(group) == 1:
return [data]
tensor = _serialize_to_tensor(data, group)
size_list, tensor = _pad_to_largest_tensor(tensor, group)
max_size = max(size_list)
# receiving Tensor from all ranks
tensor_list = [torch.empty((max_size,), dtype=torch.uint8, device=tensor.device) for _ in size_list]
dist.all_gather(tensor_list, tensor, group=group)
data_list = []
for size, tensor in zip(size_list, tensor_list):
buffer = tensor.cpu().numpy().tobytes()[:size]
data_list.append(pickle.loads(buffer))
return data_list
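# Hedged usage sketch (not part of the upstream file): gathers an arbitrary picklable object
# from every rank; it assumes torch.distributed has already been initialised by the training
# launcher, and the per-rank metrics dict is an illustrative assumption.
def _all_gather_example(local_metrics: dict):
    gathered = all_gather(local_metrics)   # list with one dict per rank
    if is_main_process():
        return {k: sum(d[k] for d in gathered) for k in local_metrics}
    return None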
def gather(data, dst=0, group=None):
"""
Run gather on arbitrary picklable data (not necessarily tensors).
Args:
data: any picklable object
dst (int): destination rank
group: a torch process group. By default, will use a group which
contains all ranks on gloo backend.
Returns:
list[data]: on dst, a list of data gathered from each rank. Otherwise,
an empty list.
"""
if get_world_size() == 1:
return [data]
if group is None:
group = _get_global_gloo_group()
if dist.get_world_size(group=group) == 1:
return [data]
rank = dist.get_rank(group=group)
tensor = _serialize_to_tensor(data, group)
size_list, tensor = _pad_to_largest_tensor(tensor, group)
# receiving Tensor from all ranks
if rank == dst:
max_size = max(size_list)
tensor_list = [torch.empty((max_size,), dtype=torch.uint8, device=tensor.device) for _ in size_list]
dist.gather(tensor, tensor_list, dst=dst, group=group)
data_list = []
for size, tensor in zip(size_list, tensor_list):
buffer = tensor.cpu().numpy().tobytes()[:size]
data_list.append(pickle.loads(buffer))
return data_list
else:
dist.gather(tensor, [], dst=dst, group=group)
return []
def shared_random_seed():
"""
Returns:
int: a random number that is the same across all workers.
If workers need a shared RNG, they can use this shared seed to
create one.
All workers must call this function, otherwise it will deadlock.
"""
ints = np.random.randint(2 ** 31)
all_ints = all_gather(ints)
return all_ints[0]
def reduce_dict(input_dict, average=True):
"""
Reduce the values in the dictionary from all processes so that process with rank
0 has the reduced results.
Args:
input_dict (dict): inputs to be reduced. All the values must be scalar CUDA Tensor.
average (bool): whether to do average or sum
Returns:
a dict with the same keys as input_dict, after reduction.
"""
world_size = get_world_size()
if world_size < 2:
return input_dict
with torch.no_grad():
names = []
values = []
# sort the keys so that they are consistent across processes
for k in sorted(input_dict.keys()):
names.append(k)
values.append(input_dict[k])
values = torch.stack(values, dim=0)
dist.reduce(values, dst=0)
if dist.get_rank() == 0 and average:
# only main process gets accumulated, so only divide by
# world_size in this case
values /= world_size
reduced_dict = {k: v for k, v in zip(names, values)}
return reduced_dict
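# Hedged usage sketch (not part of the upstream file): averages per-process scalar CUDA loss
# tensors onto rank 0, e.g. for logging; the loss names below are illustrative assumptions.
def _reduce_dict_example(cls_loss: torch.Tensor, box_loss: torch.Tensor):
    loss_dict = {'cls_loss': cls_loss.detach(), 'box_loss': box_loss.detach()}
    reduced = reduce_dict(loss_dict, average=True)
    if is_main_process():
        print({k: v.item() for k, v in reduced.items()})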
def all_gather_container(container, group=None, cat_dim=0):
group = group or dist.group.WORLD
world_size = dist.get_world_size(group)
def _do_gather(tensor):
tensor_list = [torch.empty_like(tensor) for _ in range(world_size)]
dist.all_gather(tensor_list, tensor, group=group)
return torch.cat(tensor_list, dim=cat_dim)
if isinstance(container, dict):
gathered = dict()
for k, v in container.items():
v = _do_gather(v)
gathered[k] = v
return gathered
elif isinstance(container, (list, tuple)):
gathered = [_do_gather(v) for v in container]
if isinstance(container, tuple):
gathered = tuple(gathered)
return gathered
else:
# if not a dict, list, tuple, expect a singular tensor
assert isinstance(container, torch.Tensor)
return _do_gather(container)
def gather_container(container, dst, group=None, cat_dim=0):
group = group or dist.group.WORLD
world_size = dist.get_world_size(group)
this_rank = dist.get_rank(group)
def _do_gather(tensor):
if this_rank == dst:
tensor_list = [torch.empty_like(tensor) for _ in range(world_size)]
else:
tensor_list = None
dist.gather(tensor, tensor_list, dst=dst, group=group)
return torch.cat(tensor_list, dim=cat_dim)
if isinstance(container, dict):
gathered = dict()
for k, v in container.items():
v = _do_gather(v)
gathered[k] = v
return gathered
elif isinstance(container, (list, tuple)):
gathered = [_do_gather(v) for v in container]
if isinstance(container, tuple):
gathered = tuple(gathered)
return gathered
else:
# if not a dict, list, tuple, expect a singular tensor
assert isinstance(container, torch.Tensor)
return _do_gather(container) | DeepLearningExamples-master | PyTorch/Detection/Efficientdet/effdet/distributed.py |
""" RetinaNet / EfficientDet Anchor Gen
Adapted for PyTorch from Tensorflow impl at
https://github.com/google/automl/blob/6f6694cec1a48cdb33d5d1551a2d5db8ad227798/efficientdet/anchors.py
Hacked together by Ross Wightman, original copyright below
"""
# Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Anchor definition.
This module is borrowed from TPU RetinaNet implementation:
https://github.com/tensorflow/tpu/blob/master/models/official/retinanet/anchors.py
"""
import collections
import numpy as np
import torch
import torch.nn as nn
from torchvision.ops.boxes import remove_small_boxes, batched_nms
from effdet.object_detection import ArgMaxMatcher, FasterRcnnBoxCoder, BoxList, IouSimilarity, TargetAssigner
from .layers.nms_layer import batched_soft_nms
# The minimum score to consider a logit for identifying detections.
MIN_CLASS_SCORE = -5.0
# The score for a dummy detection
_DUMMY_DETECTION_SCORE = -1e5
# The maximum number of (anchor,class) pairs to keep for non-max suppression.
MAX_DETECTION_POINTS = 5000
# The maximum number of detections per image.
MAX_DETECTIONS_PER_IMAGE = 100
def decode_box_outputs(rel_codes, anchors, output_xyxy: bool=False):
"""Transforms relative regression coordinates to absolute positions.
Network predictions are normalized and relative to a given anchor; this
reverses the transformation and outputs absolute coordinates for the input image.
Args:
rel_codes: box regression targets.
anchors: anchors on all feature levels.
Returns:
outputs: bounding boxes.
"""
ycenter_a = (anchors[:, 0] + anchors[:, 2]) / 2
xcenter_a = (anchors[:, 1] + anchors[:, 3]) / 2
ha = anchors[:, 2] - anchors[:, 0]
wa = anchors[:, 3] - anchors[:, 1]
ty, tx, th, tw = rel_codes.unbind(dim=1)
w = torch.exp(tw) * wa
h = torch.exp(th) * ha
ycenter = ty * ha + ycenter_a
xcenter = tx * wa + xcenter_a
ymin = ycenter - h / 2.
xmin = xcenter - w / 2.
ymax = ycenter + h / 2.
xmax = xcenter + w / 2.
if output_xyxy:
out = torch.stack([xmin, ymin, xmax, ymax], dim=1)
else:
out = torch.stack([ymin, xmin, ymax, xmax], dim=1)
return out
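# Hedged example (not part of the upstream file): with all-zero regression codes the decoded
# boxes reproduce the anchors, here converted to xyxy ordering.
def _decode_box_outputs_example():
    anchors = torch.tensor([[10., 20., 30., 60.]])   # [ymin, xmin, ymax, xmax]
    rel_codes = torch.zeros((1, 4))                  # [ty, tx, th, tw]
    return decode_box_outputs(rel_codes, anchors, output_xyxy=True)  # [[20., 10., 60., 30.]]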
def _generate_anchor_configs(min_level, max_level, num_scales, aspect_ratios):
"""Generates mapping from output level to a list of anchor configurations.
    A configuration is a tuple of (stride, octave_scale, aspect_ratio).
Args:
min_level: integer number of minimum level of the output feature pyramid.
max_level: integer number of maximum level of the output feature pyramid.
num_scales: integer number representing intermediate scales added on each level.
            For instance, num_scales=2 adds two additional anchor scales [2^0, 2^0.5] on each level.
aspect_ratios: list of tuples representing the aspect ratio anchors added on each level.
            For instance, aspect_ratios = [(1, 1), (1.4, 0.7), (0.7, 1.4)] adds three anchors on each level.
Returns:
anchor_configs: a dictionary with keys as the levels of anchors and
values as a list of anchor configuration.
"""
anchor_configs = {}
for level in range(min_level, max_level + 1):
anchor_configs[level] = []
for scale_octave in range(num_scales):
for aspect in aspect_ratios:
anchor_configs[level].append((2 ** level, scale_octave / float(num_scales), aspect))
return anchor_configs
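# Hedged example (not part of the upstream file): for a single level the configurations
# enumerate every (octave scale, aspect ratio) combination at that level's stride.
def _anchor_configs_example():
    configs = _generate_anchor_configs(min_level=3, max_level=3, num_scales=2,
                                       aspect_ratios=[(1., 1.), (1.4, 0.7)])
    # configs[3] == [(8, 0.0, (1., 1.)), (8, 0.0, (1.4, 0.7)),
    #                (8, 0.5, (1., 1.)), (8, 0.5, (1.4, 0.7))]
    return configs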
def _generate_anchor_boxes(image_size, anchor_scale, anchor_configs):
"""Generates multiscale anchor boxes.
Args:
image_size: integer number of input image size. The input image has the same dimension for
width and height. The image_size should be divided by the largest feature stride 2^max_level.
anchor_scale: float number representing the scale of size of the base
anchor to the feature stride 2^level.
anchor_configs: a dictionary with keys as the levels of anchors and
values as a list of anchor configuration.
Returns:
anchor_boxes: a numpy array with shape [N, 4], which stacks anchors on all feature levels.
Raises:
        ValueError: input size must be a multiple of the largest feature stride.
"""
boxes_all = []
for _, configs in anchor_configs.items():
boxes_level = []
for config in configs:
stride, octave_scale, aspect = config
if image_size % stride != 0:
                raise ValueError("input size must be divisible by the stride.")
base_anchor_size = anchor_scale * stride * 2 ** octave_scale
anchor_size_x_2 = base_anchor_size * aspect[0] / 2.0
anchor_size_y_2 = base_anchor_size * aspect[1] / 2.0
x = np.arange(stride / 2, image_size, stride)
y = np.arange(stride / 2, image_size, stride)
xv, yv = np.meshgrid(x, y)
xv = xv.reshape(-1)
yv = yv.reshape(-1)
boxes = np.vstack((yv - anchor_size_y_2, xv - anchor_size_x_2,
yv + anchor_size_y_2, xv + anchor_size_x_2))
boxes = np.swapaxes(boxes, 0, 1)
boxes_level.append(np.expand_dims(boxes, axis=1))
        # concat anchors on the same level and reshape to NxAx4
boxes_level = np.concatenate(boxes_level, axis=1)
boxes_all.append(boxes_level.reshape([-1, 4]))
anchor_boxes = np.vstack(boxes_all)
return anchor_boxes
def clip_boxes_xyxy(boxes: torch.Tensor, size: torch.Tensor):
boxes = boxes.clamp(min=0)
size = torch.cat([size, size], dim=0)
boxes = boxes.min(size)
return boxes
def generate_detections(
cls_outputs, box_outputs, anchor_boxes, indices, classes, img_scale, img_size,
max_det_per_image: int = MAX_DETECTIONS_PER_IMAGE, soft_nms: bool = False):
"""Generates detections with RetinaNet model outputs and anchors.
Args:
cls_outputs: a torch tensor with shape [N, 1], which has the highest class
scores on all feature levels. The N is the number of selected
top-K total anchors on all levels. (k being MAX_DETECTION_POINTS)
box_outputs: a torch tensor with shape [N, 4], which stacks box regression
outputs on all feature levels. The N is the number of selected top-k
total anchors on all levels. (k being MAX_DETECTION_POINTS)
anchor_boxes: a torch tensor with shape [N, 4], which stacks anchors on all
feature levels. The N is the number of selected top-k total anchors on all levels.
indices: a torch tensor with shape [N], which is the indices from top-k selection.
classes: a torch tensor with shape [N], which represents the class
prediction on all selected anchors from top-k selection.
img_scale: a float tensor representing the scale between original image
and input image for the detector. It is used to rescale detections for
evaluating with the original groundtruth annotations.
max_det_per_image: an int constant, added as argument to make torchscript happy
Returns:
        detections: detection results in a tensor with shape [max_det_per_image, 6],
            each row representing [x, y, width, height, score, class]
"""
anchor_boxes = anchor_boxes[indices, :]
# apply bounding box regression to anchors
boxes = decode_box_outputs(box_outputs.float(), anchor_boxes, output_xyxy=True)
boxes = clip_boxes_xyxy(boxes, img_size / img_scale) # clip before NMS better?
scores = cls_outputs.sigmoid().squeeze(1).float()
if soft_nms:
top_detection_idx, soft_scores = batched_soft_nms(
boxes, scores, classes, method_gaussian=True, iou_threshold=0.3, score_threshold=.001)
scores[top_detection_idx] = soft_scores
else:
top_detection_idx = batched_nms(boxes, scores, classes, iou_threshold=0.5)
# keep only topk scoring predictions
top_detection_idx = top_detection_idx[:max_det_per_image]
boxes = boxes[top_detection_idx]
scores = scores[top_detection_idx, None]
classes = classes[top_detection_idx, None]
# xyxy to xywh & rescale to original image
boxes[:, 2] -= boxes[:, 0]
boxes[:, 3] -= boxes[:, 1]
boxes *= img_scale
classes += 1 # back to class idx with background class = 0
# stack em and pad out to MAX_DETECTIONS_PER_IMAGE if necessary
detections = torch.cat([boxes, scores, classes.float()], dim=1)
if len(top_detection_idx) < max_det_per_image:
detections = torch.cat([
detections,
torch.zeros(
(max_det_per_image - len(top_detection_idx), 6), device=detections.device, dtype=detections.dtype)
], dim=0)
return detections
class Anchors(nn.Module):
"""RetinaNet Anchors class."""
def __init__(self, min_level, max_level, num_scales, aspect_ratios, anchor_scale, image_size):
"""Constructs multiscale RetinaNet anchors.
Args:
min_level: integer number of minimum level of the output feature pyramid.
max_level: integer number of maximum level of the output feature pyramid.
num_scales: integer number representing intermediate scales added
                on each level. For instance, num_scales=2 adds two additional
anchor scales [2^0, 2^0.5] on each level.
aspect_ratios: list of tuples representing the aspect ratio anchors added
                on each level. For instance, aspect_ratios =
[(1, 1), (1.4, 0.7), (0.7, 1.4)] adds three anchors on each level.
anchor_scale: float number representing the scale of size of the base
anchor to the feature stride 2^level.
image_size: integer number of input image size. The input image has the
same dimension for width and height. The image_size should be divided by
the largest feature stride 2^max_level.
"""
super(Anchors, self).__init__()
self.min_level = min_level
self.max_level = max_level
self.num_scales = num_scales
self.aspect_ratios = aspect_ratios
self.anchor_scale = anchor_scale
self.image_size = image_size
self.config = self._generate_configs()
self.register_buffer('boxes', self._generate_boxes())
def _generate_configs(self):
"""Generate configurations of anchor boxes."""
return _generate_anchor_configs(self.min_level, self.max_level, self.num_scales, self.aspect_ratios)
def _generate_boxes(self):
"""Generates multiscale anchor boxes."""
boxes = _generate_anchor_boxes(self.image_size, self.anchor_scale, self.config)
boxes = torch.from_numpy(boxes).float()
return boxes
def get_anchors_per_location(self):
return self.num_scales * len(self.aspect_ratios)
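# Hedged usage sketch (not part of the upstream file): builds a multiscale anchor set with
# EfficientDet-D0-like settings; the exact values are illustrative assumptions, not the
# configuration used elsewhere in this repository.
def _anchors_example():
    anchors = Anchors(min_level=3, max_level=7, num_scales=3,
                      aspect_ratios=[(1., 1.), (1.4, 0.7), (0.7, 1.4)],
                      anchor_scale=4., image_size=512)
    return anchors.boxes.shape, anchors.get_anchors_per_location()  # (torch.Size([49104, 4]), 9)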
#@torch.jit.script
class AnchorLabeler(object):
"""Labeler for multiscale anchor boxes.
"""
def __init__(self, anchors, num_classes: int, match_threshold: float = 0.5):
"""Constructs anchor labeler to assign labels to anchors.
Args:
anchors: an instance of class Anchors.
num_classes: integer number representing number of classes in the dataset.
match_threshold: float number between 0 and 1 representing the threshold
to assign positive labels for anchors.
"""
similarity_calc = IouSimilarity()
matcher = ArgMaxMatcher(
match_threshold,
unmatched_threshold=match_threshold,
negatives_lower_than_unmatched=True,
force_match_for_each_row=True)
box_coder = FasterRcnnBoxCoder()
self.target_assigner = TargetAssigner(similarity_calc, matcher, box_coder)
self.anchors = anchors
self.match_threshold = match_threshold
self.num_classes = num_classes
self.feat_size = {}
for level in range(self.anchors.min_level, self.anchors.max_level + 1):
self.feat_size[level] = int(self.anchors.image_size / 2 ** level)
self.indices_cache = {}
def label_anchors(self, gt_boxes, gt_labels):
"""Labels anchors with ground truth inputs.
Args:
gt_boxes: A float tensor with shape [N, 4] representing groundtruth boxes.
For each row, it stores [y0, x0, y1, x1] for four corners of a box.
            gt_labels: An integer tensor with shape [N, 1] representing groundtruth classes.
Returns:
cls_targets_dict: ordered dictionary with keys [min_level, min_level+1, ..., max_level].
                The values are tensors with shape [height_l, width_l, num_anchors]. The height_l and width_l
represent the dimension of class logits at l-th level.
box_targets_dict: ordered dictionary with keys [min_level, min_level+1, ..., max_level].
                The values are tensors with shape [height_l, width_l, num_anchors * 4]. The height_l and
width_l represent the dimension of bounding box regression output at l-th level.
num_positives: scalar tensor storing number of positives in an image.
"""
cls_targets_out = dict()
box_targets_out = dict()
gt_box_list = BoxList(gt_boxes)
anchor_box_list = BoxList(self.anchors.boxes)
# cls_weights, box_weights are not used
cls_targets, _, box_targets, _, matches = self.target_assigner.assign(anchor_box_list, gt_box_list, gt_labels)
# class labels start from 1 and the background class = -1
cls_targets -= 1
cls_targets = cls_targets.long()
# Unpack labels.
"""Unpacks an array of cls/box into multiple scales."""
count = 0
for level in range(self.anchors.min_level, self.anchors.max_level + 1):
feat_size = self.feat_size[level]
steps = feat_size ** 2 * self.anchors.get_anchors_per_location()
indices = torch.arange(count, count + steps, device=cls_targets.device)
count += steps
cls_key = 'cls_targets_%d' % (level - self.anchors.min_level + 1)
bbox_key = 'box_targets_%d' % (level - self.anchors.min_level + 1)
cls_targets_out[cls_key] = torch.index_select(cls_targets, 0, indices).view([feat_size, feat_size, -1])
box_targets_out[bbox_key] = torch.index_select(box_targets, 0, indices).view([feat_size, feat_size, -1])
num_positives = (matches.match_results != -1).float().sum()
return cls_targets_out, box_targets_out, num_positives
def _build_indices(self, device):
anchors_per_loc = self.anchors.get_anchors_per_location()
indices_dict = {}
count = 0
for level in range(self.anchors.min_level, self.anchors.max_level + 1):
feat_size = self.feat_size[level]
steps = feat_size ** 2 * anchors_per_loc
indices = torch.arange(count, count + steps, device=device)
indices_dict[level] = indices
count += steps
return indices_dict
def _get_indices(self, device, level):
if device not in self.indices_cache:
self.indices_cache[device] = self._build_indices(device)
return self.indices_cache[device][level]
def batch_label_anchors(self, batch_size: int, gt_boxes, gt_classes):
num_levels = self.anchors.max_level - self.anchors.min_level + 1
cls_targets_out = [[] for _ in range(num_levels)]
box_targets_out = [[] for _ in range(num_levels)]
num_positives_out = []
# FIXME this may be a bottleneck, would be faster if batched, or should be done in loader/dataset?
anchor_box_list = BoxList(self.anchors.boxes)
for i in range(batch_size):
last_sample = i == batch_size - 1
# cls_weights, box_weights are not used
cls_targets, _, box_targets, _, matches = self.target_assigner.assign(
anchor_box_list, BoxList(gt_boxes[i]), gt_classes[i])
# class labels start from 1 and the background class = -1
cls_targets -= 1
cls_targets = cls_targets.long()
# Unpack labels.
"""Unpacks an array of cls/box into multiple scales."""
for level in range(self.anchors.min_level, self.anchors.max_level + 1):
level_index = level - self.anchors.min_level
feat_size = self.feat_size[level]
indices = self._get_indices(cls_targets.device, level)
cls_targets_out[level_index].append(
torch.index_select(cls_targets, 0, indices).view([feat_size, feat_size, -1]))
box_targets_out[level_index].append(
torch.index_select(box_targets, 0, indices).view([feat_size, feat_size, -1]))
if last_sample:
cls_targets_out[level_index] = torch.stack(cls_targets_out[level_index])
box_targets_out[level_index] = torch.stack(box_targets_out[level_index])
num_positives_out.append((matches.match_results != -1).float().sum())
if last_sample:
num_positives_out = torch.stack(num_positives_out)
return cls_targets_out, box_targets_out, num_positives_out
| DeepLearningExamples-master | PyTorch/Detection/Efficientdet/effdet/anchors.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.distributed as dist
import abc
import json
from .distributed import synchronize, is_main_process, all_gather_container
from pycocotools.cocoeval import COCOeval
from tabulate import tabulate
import numpy as np
import itertools
def create_small_table(small_dict):
"""
Create a small table using the keys of small_dict as headers. This is only
suitable for small dictionaries.
Args:
small_dict (dict): a result dictionary of only a few items.
Returns:
str: the table as a string.
"""
keys, values = tuple(zip(*small_dict.items()))
table = tabulate(
[values],
headers=keys,
tablefmt="pipe",
floatfmt=".3f",
stralign="center",
numalign="center",
)
return table
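# Hedged usage sketch (not part of the upstream file): renders a one-row markdown-style table
# of summary metrics; the metric values are illustrative assumptions.
def _create_small_table_example():
    return create_small_table({"AP": 0.341, "AP50": 0.521, "AP75": 0.365})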
class Evaluator:
def __init__(self):
pass
@abc.abstractmethod
def add_predictions(self, output, target):
pass
@abc.abstractmethod
def evaluate(self):
pass
class COCOEvaluator(Evaluator):
def __init__(self, coco_api, distributed=False, waymo=False):
super().__init__()
self.coco_api = coco_api
self.distributed = distributed
self.distributed_device = None
self.img_ids = []
self.predictions = []
self.waymo = waymo
def reset(self):
self.img_ids = []
self.predictions = []
def add_predictions(self, detections, target):
if self.distributed:
if self.distributed_device is None:
# cache for use later to broadcast end metric
self.distributed_device = detections.device
synchronize()
detections = all_gather_container(detections)
#target = all_gather_container(target)
sample_ids = all_gather_container(target['img_id'])
if not is_main_process():
return
else:
sample_ids = target['img_id']
detections = detections.cpu()
sample_ids = sample_ids.cpu()
for index, sample in enumerate(detections):
image_id = int(sample_ids[index])
for det in sample:
score = float(det[4])
if score < .001: # stop when below this threshold, scores in descending order
break
coco_det = dict(
image_id=image_id,
bbox=det[0:4].tolist(),
score=score,
category_id=int(det[5]))
self.img_ids.append(image_id)
self.predictions.append(coco_det)
def evaluate(self):
if not self.distributed or dist.get_rank() == 0:
assert len(self.predictions)
json.dump(self.predictions, open('./temp.json', 'w'), indent=4)
results = self.coco_api.loadRes('./temp.json')
coco_eval = COCOeval(self.coco_api, results, 'bbox')
coco_eval.params.imgIds = self.img_ids # score only ids we've used
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()
metric = coco_eval.stats[0] # mAP 0.5-0.95
if self.waymo:
results = self._derive_coco_results(coco_eval, iou_type="bbox", class_names=['Vehicle', 'Pedestrian', 'Cyclist'])
if self.distributed:
                dist.broadcast(torch.tensor(metric, dtype=torch.float32, device=self.distributed_device), 0)
else:
            metric = torch.tensor(0.0, dtype=torch.float32, device=self.distributed_device)
dist.broadcast(metric, 0)
metric = metric.item()
self.reset()
return metric
def save_predictions(self, file_path):
if not self.distributed or dist.get_rank() == 0:
assert len(self.predictions)
json.dump(self.predictions, open(file_path, 'w'), indent=4)
def _derive_coco_results(self, coco_eval, iou_type, class_names=None):
"""
Derive the desired score numbers from summarized COCOeval.
Args:
coco_eval (None or COCOEval): None represents no predictions from model.
iou_type (str):
class_names (None or list[str]): if provided, will use it to predict
per-category AP.
Returns:
a dict of {metric name: score}
"""
metrics = {
"bbox": ["AP", "AP50", "AP75", "APs", "APm", "APl"],
"segm": ["AP", "AP50", "AP75", "APs", "APm", "APl"],
"keypoints": ["AP", "AP50", "AP75", "APm", "APl"],
}[iou_type]
if coco_eval is None:
print("No predictions from the model!")
return {metric: float("nan") for metric in metrics}
# the standard metrics
results = {
metric: float(coco_eval.stats[idx] * 100 if coco_eval.stats[idx] >= 0 else "nan")
for idx, metric in enumerate(metrics)
}
print(
"Evaluation results for {}: \n".format(iou_type) + create_small_table(results)
)
if not np.isfinite(sum(results.values())):
print("Note that some metrics cannot be computed.")
if class_names is None or len(class_names) <= 1:
return results
# Compute per-category AP
# from https://github.com/facebookresearch/Detectron/blob/a6a835f5b8208c45d0dce217ce9bbda915f44df7/detectron/datasets/json_dataset_evaluator.py#L222-L252 # noqa
precisions = coco_eval.eval["precision"]
# precision has dims (iou, recall, cls, area range, max dets)
assert len(class_names) == precisions.shape[2]
results_per_category = []
for idx, name in enumerate(class_names):
# area range index 0: all area ranges
# max dets index -1: typically 100 per image
precision = precisions[:, :, idx, 0, -1]
precision = precision[precision > -1]
ap = np.mean(precision) if precision.size else float("nan")
results_per_category.append(("{}".format(name), float(ap * 100)))
# tabulate it
N_COLS = min(6, len(results_per_category) * 2)
results_flatten = list(itertools.chain(*results_per_category))
results_2d = itertools.zip_longest(*[results_flatten[i::N_COLS] for i in range(N_COLS)])
table = tabulate(
results_2d,
tablefmt="pipe",
floatfmt=".3f",
headers=["category", "AP"] * (N_COLS // 2),
numalign="left",
)
print("Per-category {} AP: \n".format(iou_type) + table)
results.update({"AP-" + name: ap for name, ap in results_per_category})
# get index for threshold closest to coco api iouThrs
def _get_thr_ind(coco_eval, thr):
ind = np.where((coco_eval.params.iouThrs > thr - 1e-5) &
(coco_eval.params.iouThrs < thr + 1e-5))[0][0]
iou_thr = coco_eval.params.iouThrs[ind]
assert np.isclose(iou_thr, thr)
return ind
# Per category waymo eval
waymo_results_per_category = []
# For waymo evaluation, we find AP at specific IoUs for each object
# Vehicle @ IoU 0.7, Pedestrian/Cyclist @ IoU 0.5
# IoU thresholds defined in coco api:
# iouThrs = np.array([0.5 , 0.55, 0.6 , 0.65, 0.7 , 0.75, 0.8 , 0.85, 0.9 , 0.95])
thresholds = [.7, .5, .5]
threshold_ids = [_get_thr_ind(coco_eval, thr) for thr in thresholds]
mean_precision = np.array([])
for idx, name in enumerate(class_names):
# get precision at specific iouThr
precision = precisions[threshold_ids[idx], :, idx, 0, -1]
# precision for a specific category and specific iou threshold
precision = precision[precision > -1]
mean_precision = np.append(mean_precision, precision)
ap = np.mean(precision) if precision.size else float("nan")
waymo_results_per_category.append(("{}".format(name), float(ap * 100)))
# compute mAP (Waymo evaluation format
# AP (all categories)
# L2 (easy + hard detections)
# ALL_NS (all categories except stop signs))
ap = np.mean(mean_precision) if mean_precision.size else float("nan")
waymo_results_per_category = [("L2_ALL_NS", float(ap * 100))] + waymo_results_per_category
# tabulate waymo evaluation results
results_flatten = list(itertools.chain(*waymo_results_per_category))
results_2d = itertools.zip_longest(*[results_flatten[i::len(results_flatten)] for i in range(len(results_flatten))])
headers = [("category", "mAP")] + \
[("category", "AP @ IoU {}".format(coco_eval.params.iouThrs[threshold_ids[i]]))
for i in range(len(threshold_ids))]
table = tabulate(
results_2d,
tablefmt="pipe",
floatfmt=".3f",
headers=list(itertools.chain(*headers)),
numalign="left",
)
print("Waymo Evaluation: {} AP: \n".format(iou_type) + table)
results.update({"WaymoAP" + name: ap for name, ap in waymo_results_per_category})
return results
class FastMapEvalluator(Evaluator):
def __init__(self, distributed=False):
super().__init__()
self.distributed = distributed
self.predictions = []
def add_predictions(self, output, target):
pass
def evaluate(self):
pass | DeepLearningExamples-master | PyTorch/Detection/Efficientdet/effdet/evaluator.py |
""" Conv2d w/ Same Padding
Hacked together by / Copyright 2020 Ross Wightman
"""
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2019-2022 Ross Wightman
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import Tuple, Optional
from .padding import pad_same, get_padding_value
def conv2d_same(
x, weight: torch.Tensor, bias: Optional[torch.Tensor] = None, stride: Tuple[int, int] = (1, 1),
padding: Tuple[int, int] = (0, 0), dilation: Tuple[int, int] = (1, 1), groups: int = 1):
x = pad_same(x, weight.shape[-2:], stride, dilation)
return F.conv2d(x, weight, bias, stride, (0, 0), dilation, groups)
class Conv2dSame(nn.Conv2d):
""" Tensorflow like 'SAME' convolution wrapper for 2D convolutions
"""
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, bias=True):
super(Conv2dSame, self).__init__(
in_channels, out_channels, kernel_size, stride, 0, dilation, groups, bias)
def forward(self, x):
return conv2d_same(x, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups)
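# Hedged example (not part of the upstream file): with stride 2 a 'SAME'-padded conv keeps
# ceil(H / stride) spatial size regardless of kernel size, unlike a fixed-padding nn.Conv2d.
def _conv2d_same_example():
    conv = Conv2dSame(3, 8, kernel_size=3, stride=2, bias=False)
    x = torch.randn(1, 3, 15, 15)
    return conv(x).shape  # torch.Size([1, 8, 8, 8])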
def create_conv2d_pad(in_chs, out_chs, kernel_size, **kwargs):
padding = kwargs.pop('padding', '')
kwargs.setdefault('bias', False)
padding, is_dynamic = get_padding_value(padding, kernel_size, **kwargs)
if is_dynamic:
return Conv2dSame(in_chs, out_chs, kernel_size, **kwargs)
else:
return nn.Conv2d(in_chs, out_chs, kernel_size, padding=padding, **kwargs) | DeepLearningExamples-master | PyTorch/Detection/Efficientdet/effdet/layers/conv2d_same.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2019-2022 Ross Wightman
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Optional
__all__ = [
'is_exportable', 'is_scriptable', 'is_no_jit',
'set_exportable', 'set_scriptable', 'set_no_jit', 'set_layer_config'
]
# Set to True if prefer to have layers with no jit optimization (includes activations)
_NO_JIT = False
# Set to True if prefer to have activation layers with no jit optimization
# NOTE: not currently used, since the only layers that obey the jit flags so far are activations,
# so there is no difference between no_jit and no_activation_jit. This will change as more layers are updated and/or added.
_NO_ACTIVATION_JIT = False
# Set to True if exporting a model with Same padding via ONNX
_EXPORTABLE = False
# Set to True if wanting to use torch.jit.script on a model
_SCRIPTABLE = False
def is_no_jit():
return _NO_JIT
class set_no_jit:
def __init__(self, mode: bool) -> None:
global _NO_JIT
self.prev = _NO_JIT
_NO_JIT = mode
def __enter__(self) -> None:
pass
def __exit__(self, *args: Any) -> bool:
global _NO_JIT
_NO_JIT = self.prev
return False
def is_exportable():
return _EXPORTABLE
class set_exportable:
def __init__(self, mode: bool) -> None:
global _EXPORTABLE
self.prev = _EXPORTABLE
_EXPORTABLE = mode
def __enter__(self) -> None:
pass
def __exit__(self, *args: Any) -> bool:
global _EXPORTABLE
_EXPORTABLE = self.prev
return False
def is_scriptable():
return _SCRIPTABLE
class set_scriptable:
def __init__(self, mode: bool) -> None:
global _SCRIPTABLE
self.prev = _SCRIPTABLE
_SCRIPTABLE = mode
def __enter__(self) -> None:
pass
def __exit__(self, *args: Any) -> bool:
global _SCRIPTABLE
_SCRIPTABLE = self.prev
return False
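# Hedged usage sketch (not part of the upstream file): each flag class above doubles as a
# context manager, temporarily flipping the global flag and restoring the previous value on exit.
def _set_exportable_example():
    with set_exportable(True):
        flag_inside = is_exportable()    # True: export-friendly layer variants are selected
    return flag_inside, is_exportable()  # previous value is restored after the block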
class set_layer_config:
""" Layer config context manager that allows setting all layer config flags at once.
If a flag arg is None, it will not change the current value.
"""
def __init__(
self,
scriptable: Optional[bool] = None,
exportable: Optional[bool] = None,
no_jit: Optional[bool] = None,
no_activation_jit: Optional[bool] = None):
global _SCRIPTABLE
global _EXPORTABLE
global _NO_JIT
global _NO_ACTIVATION_JIT
self.prev = _SCRIPTABLE, _EXPORTABLE, _NO_JIT, _NO_ACTIVATION_JIT
if scriptable is not None:
_SCRIPTABLE = scriptable
if exportable is not None:
_EXPORTABLE = exportable
if no_jit is not None:
_NO_JIT = no_jit
if no_activation_jit is not None:
_NO_ACTIVATION_JIT = no_activation_jit
def __enter__(self) -> None:
pass
def __exit__(self, *args: Any) -> bool:
global _SCRIPTABLE
global _EXPORTABLE
global _NO_JIT
global _NO_ACTIVATION_JIT
_SCRIPTABLE, _EXPORTABLE, _NO_JIT, _NO_ACTIVATION_JIT = self.prev
return False | DeepLearningExamples-master | PyTorch/Detection/Efficientdet/effdet/layers/config.py |
""" PyTorch Mixed Convolution
Paper: MixConv: Mixed Depthwise Convolutional Kernels (https://arxiv.org/abs/1907.09595)
Hacked together by / Copyright 2020 Ross Wightman
"""
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2019-2022 Ross Wightman
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from torch import nn as nn
from .conv2d_same import create_conv2d_pad
def _split_channels(num_chan, num_groups):
split = [num_chan // num_groups for _ in range(num_groups)]
split[0] += num_chan - sum(split)
return split
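# Hedged example (not part of the upstream file): channels that do not divide evenly across
# the kernel groups are assigned to the first split.
def _split_channels_example():
    return _split_channels(7, 3)  # [3, 2, 2]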
class MixedConv2d(nn.ModuleDict):
""" Mixed Grouped Convolution
Based on MDConv and GroupedConv in MixNet impl:
https://github.com/tensorflow/tpu/blob/master/models/official/mnasnet/mixnet/custom_layers.py
"""
def __init__(self, in_channels, out_channels, kernel_size=3,
stride=1, padding='', dilation=1, depthwise=False, **kwargs):
super(MixedConv2d, self).__init__()
kernel_size = kernel_size if isinstance(kernel_size, list) else [kernel_size]
num_groups = len(kernel_size)
in_splits = _split_channels(in_channels, num_groups)
out_splits = _split_channels(out_channels, num_groups)
self.in_channels = sum(in_splits)
self.out_channels = sum(out_splits)
for idx, (k, in_ch, out_ch) in enumerate(zip(kernel_size, in_splits, out_splits)):
conv_groups = out_ch if depthwise else 1
# use add_module to keep key space clean
self.add_module(
str(idx),
create_conv2d_pad(
in_ch, out_ch, k, stride=stride,
padding=padding, dilation=dilation, groups=conv_groups, **kwargs)
)
self.splits = in_splits
def forward(self, x):
x_split = torch.split(x, self.splits, 1)
x_out = [c(x_split[i]) for i, c in enumerate(self.values())]
x = torch.cat(x_out, 1)
return x | DeepLearningExamples-master | PyTorch/Detection/Efficientdet/effdet/layers/mixed_conv2d.py |
""" Activation Factory
Hacked together by / Copyright 2020 Ross Wightman
"""
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2019-2022 Ross Wightman
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .activations import *
from .activations_jit import *
from .activations_me import *
from .config import is_exportable, is_scriptable, is_no_jit
_ACT_FN_DEFAULT = dict(
swish=swish,
mish=mish,
relu=F.relu,
relu6=F.relu6,
leaky_relu=F.leaky_relu,
elu=F.elu,
prelu=F.prelu,
celu=F.celu,
selu=F.selu,
gelu=F.gelu,
sigmoid=sigmoid,
tanh=tanh,
hard_sigmoid=hard_sigmoid,
hard_swish=hard_swish,
hard_mish=hard_mish,
)
_ACT_FN_JIT = dict(
swish=swish_jit,
mish=mish_jit,
hard_sigmoid=hard_sigmoid_jit,
hard_swish=hard_swish_jit,
hard_mish=hard_mish_jit
)
_ACT_FN_ME = dict(
swish=swish_me,
mish=mish_me,
hard_sigmoid=hard_sigmoid_me,
hard_swish=hard_swish_me,
hard_mish=hard_mish_me,
)
_ACT_LAYER_DEFAULT = dict(
swish=Swish,
mish=Mish,
relu=nn.ReLU,
relu6=nn.ReLU6,
elu=nn.ELU,
prelu=nn.PReLU,
celu=nn.CELU,
selu=nn.SELU,
gelu=nn.GELU,
sigmoid=Sigmoid,
tanh=Tanh,
hard_sigmoid=HardSigmoid,
hard_swish=HardSwish,
hard_mish=HardMish,
)
_ACT_LAYER_JIT = dict(
swish=SwishJit,
mish=MishJit,
hard_sigmoid=HardSigmoidJit,
hard_swish=HardSwishJit,
hard_mish=HardMishJit
)
_ACT_LAYER_ME = dict(
swish=SwishMe,
mish=MishMe,
hard_sigmoid=HardSigmoidMe,
hard_swish=HardSwishMe,
hard_mish=HardMishMe,
)
def get_act_fn(name='relu'):
""" Activation Function Factory
Fetching activation fns by name with this function allows export or torch script friendly
functions to be returned dynamically based on current config.
"""
if not name:
return None
if not (is_no_jit() or is_exportable() or is_scriptable()):
        # If not exporting or scripting the model, first look for a memory-efficient version with
        # custom autograd, then fall back to the jit-scripted and default implementations below
if name in _ACT_FN_ME:
return _ACT_FN_ME[name]
if not is_no_jit():
if name in _ACT_FN_JIT:
return _ACT_FN_JIT[name]
return _ACT_FN_DEFAULT[name]
def get_act_layer(name='relu'):
""" Activation Layer Factory
Fetching activation layers by name with this function allows export or torch script friendly
functions to be returned dynamically based on current config.
"""
if not name:
return None
if not (is_no_jit() or is_exportable() or is_scriptable()):
if name in _ACT_LAYER_ME:
return _ACT_LAYER_ME[name]
if not is_no_jit():
if name in _ACT_LAYER_JIT:
return _ACT_LAYER_JIT[name]
return _ACT_LAYER_DEFAULT[name]
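# Hedged usage sketch (not part of the upstream file): the factories resolve to the
# memory-efficient, jit, or plain implementation depending on the flags in .config.
def _get_act_example():
    swish_fn = get_act_fn('swish')            # function variant, e.g. swish_me when no flags are set
    act_layer_cls = get_act_layer('hard_swish')
    return swish_fn, act_layer_cls(inplace=True)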
def create_act_layer(name, inplace=False, **kwargs):
act_layer = get_act_layer(name)
if act_layer is not None:
return act_layer(inplace=inplace, **kwargs)
else:
return None | DeepLearningExamples-master | PyTorch/Detection/Efficientdet/effdet/layers/create_act.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2019-2022 Ross Wightman
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from torch import nn as nn
from torch.nn import functional as F
@torch.jit.script
def swish_jit_fwd(x):
return x.mul(torch.sigmoid(x))
@torch.jit.script
def swish_jit_bwd(x, grad_output):
x_sigmoid = torch.sigmoid(x)
return grad_output * (x_sigmoid * (1 + x * (1 - x_sigmoid)))
class SwishJitAutoFn(torch.autograd.Function):
""" torch.jit.script optimised Swish w/ memory-efficient checkpoint
    Inspired by a conversation between Jeremy Howard & Adam Paszke
https://twitter.com/jeremyphoward/status/1188251041835315200
"""
@staticmethod
def forward(ctx, x):
ctx.save_for_backward(x)
return swish_jit_fwd(x)
@staticmethod
def backward(ctx, grad_output):
x = ctx.saved_tensors[0]
return swish_jit_bwd(x, grad_output)
def swish_me(x, inplace=False):
return SwishJitAutoFn.apply(x)
class SwishMe(nn.Module):
def __init__(self, inplace: bool = False):
super(SwishMe, self).__init__()
def forward(self, x):
return SwishJitAutoFn.apply(x)
@torch.jit.script
def mish_jit_fwd(x):
return x.mul(torch.tanh(F.softplus(x)))
@torch.jit.script
def mish_jit_bwd(x, grad_output):
x_sigmoid = torch.sigmoid(x)
x_tanh_sp = F.softplus(x).tanh()
return grad_output.mul(x_tanh_sp + x * x_sigmoid * (1 - x_tanh_sp * x_tanh_sp))
class MishJitAutoFn(torch.autograd.Function):
""" Mish: A Self Regularized Non-Monotonic Neural Activation Function - https://arxiv.org/abs/1908.08681
A memory efficient, jit scripted variant of Mish
"""
@staticmethod
def forward(ctx, x):
ctx.save_for_backward(x)
return mish_jit_fwd(x)
@staticmethod
def backward(ctx, grad_output):
x = ctx.saved_tensors[0]
return mish_jit_bwd(x, grad_output)
def mish_me(x, inplace=False):
return MishJitAutoFn.apply(x)
class MishMe(nn.Module):
def __init__(self, inplace: bool = False):
super(MishMe, self).__init__()
def forward(self, x):
return MishJitAutoFn.apply(x)
@torch.jit.script
def hard_sigmoid_jit_fwd(x, inplace: bool = False):
return (x + 3).clamp(min=0, max=6).div(6.)
@torch.jit.script
def hard_sigmoid_jit_bwd(x, grad_output):
m = torch.ones_like(x) * ((x >= -3.) & (x <= 3.)) / 6.
return grad_output * m
class HardSigmoidJitAutoFn(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
ctx.save_for_backward(x)
return hard_sigmoid_jit_fwd(x)
@staticmethod
def backward(ctx, grad_output):
x = ctx.saved_tensors[0]
return hard_sigmoid_jit_bwd(x, grad_output)
def hard_sigmoid_me(x, inplace: bool = False):
return HardSigmoidJitAutoFn.apply(x)
class HardSigmoidMe(nn.Module):
def __init__(self, inplace: bool = False):
super(HardSigmoidMe, self).__init__()
def forward(self, x):
return HardSigmoidJitAutoFn.apply(x)
@torch.jit.script
def hard_swish_jit_fwd(x):
return x * (x + 3).clamp(min=0, max=6).div(6.)
@torch.jit.script
def hard_swish_jit_bwd(x, grad_output):
m = torch.ones_like(x) * (x >= 3.)
m = torch.where((x >= -3.) & (x <= 3.), x / 3. + .5, m)
return grad_output * m
class HardSwishJitAutoFn(torch.autograd.Function):
"""A memory efficient, jit-scripted HardSwish activation"""
@staticmethod
def forward(ctx, x):
ctx.save_for_backward(x)
return hard_swish_jit_fwd(x)
@staticmethod
def backward(ctx, grad_output):
x = ctx.saved_tensors[0]
return hard_swish_jit_bwd(x, grad_output)
def hard_swish_me(x, inplace=False):
return HardSwishJitAutoFn.apply(x)
class HardSwishMe(nn.Module):
def __init__(self, inplace: bool = False):
super(HardSwishMe, self).__init__()
def forward(self, x):
return HardSwishJitAutoFn.apply(x)
@torch.jit.script
def hard_mish_jit_fwd(x):
return 0.5 * x * (x + 2).clamp(min=0, max=2)
@torch.jit.script
def hard_mish_jit_bwd(x, grad_output):
m = torch.ones_like(x) * (x >= -2.)
m = torch.where((x >= -2.) & (x <= 0.), x + 1., m)
return grad_output * m
class HardMishJitAutoFn(torch.autograd.Function):
""" A memory efficient, jit scripted variant of Hard Mish
Experimental, based on notes by Mish author Diganta Misra at
https://github.com/digantamisra98/H-Mish/blob/0da20d4bc58e696b6803f2523c58d3c8a82782d0/README.md
"""
@staticmethod
def forward(ctx, x):
ctx.save_for_backward(x)
return hard_mish_jit_fwd(x)
@staticmethod
def backward(ctx, grad_output):
x = ctx.saved_tensors[0]
return hard_mish_jit_bwd(x, grad_output)
def hard_mish_me(x, inplace: bool = False):
return HardMishJitAutoFn.apply(x)
class HardMishMe(nn.Module):
def __init__(self, inplace: bool = False):
super(HardMishMe, self).__init__()
def forward(self, x):
return HardMishJitAutoFn.apply(x) | DeepLearningExamples-master | PyTorch/Detection/Efficientdet/effdet/layers/activations_me.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2019-2022 Ross Wightman
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from torch import nn as nn
from torch.nn import functional as F
def swish(x, inplace: bool = False):
"""Swish - Described in: https://arxiv.org/abs/1710.05941
"""
return x.mul_(x.sigmoid()) if inplace else x.mul(x.sigmoid())
class Swish(nn.Module):
def __init__(self, inplace: bool = False):
super(Swish, self).__init__()
self.inplace = inplace
def forward(self, x):
return swish(x, self.inplace)
def mish(x, inplace: bool = False):
"""Mish: A Self Regularized Non-Monotonic Neural Activation Function - https://arxiv.org/abs/1908.08681
NOTE: I don't have a working inplace variant
"""
return x.mul(F.softplus(x).tanh())
class Mish(nn.Module):
"""Mish: A Self Regularized Non-Monotonic Neural Activation Function - https://arxiv.org/abs/1908.08681
"""
def __init__(self, inplace: bool = False):
super(Mish, self).__init__()
def forward(self, x):
return mish(x)
def sigmoid(x, inplace: bool = False):
return x.sigmoid_() if inplace else x.sigmoid()
# PyTorch has this, but not with a consistent inplace argument interface
class Sigmoid(nn.Module):
def __init__(self, inplace: bool = False):
super(Sigmoid, self).__init__()
self.inplace = inplace
def forward(self, x):
return x.sigmoid_() if self.inplace else x.sigmoid()
def tanh(x, inplace: bool = False):
return x.tanh_() if inplace else x.tanh()
# PyTorch has this, but not with a consistent inplace argument interface
class Tanh(nn.Module):
def __init__(self, inplace: bool = False):
super(Tanh, self).__init__()
self.inplace = inplace
def forward(self, x):
return x.tanh_() if self.inplace else x.tanh()
def hard_swish(x, inplace: bool = False):
inner = F.relu6(x + 3.).div_(6.)
return x.mul_(inner) if inplace else x.mul(inner)
class HardSwish(nn.Module):
def __init__(self, inplace: bool = False):
super(HardSwish, self).__init__()
self.inplace = inplace
def forward(self, x):
return hard_swish(x, self.inplace)
def hard_sigmoid(x, inplace: bool = False):
if inplace:
return x.add_(3.).clamp_(0., 6.).div_(6.)
else:
return F.relu6(x + 3.) / 6.
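# Illustrative sketch (not part of the original module): hard_sigmoid is a piecewise-linear
# approximation of the sigmoid that saturates at 0 for x <= -3 and at 1 for x >= 3.
# The values below are easy to verify by hand: (-3 + 3) / 6 = 0, (0 + 3) / 6 = 0.5, (3 + 3) / 6 = 1.
def _hard_sigmoid_example():
    x = torch.tensor([-4., -3., 0., 3., 4.])
    y = hard_sigmoid(x)
    assert torch.allclose(y, torch.tensor([0., 0., 0.5, 1., 1.]))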
class HardSigmoid(nn.Module):
def __init__(self, inplace: bool = False):
super(HardSigmoid, self).__init__()
self.inplace = inplace
def forward(self, x):
return hard_sigmoid(x, self.inplace)
def hard_mish(x, inplace: bool = False):
""" Hard Mish
Experimental, based on notes by Mish author Diganta Misra at
https://github.com/digantamisra98/H-Mish/blob/0da20d4bc58e696b6803f2523c58d3c8a82782d0/README.md
"""
if inplace:
return x.mul_(0.5 * (x + 2).clamp(min=0, max=2))
else:
return 0.5 * x * (x + 2).clamp(min=0, max=2)
class HardMish(nn.Module):
def __init__(self, inplace: bool = False):
super(HardMish, self).__init__()
self.inplace = inplace
def forward(self, x):
return hard_mish(x, self.inplace) | DeepLearningExamples-master | PyTorch/Detection/Efficientdet/effdet/layers/activations.py |
""" Create Conv2d Factory Method
Hacked together by / Copyright 2020 Ross Wightman
"""
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2019-2022 Ross Wightman
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .mixed_conv2d import MixedConv2d
from .cond_conv2d import CondConv2d
from .conv2d_same import create_conv2d_pad
def create_conv2d(in_channels, out_channels, kernel_size, **kwargs):
""" Select a 2d convolution implementation based on arguments
Creates and returns one of torch.nn.Conv2d, Conv2dSame, MixedConv2d, or CondConv2d.
Used extensively by EfficientNet, MobileNetv3 and related networks.
"""
if isinstance(kernel_size, list):
assert 'num_experts' not in kwargs # MixNet + CondConv combo not supported currently
assert 'groups' not in kwargs # MixedConv groups are defined by kernel list
# We're going to use only lists for defining the MixedConv2d kernel groups,
# ints, tuples, other iterables will continue to pass to normal conv and specify h, w.
m = MixedConv2d(in_channels, out_channels, kernel_size, **kwargs)
else:
depthwise = kwargs.pop('depthwise', False)
groups = out_channels if depthwise else kwargs.pop('groups', 1)
if 'num_experts' in kwargs and kwargs['num_experts'] > 0:
m = CondConv2d(in_channels, out_channels, kernel_size, groups=groups, **kwargs)
else:
m = create_conv2d_pad(in_channels, out_channels, kernel_size, groups=groups, **kwargs)
return m | DeepLearningExamples-master | PyTorch/Detection/Efficientdet/effdet/layers/create_conv2d.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .activations import *
from .cond_conv2d import CondConv2d, get_condconv_initializer
from .config import is_exportable, is_scriptable, is_no_jit, set_exportable, set_scriptable, set_no_jit,\
set_layer_config
from .conv2d_same import Conv2dSame
from .create_act import create_act_layer, get_act_layer, get_act_fn
from .create_conv2d import create_conv2d
from .drop import DropBlock2d, DropPath, drop_block_2d, drop_path
from .mixed_conv2d import MixedConv2d
from .padding import get_padding
from .pool2d_same import AvgPool2dSame, create_pool2d
from .nms_layer import batched_soft_nms, batched_nms | DeepLearningExamples-master | PyTorch/Detection/Efficientdet/effdet/layers/__init__.py |
""" AvgPool2d w/ Same Padding
Hacked together by / Copyright 2020 Ross Wightman
"""
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2019-2022 Ross Wightman
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import List, Tuple, Optional
from .helpers import tup_pair
from .padding import pad_same, get_padding_value
def avg_pool2d_same(x, kernel_size: List[int], stride: List[int], padding: List[int] = (0, 0),
ceil_mode: bool = False, count_include_pad: bool = True):
# FIXME how to deal with count_include_pad vs not for external padding?
x = pad_same(x, kernel_size, stride)
return F.avg_pool2d(x, kernel_size, stride, (0, 0), ceil_mode, count_include_pad)
class AvgPool2dSame(nn.AvgPool2d):
""" Tensorflow like 'SAME' wrapper for 2D average pooling
"""
def __init__(self, kernel_size: int, stride=None, padding=0, ceil_mode=False, count_include_pad=True):
kernel_size = tup_pair(kernel_size)
stride = tup_pair(stride)
super(AvgPool2dSame, self).__init__(kernel_size, stride, (0, 0), ceil_mode, count_include_pad)
def forward(self, x):
return avg_pool2d_same(
x, self.kernel_size, self.stride, self.padding, self.ceil_mode, self.count_include_pad)
def max_pool2d_same(
x, kernel_size: List[int], stride: List[int], padding: List[int] = (0, 0),
dilation: List[int] = (1, 1), ceil_mode: bool = False):
x = pad_same(x, kernel_size, stride, value=-float('inf'))
return F.max_pool2d(x, kernel_size, stride, (0, 0), dilation, ceil_mode)
class MaxPool2dSame(nn.MaxPool2d):
""" Tensorflow like 'SAME' wrapper for 2D max pooling
"""
    def __init__(self, kernel_size: int, stride=None, padding=0, dilation=1, ceil_mode=False, count_include_pad=True):
        kernel_size = tup_pair(kernel_size)
        stride = tup_pair(stride)
        dilation = tup_pair(dilation)
        # NOTE: nn.MaxPool2d takes no count_include_pad argument; passing ceil_mode positionally after
        # dilation would land in the return_indices slot, so it is forwarded by keyword instead.
        super(MaxPool2dSame, self).__init__(kernel_size, stride, (0, 0), dilation, ceil_mode=ceil_mode)
def forward(self, x):
return max_pool2d_same(x, self.kernel_size, self.stride, self.padding, self.dilation, self.ceil_mode)
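# Illustrative sketch (not part of the original module): with 'SAME'-style padding the output
# spatial size is ceil(input_size / stride), regardless of whether the input divides evenly.
# Here a 7x7 input with kernel 3 and stride 2 yields a 4x4 output instead of the 3x3 an
# unpadded pool would produce.
def _same_pool_example():
    x = torch.randn(1, 8, 7, 7)
    pool = AvgPool2dSame(kernel_size=3, stride=2)
    out = pool(x)
    assert out.shape[-2:] == (4, 4)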
def create_pool2d(pool_type, kernel_size, stride=None, **kwargs):
stride = stride or kernel_size
padding = kwargs.pop('padding', '')
padding, is_dynamic = get_padding_value(padding, kernel_size, stride=stride, **kwargs)
if is_dynamic:
if pool_type == 'avg':
return AvgPool2dSame(kernel_size, stride=stride, **kwargs)
elif pool_type == 'max':
return MaxPool2dSame(kernel_size, stride=stride, **kwargs)
else:
assert False, f'Unsupported pool type {pool_type}'
else:
if pool_type == 'avg':
return nn.AvgPool2d(kernel_size, stride=stride, padding=padding, **kwargs)
elif pool_type == 'max':
return nn.MaxPool2d(kernel_size, stride=stride, padding=padding, **kwargs)
else:
assert False, f'Unsupported pool type {pool_type}' | DeepLearningExamples-master | PyTorch/Detection/Efficientdet/effdet/layers/pool2d_same.py |
""" PyTorch Soft-NMS
This code was adapted from a PR for detectron2 submitted by https://github.com/alekseynp
https://github.com/facebookresearch/detectron2/pull/1183/files
Detectron2 is licensed Apache 2.0, Copyright Facebook Inc.
"""
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from typing import List
import time
import effdet_ext._C as _C
def pairwise_iou(boxes1, boxes2) -> torch.Tensor:
"""
Given two lists of boxes of size N and M,
compute the IoU (intersection over union)
between __all__ N x M pairs of boxes.
    Each box is given as (xmin, ymin, xmax, ymax, area), i.e. with its area precomputed in the last column.
    Args:
        boxes1, boxes2 (Tensor[N, 5], Tensor[M, 5]): the two sets of boxes, containing N and M boxes respectively.
Returns:
Tensor: IoU, sized [N,M].
"""
area1 = boxes1[:, 4] # [N,]
area2 = boxes2[:, 4] # [M,]
width_height = torch.min(boxes1[:, None, 2:4], boxes2[:, 2:4]) - torch.max(
boxes1[:, None, :2], boxes2[:, :2]
) # [N,M,2]
width_height.clamp_(min=0) # [N,M,2]
inter = width_height.prod(dim=2) # [N,M]
# handle empty boxes
iou = torch.where(
inter > 0,
inter / (area1[:, None] + area2 - inter),
torch.zeros(1, dtype=inter.dtype, device=inter.device),
)
return iou
def soft_nms(
boxes,
scores,
method_gaussian: bool = True,
sigma: float = 0.5,
iou_threshold: float = .5,
score_threshold: float = 0.005
):
"""
Soft non-max suppression algorithm.
    Implementation of [Soft-NMS -- Improving Object Detection With One Line of Code]
(https://arxiv.org/abs/1704.04503)
Args:
        boxes (Tensor[N, 4]):
            boxes where NMS will be performed, in (x1, y1, x2, y2) format
        scores (Tensor[N]):
            scores for each one of the boxes
        method_gaussian (bool): use Gaussian decay if True, otherwise linear decay
sigma (float):
parameter for Gaussian penalty function
iou_threshold (float):
iou threshold for applying linear decay. Nt from the paper
re-used as threshold for standard "hard" nms
score_threshold (float):
boxes with scores below this threshold are pruned at each iteration.
Dramatically reduces computation time. Authors use values in [10e-4, 10e-2]
Returns:
tuple(Tensor, Tensor):
[0]: int64 tensor with the indices of the elements that have been kept
by Soft NMS, sorted in decreasing order of scores
[1]: float tensor with the re-scored scores of the elements that were kept
"""
# st = time.perf_counter()
device = boxes.device
boxes_remain = boxes.clone()
scores_remain = scores.clone()
num_elem = scores_remain.size()[0]
idxs = torch.arange(num_elem)
idxs_out = torch.zeros(num_elem, dtype=torch.int64, device=device)
scores_out = torch.zeros(num_elem, dtype=torch.float32, device=device)
area = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
    boxes_remain = torch.cat((boxes_remain, area.unsqueeze(1)), dim=1)  # [N, 5]: x1, y1, x2, y2, area
count: int = 0
# print("[SOFTMAX] before loop starts in softnms {}".format(time.perf_counter() - st))
while scores_remain.numel() > 0:
# st1 = time.perf_counter()
top_idx = 0 # torch.argmax(scores_remain)
idxs_out[count] = idxs[top_idx]
scores_out[count] = scores_remain[top_idx]
count += 1
top_box = boxes_remain[top_idx]
ious = pairwise_iou(top_box.unsqueeze(0), boxes_remain)[0]
# st2 = time.perf_counter()
# print("[SOFTMAX] Before gaussian in softnms {}".format(st2 - st1))
if method_gaussian:
decay = torch.exp(-torch.pow(ious, 2) / sigma)
else:
decay = torch.ones_like(ious)
decay_mask = ious > iou_threshold
decay[decay_mask] = 1 - ious[decay_mask]
# st3 = time.perf_counter()
# print("[SOFTMAX] Gaussian in softnms {}".format(st3 - st2))
scores_remain *= decay
keep = scores_remain > score_threshold
keep[top_idx] = torch.tensor(False, device=device)
boxes_remain = boxes_remain[keep]
scores_remain = scores_remain[keep]
idxs = idxs[keep]
# st4 = time.perf_counter()
# print("[SOFTMAX] Remaining in softnms {}".format(st4 - st3))
# print("[SOFTMAX] Entire loop takes in softnms {}".format(st4 - st1))
# st5 = time.perf_counter()
# print("[SOFTMAX] Remaining in softnms {}".format(st5 - st))
return idxs_out[:count], scores_out[:count]
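# Illustrative sketch (not part of the original module): a minimal soft-NMS call on two heavily
# overlapping boxes and one disjoint box. Unlike hard NMS, the lower-scoring overlapping box is
# not discarded outright; its score is decayed (here with the Gaussian method) and it is only
# dropped once the decayed score falls below score_threshold. Note that the loop above always
# takes index 0 as the current top box, so scores are expected to arrive sorted in descending order.
def _soft_nms_example():
    boxes = torch.tensor([
        [0., 0., 10., 10.],    # highest score
        [1., 1., 11., 11.],    # overlaps the first box heavily
        [50., 50., 60., 60.],  # disjoint
    ])
    scores = torch.tensor([0.9, 0.8, 0.7])
    keep, rescored = soft_nms(boxes, scores, method_gaussian=True, sigma=0.5, score_threshold=0.005)
    # keep holds the surviving box indices, rescored their decayed scores
    return keep, rescored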
def batched_nms(
boxes, scores, idxs,
iou_threshold: float = .5,):
if boxes.numel() == 0:
return (
torch.empty((0,), dtype=torch.int64, device=boxes.device),
torch.empty((0,), dtype=torch.float32, device=scores.device),
)
# strategy: in order to perform NMS independently per class.
# we add an offset to all the boxes. The offset is dependent
# only on the class idx, and is large enough so that boxes
# from different classes do not overlap
max_coordinate = boxes.max()
offsets = idxs.to(boxes) * (max_coordinate + 1)
boxes_for_nms = boxes + offsets[:, None]
return _C.nms(
boxes_for_nms, scores, iou_threshold
)
def batched_soft_nms(
boxes, scores, idxs,
method_gaussian: bool = True,
sigma: float = 0.5,
iou_threshold: float = .5,
score_threshold: float = 0.001):
"""
Performs soft non-maximum suppression in a batched fashion.
    Each index value corresponds to a category, and NMS
will not be applied between elements of different categories.
Args:
boxes (Tensor[N, 4]):
boxes where NMS will be performed. They
are expected to be in (x1, y1, x2, y2) format
scores (Tensor[N]):
scores for each one of the boxes
idxs (Tensor[N]):
indices of the categories for each one of the boxes.
        method_gaussian (bool):
            use Gaussian decay if True, otherwise linear decay (see the paper for details)
sigma (float):
parameter for Gaussian penalty function
iou_threshold (float):
iou threshold for applying linear decay. Nt from the paper
re-used as threshold for standard "hard" nms
score_threshold (float):
boxes with scores below this threshold are pruned at each iteration.
Dramatically reduces computation time. Authors use values in [10e-4, 10e-2]
Returns:
tuple(Tensor, Tensor):
[0]: int64 tensor with the indices of the elements that have been kept
by Soft NMS, sorted in decreasing order of scores
[1]: float tensor with the re-scored scores of the elements that were kept
"""
if boxes.numel() == 0:
return (
torch.empty((0,), dtype=torch.int64, device=boxes.device),
torch.empty((0,), dtype=torch.float32, device=scores.device),
)
# strategy: in order to perform NMS independently per class.
# we add an offset to all the boxes. The offset is dependent
# only on the class idx, and is large enough so that boxes
# from different classes do not overlap
max_coordinate = boxes.max()
offsets = idxs.to(boxes) * (max_coordinate + 1)
boxes_for_nms = boxes + offsets[:, None]
return soft_nms(
boxes_for_nms, scores, method_gaussian=method_gaussian, sigma=sigma,
iou_threshold=iou_threshold, score_threshold=score_threshold
)
| DeepLearningExamples-master | PyTorch/Detection/Efficientdet/effdet/layers/nms_layer.py |
""" Layer/Module Helpers
Hacked together by / Copyright 2020 Ross Wightman
"""
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2019-2022 Ross Wightman
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
TORCH_MAJOR = int(torch.__version__.split('.')[0])
TORCH_MINOR = int(torch.__version__.split('.')[1])
from itertools import repeat
if TORCH_MAJOR == 1 and TORCH_MINOR < 8:
from torch._six import container_abcs
else:
import collections.abc as container_abcs
# From PyTorch internals
def _ntuple(n):
def parse(x):
if isinstance(x, container_abcs.Iterable):
return x
return tuple(repeat(x, n))
return parse
tup_single = _ntuple(1)
tup_pair = _ntuple(2)
tup_triple = _ntuple(3)
tup_quadruple = _ntuple(4)
ntup = _ntuple | DeepLearningExamples-master | PyTorch/Detection/Efficientdet/effdet/layers/helpers.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2019-2022 Ross Wightman
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from torch import nn as nn
from torch.nn import functional as F
@torch.jit.script
def swish_jit(x, inplace: bool = False):
"""Swish - Described in: https://arxiv.org/abs/1710.05941
"""
return x.mul(x.sigmoid())
@torch.jit.script
def mish_jit(x, _inplace: bool = False):
"""Mish: A Self Regularized Non-Monotonic Neural Activation Function - https://arxiv.org/abs/1908.08681
"""
return x.mul(F.softplus(x).tanh())
class SwishJit(nn.Module):
def __init__(self, inplace: bool = False):
super(SwishJit, self).__init__()
def forward(self, x):
return swish_jit(x)
class MishJit(nn.Module):
def __init__(self, inplace: bool = False):
super(MishJit, self).__init__()
def forward(self, x):
return mish_jit(x)
@torch.jit.script
def hard_sigmoid_jit(x, inplace: bool = False):
# return F.relu6(x + 3.) / 6.
return (x + 3).clamp(min=0, max=6).div(6.) # clamp seems ever so slightly faster?
class HardSigmoidJit(nn.Module):
def __init__(self, inplace: bool = False):
super(HardSigmoidJit, self).__init__()
def forward(self, x):
return hard_sigmoid_jit(x)
@torch.jit.script
def hard_swish_jit(x, inplace: bool = False):
# return x * (F.relu6(x + 3.) / 6)
return x * (x + 3).clamp(min=0, max=6).div(6.) # clamp seems ever so slightly faster?
class HardSwishJit(nn.Module):
def __init__(self, inplace: bool = False):
super(HardSwishJit, self).__init__()
def forward(self, x):
return hard_swish_jit(x)
@torch.jit.script
def hard_mish_jit(x, inplace: bool = False):
""" Hard Mish
Experimental, based on notes by Mish author Diganta Misra at
https://github.com/digantamisra98/H-Mish/blob/0da20d4bc58e696b6803f2523c58d3c8a82782d0/README.md
"""
return 0.5 * x * (x + 2).clamp(min=0, max=2)
class HardMishJit(nn.Module):
def __init__(self, inplace: bool = False):
super(HardMishJit, self).__init__()
def forward(self, x):
return hard_mish_jit(x)
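# Illustrative sketch (not part of the original module): the jit-scripted activations are drop-in
# functional equivalents of their eager counterparts, e.g. swish_jit(x) == x * sigmoid(x).
def _swish_jit_example():
    x = torch.randn(16)
    assert torch.allclose(swish_jit(x), x * x.sigmoid(), atol=1e-6)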
| DeepLearningExamples-master | PyTorch/Detection/Efficientdet/effdet/layers/activations_jit.py |
""" Padding Helpers
Hacked together by / Copyright 2020 Ross Wightman
"""
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2019-2022 Ross Wightman
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from typing import List, Tuple
import torch.nn.functional as F
# Calculate symmetric padding for a convolution
def get_padding(kernel_size: int, stride: int = 1, dilation: int = 1, **_) -> int:
padding = ((stride - 1) + dilation * (kernel_size - 1)) // 2
return padding
# Calculate asymmetric TensorFlow-like 'SAME' padding for a convolution
def get_same_padding(x: int, k: int, s: int, d: int):
return max((math.ceil(x / s) - 1) * s + (k - 1) * d + 1 - x, 0)
# Can SAME padding for given args be done statically?
def is_static_pad(kernel_size: int, stride: int = 1, dilation: int = 1, **_):
return stride == 1 and (dilation * (kernel_size - 1)) % 2 == 0
# Dynamically pad input x with 'SAME' padding for conv with specified args
def pad_same(x, k: List[int], s: List[int], d: List[int] = (1, 1), value: float = 0):
ih, iw = x.size()[-2:]
pad_h, pad_w = get_same_padding(ih, k[0], s[0], d[0]), get_same_padding(iw, k[1], s[1], d[1])
if pad_h > 0 or pad_w > 0:
x = F.pad(x, [pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2], value=value)
return x
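# Illustrative sketch (not part of the original module): worked example of the 'SAME' padding
# arithmetic above. For a 224-pixel dimension with kernel 3, stride 2, dilation 1:
#   get_same_padding(224, 3, 2, 1) = max((ceil(224 / 2) - 1) * 2 + (3 - 1) * 1 + 1 - 224, 0) = 1
# i.e. one pixel of total padding, which pad_same splits as 0 on the left/top and 1 on the right/bottom.
# With stride 1 the same kernel needs symmetric padding, so is_static_pad() is True and
# get_padding(3, 1, 1) = 1 can be applied statically instead.
def _same_padding_example():
    assert get_same_padding(224, 3, 2, 1) == 1
    assert is_static_pad(3, stride=1, dilation=1)
    assert get_padding(3, stride=1, dilation=1) == 1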
def get_padding_value(padding, kernel_size, **kwargs) -> Tuple[Tuple, bool]:
dynamic = False
if isinstance(padding, str):
# for any string padding, the padding will be calculated for you, one of three ways
padding = padding.lower()
if padding == 'same':
# TF compatible 'SAME' padding, has a performance and GPU memory allocation impact
if is_static_pad(kernel_size, **kwargs):
# static case, no extra overhead
padding = get_padding(kernel_size, **kwargs)
else:
# dynamic 'SAME' padding, has runtime/GPU memory overhead
padding = 0
dynamic = True
elif padding == 'valid':
# 'VALID' padding, same as padding=0
padding = 0
else:
# Default to PyTorch style 'same'-ish symmetric padding
padding = get_padding(kernel_size, **kwargs)
return padding, dynamic | DeepLearningExamples-master | PyTorch/Detection/Efficientdet/effdet/layers/padding.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2019-2022 Ross Wightman
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from functools import partial
import numpy as np
import torch
from torch import nn as nn
from torch.nn import functional as F
from .helpers import tup_pair
from .conv2d_same import conv2d_same
from .padding import get_padding_value
def get_condconv_initializer(initializer, num_experts, expert_shape):
def condconv_initializer(weight):
"""CondConv initializer function."""
num_params = np.prod(expert_shape)
if (len(weight.shape) != 2 or weight.shape[0] != num_experts or
weight.shape[1] != num_params):
raise (ValueError(
'CondConv variables must have shape [num_experts, num_params]'))
for i in range(num_experts):
initializer(weight[i].view(expert_shape))
return condconv_initializer
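# Illustrative sketch (not part of the original module): CondConv2d (defined below in this module)
# mixes `num_experts` kernels per sample using a routing weight vector, then applies the result as
# a grouped convolution so every sample in the batch is convolved with its own mixed kernel in a
# single call. The routing weights here are arbitrary; in a real model they come from a small
# routing head (e.g. pooled features followed by a linear layer and a sigmoid).
def _cond_conv_example():
    conv = CondConv2d(in_channels=16, out_channels=32, kernel_size=3, padding=1, num_experts=4)
    x = torch.randn(8, 16, 24, 24)
    routing_weights = torch.rand(8, 4)  # one weight per expert, per sample
    out = conv(x, routing_weights)
    assert out.shape == (8, 32, 24, 24)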
class CondConv2d(nn.Module):
""" Conditionally Parameterized Convolution
Inspired by: https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/condconv/condconv_layers.py
Grouped convolution hackery for parallel execution of the per-sample kernel filters inspired by this discussion:
https://github.com/pytorch/pytorch/issues/17983
"""
__constants__ = ['in_channels', 'out_channels', 'dynamic_padding']
def __init__(self, in_channels, out_channels, kernel_size=3,
stride=1, padding='', dilation=1, groups=1, bias=False, num_experts=4):
super(CondConv2d, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = tup_pair(kernel_size)
self.stride = tup_pair(stride)
padding_val, is_padding_dynamic = get_padding_value(
padding, kernel_size, stride=stride, dilation=dilation)
self.dynamic_padding = is_padding_dynamic # if in forward to work with torchscript
self.padding = tup_pair(padding_val)
self.dilation = tup_pair(dilation)
self.groups = groups
self.num_experts = num_experts
self.weight_shape = (self.out_channels, self.in_channels // self.groups) + self.kernel_size
weight_num_param = 1
for wd in self.weight_shape:
weight_num_param *= wd
self.weight = torch.nn.Parameter(torch.Tensor(self.num_experts, weight_num_param))
if bias:
self.bias_shape = (self.out_channels,)
self.bias = torch.nn.Parameter(torch.Tensor(self.num_experts, self.out_channels))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
init_weight = get_condconv_initializer(
partial(nn.init.kaiming_uniform_, a=math.sqrt(5)), self.num_experts, self.weight_shape)
init_weight(self.weight)
if self.bias is not None:
fan_in = np.prod(self.weight_shape[1:])
bound = 1 / math.sqrt(fan_in)
init_bias = get_condconv_initializer(
partial(nn.init.uniform_, a=-bound, b=bound), self.num_experts, self.bias_shape)
init_bias(self.bias)
def forward(self, x, routing_weights):
B, C, H, W = x.shape
weight = torch.matmul(routing_weights, self.weight)
new_weight_shape = (B * self.out_channels, self.in_channels // self.groups) + self.kernel_size
weight = weight.view(new_weight_shape)
bias = None
if self.bias is not None:
bias = torch.matmul(routing_weights, self.bias)
bias = bias.view(B * self.out_channels)
# move batch elements with channels so each batch element can be efficiently convolved with separate kernel
x = x.view(1, B * C, H, W)
if self.dynamic_padding:
out = conv2d_same(
x, weight, bias, stride=self.stride, padding=self.padding,
dilation=self.dilation, groups=self.groups * B)
else:
out = F.conv2d(
x, weight, bias, stride=self.stride, padding=self.padding,
dilation=self.dilation, groups=self.groups * B)
out = out.permute([1, 0, 2, 3]).view(B, self.out_channels, out.shape[-2], out.shape[-1])
# Literal port (from TF definition)
# x = torch.split(x, 1, 0)
# weight = torch.split(weight, 1, 0)
# if self.bias is not None:
# bias = torch.matmul(routing_weights, self.bias)
# bias = torch.split(bias, 1, 0)
# else:
# bias = [None] * B
# out = []
# for xi, wi, bi in zip(x, weight, bias):
# wi = wi.view(*self.weight_shape)
# if bi is not None:
# bi = bi.view(*self.bias_shape)
# out.append(self.conv_fn(
# xi, wi, bi, stride=self.stride, padding=self.padding,
# dilation=self.dilation, groups=self.groups))
# out = torch.cat(out, 0)
return out | DeepLearningExamples-master | PyTorch/Detection/Efficientdet/effdet/layers/cond_conv2d.py |
""" DropBlock, DropPath
PyTorch implementations of DropBlock and DropPath (Stochastic Depth) regularization layers.
Papers:
DropBlock: A regularization method for convolutional networks (https://arxiv.org/abs/1810.12890)
Deep Networks with Stochastic Depth (https://arxiv.org/abs/1603.09382)
Code:
DropBlock impl inspired by two Tensorflow impl that I liked:
- https://github.com/tensorflow/tpu/blob/master/models/official/resnet/resnet_model.py#L74
- https://github.com/clovaai/assembled-cnn/blob/master/nets/blocks.py
Hacked together by / Copyright 2020 Ross Wightman
"""
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
import torch.nn.functional as F
def drop_block_2d(
x, drop_prob: float = 0.1, block_size: int = 7, gamma_scale: float = 1.0,
with_noise: bool = False, inplace: bool = False, batchwise: bool = False):
""" DropBlock. See https://arxiv.org/pdf/1810.12890.pdf
DropBlock with an experimental gaussian noise option. This layer has been tested on a few training
runs with success, but needs further validation and possibly optimization for lower runtime impact.
"""
B, C, H, W = x.shape
total_size = W * H
clipped_block_size = min(block_size, min(W, H))
# seed_drop_rate, the gamma parameter
gamma = gamma_scale * drop_prob * total_size / clipped_block_size ** 2 / (
(W - block_size + 1) * (H - block_size + 1))
# Forces the block to be inside the feature map.
w_i, h_i = torch.meshgrid(torch.arange(W).to(x.device), torch.arange(H).to(x.device))
valid_block = ((w_i >= clipped_block_size // 2) & (w_i < W - (clipped_block_size - 1) // 2)) & \
((h_i >= clipped_block_size // 2) & (h_i < H - (clipped_block_size - 1) // 2))
valid_block = torch.reshape(valid_block, (1, 1, H, W)).to(dtype=x.dtype)
if batchwise:
# one mask for whole batch, quite a bit faster
uniform_noise = torch.rand((1, C, H, W), dtype=x.dtype, device=x.device)
else:
uniform_noise = torch.rand_like(x)
block_mask = ((2 - gamma - valid_block + uniform_noise) >= 1).to(dtype=x.dtype)
block_mask = -F.max_pool2d(
-block_mask,
kernel_size=clipped_block_size, # block_size,
stride=1,
padding=clipped_block_size // 2)
if with_noise:
normal_noise = torch.randn((1, C, H, W), dtype=x.dtype, device=x.device) if batchwise else torch.randn_like(x)
if inplace:
x.mul_(block_mask).add_(normal_noise * (1 - block_mask))
else:
x = x * block_mask + normal_noise * (1 - block_mask)
else:
normalize_scale = (block_mask.numel() / block_mask.to(dtype=torch.float32).sum().add(1e-7)).to(x.dtype)
if inplace:
x.mul_(block_mask * normalize_scale)
else:
x = x * block_mask * normalize_scale
return x
def drop_block_fast_2d(
x: torch.Tensor, drop_prob: float = 0.1, block_size: int = 7,
gamma_scale: float = 1.0, with_noise: bool = False, inplace: bool = False, batchwise: bool = False):
""" DropBlock. See https://arxiv.org/pdf/1810.12890.pdf
    DropBlock with an experimental gaussian noise option. Simplified from the above without concern for valid
block mask at edges.
"""
B, C, H, W = x.shape
total_size = W * H
clipped_block_size = min(block_size, min(W, H))
gamma = gamma_scale * drop_prob * total_size / clipped_block_size ** 2 / (
(W - block_size + 1) * (H - block_size + 1))
if batchwise:
# one mask for whole batch, quite a bit faster
block_mask = torch.rand((1, C, H, W), dtype=x.dtype, device=x.device) < gamma
else:
# mask per batch element
block_mask = torch.rand_like(x) < gamma
block_mask = F.max_pool2d(
block_mask.to(x.dtype), kernel_size=clipped_block_size, stride=1, padding=clipped_block_size // 2)
if with_noise:
normal_noise = torch.randn((1, C, H, W), dtype=x.dtype, device=x.device) if batchwise else torch.randn_like(x)
if inplace:
x.mul_(1. - block_mask).add_(normal_noise * block_mask)
else:
x = x * (1. - block_mask) + normal_noise * block_mask
else:
block_mask = 1 - block_mask
normalize_scale = (block_mask.numel() / block_mask.to(dtype=torch.float32).sum().add(1e-7)).to(dtype=x.dtype)
if inplace:
x.mul_(block_mask * normalize_scale)
else:
x = x * block_mask * normalize_scale
return x
class DropBlock2d(nn.Module):
""" DropBlock. See https://arxiv.org/pdf/1810.12890.pdf
"""
def __init__(self,
drop_prob=0.1,
block_size=7,
gamma_scale=1.0,
with_noise=False,
inplace=False,
batchwise=False,
fast=True):
super(DropBlock2d, self).__init__()
self.drop_prob = drop_prob
self.gamma_scale = gamma_scale
self.block_size = block_size
self.with_noise = with_noise
self.inplace = inplace
self.batchwise = batchwise
self.fast = fast # FIXME finish comparisons of fast vs not
def forward(self, x):
if not self.training or not self.drop_prob:
return x
if self.fast:
return drop_block_fast_2d(
x, self.drop_prob, self.block_size, self.gamma_scale, self.with_noise, self.inplace, self.batchwise)
else:
return drop_block_2d(
x, self.drop_prob, self.block_size, self.gamma_scale, self.with_noise, self.inplace, self.batchwise)
def drop_path(x, drop_prob: float = 0., training: bool = False):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
This is the same as the DropConnect impl I created for EfficientNet, etc networks, however,
the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for
changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use
'survival rate' as the argument.
"""
if drop_prob == 0. or not training:
return x
keep_prob = 1 - drop_prob
random_tensor = keep_prob + torch.rand((x.size()[0], 1, 1, 1), dtype=x.dtype, device=x.device)
random_tensor.floor_() # binarize
output = x.div(keep_prob) * random_tensor
return output
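# Illustrative sketch (not part of the original module): drop_path zeroes entire samples in the
# batch with probability drop_prob and rescales the survivors by 1 / keep_prob, so the expected
# value of the output matches the input. A quick Monte-Carlo check over a large batch:
def _drop_path_example():
    torch.manual_seed(0)
    x = torch.ones(10000, 1, 1, 1)
    out = drop_path(x, drop_prob=0.2, training=True)
    kept = (out > 0).float().mean()              # roughly 0.8 of the samples survive
    assert abs(kept.item() - 0.8) < 0.02
    assert abs(out.mean().item() - 1.0) < 0.03   # expectation is preserved by the 1/keep_prob scaling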
class DropPath(nn.Module):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
"""
def __init__(self, drop_prob=None):
super(DropPath, self).__init__()
self.drop_prob = drop_prob
def forward(self, x):
return drop_path(x, self.drop_prob, self.training) | DeepLearningExamples-master | PyTorch/Detection/Efficientdet/effdet/layers/drop.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from omegaconf import OmegaConf
def default_detection_model_configs():
"""Returns a default detection configs."""
h = OmegaConf.create()
# model name.
h.name = 'tf_efficientdet_d1'
h.backbone_name = 'tf_efficientnet_b1'
h.backbone_args = None # FIXME sort out kwargs vs config for backbone creation
# model specific, input preprocessing parameters
h.image_size = 640
# dataset specific head parameters
h.num_classes = 90
# feature + anchor config
h.min_level = 3
h.max_level = 7
h.num_levels = h.max_level - h.min_level + 1
h.num_scales = 3
h.aspect_ratios = [(1.0, 1.0), (1.4, 0.7), (0.7, 1.4)]
h.anchor_scale = 4.0
# FPN and head config
h.pad_type = 'same' # original TF models require an equivalent of Tensorflow 'SAME' padding
h.act_type = 'swish'
h.box_class_repeats = 3
h.fpn_cell_repeats = 3
h.fpn_channels = 88
h.separable_conv = True
h.apply_bn_for_resampling = True
h.conv_after_downsample = False
h.conv_bn_relu_pattern = False
h.use_native_resize_op = False
h.pooling_type = None
h.redundant_bias = True # original TF models have back to back bias + BN layers, not necessary!
h.fpn_name = None
h.fpn_config = None
h.fpn_drop_path_rate = 0. # No stochastic depth in default.
# classification loss (used by train bench)
h.alpha = 0.25
h.gamma = 1.5
# localization loss (used by train bench)
h.delta = 0.1
h.box_loss_weight = 50.0
return h
backbone_config = {
"efficientnet_b0": {
"width_coeff": 1,
"depth_coeff": 1,
"resolution": 224,
"dropout": 0.2,
"checkpoint_path": "./jocbackbone_statedict.pth"
},
"efficientnet_b1": {
"width_coeff": 1,
"depth_coeff": 1.1,
"resolution": 240,
"dropout": 0.2,
"checkpoint_path": ""
},
"efficientnet_b2": {
"width_coeff": 1.1,
"depth_coeff": 1.2,
"resolution": 260,
"dropout": 0.3,
"checkpoint_path": ""
},
"efficientnet_b3": {
"width_coeff": 1.2,
"depth_coeff": 1.4,
"resolution": 300,
"dropout": 0.3,
"checkpoint_path": ""
},
"efficientnet_b4": {
"width_coeff": 1.4,
"depth_coeff": 1.8,
"resolution": 380,
"dropout": 0.4,
"checkpoint_path": "./jocbackbone_statedict_B4.pth"
},
"efficientnet_b5": {
"width_coeff": 1.6,
"depth_coeff": 2.2,
"resolution": 456,
"dropout": 0.4,
"checkpoint_path": ""
},
"efficientnet_b6": {
"width_coeff": 1.8,
"depth_coeff": 2.6,
"resolution": 528,
"dropout": 0.5,
"checkpoint_path": ""
},
"efficientnet_b7": {
"width_coeff": 2.0,
"depth_coeff": 3.1,
"resolution": 600,
"dropout": 0.5,
"checkpoint_path": ""
},
}
efficientdet_model_param_dict = dict(
# Models with PyTorch friendly padding and my PyTorch pretrained backbones, training TBD
efficientdet_d0=dict(
name='efficientdet_d0',
backbone_name='efficientnet_b0',
image_size=512,
fpn_channels=64,
fpn_cell_repeats=3,
box_class_repeats=3,
pad_type='',
redundant_bias=False,
backbone_args=dict(drop_path_rate=0.1),
url='https://github.com/rwightman/efficientdet-pytorch/releases/download/v0.1/efficientdet_d0-f3276ba8.pth',
),
efficientdet_d1=dict(
name='efficientdet_d1',
backbone_name='efficientnet_b1',
image_size=640,
fpn_channels=88,
fpn_cell_repeats=4,
box_class_repeats=3,
pad_type='',
redundant_bias=False,
backbone_args=dict(drop_path_rate=0.2),
url='https://github.com/rwightman/efficientdet-pytorch/releases/download/v0.1/efficientdet_d1-bb7e98fe.pth',
),
efficientdet_d2=dict(
name='efficientdet_d2',
backbone_name='efficientnet_b2',
image_size=768,
fpn_channels=112,
fpn_cell_repeats=5,
box_class_repeats=3,
pad_type='',
redundant_bias=False,
backbone_args=dict(drop_path_rate=0.2),
url='', # no pretrained weights yet
),
efficientdet_d3=dict(
name='efficientdet_d3',
backbone_name='efficientnet_b3',
image_size=896,
fpn_channels=160,
fpn_cell_repeats=6,
box_class_repeats=4,
pad_type='',
redundant_bias=False,
backbone_args=dict(drop_path_rate=0.2),
url='', # no pretrained weights yet
),
efficientdet_d4=dict(
name='efficientdet_d4',
backbone_name='efficientnet_b4',
image_size=1024,
fpn_channels=224,
fpn_cell_repeats=7,
box_class_repeats=4,
backbone_args=dict(drop_path_rate=0.2),
url='',
),
# My own experimental configs with alternate models, training TBD
# Note: any 'timm' model in the EfficientDet family can be used as a backbone here.
efficientdet_w0=dict(
name='efficientdet_w0', # 'wide'
backbone_name='efficientnet_b0',
image_size=512,
fpn_channels=80,
fpn_cell_repeats=3,
box_class_repeats=3,
pad_type='',
redundant_bias=False,
backbone_args=dict(
drop_path_rate=0.1,
feature_location='depthwise'), # features from after DW/SE in IR block
url='', # no pretrained weights yet
),
mixdet_m=dict(
name='mixdet_m',
backbone_name='mixnet_m',
image_size=512,
fpn_channels=64,
fpn_cell_repeats=3,
box_class_repeats=3,
pad_type='',
redundant_bias=False,
backbone_args=dict(drop_path_rate=0.1),
url='', # no pretrained weights yet
),
mixdet_l=dict(
name='mixdet_l',
backbone_name='mixnet_l',
image_size=640,
fpn_channels=88,
fpn_cell_repeats=4,
box_class_repeats=3,
pad_type='',
redundant_bias=False,
backbone_args=dict(drop_path_rate=0.2),
url='', # no pretrained weights yet
),
mobiledetv2_110d=dict(
name='mobiledetv2_110d',
backbone_name='mobilenetv2_110d',
image_size=384,
fpn_channels=48,
fpn_cell_repeats=3,
box_class_repeats=3,
pad_type='',
act_type='relu6',
redundant_bias=False,
backbone_args=dict(drop_path_rate=0.05),
url='', # no pretrained weights yet
),
mobiledetv2_120d=dict(
name='mobiledetv2_120d',
backbone_name='mobilenetv2_120d',
image_size=512,
fpn_channels=56,
fpn_cell_repeats=3,
box_class_repeats=3,
pad_type='',
act_type='relu6',
redundant_bias=False,
backbone_args=dict(drop_path_rate=0.1),
url='', # no pretrained weights yet
),
mobiledetv3_large=dict(
name='mobiledetv3_large',
backbone_name='mobilenetv3_large_100',
image_size=512,
fpn_channels=64,
fpn_cell_repeats=3,
box_class_repeats=3,
pad_type='',
act_type='hard_swish',
redundant_bias=False,
backbone_args=dict(drop_path_rate=0.1),
url='', # no pretrained weights yet
),
# Models ported from Tensorflow with pretrained backbones ported from Tensorflow
tf_efficientdet_d0=dict(
name='tf_efficientdet_d0',
backbone_name='tf_efficientnet_b0',
image_size=512,
fpn_channels=64,
fpn_cell_repeats=3,
box_class_repeats=3,
backbone_args=dict(drop_path_rate=0.2),
url='https://github.com/rwightman/efficientdet-pytorch/releases/download/v0.1/tf_efficientdet_d0-d92fd44f.pth',
),
tf_efficientdet_d1=dict(
name='tf_efficientdet_d1',
backbone_name='tf_efficientnet_b1',
image_size=640,
fpn_channels=88,
fpn_cell_repeats=4,
box_class_repeats=3,
backbone_args=dict(drop_path_rate=0.2),
url='https://github.com/rwightman/efficientdet-pytorch/releases/download/v0.1/tf_efficientdet_d1-4c7ebaf2.pth'
),
tf_efficientdet_d2=dict(
name='tf_efficientdet_d2',
backbone_name='tf_efficientnet_b2',
image_size=768,
fpn_channels=112,
fpn_cell_repeats=5,
box_class_repeats=3,
backbone_args=dict(drop_path_rate=0.2),
url='https://github.com/rwightman/efficientdet-pytorch/releases/download/v0.1/tf_efficientdet_d2-cb4ce77d.pth',
),
tf_efficientdet_d3=dict(
name='tf_efficientdet_d3',
backbone_name='tf_efficientnet_b3',
image_size=896,
fpn_channels=160,
fpn_cell_repeats=6,
box_class_repeats=4,
backbone_args=dict(drop_path_rate=0.2),
url='https://github.com/rwightman/efficientdet-pytorch/releases/download/v0.1/tf_efficientdet_d3-b0ea2cbc.pth',
),
tf_efficientdet_d4=dict(
name='tf_efficientdet_d4',
backbone_name='tf_efficientnet_b4',
image_size=1024,
fpn_channels=224,
fpn_cell_repeats=7,
box_class_repeats=4,
backbone_args=dict(drop_path_rate=0.2),
url='https://github.com/rwightman/efficientdet-pytorch/releases/download/v0.1/tf_efficientdet_d4-5b370b7a.pth',
),
tf_efficientdet_d5=dict(
name='tf_efficientdet_d5',
backbone_name='tf_efficientnet_b5',
image_size=1280,
fpn_channels=288,
fpn_cell_repeats=7,
box_class_repeats=4,
backbone_args=dict(drop_path_rate=0.2),
url='https://github.com/rwightman/efficientdet-pytorch/releases/download/v0.1/tf_efficientdet_d5-ef44aea8.pth',
),
tf_efficientdet_d6=dict(
name='tf_efficientdet_d6',
backbone_name='tf_efficientnet_b6',
image_size=1280,
fpn_channels=384,
fpn_cell_repeats=8,
box_class_repeats=5,
fpn_name='bifpn_sum', # Use unweighted sum for training stability.
backbone_args=dict(drop_path_rate=0.2),
url='https://github.com/rwightman/efficientdet-pytorch/releases/download/v0.1/tf_efficientdet_d6-51cb0132.pth'
),
tf_efficientdet_d7=dict(
name='tf_efficientdet_d7',
backbone_name='tf_efficientnet_b6',
image_size=1536,
fpn_channels=384,
fpn_cell_repeats=8,
box_class_repeats=5,
anchor_scale=5.0,
fpn_name='bifpn_sum', # Use unweighted sum for training stability.
backbone_args=dict(drop_path_rate=0.2),
url='https://github.com/rwightman/efficientdet-pytorch/releases/download/v0.1/tf_efficientdet_d7_53-6d1d7a95.pth'
),
# The lite configs are in TF automl repository but no weights yet and listed as 'not final'
tf_efficientdet_lite0=dict(
name='tf_efficientdet_lite0',
backbone_name='tf_efficientnet_lite0',
image_size=512,
fpn_channels=64,
fpn_cell_repeats=3,
box_class_repeats=3,
act_type='relu',
redundant_bias=False,
backbone_args=dict(drop_path_rate=0.1),
# unlike other tf_ models, this was not ported from tf automl impl, but trained from tf pretrained efficient lite
# weights using this code, will likely replace if/when official det-lite weights are released
url='https://github.com/rwightman/efficientdet-pytorch/releases/download/v0.1/tf_efficientdet_lite0-f5f303a9.pth',
),
tf_efficientdet_lite1=dict(
name='tf_efficientdet_lite1',
backbone_name='tf_efficientnet_lite1',
image_size=640,
fpn_channels=88,
fpn_cell_repeats=4,
box_class_repeats=3,
act_type='relu',
backbone_args=dict(drop_path_rate=0.2),
url='', # no pretrained weights yet
),
tf_efficientdet_lite2=dict(
name='tf_efficientdet_lite2',
backbone_name='tf_efficientnet_lite2',
image_size=768,
fpn_channels=112,
fpn_cell_repeats=5,
box_class_repeats=3,
act_type='relu',
backbone_args=dict(drop_path_rate=0.2),
url='',
),
tf_efficientdet_lite3=dict(
name='tf_efficientdet_lite3',
backbone_name='tf_efficientnet_lite3',
image_size=896,
fpn_channels=160,
fpn_cell_repeats=6,
box_class_repeats=4,
act_type='relu',
backbone_args=dict(drop_path_rate=0.2),
url='',
),
tf_efficientdet_lite4=dict(
name='tf_efficientdet_lite4',
backbone_name='tf_efficientnet_lite4',
image_size=1024,
fpn_channels=224,
fpn_cell_repeats=7,
box_class_repeats=4,
act_type='relu',
backbone_args=dict(drop_path_rate=0.2),
url='',
),
)
def get_backbone_config(backbone_name='efficientnet_b0'):
if backbone_name not in backbone_config:
raise Exception("Backbone name {} not supported".format(backbone_name))
return backbone_config[backbone_name]
def get_efficientdet_config(model_name='tf_efficientdet_d1'):
"""Get the default config for EfficientDet based on model name."""
h = default_detection_model_configs()
h.update(efficientdet_model_param_dict[model_name])
return h
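# Illustrative sketch (not part of the original module): typical usage is to fetch a named config
# and then override dataset- or experiment-specific fields before building the model. The override
# value below is an arbitrary example.
def _example_config_usage():
    config = get_efficientdet_config('tf_efficientdet_d0')
    config.num_classes = 20  # e.g. a PASCAL VOC style dataset instead of the 90-class COCO default
    return config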
def bifpn_sum_config(base_reduction=8):
"""BiFPN config with sum."""
p = OmegaConf.create()
p.nodes = [
{'reduction': base_reduction << 3, 'inputs_offsets': [3, 4]},
{'reduction': base_reduction << 2, 'inputs_offsets': [2, 5]},
{'reduction': base_reduction << 1, 'inputs_offsets': [1, 6]},
{'reduction': base_reduction, 'inputs_offsets': [0, 7]},
{'reduction': base_reduction << 1, 'inputs_offsets': [1, 7, 8]},
{'reduction': base_reduction << 2, 'inputs_offsets': [2, 6, 9]},
{'reduction': base_reduction << 3, 'inputs_offsets': [3, 5, 10]},
{'reduction': base_reduction << 4, 'inputs_offsets': [4, 11]},
]
p.weight_method = 'sum'
return p
def bifpn_attn_config():
"""BiFPN config with fast weighted sum."""
p = bifpn_sum_config()
p.weight_method = 'attn'
return p
def bifpn_fa_config():
"""BiFPN config with fast weighted sum."""
p = bifpn_sum_config()
p.weight_method = 'fastattn'
return p
def get_fpn_config(fpn_name):
if not fpn_name:
fpn_name = 'bifpn_fa'
name_to_config = {
'bifpn_sum': bifpn_sum_config(),
'bifpn_attn': bifpn_attn_config(),
'bifpn_fa': bifpn_fa_config(),
}
return name_to_config[fpn_name]
| DeepLearningExamples-master | PyTorch/Detection/Efficientdet/effdet/config/model_config.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .model_config import get_efficientdet_config, get_fpn_config, default_detection_model_configs, get_backbone_config
from .train_config import default_detection_train_config
| DeepLearningExamples-master | PyTorch/Detection/Efficientdet/effdet/config/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from omegaconf import OmegaConf
def default_detection_train_config():
# FIXME currently using args for train config, will revisit, perhaps move to Hydra
h = OmegaConf.create()
# dataset
h.skip_crowd_during_training = True
# augmentation
h.input_rand_hflip = True
h.train_scale_min = 0.1
h.train_scale_max = 2.0
h.autoaugment_policy = None
# optimization
h.momentum = 0.9
h.learning_rate = 0.08
h.lr_warmup_init = 0.008
h.lr_warmup_epoch = 1.0
h.first_lr_drop_epoch = 200.0
h.second_lr_drop_epoch = 250.0
h.clip_gradients_norm = 10.0
h.num_epochs = 300
# regularization l2 loss.
h.weight_decay = 4e-5
h.lr_decay_method = 'cosine'
h.moving_average_decay = 0.9998
h.ckpt_var_scope = None
return h
| DeepLearningExamples-master | PyTorch/Detection/Efficientdet/effdet/config/train_config.py |
# Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base target assigner module.
The job of a TargetAssigner is, for a given set of anchors (bounding boxes) and
groundtruth detections (bounding boxes), to assign classification and regression
targets to each anchor as well as weights to each anchor (specifying, e.g.,
which anchors should not contribute to training loss).
It assigns classification/regression targets by performing the following steps:
1) Computing pairwise similarity between anchors and groundtruth boxes using a
provided RegionSimilarity Calculator
2) Computing a matching based on the similarity matrix using a provided Matcher
3) Assigning regression targets based on the matching and a provided BoxCoder
4) Assigning classification targets based on the matching and groundtruth labels
Note that TargetAssigners only operate on detections from a single
image at a time, so any logic for applying a TargetAssigner to multiple
images must be handled externally.
"""
import torch
from . import box_list
from .region_similarity_calculator import IouSimilarity
from .argmax_matcher import ArgMaxMatcher
from .matcher import Match
from .box_list import BoxList
from .box_coder import FasterRcnnBoxCoder
KEYPOINTS_FIELD_NAME = 'keypoints'
#@torch.jit.script
class TargetAssigner(object):
"""Target assigner to compute classification and regression targets."""
def __init__(self, similarity_calc: IouSimilarity, matcher: ArgMaxMatcher, box_coder: FasterRcnnBoxCoder,
negative_class_weight: float = 1.0, unmatched_cls_target=None,
keypoints_field_name: str = KEYPOINTS_FIELD_NAME):
"""Construct Object Detection Target Assigner.
Args:
similarity_calc: a RegionSimilarityCalculator
matcher: Matcher used to match groundtruth to anchors.
box_coder: BoxCoder used to encode matching groundtruth boxes with respect to anchors.
negative_class_weight: classification weight to be associated to negative
anchors (default: 1.0). The weight must be in [0., 1.].
unmatched_cls_target: a float32 tensor with shape [d_1, d_2, ..., d_k]
which is consistent with the classification target for each
anchor (and can be empty for scalar targets). This shape must thus be
compatible with the groundtruth labels that are passed to the "assign"
function (which have shape [num_gt_boxes, d_1, d_2, ..., d_k]).
If set to None, unmatched_cls_target is set to be [0] for each anchor.
Raises:
ValueError: if similarity_calc is not a RegionSimilarityCalculator or
if matcher is not a Matcher or if box_coder is not a BoxCoder
"""
self._similarity_calc = similarity_calc
self._matcher = matcher
self._box_coder = box_coder
self._negative_class_weight = negative_class_weight
self._unmatched_cls_target = unmatched_cls_target
self._keypoints_field_name = keypoints_field_name
def assign(self, anchors: BoxList, groundtruth_boxes: BoxList, groundtruth_labels=None, groundtruth_weights=None):
"""Assign classification and regression targets to each anchor.
For a given set of anchors and groundtruth detections, match anchors
to groundtruth_boxes and assign classification and regression targets to
each anchor as well as weights based on the resulting match (specifying,
e.g., which anchors should not contribute to training loss).
Anchors that are not matched to anything are given a classification target
of self._unmatched_cls_target which can be specified via the constructor.
Args:
anchors: a BoxList representing N anchors
groundtruth_boxes: a BoxList representing M groundtruth boxes
groundtruth_labels: a tensor of shape [M, d_1, ... d_k]
with labels for each of the ground_truth boxes. The subshape
[d_1, ... d_k] can be empty (corresponding to scalar inputs). When set
to None, groundtruth_labels assumes a binary problem where all
ground_truth boxes get a positive label (of 1).
groundtruth_weights: a float tensor of shape [M] indicating the weight to
assign to all anchors match to a particular groundtruth box. The weights
must be in [0., 1.]. If None, all weights are set to 1.
**params: Additional keyword arguments for specific implementations of the Matcher.
Returns:
cls_targets: a float32 tensor with shape [num_anchors, d_1, d_2 ... d_k],
where the subshape [d_1, ..., d_k] is compatible with groundtruth_labels
which has shape [num_gt_boxes, d_1, d_2, ... d_k].
cls_weights: a float32 tensor with shape [num_anchors]
reg_targets: a float32 tensor with shape [num_anchors, box_code_dimension]
reg_weights: a float32 tensor with shape [num_anchors]
match: a matcher.Match object encoding the match between anchors and groundtruth boxes,
with rows corresponding to groundtruth boxes and columns corresponding to anchors.
Raises:
ValueError: if anchors or groundtruth_boxes are not of type box_list.BoxList
"""
if not isinstance(anchors, box_list.BoxList):
            raise ValueError('anchors must be a BoxList')
if not isinstance(groundtruth_boxes, box_list.BoxList):
            raise ValueError('groundtruth_boxes must be a BoxList')
device = anchors.device()
if groundtruth_labels is None:
groundtruth_labels = torch.ones(groundtruth_boxes.num_boxes(), device=device).unsqueeze(0)
groundtruth_labels = groundtruth_labels.unsqueeze(-1)
        if groundtruth_weights is None:
            # Default every groundtruth box to a weight of 1.
            num_gt_boxes = groundtruth_boxes.num_boxes()
            groundtruth_weights = torch.ones([num_gt_boxes], device=device)
match_quality_matrix = self._similarity_calc.compare(groundtruth_boxes, anchors)
match = self._matcher.match(match_quality_matrix)
reg_targets = self._create_regression_targets(anchors, groundtruth_boxes, match)
cls_targets = self._create_classification_targets(groundtruth_labels, match)
reg_weights = self._create_regression_weights(match, groundtruth_weights)
cls_weights = self._create_classification_weights(match, groundtruth_weights)
return cls_targets, cls_weights, reg_targets, reg_weights, match
def _create_regression_targets(self, anchors: BoxList, groundtruth_boxes: BoxList, match: Match):
"""Returns a regression target for each anchor.
Args:
anchors: a BoxList representing N anchors
groundtruth_boxes: a BoxList representing M groundtruth_boxes
match: a matcher.Match object
Returns:
reg_targets: a float32 tensor with shape [N, box_code_dimension]
"""
device = anchors.device()
zero_box = torch.zeros(4, device=device)
matched_gt_boxes = match.gather_based_on_match(
groundtruth_boxes.boxes(), unmatched_value=zero_box, ignored_value=zero_box)
matched_gt_boxlist = box_list.BoxList(matched_gt_boxes)
if groundtruth_boxes.has_field(self._keypoints_field_name):
groundtruth_keypoints = groundtruth_boxes.get_field(self._keypoints_field_name)
zero_kp = torch.zeros(groundtruth_keypoints.shape[1:], device=device)
matched_keypoints = match.gather_based_on_match(
groundtruth_keypoints, unmatched_value=zero_kp, ignored_value=zero_kp)
matched_gt_boxlist.add_field(self._keypoints_field_name, matched_keypoints)
matched_reg_targets = self._box_coder.encode(matched_gt_boxlist, anchors)
unmatched_ignored_reg_targets = self._default_regression_target(device).repeat(match.match_results.shape[0], 1)
matched_anchors_mask = match.matched_column_indicator()
reg_targets = torch.where(matched_anchors_mask.unsqueeze(1), matched_reg_targets, unmatched_ignored_reg_targets)
return reg_targets
def _default_regression_target(self, device: torch.device):
"""Returns the default target for anchors to regress to.
Default regression targets are set to zero (though in this implementation what
these targets are set to should not matter as the regression weight of any box
set to regress to the default target is zero).
Returns:
default_target: a float32 tensor with shape [1, box_code_dimension]
"""
return torch.zeros(1, self._box_coder.code_size(), device=device)
def _create_classification_targets(self, groundtruth_labels, match: Match):
"""Create classification targets for each anchor.
        Assign a classification target for each anchor to the matching
groundtruth label that is provided by match. Anchors that are not matched
to anything are given the target self._unmatched_cls_target
Args:
groundtruth_labels: a tensor of shape [num_gt_boxes, d_1, ... d_k]
with labels for each of the ground_truth boxes. The subshape
[d_1, ... d_k] can be empty (corresponding to scalar labels).
match: a matcher.Match object that provides a matching between anchors
and groundtruth boxes.
Returns:
a float32 tensor with shape [num_anchors, d_1, d_2 ... d_k], where the
subshape [d_1, ..., d_k] is compatible with groundtruth_labels which has
shape [num_gt_boxes, d_1, d_2, ... d_k].
"""
if self._unmatched_cls_target is not None:
uct = self._unmatched_cls_target
else:
uct = torch.scalar_tensor(0, device=groundtruth_labels.device)
return match.gather_based_on_match(groundtruth_labels, unmatched_value=uct, ignored_value=uct)
def _create_regression_weights(self, match: Match, groundtruth_weights):
"""Set regression weight for each anchor.
Only positive anchors are set to contribute to the regression loss, so this
method returns a weight of 1 for every positive anchor and 0 for every
negative anchor.
Args:
match: a matcher.Match object that provides a matching between anchors and groundtruth boxes.
groundtruth_weights: a float tensor of shape [M] indicating the weight to
assign to all anchors match to a particular groundtruth box.
Returns:
a float32 tensor with shape [num_anchors] representing regression weights.
"""
zs = torch.scalar_tensor(0, device=groundtruth_weights.device)
return match.gather_based_on_match(groundtruth_weights, ignored_value=zs, unmatched_value=zs)
def _create_classification_weights(self, match: Match, groundtruth_weights):
"""Create classification weights for each anchor.
Positive (matched) anchors are associated with a weight of
positive_class_weight and negative (unmatched) anchors are associated with
a weight of negative_class_weight. When anchors are ignored, weights are set
to zero. By default, both positive/negative weights are set to 1.0,
but they can be adjusted to handle class imbalance (which is almost always
the case in object detection).
Args:
match: a matcher.Match object that provides a matching between anchors and groundtruth boxes.
groundtruth_weights: a float tensor of shape [M] indicating the weight to
assign to all anchors match to a particular groundtruth box.
Returns:
a float32 tensor with shape [num_anchors] representing classification weights.
"""
ignored = torch.scalar_tensor(0, device=groundtruth_weights.device)
ncw = torch.scalar_tensor(self._negative_class_weight, device=groundtruth_weights.device)
return match.gather_based_on_match(groundtruth_weights, ignored_value=ignored, unmatched_value=ncw)
def box_coder(self):
"""Get BoxCoder of this TargetAssigner.
Returns:
BoxCoder object.
"""
return self._box_coder
| DeepLearningExamples-master | PyTorch/Detection/Efficientdet/effdet/object_detection/target_assigner.py |
# Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Matcher interface and Match class.
This module defines the Matcher interface and the Match object. The job of the
matcher is to match row and column indices based on the similarity matrix and
other optional parameters. Each column is matched to at most one row. There
are three possibilities for the matching:
1) match: A column matches a row.
2) no_match: A column does not match any row.
3) ignore: A column that is neither 'match' nor no_match.
The ignore case is regularly encountered in object detection: when an anchor has
a relatively small overlap with a ground-truth box, one neither wants to
consider this box a positive example (match) nor a negative example (no match).
The Match class is used to store the match results and it provides simple apis
to query the results.
"""
import torch
@torch.jit.script
class Match(object):
"""Class to store results from the matcher.
This class is used to store the results from the matcher. It provides
convenient methods to query the matching results.
"""
def __init__(self, match_results: torch.Tensor):
"""Constructs a Match object.
Args:
match_results: Integer tensor of shape [N] with (1) match_results[i]>=0,
meaning that column i is matched with row match_results[i].
(2) match_results[i]=-1, meaning that column i is not matched.
(3) match_results[i]=-2, meaning that column i is ignored.
Raises:
            ValueError: if match_results does not have rank 1 or is not an int32 or int64 tensor
"""
if len(match_results.shape) != 1:
raise ValueError('match_results should have rank 1')
if match_results.dtype not in (torch.int32, torch.int64):
raise ValueError('match_results should be an int32 or int64 scalar tensor')
self.match_results = match_results
def matched_column_indices(self):
"""Returns column indices that match to some row.
The indices returned by this op are always sorted in increasing order.
Returns:
column_indices: int32 tensor of shape [K] with column indices.
"""
return torch.nonzero(self.match_results > -1).flatten().long()
def matched_column_indicator(self):
"""Returns column indices that are matched.
Returns:
column_indices: int32 tensor of shape [K] with column indices.
"""
return self.match_results >= 0
def num_matched_columns(self):
"""Returns number (int32 scalar tensor) of matched columns."""
return self.matched_column_indices().numel()
def unmatched_column_indices(self):
"""Returns column indices that do not match any row.
The indices returned by this op are always sorted in increasing order.
Returns:
column_indices: int32 tensor of shape [K] with column indices.
"""
return torch.nonzero(self.match_results == -1).flatten().long()
def unmatched_column_indicator(self):
"""Returns column indices that are unmatched.
Returns:
column_indices: int32 tensor of shape [K] with column indices.
"""
return self.match_results == -1
def num_unmatched_columns(self):
"""Returns number (int32 scalar tensor) of unmatched columns."""
return self.unmatched_column_indices().numel()
def ignored_column_indices(self):
"""Returns column indices that are ignored (neither Matched nor Unmatched).
The indices returned by this op are always sorted in increasing order.
Returns:
column_indices: int32 tensor of shape [K] with column indices.
"""
return torch.nonzero(self.ignored_column_indicator()).flatten().long()
def ignored_column_indicator(self):
"""Returns boolean column indicator where True means the column is ignored.
Returns:
column_indicator: boolean vector which is True for all ignored column indices.
"""
return self.match_results == -2
def num_ignored_columns(self):
"""Returns number (int32 scalar tensor) of matched columns."""
return self.ignored_column_indices().numel()
def unmatched_or_ignored_column_indices(self):
"""Returns column indices that are unmatched or ignored.
The indices returned by this op are always sorted in increasing order.
Returns:
column_indices: int32 tensor of shape [K] with column indices.
"""
return torch.nonzero(0 > self.match_results).flatten().long()
def matched_row_indices(self):
"""Returns row indices that match some column.
The indices returned by this op are ordered so as to be in correspondence with the output of
        matched_column_indices(). For example if self.matched_column_indices() is [0,2],
and self.matched_row_indices() is [7, 3], then we know that column 0 was matched to row 7 and
column 2 was matched to row 3.
Returns:
row_indices: int32 tensor of shape [K] with row indices.
"""
return torch.gather(self.match_results, 0, self.matched_column_indices()).flatten().long()
def gather_based_on_match(self, input_tensor, unmatched_value, ignored_value):
"""Gathers elements from `input_tensor` based on match results.
For columns that are matched to a row, gathered_tensor[col] is set to input_tensor[match_results[col]].
For columns that are unmatched, gathered_tensor[col] is set to unmatched_value. Finally, for columns that
are ignored gathered_tensor[col] is set to ignored_value.
Note that the input_tensor.shape[1:] must match with unmatched_value.shape
and ignored_value.shape
Args:
input_tensor: Tensor to gather values from.
unmatched_value: Constant tensor value for unmatched columns.
ignored_value: Constant tensor value for ignored columns.
Returns:
gathered_tensor: A tensor containing values gathered from input_tensor.
The shape of the gathered tensor is [match_results.shape[0]] + input_tensor.shape[1:].
"""
ss = torch.stack([ignored_value, unmatched_value])
input_tensor = torch.cat([ss, input_tensor], dim=0)
gather_indices = torch.clamp(self.match_results + 2, min=0)
gathered_tensor = torch.index_select(input_tensor, 0, gather_indices)
return gathered_tensor
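# Minimal sketch of gather_based_on_match (values assumed for illustration): with
# match_results = [1, -1, -2] and two input rows, the matched column pulls the
# corresponding row while unmatched/ignored columns fall back to the constants.
#
#   m = Match(torch.tensor([1, -1, -2], dtype=torch.int64))
#   rows = torch.tensor([[10.], [20.]])
#   m.gather_based_on_match(rows, unmatched_value=torch.tensor([0.]),
#                           ignored_value=torch.tensor([0.]))
#   # -> tensor([[20.], [0.], [0.]])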
| DeepLearningExamples-master | PyTorch/Detection/Efficientdet/effdet/object_detection/matcher.py |
# Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base box coder.
Box coders convert between coordinate frames, namely image-centric
(with (0,0) on the top left of image) and anchor-centric (with (0,0) being
defined by a specific anchor).
Users of a BoxCoder can call two methods:
encode: which encodes a box with respect to a given anchor
(or rather, a tensor of boxes wrt a corresponding tensor of anchors) and
decode: which inverts this encoding with a decode operation.
In both cases, the arguments are assumed to be in 1-1 correspondence already;
it is not the job of a BoxCoder to perform matching.
"""
import torch
from typing import List, Optional
from .box_list import BoxList
# Box coder types.
FASTER_RCNN = 'faster_rcnn'
KEYPOINT = 'keypoint'
MEAN_STDDEV = 'mean_stddev'
SQUARE = 'square'
"""Faster RCNN box coder.
Faster RCNN box coder follows the coding schema described below:
ty = (y - ya) / ha
tx = (x - xa) / wa
th = log(h / ha)
tw = log(w / wa)
where x, y, w, h denote the box's center coordinates, width and height
respectively. Similarly, xa, ya, wa, ha denote the anchor's center
coordinates, width and height. tx, ty, tw and th denote the anchor-encoded
center, width and height respectively.
See http://arxiv.org/abs/1506.01497 for details.
"""
EPS = 1e-8
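# Encode/decode round-trip sketch (illustrative values, not from the library tests):
#
#   coder = FasterRcnnBoxCoder(scale_factors=[10., 10., 5., 5.])
#   anchors = BoxList(torch.tensor([[0., 0., 1., 1.]], dtype=torch.float32))
#   boxes = BoxList(torch.tensor([[.1, .2, .9, .8]], dtype=torch.float32))
#   codes = coder.encode(boxes, anchors)    # shape [1, 4] holding [ty, tx, th, tw]
#   decoded = coder.decode(codes, anchors)  # BoxList whose corners ~= boxes (up to eps)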
#@torch.jit.script
class FasterRcnnBoxCoder(object):
"""Faster RCNN box coder."""
def __init__(self, scale_factors: Optional[List[float]] = None, eps: float = EPS):
"""Constructor for FasterRcnnBoxCoder.
Args:
scale_factors: List of 4 positive scalars to scale ty, tx, th and tw.
If set to None, does not perform scaling. For Faster RCNN,
the open-source implementation recommends using [10.0, 10.0, 5.0, 5.0].
"""
self._scale_factors = scale_factors
if scale_factors is not None:
assert len(scale_factors) == 4
for scalar in scale_factors:
assert scalar > 0
self.eps = eps
#@property
def code_size(self):
return 4
def encode(self, boxes: BoxList, anchors: BoxList):
"""Encode a box collection with respect to anchor collection.
Args:
boxes: BoxList holding N boxes to be encoded.
anchors: BoxList of anchors.
Returns:
a tensor representing N anchor-encoded boxes of the format [ty, tx, th, tw].
"""
# Convert anchors to the center coordinate representation.
ycenter_a, xcenter_a, ha, wa = anchors.get_center_coordinates_and_sizes()
ycenter, xcenter, h, w = boxes.get_center_coordinates_and_sizes()
# Avoid NaN in division and log below.
ha += self.eps
wa += self.eps
h += self.eps
w += self.eps
tx = (xcenter - xcenter_a) / wa
ty = (ycenter - ycenter_a) / ha
tw = torch.log(w / wa)
th = torch.log(h / ha)
# Scales location targets as used in paper for joint training.
if self._scale_factors is not None:
ty *= self._scale_factors[0]
tx *= self._scale_factors[1]
th *= self._scale_factors[2]
tw *= self._scale_factors[3]
return torch.stack([ty, tx, th, tw]).t()
def decode(self, rel_codes, anchors: BoxList):
"""Decode relative codes to boxes.
Args:
rel_codes: a tensor representing N anchor-encoded boxes.
anchors: BoxList of anchors.
Returns:
boxes: BoxList holding N bounding boxes.
"""
ycenter_a, xcenter_a, ha, wa = anchors.get_center_coordinates_and_sizes()
ty, tx, th, tw = rel_codes.t().unbind()
if self._scale_factors is not None:
ty /= self._scale_factors[0]
tx /= self._scale_factors[1]
th /= self._scale_factors[2]
tw /= self._scale_factors[3]
w = torch.exp(tw) * wa
h = torch.exp(th) * ha
ycenter = ty * ha + ycenter_a
xcenter = tx * wa + xcenter_a
ymin = ycenter - h / 2.
xmin = xcenter - w / 2.
ymax = ycenter + h / 2.
xmax = xcenter + w / 2.
return BoxList(torch.stack([ymin, xmin, ymax, xmax]).t())
def batch_decode(encoded_boxes, box_coder: FasterRcnnBoxCoder, anchors: BoxList):
"""Decode a batch of encoded boxes.
This op takes a batch of encoded bounding boxes and transforms
them to a batch of bounding boxes specified by their corners in
the order of [y_min, x_min, y_max, x_max].
Args:
encoded_boxes: a float32 tensor of shape [batch_size, num_anchors,
code_size] representing the location of the objects.
box_coder: a BoxCoder object.
anchors: a BoxList of anchors used to encode `encoded_boxes`.
Returns:
decoded_boxes: a float32 tensor of shape [batch_size, num_anchors, coder_size]
representing the corners of the objects in the order of [y_min, x_min, y_max, x_max].
Raises:
ValueError: if batch sizes of the inputs are inconsistent, or if
the number of anchors inferred from encoded_boxes and anchors are inconsistent.
"""
assert len(encoded_boxes.shape) == 3
if encoded_boxes.shape[1] != anchors.num_boxes():
raise ValueError('The number of anchors inferred from encoded_boxes'
' and anchors are inconsistent: shape[1] of encoded_boxes'
' %s should be equal to the number of anchors: %s.' %
(encoded_boxes.shape[1], anchors.num_boxes()))
decoded_boxes = torch.stack([
        box_coder.decode(boxes, anchors).boxes() for boxes in encoded_boxes.unbind()
])
return decoded_boxes
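# batch_decode usage sketch (assumed shapes): a [batch, num_anchors, 4] tensor of
# encoded boxes is decoded against a single shared anchor BoxList.
#
#   decoded = batch_decode(encoded, FasterRcnnBoxCoder(), anchors)  # [batch, num_anchors, 4]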
| DeepLearningExamples-master | PyTorch/Detection/Efficientdet/effdet/object_detection/box_coder.py |
# Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Object detection data loaders and libraries are mostly based on RetinaNet:
# https://github.com/tensorflow/tpu/tree/master/models/official/retinanet
from .argmax_matcher import ArgMaxMatcher
from .box_coder import FasterRcnnBoxCoder
from .box_list import BoxList
from .matcher import Match
from .region_similarity_calculator import IouSimilarity
from .target_assigner import TargetAssigner
| DeepLearningExamples-master | PyTorch/Detection/Efficientdet/effdet/object_detection/__init__.py |
# Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Bounding Box List definition.
BoxList represents a list of bounding boxes as torch
tensors, where each bounding box is represented as a row of 4 numbers,
[y_min, x_min, y_max, x_max]. It is assumed that all bounding boxes
within a given list correspond to a single image. See also
box_list_ops.py for common box related operations (such as area, iou, etc).
Optionally, users can add additional related fields (such as weights).
We assume the following things to be true about fields:
* they correspond to boxes in the box_list along the 0th dimension
* they have inferable rank at graph construction time
* all dimensions except for possibly the 0th can be inferred
(i.e., not None) at graph construction time.
Some other notes:
* Following tensorflow conventions, we use height, width ordering,
and correspondingly, y,x (or ymin, xmin, ymax, xmax) ordering
* Tensors are always provided as (flat) [N, 4] tensors.
"""
import torch
from typing import Optional, List, Dict
@torch.jit.script
class BoxList(object):
"""Box collection."""
data: Dict[str, torch.Tensor]
def __init__(self, boxes):
"""Constructs box collection.
Args:
boxes: a tensor of shape [N, 4] representing box corners
Raises:
ValueError: if invalid dimensions for bbox data or if bbox data is not in float32 format.
"""
if len(boxes.shape) != 2 or boxes.shape[-1] != 4:
raise ValueError('Invalid dimensions for box data.')
if boxes.dtype != torch.float32:
            raise ValueError('Invalid tensor type: should be torch.float32')
self.data = {'boxes': boxes}
def num_boxes(self):
"""Returns number of boxes held in collection.
Returns:
a tensor representing the number of boxes held in the collection.
"""
return self.data['boxes'].shape[0]
def get_all_fields(self):
"""Returns all fields."""
return self.data.keys()
def get_extra_fields(self):
"""Returns all non-box fields (i.e., everything not named 'boxes')."""
        # return [k for k in self.data.keys() if k != 'boxes']  # FIXME torchscript doesn't support comprehensions yet
extra: List[str] = []
for k in self.data.keys():
if k != 'boxes':
extra.append(k)
return extra
def add_field(self, field: str, field_data: torch.Tensor):
"""Add field to box list.
This method can be used to add related box data such as weights/labels, etc.
Args:
field: a string key to access the data via `get`
field_data: a tensor containing the data to store in the BoxList
"""
self.data[field] = field_data
def has_field(self, field: str):
return field in self.data
#@property # FIXME for torchscript compat
def boxes(self):
"""Convenience function for accessing box coordinates.
Returns:
a tensor with shape [N, 4] representing box coordinates.
"""
return self.get_field('boxes')
#@boxes.setter # FIXME for torchscript compat
def set_boxes(self, boxes):
"""Convenience function for setting box coordinates.
Args:
boxes: a tensor of shape [N, 4] representing box corners
Raises:
ValueError: if invalid dimensions for bbox data
"""
if len(boxes.shape) != 2 or boxes.shape[-1] != 4:
raise ValueError('Invalid dimensions for box data.')
self.data['boxes'] = boxes
def get_field(self, field: str):
"""Accesses a box collection and associated fields.
        This function returns the data stored under the specified field name.
        Args:
            field: a string naming the field to be accessed.
Returns:
a tensor representing the box collection or an associated field.
Raises:
ValueError: if invalid field
"""
if not self.has_field(field):
raise ValueError('field ' + str(field) + ' does not exist')
return self.data[field]
def set_field(self, field: str, value: torch.Tensor):
"""Sets the value of a field.
Updates the field of a box_list with a given value.
Args:
field: (string) name of the field to set value.
value: the value to assign to the field.
Raises:
ValueError: if the box_list does not have specified field.
"""
if not self.has_field(field):
raise ValueError('field ' + str(field) + ' does not exist')
self.data[field] = value
def get_center_coordinates_and_sizes(self):
"""Computes the center coordinates, height and width of the boxes.
Returns:
a list of 4 1-D tensors [ycenter, xcenter, height, width].
"""
box_corners = self.boxes()
ymin, xmin, ymax, xmax = box_corners.t().unbind()
width = xmax - xmin
height = ymax - ymin
ycenter = ymin + height / 2.
xcenter = xmin + width / 2.
return [ycenter, xcenter, height, width]
def transpose_coordinates(self):
"""Transpose the coordinate representation in a boxlist.
"""
y_min, x_min, y_max, x_max = self.boxes().chunk(4, dim=1)
self.set_boxes(torch.cat([x_min, y_min, x_max, y_max], 1))
def as_tensor_dict(self, fields: Optional[List[str]] = None):
"""Retrieves specified fields as a dictionary of tensors.
Args:
fields: (optional) list of fields to return in the dictionary.
If None (default), all fields are returned.
Returns:
tensor_dict: A dictionary of tensors specified by fields.
Raises:
ValueError: if specified field is not contained in boxlist.
"""
tensor_dict = {}
if fields is None:
fields = self.get_all_fields()
for field in fields:
if not self.has_field(field):
raise ValueError('boxlist must contain all specified fields')
tensor_dict[field] = self.get_field(field)
return tensor_dict
#@property
def device(self):
return self.data['boxes'].device
| DeepLearningExamples-master | PyTorch/Detection/Efficientdet/effdet/object_detection/box_list.py |
# Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Region Similarity Calculators for BoxLists.
Region Similarity Calculators compare a pairwise measure of similarity
between the boxes in two BoxLists.
"""
import torch
from .box_list import BoxList
def area(boxlist: BoxList):
"""Computes area of boxes.
Args:
boxlist: BoxList holding N boxes
Returns:
a tensor with shape [N] representing box areas.
"""
y_min, x_min, y_max, x_max = boxlist.boxes().chunk(4, dim=1)
out = (y_max - y_min).squeeze(1) * (x_max - x_min).squeeze(1)
return out
def intersection(boxlist1: BoxList, boxlist2: BoxList):
"""Compute pairwise intersection areas between boxes.
Args:
boxlist1: BoxList holding N boxes
boxlist2: BoxList holding M boxes
Returns:
a tensor with shape [N, M] representing pairwise intersections
"""
y_min1, x_min1, y_max1, x_max1 = boxlist1.boxes().chunk(4, dim=1)
y_min2, x_min2, y_max2, x_max2 = boxlist2.boxes().chunk(4, dim=1)
all_pairs_min_ymax = torch.min(y_max1, y_max2.t())
all_pairs_max_ymin = torch.max(y_min1, y_min2.t())
intersect_heights = torch.clamp(all_pairs_min_ymax - all_pairs_max_ymin, min=0)
all_pairs_min_xmax = torch.min(x_max1, x_max2.t())
all_pairs_max_xmin = torch.max(x_min1, x_min2.t())
intersect_widths = torch.clamp(all_pairs_min_xmax - all_pairs_max_xmin, min=0)
return intersect_heights * intersect_widths
def iou(boxlist1: BoxList, boxlist2: BoxList):
"""Computes pairwise intersection-over-union between box collections.
Args:
boxlist1: BoxList holding N boxes
boxlist2: BoxList holding M boxes
Returns:
a tensor with shape [N, M] representing pairwise iou scores.
"""
intersections = intersection(boxlist1, boxlist2)
areas1 = area(boxlist1)
areas2 = area(boxlist2)
unions = areas1.unsqueeze(1) + areas2.unsqueeze(0) - intersections
return torch.where(intersections == 0.0, torch.zeros_like(intersections), intersections / unions)
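# Quick IoU sanity check (illustrative): two unit boxes overlapping by half share
# an intersection of 0.5 and a union of 1.5, so the IoU is ~0.333.
#
#   a = BoxList(torch.tensor([[0., 0., 1., 1.]], dtype=torch.float32))
#   b = BoxList(torch.tensor([[0., .5, 1., 1.5]], dtype=torch.float32))
#   iou(a, b)  # -> tensor([[0.3333]])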
@torch.jit.script
class IouSimilarity(object):
"""Class to compute similarity based on Intersection over Union (IOU) metric.
This class computes pairwise similarity between two BoxLists based on IOU.
"""
def __init__(self):
pass
def compare(self, boxlist1: BoxList, boxlist2: BoxList):
"""Computes matrix of pairwise similarity between BoxLists.
This op (to be overridden) computes a measure of pairwise similarity between
the boxes in the given BoxLists. Higher values indicate more similarity.
Note that this method simply measures similarity and does not explicitly
perform a matching.
Args:
boxlist1: BoxList holding N boxes.
boxlist2: BoxList holding M boxes.
Returns:
a (float32) tensor of shape [N, M] with pairwise similarity score.
"""
return iou(boxlist1, boxlist2)
| DeepLearningExamples-master | PyTorch/Detection/Efficientdet/effdet/object_detection/region_similarity_calculator.py |
# Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Argmax matcher implementation.
This class takes a similarity matrix and matches columns to rows based on the
maximum value per column. One can specify a matched_threshold to prevent
low-similarity columns from matching to rows (generally resulting in a negative
training example) and an unmatched_threshold to ignore the match (generally
resulting in neither a positive nor a negative training example).
This matcher is used in Fast(er)-RCNN.
Note: matchers are used in TargetAssigners. There is a create_target_assigner
factory function for popular implementations.
"""
import torch
from torch.nn.functional import one_hot
from .matcher import Match
from typing import Optional
def one_hot_bool(x, num_classes: int):
# for improved perf over PyTorch builtin one_hot, scatter to bool
onehot = torch.zeros(x.size(0), num_classes, device=x.device, dtype=torch.bool)
return onehot.scatter_(1, x.unsqueeze(1), 1)
@torch.jit.script
class ArgMaxMatcher(object): # cannot inherit with torchscript
"""Matcher based on highest value.
This class computes matches from a similarity matrix. Each column is matched
to a single row.
To support object detection target assignment this class enables setting both
matched_threshold (upper threshold) and unmatched_threshold (lower thresholds)
defining three categories of similarity which define whether examples are
positive, negative, or ignored:
(1) similarity >= matched_threshold: Highest similarity. Matched/Positive!
(2) matched_threshold > similarity >= unmatched_threshold: Medium similarity.
Depending on negatives_lower_than_unmatched, this is either
Unmatched/Negative OR Ignore.
(3) unmatched_threshold > similarity: Lowest similarity. Depending on flag
negatives_lower_than_unmatched, either Unmatched/Negative OR Ignore.
For ignored matches this class sets the values in the Match object to -2.
"""
def __init__(self,
matched_threshold: float,
unmatched_threshold: Optional[float] = None,
negatives_lower_than_unmatched: bool = True,
force_match_for_each_row: bool = False):
"""Construct ArgMaxMatcher.
Args:
matched_threshold: Threshold for positive matches. Positive if
sim >= matched_threshold, where sim is the maximum value of the
similarity matrix for a given column. Set to None for no threshold.
unmatched_threshold: Threshold for negative matches. Negative if
sim < unmatched_threshold. Defaults to matched_threshold
when set to None.
negatives_lower_than_unmatched: Boolean which defaults to True. If True
then negative matches are the ones below the unmatched_threshold,
whereas ignored matches are in between the matched and unmatched
threshold. If False, then negative matches are in between the matched
and unmatched threshold, and everything lower than unmatched is ignored.
force_match_for_each_row: If True, ensures that each row is matched to
at least one column (which is not guaranteed otherwise if the
matched_threshold is high). Defaults to False. See
argmax_matcher_test.testMatcherForceMatch() for an example.
Raises:
ValueError: if unmatched_threshold is set but matched_threshold is not set
or if unmatched_threshold > matched_threshold.
"""
if (matched_threshold is None) and (unmatched_threshold is not None):
raise ValueError('Need to also define matched_threshold when unmatched_threshold is defined')
self._matched_threshold = matched_threshold
self._unmatched_threshold: float = 0.
if unmatched_threshold is None:
self._unmatched_threshold = matched_threshold
else:
if unmatched_threshold > matched_threshold:
raise ValueError('unmatched_threshold needs to be smaller or equal to matched_threshold')
self._unmatched_threshold = unmatched_threshold
if not negatives_lower_than_unmatched:
if self._unmatched_threshold == self._matched_threshold:
                raise ValueError(
                    'When negatives are in between matched and unmatched thresholds, these '
                    'cannot be of equal value. matched: %s, unmatched: %s' %
                    (self._matched_threshold, self._unmatched_threshold))
self._force_match_for_each_row = force_match_for_each_row
self._negatives_lower_than_unmatched = negatives_lower_than_unmatched
def _match_when_rows_are_empty(self, similarity_matrix):
"""Performs matching when the rows of similarity matrix are empty.
When the rows are empty, all detections are false positives. So we return
a tensor of -1's to indicate that the columns do not match to any rows.
Returns:
matches: int32 tensor indicating the row each column matches to.
"""
        return -1 * torch.ones(similarity_matrix.shape[1], dtype=torch.long, device=similarity_matrix.device)
def _match_when_rows_are_non_empty(self, similarity_matrix):
"""Performs matching when the rows of similarity matrix are non empty.
Returns:
matches: int32 tensor indicating the row each column matches to.
"""
# Matches for each column
matched_vals, matches = torch.max(similarity_matrix, 0)
# Deal with matched and unmatched threshold
if self._matched_threshold is not None:
            # Boolean indicators of ignored and unmatched columns
below_unmatched_threshold = self._unmatched_threshold > matched_vals
between_thresholds = (matched_vals >= self._unmatched_threshold) & \
(self._matched_threshold > matched_vals)
if self._negatives_lower_than_unmatched:
matches = self._set_values_using_indicator(matches, below_unmatched_threshold, -1)
matches = self._set_values_using_indicator(matches, between_thresholds, -2)
else:
matches = self._set_values_using_indicator(matches, below_unmatched_threshold, -2)
matches = self._set_values_using_indicator(matches, between_thresholds, -1)
if self._force_match_for_each_row:
force_match_column_ids = torch.argmax(similarity_matrix, 1)
force_match_column_indicators = one_hot_bool(force_match_column_ids, similarity_matrix.shape[1])
force_match_column_mask, force_match_row_ids = torch.max(force_match_column_indicators, 0)
final_matches = torch.where(force_match_column_mask, force_match_row_ids, matches)
return final_matches
else:
return matches
def match(self, similarity_matrix):
"""Tries to match each column of the similarity matrix to a row.
Args:
similarity_matrix: tensor of shape [N, M] representing any similarity metric.
Returns:
Match object with corresponding matches for each of M columns.
"""
if similarity_matrix.shape[0] == 0:
return Match(self._match_when_rows_are_empty(similarity_matrix))
else:
return Match(self._match_when_rows_are_non_empty(similarity_matrix))
def _set_values_using_indicator(self, x, indicator, val: int):
"""Set the indicated fields of x to val.
Args:
x: tensor.
indicator: boolean with same shape as x.
val: scalar with value to set.
Returns:
modified tensor.
"""
indicator = indicator.to(dtype=x.dtype)
return x * (1 - indicator) + val * indicator
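# Threshold behaviour sketch (illustrative numbers): with matched_threshold=0.7,
# unmatched_threshold=0.3 and a 1x3 similarity matrix, the three columns fall into
# the matched / ignored / unmatched buckets described in the class docstring.
#
#   matcher = ArgMaxMatcher(matched_threshold=0.7, unmatched_threshold=0.3)
#   matcher.match(torch.tensor([[0.8, 0.5, 0.1]])).match_results
#   # -> tensor([0, -2, -1])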
| DeepLearningExamples-master | PyTorch/Detection/Efficientdet/effdet/object_detection/argmax_matcher.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import os
import pynvml
pynvml.nvmlInit()
def systemGetDriverVersion():
return pynvml.nvmlSystemGetDriverVersion()
def deviceGetCount():
return pynvml.nvmlDeviceGetCount()
class device:
# assume nvml returns list of 64 bit ints
_nvml_affinity_elements = math.ceil(os.cpu_count() / 64)
def __init__(self, device_idx):
super().__init__()
self.handle = pynvml.nvmlDeviceGetHandleByIndex(device_idx)
def getName(self):
return pynvml.nvmlDeviceGetName(self.handle)
def getCpuAffinity(self):
affinity_string = ''
for j in pynvml.nvmlDeviceGetCpuAffinity(
self.handle, device._nvml_affinity_elements
):
# assume nvml returns list of 64 bit ints
affinity_string = '{:064b}'.format(j) + affinity_string
affinity_list = [int(x) for x in affinity_string]
affinity_list.reverse() # so core 0 is in 0th element of list
return [i for i, e in enumerate(affinity_list) if e != 0]
def set_affinity(gpu_id=None):
if gpu_id is None:
gpu_id = int(os.getenv('LOCAL_RANK', 0))
dev = device(gpu_id)
os.sched_setaffinity(0, dev.getCpuAffinity())
# list of ints representing the logical cores this process is now affinitied with
return os.sched_getaffinity(0)
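# Typical call-site sketch (assumed): pin the current process to the CPU cores that
# NVML reports as local to the GPU used by this rank.
#
#   affinity = set_affinity(gpu_id=int(os.getenv('LOCAL_RANK', 0)))
#   print('process affinitied with cores:', affinity)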
| DeepLearningExamples-master | PyTorch/Detection/Efficientdet/utils/gpu_affinity.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from copy import deepcopy
import logging
import logging.handlers
from collections import OrderedDict
_logger = logging.getLogger(__name__)
class ModelEma:
""" Model Exponential Moving Average
Keep a moving average of everything in the model state_dict (parameters and buffers).
This is intended to allow functionality like
https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage
A smoothed version of the weights is necessary for some training schemes to perform well.
E.g. Google's hyper-params for training MNASNet, MobileNet-V3, EfficientNet, etc that use
RMSprop with a short 2.4-3 epoch decay period and slow LR decay rate of .96-.99 requires EMA
smoothing of weights to match results. Pay attention to the decay constant you are using
relative to your update count per epoch.
To keep EMA from using GPU resources, set device='cpu'. This will save a bit of memory but
disable validation of the EMA weights. Validation will have to be done manually in a separate
process, or after the training stops converging.
This class is sensitive where it is initialized in the sequence of model init,
GPU assignment and distributed training wrappers.
I've tested with the sequence in my own train.py for torch.DataParallel, apex.DDP, and single-GPU.
"""
def __init__(self, model, decay=0.9999, device='', resume='', remove_params=[]):
# make a copy of the model for accumulating moving average of weights
self.ema = deepcopy(model)
self.ema.eval()
self.decay = decay
self.remove_params = remove_params
self.device = device # perform ema on different device from model if set
if device:
self.ema.to(device=device)
self.ema_has_module = hasattr(self.ema, 'module')
if resume:
self._load_checkpoint(resume)
for p in self.ema.parameters():
p.requires_grad_(False)
def _load_checkpoint(self, checkpoint_path):
checkpoint = torch.load(checkpoint_path, map_location='cpu')
assert isinstance(checkpoint, dict)
if 'state_dict_ema' in checkpoint:
new_state_dict = OrderedDict()
for k, v in checkpoint['state_dict_ema'].items():
# Check if key k is in the remove_params list
if any(remove_str in k for remove_str in self.remove_params):
continue
# ema model may have been wrapped by DataParallel, and need module prefix
if self.ema_has_module:
name = 'module.' + k if not k.startswith('module') else k
else:
name = k
new_state_dict[name] = v
if len(self.remove_params) > 0:
this_dict = self.ema.state_dict()
this_dict.update(new_state_dict)
self.ema.load_state_dict(this_dict)
else:
self.ema.load_state_dict(new_state_dict)
_logger.info("Loaded state_dict_ema")
else:
_logger.warning("Failed to find state_dict_ema, starting from loaded model weights")
def update(self, model):
x = []
y = []
needs_module = hasattr(model, 'module') and not self.ema_has_module
with torch.no_grad():
for ema_v, model_v in zip(self.ema.state_dict().values(), model.state_dict().values()):
x.append(ema_v.type(torch.float32))
if self.device:
model_v = model_v.detach().to(device=self.device)
y.append(model_v.type(torch.float32))
torch._foreach_mul_(x, self.decay)
torch._foreach_add_(x, y, alpha=1.-self.decay)
for ind, ema_v in enumerate(self.ema.state_dict().values()):
ema_v.copy_(x[ind]) | DeepLearningExamples-master | PyTorch/Detection/Efficientdet/utils/model_ema.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License. | DeepLearningExamples-master | PyTorch/Detection/Efficientdet/utils/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from torch import optim as optim
from typing import Union, Iterable
_tensor_or_tensors = Union[torch.Tensor, Iterable[torch.Tensor]]
try:
from apex.optimizers import FusedNovoGrad, FusedAdam, FusedLAMB, FusedSGD
from apex.multi_tensor_apply import multi_tensor_applier
from amp_C import multi_tensor_l2norm
has_apex = True
except ImportError:
has_apex = False
def clip_grad_norm_2(parameters: _tensor_or_tensors, max_norm: float):
dummy_overflow_buf = torch.cuda.IntTensor([0])
if isinstance(parameters, torch.Tensor):
parameters = [parameters]
parameters = [p for p in parameters if p.grad is not None]
grads = [p.grad for p in parameters]
max_norm = float(max_norm)
if len(parameters) == 0:
return torch.tensor(0.)
device = parameters[0].grad.device
total_norm, _ = multi_tensor_applier(
multi_tensor_l2norm,
dummy_overflow_buf,
[grads],
False)
clip_coef = max_norm / (total_norm + 1e-6)
if clip_coef < 1:
torch._foreach_mul_(grads, clip_coef.item())
return total_norm
def add_weight_decay(model, weight_decay=1e-5, skip_list=()):
decay = []
no_decay = []
for name, param in model.named_parameters():
if not param.requires_grad:
continue # frozen weights
if len(param.shape) == 1 or name.endswith(".bias") or ".bn" in name or name in skip_list:
no_decay.append(param)
else:
decay.append(param)
return [
{'params': no_decay, 'weight_decay': 0.},
{'params': decay, 'weight_decay': weight_decay}]
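# Hedged usage sketch for create_optimizer: the `args` fields below are assumptions
# based on how they are read in the function (opt, lr, weight_decay, momentum,
# opt_eps, opt_betas).
#
#   args = argparse.Namespace(opt='momentum', lr=0.1, weight_decay=4e-5,
#                             momentum=0.9, opt_eps=None, opt_betas=None)
#   optimizer = create_optimizer(args, model)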
def create_optimizer(args, model, filter_bias_and_bn=True):
opt_lower = args.opt.lower()
weight_decay = args.weight_decay
if weight_decay and filter_bias_and_bn:
skip = {}
if hasattr(model, 'no_weight_decay'):
skip = model.no_weight_decay
parameters = add_weight_decay(model, weight_decay, skip)
weight_decay = 0.
else:
parameters = model.parameters()
if 'fused' in opt_lower:
assert has_apex and torch.cuda.is_available(), 'APEX and CUDA required for fused optimizers'
opt_split = opt_lower.split('_')
opt_lower = opt_split[-1]
opt_args = dict(lr=args.lr, weight_decay=weight_decay)
if hasattr(args, 'opt_eps') and args.opt_eps is not None and opt_lower not in ['sgd', 'momentum', 'fusedmomentum', 'fusedsgd']:
opt_args['eps'] = args.opt_eps
if hasattr(args, 'opt_betas') and args.opt_betas is not None:
opt_args['betas'] = args.opt_betas
if opt_lower == 'sgd' or opt_lower == 'nesterov':
optimizer = optim.SGD(parameters, momentum=args.momentum, nesterov=True, **opt_args)
elif opt_lower == 'momentum':
optimizer = optim.SGD(parameters, momentum=args.momentum, nesterov=False, **opt_args)
elif opt_lower == 'adam':
optimizer = optim.Adam(parameters, **opt_args)
elif opt_lower == 'adamw':
optimizer = optim.AdamW(parameters, **opt_args)
elif opt_lower == 'fusedsgd':
optimizer = FusedSGD(parameters, momentum=args.momentum, nesterov=True, **opt_args)
elif opt_lower == 'fusedmomentum':
optimizer = FusedSGD(parameters, momentum=args.momentum, nesterov=False, **opt_args)
elif opt_lower == 'fusedadam':
optimizer = FusedAdam(parameters, adam_w_mode=False, **opt_args)
elif opt_lower == 'fusedadamw':
optimizer = FusedAdam(parameters, adam_w_mode=True, **opt_args)
elif opt_lower == 'fusedlamb':
optimizer = FusedLAMB(parameters, **opt_args)
elif opt_lower == 'fusednovograd':
opt_args.setdefault('betas', (0.95, 0.98))
optimizer = FusedNovoGrad(parameters, **opt_args)
else:
assert False and "Invalid optimizer"
raise ValueError
if len(opt_split) > 1:
if opt_split[0] == 'lookahead':
            optimizer = Lookahead(optimizer)  # NOTE: assumes a Lookahead wrapper (e.g. timm.optim.Lookahead) is importable in this scope
return optimizer | DeepLearningExamples-master | PyTorch/Detection/Efficientdet/utils/optimizers.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
from collections import OrderedDict
import dllogger as DLLogger
from dllogger import StdOutBackend, JSONStreamBackend, Verbosity
import torch
from torch import distributed as dist
import logging
import logging.handlers
from .model_ema import ModelEma
_logger = logging.getLogger(__name__)
def reduce_tensor(tensor, n):
rt = tensor.clone()
dist.all_reduce(rt, op=dist.ReduceOp.SUM)
rt /= n
return rt
def unwrap_model(model):
if isinstance(model, ModelEma):
return unwrap_model(model.ema)
else:
return model.module if hasattr(model, 'module') else model
def get_state_dict(model, unwrap_fn=unwrap_model):
return unwrap_fn(model).state_dict()
def setup_dllogger(rank, enabled=True, filename='log.json'):
if enabled and rank == 0:
backends = [
StdOutBackend(Verbosity.DEFAULT),
JSONStreamBackend(
Verbosity.VERBOSE,
filename,
),
]
DLLogger.init(backends)
else:
DLLogger.init([])
def get_latest_file(files):
prefix = files[0].split("checkpoint")[0]
max_checkpoint_number = max([int(f.split("checkpoint_")[1].split('.')[0]) for f in files]) # checkpoint_name_convention = checkpoint_ + number + .pth.tar
return prefix + "checkpoint_" + str(max_checkpoint_number) + ".pth.tar"
def get_latest_checkpoint(dir_path):
if not os.path.exists(dir_path):
print("{} does not exist to load checkpoint".format(dir_path))
return None
files = [os.path.join(dir_path, f) for f in sorted(os.listdir(dir_path)) if "checkpoint" in f]
print("... Looking inside {}".format(dir_path))
if len(files) > 0:
return get_latest_file(files)
return None
class FormatterNoInfo(logging.Formatter):
def __init__(self, fmt='%(levelname)s: %(message)s'):
logging.Formatter.__init__(self, fmt)
def format(self, record):
if record.levelno == logging.INFO:
return str(record.getMessage())
return logging.Formatter.format(self, record)
def setup_default_logging(default_level=logging.INFO, log_path=''):
console_handler = logging.StreamHandler()
console_handler.setFormatter(FormatterNoInfo())
logging.root.addHandler(console_handler)
logging.root.setLevel(default_level)
if log_path:
file_handler = logging.handlers.RotatingFileHandler(log_path, maxBytes=(1024 ** 2 * 2), backupCount=3)
file_formatter = logging.Formatter("%(asctime)s - %(name)20s: [%(levelname)8s] - %(message)s")
file_handler.setFormatter(file_formatter)
logging.root.addHandler(file_handler)
class CheckpointSaver:
def __init__(
self,
args=None,
checkpoint_dir='',
unwrap_fn=unwrap_model):
# objects to save state_dicts of
self.args = args
# state
self.checkpoint_files = [] # (filename, metric) tuples in order of decreasing betterness
self.best_epoch = None
self.best_metric = None
# config
self.checkpoint_dir = checkpoint_dir
self.extension = '.pth.tar'
self.unwrap_fn = unwrap_fn
def save_checkpoint(self, model, optimizer, epoch, scaler=None, model_ema=None, metric=None, is_best=False):
assert epoch >= 0
tmp_save_path = os.path.join(self.checkpoint_dir, "tmp" + self.extension)
actual_save_path = os.path.join(self.checkpoint_dir, "checkpoint_" + str(epoch) + self.extension)
self._save(model, optimizer, tmp_save_path, actual_save_path, epoch, scaler, model_ema, metric, is_best)
return (None, None) if self.best_metric is None else (self.best_metric, self.best_epoch)
def _save(self, model, optimizer, tmp_save_path, save_path, epoch, scaler=None, model_ema=None, metric=None, is_best=False):
save_state = {
'epoch': epoch,
'arch': type(model).__name__.lower(),
'state_dict': get_state_dict(model, self.unwrap_fn),
'optimizer': optimizer.state_dict(),
'version': 2, # version < 2 increments epoch before save
}
if self.args is not None:
save_state['arch'] = self.args.model
save_state['args'] = self.args
if scaler is not None:
save_state['scaler'] = scaler.state_dict()
if model_ema is not None:
save_state['state_dict_ema'] = get_state_dict(model_ema, self.unwrap_fn)
if metric is not None:
save_state['metric'] = metric
torch.save(save_state, tmp_save_path)
os.rename(tmp_save_path, save_path)
if is_best:
shutil.copyfile(
save_path, os.path.join(self.checkpoint_dir, "model_best" + self.extension)
)
self.best_epoch = epoch
self.best_metric = metric
print("Checkpoint saving for {} epoch is done...".format(epoch))
class AverageMeter:
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
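# --- Illustrative usage (an addition, not part of the original file) ---
# AverageMeter keeps a running weighted average; `n` is typically the batch size.
def _example_average_meter():
    meter = AverageMeter()
    meter.update(0.5, n=8)
    meter.update(0.3, n=8)
    return meter.avg  # (0.5 * 8 + 0.3 * 8) / 16 == 0.4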
def freeze_layers_fn(model, freeze_layers=[]):
for name, param in model.named_parameters():
if any(layer in name for layer in freeze_layers):
param.requires_grad = False
def load_state_dict(checkpoint_path, has_module, use_ema=False, remove_params=[]):
if checkpoint_path and os.path.isfile(checkpoint_path):
checkpoint = torch.load(checkpoint_path, map_location='cpu')
state_dict_key = 'state_dict'
if isinstance(checkpoint, dict):
if use_ema and 'state_dict_ema' in checkpoint:
state_dict_key = 'state_dict_ema'
if state_dict_key and state_dict_key in checkpoint:
ckpt = checkpoint[state_dict_key]
_logger.info('Restoring model state from checkpoint...')
else:
ckpt = checkpoint
_logger.info('Restoring model state from state_dict ...')
new_state_dict = OrderedDict()
for k, v in ckpt.items():
if any(remove_str in k for remove_str in remove_params):
continue
# strip `module.` prefix
if not has_module and k.startswith('module'):
name = k[7:]
elif k.startswith('model'):
name = k[6:]
elif has_module and not k.startswith('module'):
name = 'module.' + k
else:
name = k
new_state_dict[name] = v
state_dict = new_state_dict
_logger.info("Loaded {} from checkpoint '{}'".format(state_dict_key, checkpoint_path))
return state_dict, checkpoint
else:
_logger.error("No checkpoint found at '{}'".format(checkpoint_path))
raise FileNotFoundError()
def load_checkpoint(model, checkpoint_path, use_ema=False, strict=True, remove_params=[]):
has_module = hasattr(model, 'module')
if has_module:
_logger.info('model has attribute module...')
else:
_logger.info('model does not have attribute module...')
state_dict, checkpoint = load_state_dict(checkpoint_path, has_module, use_ema, remove_params)
if len(remove_params) > 0:
this_dict = model.state_dict()
this_dict.update(state_dict)
model.load_state_dict(this_dict, strict=strict)
else:
model.load_state_dict(state_dict, strict=strict)
return checkpoint
def resume_checkpoint(model, checkpoint_path, optimizer=None, loss_scaler=None, log_info=True, remove_params=[]):
resume_epoch = None
checkpoint = load_checkpoint(model, checkpoint_path=checkpoint_path, use_ema=False, strict=False, remove_params=remove_params)
resume_epoch = 0
if 'epoch' in checkpoint:
resume_epoch = checkpoint['epoch'] + 1
if log_info:
_logger.info("Loaded checkpoint '{}' (epoch {})".format(checkpoint_path, checkpoint['epoch']))
return checkpoint, resume_epoch | DeepLearningExamples-master | PyTorch/Detection/Efficientdet/utils/utils.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import torch
from collections import OrderedDict
parser = argparse.ArgumentParser(description='PyTorch ImageNet Validation')
parser.add_argument('--checkpoint_path', default='/checkpoints/model_best.pth.tar', help='path to checkpoint')
parser.add_argument('--state_dict_path', default='/checkpoints/Effdet_B0.pth', help='path to save processed checkpoint state_dict to.')
args = parser.parse_args()
ckpt = torch.load(args.checkpoint_path)
print("Checkpoint keys {}".format([k for k in ckpt.keys()]))
if 'state_dict_ema' in ckpt:
print("... state_dict found in ckpt")
state_dict = ckpt['state_dict_ema']
new_state_dict = OrderedDict()
for k, v in state_dict.items():
# strip `module.` prefix
if k.startswith('module'):
name = k[7:]
elif k.startswith('model'):
name = k[6:]
else:
name = k
new_state_dict[name] = v
print("... state_dict saving")
torch.save(new_state_dict, args.state_dict_path)
print("...End process")
| DeepLearningExamples-master | PyTorch/Detection/Efficientdet/utils/checkpoint_processing.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Any
import math
import numpy as np
import torch
class Scheduler:
""" Parameter Scheduler Base Class
A scheduler base class that can be used to schedule any optimizer parameter groups.
Unlike the builtin PyTorch schedulers, this is intended to be consistently called
* At the END of each epoch, before incrementing the epoch count, to calculate next epoch's value
* At the END of each optimizer update, after incrementing the update count, to calculate next update's value
The schedulers built on this should try to remain as stateless as possible (for simplicity).
This family of schedulers is attempting to avoid the confusion of the meaning of 'last_epoch'
and -1 values for special behaviour. All epoch and update counts must be tracked in the training
code and explicitly passed in to the schedulers on the corresponding step or step_update call.
Based on ideas from:
* https://github.com/pytorch/fairseq/tree/master/fairseq/optim/lr_scheduler
* https://github.com/allenai/allennlp/tree/master/allennlp/training/learning_rate_schedulers
"""
def __init__(self,
optimizer: torch.optim.Optimizer,
param_group_field: str,
noise_range_t=None,
noise_type='normal',
noise_pct=0.67,
noise_std=1.0,
noise_seed=None,
initialize: bool = True) -> None:
self.optimizer = optimizer
self.param_group_field = param_group_field
self._initial_param_group_field = f"initial_{param_group_field}"
if initialize:
for i, group in enumerate(self.optimizer.param_groups):
if param_group_field not in group:
raise KeyError(f"{param_group_field} missing from param_groups[{i}]")
group.setdefault(self._initial_param_group_field, group[param_group_field])
else:
for i, group in enumerate(self.optimizer.param_groups):
if self._initial_param_group_field not in group:
raise KeyError(f"{self._initial_param_group_field} missing from param_groups[{i}]")
self.base_values = [group[self._initial_param_group_field] for group in self.optimizer.param_groups]
self.metric = None # any point to having this for all?
self.noise_range_t = noise_range_t
self.noise_pct = noise_pct
self.noise_type = noise_type
self.noise_std = noise_std
self.noise_seed = noise_seed if noise_seed is not None else 42
self.update_groups(self.base_values)
def state_dict(self) -> Dict[str, Any]:
return {key: value for key, value in self.__dict__.items() if key != 'optimizer'}
def load_state_dict(self, state_dict: Dict[str, Any]) -> None:
self.__dict__.update(state_dict)
def get_epoch_values(self, epoch: int):
return None
def get_update_values(self, num_updates: int):
return None
def step(self, epoch: int, metric: float = None) -> None:
self.metric = metric
values = self.get_epoch_values(epoch)
if values is not None:
values = self._add_noise(values, epoch)
self.update_groups(values)
def step_update(self, num_updates: int, metric: float = None):
self.metric = metric
values = self.get_update_values(num_updates)
if values is not None:
values = self._add_noise(values, num_updates)
self.update_groups(values)
def update_groups(self, values):
if not isinstance(values, (list, tuple)):
values = [values] * len(self.optimizer.param_groups)
for param_group, value in zip(self.optimizer.param_groups, values):
param_group[self.param_group_field] = value
def _add_noise(self, lrs, t):
if self.noise_range_t is not None:
if isinstance(self.noise_range_t, (list, tuple)):
apply_noise = self.noise_range_t[0] <= t < self.noise_range_t[1]
else:
apply_noise = t >= self.noise_range_t
if apply_noise:
g = torch.Generator()
g.manual_seed(self.noise_seed + t)
if self.noise_type == 'normal':
while True:
# resample if noise out of percent limit, brute force but shouldn't spin much
noise = torch.randn(1, generator=g).item()
if abs(noise) < self.noise_pct:
break
else:
noise = 2 * (torch.rand(1, generator=g).item() - 0.5) * self.noise_pct
lrs = [v + v * noise for v in lrs]
return lrs
class CosineLRScheduler(Scheduler):
"""
Cosine decay with restarts.
This is described in the paper https://arxiv.org/abs/1608.03983.
Inspiration from
https://github.com/allenai/allennlp/blob/master/allennlp/training/learning_rate_schedulers/cosine.py
"""
def __init__(self,
optimizer: torch.optim.Optimizer,
t_initial: int,
t_mul: float = 1.,
lr_min: float = 0.,
decay_rate: float = 1.,
warmup_t=0,
warmup_lr_init=0,
warmup_prefix=False,
cycle_limit=0,
t_in_epochs=True,
noise_range_t=None,
noise_pct=0.67,
noise_std=1.0,
noise_seed=42,
initialize=True) -> None:
super().__init__(
optimizer, param_group_field="lr",
noise_range_t=noise_range_t, noise_pct=noise_pct, noise_std=noise_std, noise_seed=noise_seed,
initialize=initialize)
assert t_initial > 0
assert lr_min >= 0
if t_initial == 1 and t_mul == 1 and decay_rate == 1:
print("Cosine annealing scheduler will have no effect on the learning "
"rate since t_initial = t_mul = eta_mul = 1.")
self.t_initial = t_initial
self.t_mul = t_mul
self.lr_min = lr_min
self.decay_rate = decay_rate
self.cycle_limit = cycle_limit
self.warmup_t = warmup_t
self.warmup_lr_init = warmup_lr_init
self.warmup_prefix = warmup_prefix
self.t_in_epochs = t_in_epochs
if self.warmup_t:
self.warmup_steps = [(v - warmup_lr_init) / self.warmup_t for v in self.base_values]
super().update_groups(self.warmup_lr_init)
else:
self.warmup_steps = [1 for _ in self.base_values]
def _get_lr(self, t):
if t < self.warmup_t:
lrs = [self.warmup_lr_init + t * s for s in self.warmup_steps]
else:
if self.warmup_prefix:
t = t - self.warmup_t
if self.t_mul != 1:
i = math.floor(math.log(1 - t / self.t_initial * (1 - self.t_mul), self.t_mul))
t_i = self.t_mul ** i * self.t_initial
t_curr = t - (1 - self.t_mul ** i) / (1 - self.t_mul) * self.t_initial
else:
i = t // self.t_initial
t_i = self.t_initial
t_curr = t - (self.t_initial * i)
gamma = self.decay_rate ** i
lr_min = self.lr_min * gamma
lr_max_values = [v * gamma for v in self.base_values]
if self.cycle_limit == 0 or (self.cycle_limit > 0 and i < self.cycle_limit):
lrs = [
lr_min + 0.5 * (lr_max - lr_min) * (1 + math.cos(math.pi * t_curr / t_i)) for lr_max in lr_max_values
]
else:
lrs = [self.lr_min for _ in self.base_values]
return lrs
def get_epoch_values(self, epoch: int):
if self.t_in_epochs:
return self._get_lr(epoch)
else:
return None
def get_update_values(self, num_updates: int):
if not self.t_in_epochs:
return self._get_lr(num_updates)
else:
return None
def get_cycle_length(self, cycles=0):
if not cycles:
cycles = self.cycle_limit
cycles = max(1, cycles)
if self.t_mul == 1.0:
return self.t_initial * cycles
else:
return int(math.floor(-self.t_initial * (self.t_mul ** cycles - 1) / (1 - self.t_mul)))
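# --- Illustrative usage sketch (an addition, not part of the original file) ---
# As the Scheduler docstring above states, step() is called at the END of each epoch
# with the epoch counter tracked by the training loop; the model and optimizer below
# are dummies used only to show the call pattern.
def _example_cosine_schedule():
    model = torch.nn.Linear(4, 2)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    scheduler = CosineLRScheduler(
        optimizer, t_initial=10, lr_min=1e-5,
        warmup_t=2, warmup_lr_init=1e-4, cycle_limit=1)
    lrs = []
    for epoch in range(10):
        lrs.append(optimizer.param_groups[0]['lr'])
        # ... train one epoch here ...
        scheduler.step(epoch + 1)  # compute the next epoch's learning rate
    return lrs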
def create_scheduler(args, optimizer):
num_epochs = args.epochs
if getattr(args, 'lr_noise', None) is not None:
lr_noise = getattr(args, 'lr_noise')
if isinstance(lr_noise, (list, tuple)):
noise_range = [n * num_epochs for n in lr_noise]
if len(noise_range) == 1:
noise_range = noise_range[0]
else:
noise_range = lr_noise * num_epochs
else:
noise_range = None
lr_scheduler = None
if args.sched == 'cosine':
lr_scheduler = CosineLRScheduler(
optimizer,
t_initial=num_epochs,
t_mul=getattr(args, 'lr_cycle_mul', 1.),
lr_min=args.min_lr,
decay_rate=args.decay_rate,
warmup_lr_init=args.warmup_lr,
warmup_t=args.warmup_epochs,
cycle_limit=getattr(args, 'lr_cycle_limit', 1),
t_in_epochs=True,
noise_range_t=noise_range,
noise_pct=getattr(args, 'lr_noise_pct', 0.67),
noise_std=getattr(args, 'lr_noise_std', 1.),
noise_seed=getattr(args, 'seed', 42),
)
num_epochs = lr_scheduler.get_cycle_length() + args.cooldown_epochs
else:
raise ValueError("Invalid scheduler '{}'; only 'cosine' is supported.".format(args.sched))
return lr_scheduler, num_epochs | DeepLearningExamples-master | PyTorch/Detection/Efficientdet/utils/scheduler.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import torch
from torch.utils.data import Sampler
import torch.distributed as dist
class OrderedDistributedSampler(Sampler):
"""Sampler that restricts data loading to a subset of the dataset.
It is especially useful in conjunction with
:class:`torch.nn.parallel.DistributedDataParallel`. In such case, each
process can pass a DistributedSampler instance as a DataLoader sampler,
and load a subset of the original dataset that is exclusive to it.
.. note::
Dataset is assumed to be of constant size.
Arguments:
dataset: Dataset used for sampling.
num_replicas (optional): Number of processes participating in
distributed training.
rank (optional): Rank of the current process within num_replicas.
"""
def __init__(self, dataset, num_replicas=None, rank=None):
if num_replicas is None:
if not dist.is_available():
raise RuntimeError("Requires distributed package to be available")
num_replicas = dist.get_world_size()
if rank is None:
if not dist.is_available():
raise RuntimeError("Requires distributed package to be available")
rank = dist.get_rank()
self.dataset = dataset
self.num_replicas = num_replicas
self.rank = rank
self.num_samples = int(math.ceil(len(self.dataset) * 1.0 / self.num_replicas))
self.total_size = self.num_samples * self.num_replicas
def __iter__(self):
indices = list(range(len(self.dataset)))
# add extra samples to make it evenly divisible
indices += indices[:(self.total_size - len(indices))]
assert len(indices) == self.total_size
# subsample
indices = indices[self.rank:self.total_size:self.num_replicas]
assert len(indices) == self.num_samples
return iter(indices)
def __len__(self):
return self.num_samples | DeepLearningExamples-master | PyTorch/Detection/Efficientdet/utils/distributed_sampler.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import argparse
import glob
import tensorflow as tf
import math
import numpy as np
import itertools
import ipdb
import os
import h5py
import cv2
import sys
import json
import matplotlib.pyplot as plt
from collections import Counter
from google.cloud import storage
tf.compat.v1.enable_eager_execution()
from waymo_open_dataset.utils import range_image_utils
from waymo_open_dataset.utils import transform_utils
from waymo_open_dataset.utils import frame_utils
from waymo_open_dataset import dataset_pb2 as open_dataset
args = None
def hash(m, n, t):
return int(int(m)*10000000 + int(n)*100 + int(t))
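# --- Illustrative example (an addition, not part of the original file) ---
# The composite image id packs segment, frame and camera ids into one integer,
# e.g. segment 3, frame 25, camera 2 -> 3*10000000 + 25*100 + 2 = 30002502.
def _example_hash():
    return hash(3, 25, 2)  # -> 30002502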
def parse_args():
global args, seg_id
parser = argparse.ArgumentParser()
parser.add_argument("--dataset", choices=["training", "validation"], default="validation")
parser.add_argument("--tf-dir", default="/workspace/data/waymo_tfrecords_val")
parser.add_argument("--out-dir", default="/workspace/data/waymo_coco_format_val")
parser.add_argument("--seg-min", default=0, type=int)
parser.add_argument("--seg-max", default=1, type=int)
parser.add_argument("--log-file", default="waymo-converter")
args = parser.parse_args()
# set starting seg id
seg_id = args.seg_min
return args
def setup_logging(args):
logging.basicConfig(filename="/results/{}.log".format(args.log_file),
# filemode="w",
format="%(asctime)s:%(levelname)s:%(message)s",
datefmt="%m/%d/%Y %I:%M:%S %p",
level=logging.DEBUG)
logging.info('Logging setup done!')
def create_dirs(args):
# create intermediate and out directories
os.makedirs(args.tf_dir, exist_ok=True)
os.makedirs(args.out_dir, exist_ok=True)
args.images_dir = os.path.join(args.out_dir, "images")
args.annotations_dir = os.path.join(args.out_dir, "annotations")
os.makedirs(args.images_dir, exist_ok=True)
os.makedirs(args.annotations_dir, exist_ok=True)
logging.info("Created images and annotations directories: {} {}".format(
args.images_dir, args.annotations_dir))
# set global frame and annotations id
seg_id = 0
frame_id = 0
annotation_id = 0
images_content = []
annotations_content = []
info = {
u'description': u'COCO 2014 Dataset',
u'url': u'http://cocodataset.org',
u'version': u'1.0',
u'year': 2014,
u'contributor': u'COCO Consortium',
u'date_created': u'2017/09/01'
}
licenses = [{
u'url': u'http://creativecommons.org/licenses/by-nc-sa/2.0/',
u'id': 1,
u'name': u'Attribution-NonCommercial-ShareAlike License'
}, {
u'url': u'http://creativecommons.org/licenses/by-nc/2.0/',
u'id': 2,
u'name': u'Attribution-NonCommercial License'
}, {
u'url': u'http://creativecommons.org/licenses/by-nc-nd/2.0/',
u'id': 3,
u'name': u'Attribution-NonCommercial-NoDerivs License'
}, {
u'url': u'http://creativecommons.org/licenses/by/2.0/',
u'id': 4,
u'name': u'Attribution License'
}, {
u'url': u'http://creativecommons.org/licenses/by-sa/2.0/',
u'id': 5,
u'name': u'Attribution-ShareAlike License'
}, {
u'url': u'http://creativecommons.org/licenses/by-nd/2.0/',
u'id': 6,
u'name': u'Attribution-NoDerivs License'
}, {
u'url': u'http://flickr.com/commons/usage/',
u'id': 7,
u'name': u'No known copyright restrictions'
}, {
u'url': u'http://www.usa.gov/copyright.shtml',
u'id': 8,
u'name': u'United States Government Work'
}]
#dataset-specific
category = [{
u'supercategory': u'object',
u'id': 1,
u'name': u'vehicle'
}, {
u'supercategory': u'object',
u'id': 2,
u'name': u'pedestrian'
}, {
u'supercategory': u'object',
u'id': 3,
u'name': u'cyclist'
}]
# Function to convert Waymo TFrecord to COCO format
def convert(tfrecord):
global frame_id, seg_id, annotation_id, images_content, annotations_content
try:
dataset = tf.data.TFRecordDataset(tfrecord, compression_type='')
num_frames = 0
images = []
annotations = []
all_labels = []
# try:
for data in dataset:
frame_id += 1
num_frames += 1
frame = open_dataset.Frame()
frame.ParseFromString(bytearray(data.numpy()))
image_id = 1
# iterate across images in frame - front, side, etc.,
for index, camera_image in enumerate(frame.images):
output_image = tf.image.decode_jpeg(camera_image.image).numpy()
# iterate across labels in frame - front, side, etc.,
for camera_labels in frame.camera_labels:
# Ignore camera labels that do not correspond to this camera.
if camera_labels.name != camera_image.name:
continue
for image_labels in camera_labels.labels:
#Since label 3 doesn't exist
if image_labels.type == 4:
image_labels.type = 3
annotations.append({
"image_id":
hash(seg_id, frame_id, image_id),
"area":
image_labels.box.width * image_labels.box.length,
"bbox": [
image_labels.box.center_x -
image_labels.box.length / 2.,
image_labels.box.center_y -
image_labels.box.width / 2.,
image_labels.box.length, image_labels.box.width
],
"category_id":
image_labels.type,
"iscrowd":
0,
"id":
annotation_id
})
all_labels.append(image_labels.type)
annotation_id += 1
h, w, c = output_image.shape
plt.imsave("{}/{}_{}_{}.jpg".format(args.images_dir, seg_id, frame_id, image_id),
output_image,
cmap=None)
images.append({
u'license': 1,
u'file_name': "{}_{}_{}.jpg".format(seg_id, frame_id, image_id),
u'waymo_url': None,
u'height': h,
u'width': w,
u'date_captured': u'2013-11-14 16:28:13',
u'flickr_url': None,
u'id': hash(seg_id, frame_id, image_id)
})
image_id += 1
logging.info("Converted {} frames in {}".format(num_frames, tfrecord))
images_content += images
annotations_content += annotations
logging.info("# images: {} # annotations: {}".format(
len(images), len(annotations)))
logging.info("# Label spread: {}".format(Counter(all_labels)))
except Exception:
logging.info("Corrupted record {}".format(tfrecord))
# combine annotations, images data per segment into one annotations.json file
def combine():
global images_content, annotations_content
all_data = {
"info": info,
"images": images_content,
"licenses": licenses,
"annotations": annotations_content,
"categories": category
}
with open("{}/annotations-{}-{}.json".format(args.annotations_dir, args.seg_min, args.seg_max), 'w') as outfile:
json.dump(all_data, outfile)
# download waymo data
def download_and_convert(args):
global seg_id, frame_id
if args.dataset == "training":
num_segs = 32
if args.dataset == "validation":
num_segs = 8
logging.info("Number of segments in dataset: {}".format(num_segs))
logging.info("Segments to process: {} to {}".format(args.seg_min, args.seg_max))
logging.info("Creating google storage client to access waymo bucket")
storage_client = storage.Client(project=None)
bucket_name = "waymo_open_dataset_v_1_2_0"
bucket = storage_client.bucket(bucket_name)
while seg_id < args.seg_max:
# copy from bucket
frame_id = 0
source_blob_name = '{dataset}/{dataset}_{:04}.tar'.format(
seg_id, dataset=args.dataset)
try:
blob = bucket.blob(source_blob_name)
blob.download_to_filename(os.path.join(args.tf_dir, "{}_{:04}.tar".format(args.dataset, seg_id)))
except AssertionError as err:
logging.exception(
"Failed to download segment {}. Make sure GOOGLE_APPLICATION_CREDENTIALS is set and you have access to gs://waymo_open_dataset_v_1_2_0"
.format(seg_id))
sys.exit()
logging.info("Extracting tfrecords from segment: {}_{:04}".format(args.dataset, seg_id))
os.system("cd {}; tar -xvf {}_{:04}.tar".format(args.tf_dir, args.dataset, seg_id))
tfrecords = glob.glob("{}/*.tfrecord".format(args.tf_dir))
# extract data from each record
for record_id, record in enumerate(tfrecords):
if "with_camera_labels" in record:
logging.info("Processing record # {}: {}".format(record_id, record))
convert(record)
else:
logging.info("Skipping record # {}: {}".format(record_id, record))
logging.info("Deleting record # {}: {}...".format(record_id, record))
os.remove(record)
logging.info("Processed {} records".format(len(tfrecords)))
os.remove("{}/{}_{:04}.tar".format(args.tf_dir, args.dataset, seg_id))
os.remove("{}/LICENSE".format(args.tf_dir))
seg_id += 1
# write annotations.json
combine()
if __name__ == "__main__":
# trigger download and conversion of Waymo data
print("Usage: python waymo_data_converter.py --dataset <validation/training> --tf-dir <empty scratch pad dir> --out-dir <empty coco format output dir> --seg-min <0 or any starting seg id> --seg-max <32 - train, 8 - validation or any ending seg id> --log-file <name of log file which will be written to /results>")
args = parse_args()
setup_logging(args)
create_dirs(args)
logging.info("Running on dataset: {} \ntf records dir: {} \ncoco format out dir: {}".format(args.dataset, args.tf_dir, args.out_dir))
download_and_convert(args)
| DeepLearningExamples-master | PyTorch/Detection/Efficientdet/waymo_tool/waymo_data_converter.py |
""" COCO transforms (quick and dirty)
Hacked together by Ross Wightman
"""
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from PIL import Image
import numpy as np
import random
import math
IMAGENET_DEFAULT_MEAN = (0.485, 0.456, 0.406)
IMAGENET_DEFAULT_STD = (0.229, 0.224, 0.225)
IMAGENET_INCEPTION_MEAN = (0.5, 0.5, 0.5)
IMAGENET_INCEPTION_STD = (0.5, 0.5, 0.5)
class ImageToNumpy:
def __call__(self, pil_img, annotations: dict):
np_img = np.array(pil_img, dtype=np.uint8)
if np_img.ndim < 3:
np_img = np.expand_dims(np_img, axis=-1)
np_img = np.moveaxis(np_img, 2, 0) # HWC to CHW
return np_img, annotations
class ImageToTensor:
def __init__(self, dtype=torch.float32):
self.dtype = dtype
def __call__(self, pil_img, annotations: dict):
np_img = np.array(pil_img, dtype=np.uint8)
if np_img.ndim < 3:
np_img = np.expand_dims(np_img, axis=-1)
np_img = np.moveaxis(np_img, 2, 0) # HWC to CHW
return torch.from_numpy(np_img).to(dtype=self.dtype), annotations
class TargetToTensor:
def __init__(self, dtype=torch.float32):
self.dtype = dtype
def __call__(self, pil_img, annotations: dict):
annotations['bbox'] = torch.from_numpy(annotations['bbox']).to(dtype=self.dtype)
annotations['cls'] = torch.from_numpy(annotations['cls']).to(dtype=torch.int64)
return pil_img, annotations
def _pil_interp(method):
if method == 'bicubic':
return Image.BICUBIC
elif method == 'lanczos':
return Image.LANCZOS
elif method == 'hamming':
return Image.HAMMING
else:
# default bilinear, do we want to allow nearest?
return Image.BILINEAR
def clip_boxes_(boxes, img_size):
height, width = img_size
clip_upper = np.array([height, width] * 2, dtype=boxes.dtype)
np.clip(boxes, 0, clip_upper, out=boxes)
def clip_boxes(boxes, img_size):
clipped_boxes = boxes.copy()
clip_boxes_(clipped_boxes, img_size)
return clipped_boxes
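# --- Illustrative usage (an addition, not part of the original file) ---
# Boxes are [y_min, x_min, y_max, x_max]; clip_boxes returns a clipped copy,
# clip_boxes_ clips in place.
def _example_clip_boxes():
    boxes = np.array([[-5., 10., 600., 700.]], dtype=np.float32)
    return clip_boxes(boxes, (512, 512))  # -> [[0., 10., 512., 512.]]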
def _size_tuple(size):
if isinstance(size, int):
return size, size
else:
assert len(size) == 2
return size
class ResizePad:
def __init__(self, target_size: int, interpolation: str = 'bilinear', fill_color: tuple = (0, 0, 0)):
self.target_size = _size_tuple(target_size)
self.interpolation = interpolation
self.fill_color = fill_color
def __call__(self, img, anno: dict):
width, height = img.size
img_scale_y = self.target_size[0] / height
img_scale_x = self.target_size[1] / width
img_scale = min(img_scale_y, img_scale_x)
scaled_h = int(height * img_scale)
scaled_w = int(width * img_scale)
new_img = Image.new("RGB", (self.target_size[1], self.target_size[0]), color=self.fill_color)
interp_method = _pil_interp(self.interpolation)
img = img.resize((scaled_w, scaled_h), interp_method)
new_img.paste(img)
if 'bbox' in anno:
# FIXME haven't tested this path since not currently using dataset annotations for train/eval
bbox = anno['bbox']
bbox[:, :4] *= img_scale
clip_boxes_(bbox, (scaled_h, scaled_w))
valid_indices = (bbox[:, :2] < bbox[:, 2:4]).all(axis=1)
anno['bbox'] = bbox[valid_indices, :]
anno['cls'] = anno['cls'][valid_indices]
anno['img_scale'] = 1. / img_scale # back to original
return new_img, anno
class RandomResizePad:
def __init__(self, target_size: int, scale: tuple = (0.1, 2.0), interpolation: str = 'bilinear',
fill_color: tuple = (0, 0, 0)):
self.target_size = _size_tuple(target_size)
self.scale = scale
self.interpolation = interpolation
self.fill_color = fill_color
def _get_params(self, img):
# Select a random scale factor.
scale_factor = random.uniform(*self.scale)
scaled_target_height = scale_factor * self.target_size[0]
scaled_target_width = scale_factor * self.target_size[1]
# Recompute the accurate scale_factor using rounded scaled image size.
width, height = img.size
img_scale_y = scaled_target_height / height
img_scale_x = scaled_target_width / width
img_scale = min(img_scale_y, img_scale_x)
# Select non-zero random offset (x, y) if scaled image is larger than target size
scaled_h = int(height * img_scale)
scaled_w = int(width * img_scale)
offset_y = scaled_h - self.target_size[0]
offset_x = scaled_w - self.target_size[1]
offset_y = int(max(0.0, float(offset_y)) * random.uniform(0, 1))
offset_x = int(max(0.0, float(offset_x)) * random.uniform(0, 1))
return scaled_h, scaled_w, offset_y, offset_x, img_scale
def __call__(self, img, anno: dict):
scaled_h, scaled_w, offset_y, offset_x, img_scale = self._get_params(img)
interp_method = _pil_interp(self.interpolation)
img = img.resize((scaled_w, scaled_h), interp_method)
right, lower = min(scaled_w, offset_x + self.target_size[1]), min(scaled_h, offset_y + self.target_size[0])
img = img.crop((offset_x, offset_y, right, lower))
new_img = Image.new("RGB", (self.target_size[1], self.target_size[0]), color=self.fill_color)
new_img.paste(img)
if 'bbox' in anno:
# FIXME not fully tested
bbox = anno['bbox'].copy() # FIXME copy for debugger inspection, back to inplace
bbox[:, :4] *= img_scale
box_offset = np.stack([offset_y, offset_x] * 2)
bbox -= box_offset
clip_boxes_(bbox, (scaled_h, scaled_w))
valid_indices = (bbox[:, :2] < bbox[:, 2:4]).all(axis=1)
anno['bbox'] = bbox[valid_indices, :]
anno['cls'] = anno['cls'][valid_indices]
anno['img_scale'] = 1. / img_scale # back to original
return new_img, anno
class RandomFlip:
def __init__(self, horizontal=True, vertical=False, prob=0.5):
self.horizontal = horizontal
self.vertical = vertical
self.prob = prob
def _get_params(self):
do_horizontal = random.random() < self.prob if self.horizontal else False
do_vertical = random.random() < self.prob if self.vertical else False
return do_horizontal, do_vertical
def __call__(self, img, annotations: dict):
do_horizontal, do_vertical = self._get_params()
width, height = img.size
def _fliph(bbox):
x_max = width - bbox[:, 1]
x_min = width - bbox[:, 3]
bbox[:, 1] = x_min
bbox[:, 3] = x_max
def _flipv(bbox):
y_max = height - bbox[:, 0]
y_min = height - bbox[:, 2]
bbox[:, 0] = y_min
bbox[:, 2] = y_max
if do_horizontal and do_vertical:
img = img.transpose(Image.ROTATE_180)
if 'bbox' in annotations:
_fliph(annotations['bbox'])
_flipv(annotations['bbox'])
elif do_horizontal:
img = img.transpose(Image.FLIP_LEFT_RIGHT)
if 'bbox' in annotations:
_fliph(annotations['bbox'])
elif do_vertical:
img = img.transpose(Image.FLIP_TOP_BOTTOM)
if 'bbox' in annotations:
_flipv(annotations['bbox'])
return img, annotations
def resolve_fill_color(fill_color, img_mean=IMAGENET_DEFAULT_MEAN):
if isinstance(fill_color, tuple):
assert len(fill_color) == 3
fill_color = fill_color
else:
try:
int_color = int(fill_color)
fill_color = (int_color,) * 3
except ValueError:
assert fill_color == 'mean'
fill_color = tuple([int(round(255 * x)) for x in img_mean])
return fill_color
class Compose:
def __init__(self, transforms: list):
self.transforms = transforms
def __call__(self, img, annotations: dict):
for t in self.transforms:
img, annotations = t(img, annotations)
return img, annotations
def transforms_coco_eval(
img_size=224,
interpolation='bilinear',
use_prefetcher=False,
fill_color='mean',
mean=IMAGENET_DEFAULT_MEAN,
std=IMAGENET_DEFAULT_STD):
fill_color = resolve_fill_color(fill_color, mean)
image_tfl = [
ResizePad(
target_size=img_size, interpolation=interpolation, fill_color=fill_color),
TargetToTensor(),
ImageToNumpy(),
]
assert use_prefetcher, "Only supporting prefetcher usage right now"
image_tf = Compose(image_tfl)
return image_tf
def transforms_coco_train(
img_size=224,
interpolation='random',
use_prefetcher=False,
fill_color='mean',
mean=IMAGENET_DEFAULT_MEAN,
std=IMAGENET_DEFAULT_STD):
fill_color = resolve_fill_color(fill_color, mean)
image_tfl = [
RandomFlip(horizontal=True, prob=0.5),
RandomResizePad(
target_size=img_size, interpolation=interpolation, fill_color=fill_color),
TargetToTensor(),
ImageToNumpy(),
]
assert use_prefetcher, "Only supporting prefetcher usage right now"
image_tf = Compose(image_tfl)
return image_tf
| DeepLearningExamples-master | PyTorch/Detection/Efficientdet/data/transforms.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .dataset import CocoDetection
from .transforms import *
from .loader import create_loader
| DeepLearningExamples-master | PyTorch/Detection/Efficientdet/data/__init__.py |
""" COCO dataset (quick and dirty)
Hacked together by Ross Wightman
"""
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch.utils.data as data
import os
import torch
import numpy as np
from PIL import Image
from pycocotools.coco import COCO
from effdet.anchors import Anchors, AnchorLabeler
class CocoDetection(data.Dataset):
"""`MS Coco Detection <http://mscoco.org/dataset/#detections-challenge2016>`_ Dataset.
Args:
root (string): Root directory where images are downloaded to.
ann_file (string): Path to json annotation file.
transform (callable, optional): A function/transform that takes in an PIL image
and returns a transformed version. E.g, ``transforms.ToTensor``
"""
def __init__(self, root, ann_file, config, transform=None):
super(CocoDetection, self).__init__()
if isinstance(root, (str, bytes)):
root = os.path.expanduser(root)
self.root = root
self.transform = transform
self.yxyx = True # expected for TF model, most PT are xyxy
self.include_masks = False
self.include_bboxes_ignore = False
self.has_annotations = 'image_info' not in ann_file
self.coco = None
self.cat_ids = []
self.cat_to_label = dict()
self.img_ids = []
self.img_ids_invalid = []
self.img_infos = []
self._load_annotations(ann_file)
self.anchors = Anchors(
config.min_level, config.max_level,
config.num_scales, config.aspect_ratios,
config.anchor_scale, config.image_size)
self.anchor_labeler = AnchorLabeler(self.anchors, config.num_classes, match_threshold=0.5)
def _load_annotations(self, ann_file):
assert self.coco is None
self.coco = COCO(ann_file)
self.cat_ids = self.coco.getCatIds()
img_ids_with_ann = set(_['image_id'] for _ in self.coco.anns.values())
for img_id in sorted(self.coco.imgs.keys()):
info = self.coco.loadImgs([img_id])[0]
valid_annotation = not self.has_annotations or img_id in img_ids_with_ann
if valid_annotation and min(info['width'], info['height']) >= 32:
self.img_ids.append(img_id)
self.img_infos.append(info)
else:
self.img_ids_invalid.append(img_id)
def _parse_img_ann(self, img_id, img_info):
ann_ids = self.coco.getAnnIds(imgIds=[img_id])
ann_info = self.coco.loadAnns(ann_ids)
bboxes = []
bboxes_ignore = []
cls = []
for i, ann in enumerate(ann_info):
if ann.get('ignore', False):
continue
x1, y1, w, h = ann['bbox']
if self.include_masks and ann['area'] <= 0:
continue
if w < 1 or h < 1:
continue
# To subtract 1 or not, TF doesn't appear to do this so will keep it out for now.
if self.yxyx:
#bbox = [y1, x1, y1 + h - 1, x1 + w - 1]
bbox = [y1, x1, y1 + h, x1 + w]
else:
#bbox = [x1, y1, x1 + w - 1, y1 + h - 1]
bbox = [x1, y1, x1 + w, y1 + h]
if ann.get('iscrowd', False):
if self.include_bboxes_ignore:
bboxes_ignore.append(bbox)
else:
bboxes.append(bbox)
cls.append(self.cat_to_label[ann['category_id']] if self.cat_to_label else ann['category_id'])
if bboxes:
bboxes = np.array(bboxes, dtype=np.float32)
cls = np.array(cls, dtype=np.int64)
else:
bboxes = np.zeros((0, 4), dtype=np.float32)
cls = np.array([], dtype=np.int64)
if self.include_bboxes_ignore:
if bboxes_ignore:
bboxes_ignore = np.array(bboxes_ignore, dtype=np.float32)
else:
bboxes_ignore = np.zeros((0, 4), dtype=np.float32)
ann = dict(img_id=img_id, bbox=bboxes, cls=cls, img_size=(img_info['width'], img_info['height']))
if self.include_bboxes_ignore:
ann['bbox_ignore'] = bboxes_ignore
return ann
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: Tuple (image, annotations (target)).
"""
img_id = self.img_ids[index]
img_info = self.img_infos[index]
if self.has_annotations:
ann = self._parse_img_ann(img_id, img_info)
else:
ann = dict(img_id=img_id, img_size=(img_info['width'], img_info['height']))
path = img_info['file_name']
img = Image.open(os.path.join(self.root, path)).convert('RGB')
if self.transform is not None:
img, ann = self.transform(img, ann)
cls_targets, box_targets, num_positives = self.anchor_labeler.label_anchors(
ann['bbox'], ann['cls'])
ann.pop('bbox')
ann.pop('cls')
ann['num_positives'] = num_positives
ann.update(cls_targets)
ann.update(box_targets)
return img, ann
def __len__(self):
return len(self.img_ids)
| DeepLearningExamples-master | PyTorch/Detection/Efficientdet/data/dataset.py |
""" Object detection loader/collate
Hacked together by Ross Wightman
"""
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.utils.data
from .transforms import *
from utils.distributed_sampler import OrderedDistributedSampler
from functools import partial
MAX_NUM_INSTANCES = 100
def fast_collate(memory_format, batch):
batch_size = len(batch)
# FIXME this needs to be more robust
target = dict()
for k, v in batch[0][1].items():
if isinstance(v, np.ndarray):
# if a numpy array, assume it relates to object instances, pad to MAX_NUM_INSTANCES
target_shape = (batch_size, MAX_NUM_INSTANCES)
if len(v.shape) > 1:
target_shape = target_shape + v.shape[1:]
target_dtype = torch.float32
elif isinstance(v, (tuple, list)):
# if tuple or list, assume per batch
target_shape = (batch_size, len(v))
target_dtype = torch.float32 if isinstance(v[0], float) else torch.int32
elif isinstance(v, torch.Tensor):
target_dtype = v.dtype
target_shape = (batch_size,) + tuple(v.size())
else:
# scalar, assume per batch
target_shape = batch_size,
target_dtype = torch.float32 if isinstance(v, float) else torch.int64
target[k] = torch.zeros(target_shape, dtype=target_dtype)
tensor = torch.zeros((batch_size, *batch[0][0].shape), dtype=torch.uint8).contiguous(
memory_format=memory_format
)
for i in range(batch_size):
tensor[i] += torch.from_numpy(batch[i][0])
for tk, tv in batch[i][1].items():
if isinstance(tv, np.ndarray) and len(tv.shape):
target[tk][i, 0:tv.shape[0]] = torch.from_numpy(tv)
elif isinstance(tv, torch.Tensor):
target[tk][i] = tv
else:
target[tk][i] = torch.tensor(tv, dtype=target[tk].dtype)
return tensor, target
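# --- Illustrative usage sketch (an addition, not part of the original file) ---
# Per-instance numpy targets (e.g. 'bbox', 'cls') are zero-padded to MAX_NUM_INSTANCES
# and images are stacked into one uint8 CHW tensor; the shapes below are illustrative
# assumptions.
def _example_fast_collate():
    img = np.zeros((3, 512, 512), dtype=np.uint8)
    target_a = {'bbox': np.ones((5, 4), dtype=np.float32), 'cls': np.ones(5, dtype=np.int64), 'img_scale': 1.0}
    target_b = {'bbox': np.ones((2, 4), dtype=np.float32), 'cls': np.ones(2, dtype=np.int64), 'img_scale': 2.0}
    tensor, target = fast_collate(torch.contiguous_format, [(img, target_a), (img, target_b)])
    # tensor: (2, 3, 512, 512) uint8; target['bbox']: (2, 100, 4); target['cls']: (2, 100)
    return tensor, target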
class PrefetchLoader:
def __init__(self,
loader,
mean=IMAGENET_DEFAULT_MEAN,
std=IMAGENET_DEFAULT_STD):
self.loader = loader
self.mean = torch.tensor([x * 255 for x in mean]).cuda().view(1, 3, 1, 1)
self.std = torch.tensor([x * 255 for x in std]).cuda().view(1, 3, 1, 1)
def __iter__(self):
stream = torch.cuda.Stream()
first = True
for next_input, next_target in self.loader:
with torch.cuda.stream(stream):
next_input = next_input.cuda(non_blocking=True)
next_input = next_input.float().sub_(self.mean).div_(self.std)
next_target = {k: v.cuda(non_blocking=True) for k, v in next_target.items()}
if not first:
yield input, target
else:
first = False
torch.cuda.current_stream().wait_stream(stream)
input = next_input
target = next_target
yield input, target
def __len__(self):
return len(self.loader)
@property
def sampler(self):
return self.loader.batch_sampler
class IterationBasedBatchSampler(torch.utils.data.sampler.BatchSampler):
"""
Wraps a BatchSampler, resampling from it until
a specified number of iterations have been sampled
"""
def __init__(self, batch_sampler):
self.batch_sampler = batch_sampler
def __iter__(self):
while True:
for batch in self.batch_sampler:
yield batch
def __len__(self):
return len(self.batch_sampler)
def set_epoch(self, epoch):
if hasattr(self.batch_sampler.sampler, "set_epoch"):
self.batch_sampler.sampler.set_epoch(epoch)
def create_loader(
dataset,
input_size,
batch_size,
is_training=False,
use_prefetcher=True,
interpolation='bilinear',
fill_color='mean',
mean=IMAGENET_DEFAULT_MEAN,
std=IMAGENET_DEFAULT_STD,
num_workers=1,
distributed=False,
pin_mem=False,
memory_format=torch.contiguous_format
):
if isinstance(input_size, tuple):
img_size = input_size[-2:]
else:
img_size = input_size
if is_training:
transform = transforms_coco_train(
img_size,
interpolation=interpolation,
use_prefetcher=use_prefetcher,
fill_color=fill_color,
mean=mean,
std=std)
else:
transform = transforms_coco_eval(
img_size,
interpolation=interpolation,
use_prefetcher=use_prefetcher,
fill_color=fill_color,
mean=mean,
std=std)
dataset.transform = transform
sampler = None
if distributed:
if is_training:
sampler = torch.utils.data.distributed.DistributedSampler(dataset)
else:
# This will add extra duplicate entries to result in equal num
# of samples per-process, will slightly alter validation results
sampler = OrderedDistributedSampler(dataset)
else:
sampler = torch.utils.data.RandomSampler(dataset)
batch_sampler = torch.utils.data.sampler.BatchSampler(
sampler, batch_size, drop_last=False)
if is_training:
batch_sampler = IterationBasedBatchSampler(batch_sampler)
loader = torch.utils.data.DataLoader(
dataset,
shuffle=False,
num_workers=num_workers,
batch_sampler=batch_sampler,
pin_memory=pin_mem,
collate_fn=partial(fast_collate, memory_format) if use_prefetcher else torch.utils.data.dataloader.default_collate,
)
else:
loader = torch.utils.data.DataLoader(
dataset,
batch_size=batch_size,
shuffle=False,
num_workers=num_workers,
sampler=sampler,
pin_memory=pin_mem,
collate_fn=partial(fast_collate, memory_format) if use_prefetcher else torch.utils.data.dataloader.default_collate,
)
if use_prefetcher:
loader = PrefetchLoader(loader, mean=mean, std=std)
return loader
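# --- Illustrative usage sketch (an assumption, not part of the original file) ---
# Mirrors data/dataloader_test.py: build a CocoDetection dataset with an EfficientDet
# config, then wrap it with create_loader; the COCO paths below are hypothetical
# placeholders.
def _example_create_loader():
    from effdet.config import get_efficientdet_config
    from .dataset import CocoDetection
    config = get_efficientdet_config('efficientdet_d0')
    dataset = CocoDetection('/data/coco/train2017',
                            '/data/coco/annotations/instances_train2017.json',
                            config)
    return create_loader(dataset, input_size=512, batch_size=32, is_training=True,
                         use_prefetcher=True, num_workers=4, distributed=False)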
| DeepLearningExamples-master | PyTorch/Detection/Efficientdet/data/loader.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import time
import yaml
import math
import os
from datetime import datetime
import ctypes
import numpy as np
import torch
import torchvision.utils
from effdet.config import get_efficientdet_config
from data import create_loader, CocoDetection
from utils.utils import AverageMeter
from data.loader import IterationBasedBatchSampler
config_parser = parser = argparse.ArgumentParser(description='Training Config', add_help=False)
parser.add_argument('-c', '--config', default='', type=str, metavar='FILE',
help='YAML config file specifying default arguments')
def add_bool_arg(parser, name, default=False, help=''): # FIXME move to utils
dest_name = name.replace('-', '_')
group = parser.add_mutually_exclusive_group(required=False)
group.add_argument('--' + name, dest=dest_name, action='store_true', help=help)
group.add_argument('--no-' + name, dest=dest_name, action='store_false', help=help)
parser.set_defaults(**{dest_name: default})
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
# Dataset / Model parameters
parser.add_argument('data', metavar='DIR',
help='path to dataset')
parser.add_argument('-b', '--batch-size', type=int, default=32, metavar='N',
help='input batch size for training (default: 32)')
parser.add_argument('--input-size', type=int, default=512, metavar='N',
help='input image size (default: 512)')
parser.add_argument('--prefetcher', action='store_true', default=True,
help='enable fast prefetcher')
parser.add_argument('--train-interpolation', type=str, default='random',
help='Training interpolation (random, bilinear, bicubic default: "random")')
parser.add_argument('-j', '--workers', type=int, default=4, metavar='N',
help='how many data loading workers to use (default: 4)')
parser.add_argument('--pin-mem', action='store_true', default=False,
help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.')
parser.add_argument("--local_rank", default=os.getenv('LOCAL_RANK', 0), type=int)
def _parse_args():
# Do we have a config file to parse?
args_config, remaining = config_parser.parse_known_args()
if args_config.config:
with open(args_config.config, 'r') as f:
cfg = yaml.safe_load(f)
parser.set_defaults(**cfg)
# The main arg parser parses the rest of the args, the usual
# defaults will have been overridden if config file specified.
args = parser.parse_args(remaining)
# Cache the args as a text string to save them in the output dir later
args_text = yaml.safe_dump(args.__dict__, default_flow_style=False)
return args, args_text
def test_number_of_iters_and_elements():
for batch_size in [4]:
for drop_last in [False, True]:
dataset = [i for i in range(10)]
sampler = torch.utils.data.sampler.SequentialSampler(dataset)
batch_sampler = torch.utils.data.sampler.BatchSampler(
sampler, batch_size, drop_last=drop_last
)
iter_sampler = IterationBasedBatchSampler(
batch_sampler
)
iterator = iter(iter_sampler)
print("Len of sampler {} ".format(len(iter_sampler)))
print("=====================================================")
print("Test batch size {} drop last {}".format(batch_size, drop_last))
steps_per_epoch = int( np.ceil(len(dataset) / batch_size) )
i = 0
for epoch in range(3):
for _ in range(steps_per_epoch):
batch = next(iterator)
start = (i % len(batch_sampler)) * batch_size
end = min(start + batch_size, len(dataset))
expected = [x for x in range(start, end)]
print("Epoch {} iteration {} batch {}".format(epoch, i, batch))
i += 1
def main():
args, args_text = _parse_args()
args.distributed = False
if 'WORLD_SIZE' in os.environ:
args.distributed = int(os.environ['WORLD_SIZE']) > 1
args.device = 'cuda:0'
args.world_size = 1
args.rank = 0 # global rank
if args.distributed:
args.device = 'cuda:%d' % args.local_rank
torch.cuda.set_device(args.local_rank)
torch.distributed.init_process_group(backend='nccl', init_method='env://')
args.world_size = torch.distributed.get_world_size()
args.rank = torch.distributed.get_rank()
model_name = 'efficientdet_d0'
data_config = get_efficientdet_config(model_name)
train_anno_set = 'train2017'
train_annotation_path = os.path.join(args.data, 'annotations', f'instances_{train_anno_set}.json')
train_image_dir = train_anno_set
dataset_train = CocoDetection(os.path.join(args.data, train_image_dir), train_annotation_path, data_config)
print("Length of training dataset {}".format(len(dataset_train)))
loader_train = create_loader(
dataset_train,
input_size=args.input_size,
batch_size=args.batch_size,
is_training=True,
use_prefetcher=args.prefetcher,
#re_prob=args.reprob, # FIXME add back various augmentations
#re_mode=args.remode,
#re_count=args.recount,
#re_split=args.resplit,
#color_jitter=args.color_jitter,
#auto_augment=args.aa,
interpolation=args.train_interpolation,
#mean=data_config['mean'],
#std=data_config['std'],
num_workers=args.workers,
distributed=args.distributed,
#collate_fn=collate_fn,
pin_mem=args.pin_mem,
)
print("Iterations per epoch {}".format(math.ceil( len(dataset_train) / ( args.batch_size * args.world_size ))))
data_time_m = AverageMeter()
end = time.time()
if args.local_rank == 0:
print("Starting to test...")
for batch_idx, (input, target) in enumerate(loader_train):
data_time_m.update(time.time() - end)
if args.local_rank == 0 and batch_idx % 20 == 0:
print("batch time till {} is {}".format(batch_idx, data_time_m.avg))
end = time.time()
if __name__ == "__main__":
main()
#### USAGE ####
#
# NUM_PROC=8
# python -m torch.distributed.launch --nproc_per_node=$NUM_PROC data/dataloader_test.py /workspace/object_detection/datasets/coco -b 64 --workers 16
# | DeepLearningExamples-master | PyTorch/Detection/Efficientdet/data/dataloader_test.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import warnings
warnings.simplefilter("ignore")
import tensorflow as tf
import horovod.tensorflow as hvd
from dllogger import StdOutBackend, JSONStreamBackend, Verbosity
import dllogger as DLLogger
from utils import hvd_utils
from copy import deepcopy
from importlib import import_module
from config.defaults import base_config
from config.defaults import Config
from utils.setup import set_flags
from runtime import Runner
from utils.cmdline_helper import parse_cmdline
def get_module_path(sys_path):
"""[summary]
converts the path to a py module to a format suitable for the import_module function.
Ex: config/model/hparams.py -> config.model.hparams
Args:
sys_path (string): module path in sys format
Returns:
string: module path in dotted import format
"""
no_ext = sys_path.split('.')[0]
return no_ext.replace('/','.')
if __name__== "__main__":
# get command line args
FLAGS = parse_cmdline()
config = Config(FLAGS.__dict__)
# get model hyperparameters from the user-provided model config
model_config = import_module(get_module_path(FLAGS.cfg))
model_config = Config(model_config.config)
#override model hyper parameters by those provided by the user via cmd
model_config.override(FLAGS.mparams)
config.mparams = model_config
# make sure number of classes in the model config is consistent with data loader config
config.num_classes = config.mparams.num_classes
#========== horovod initialization
hvd.init()
#========== set up env variables, tf flags, and seeds
set_flags(config)
#========== set up the loggers and log dir
backends = []
if not hvd_utils.is_using_hvd() or hvd.rank() == 0:
# Prepare Model Dir
os.makedirs(config.model_dir, exist_ok=True)
# Setup dlLogger
backends+=[
JSONStreamBackend(verbosity=Verbosity.VERBOSE, filename=config.log_filename),
StdOutBackend(verbosity=Verbosity.DEFAULT)
]
DLLogger.init(backends=backends)
DLLogger.log(data=vars(config), step='PARAMETER')
DLLogger.metadata('avg_exp_per_second_training', {'unit': 'samples/s'})
DLLogger.metadata('avg_exp_per_second_training_per_GPU', {'unit': 'samples/s'})
DLLogger.metadata('avg_exp_per_second_eval', {'unit': 'samples/s'})
DLLogger.metadata('avg_exp_per_second_eval_per_GPU', {'unit': 'samples/s'})
DLLogger.metadata('latency_pct', {'unit': 'ms'})
DLLogger.metadata('latency_90pct', {'unit': 'ms'})
DLLogger.metadata('latency_95pct', {'unit': 'ms'})
DLLogger.metadata('latency_99pct', {'unit': 'ms'})
DLLogger.metadata('eval_loss', {'unit': None})
DLLogger.metadata('eval_accuracy_top_1', {'unit': None})
DLLogger.metadata('eval_accuracy_top_5', {'unit': None})
DLLogger.metadata('training_loss', {'unit': None})
DLLogger.metadata('training_accuracy_top_1', {'unit': None})
DLLogger.metadata('training_accuracy_top_5', {'unit': None})
#========== initialize the runner
runner = Runner(config, DLLogger)
#========== determine the operation mode of the runner (tr,eval,predict)
if config.mode in ["train", "train_and_eval", "training_benchmark"]:
runner.train()
if config.mode in ['eval', 'evaluate', 'inference_benchmark']:
if config.mode == 'inference_benchmark' and hvd_utils.is_using_hvd():
raise NotImplementedError("Only single GPU inference is implemented.")
elif hvd_utils.is_using_hvd():
raise NotImplementedError("Only single GPU evaluation is implemented.")
else:
runner.evaluate()
if config.mode == 'predict':
if config.predict_img_dir is None:
raise ValueError("No data to predict on.")
if not os.path.isdir(config.predict_img_dir):
raise ValueError("Provide directory with images to infer!")
if hvd_utils.is_using_hvd():
raise NotImplementedError("Only single GPU inference is implemented.")
elif not hvd_utils.is_using_hvd() or hvd.rank() == 0:
runner.predict(config.predict_img_dir, config.predict_ckpt)
| DeepLearningExamples-master | TensorFlow2/Classification/ConvNets/main.py |
DeepLearningExamples-master | TensorFlow2/Classification/ConvNets/config/__init__.py |
|
# Copyright 2021 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Hparams for model architecture and trainer."""
import ast
import collections
import copy
from typing import Any, Dict, Text
import tensorflow as tf
import yaml
def eval_str_fn(val):
if '|' in val:
return [eval_str_fn(v) for v in val.split('|')]
if val in {'true', 'false'}:
return val == 'true'
try:
return ast.literal_eval(val)
except (ValueError, SyntaxError):
return val
# pylint: disable=protected-access
class Config(dict):
"""A config utility class."""
def __init__(self, *args, **kwargs):
super().__init__()
input_config_dict = dict(*args, **kwargs)
self.update(input_config_dict)
def __len__(self):
return len(self.__dict__)
def __setattr__(self, k, v):
if isinstance(v, dict) and not isinstance(v, Config):
self.__dict__[k] = Config(v)
else:
self.__dict__[k] = copy.deepcopy(v)
def __getattr__(self, k):
return self.__dict__[k]
def __setitem__(self, k, v):
self.__setattr__(k, v)
def __getitem__(self, k):
return self.__dict__[k]
def __iter__(self):
for key in self.__dict__:
yield key
def items(self):
for key, value in self.__dict__.items():
yield key, value
def __repr__(self):
return repr(self.as_dict())
def __getstate__(self):
return self.__dict__
def __copy__(self):
cls = self.__class__
result = cls.__new__(cls)
result.__dict__.update(self.__dict__)
return result
def __deepcopy__(self, memo):
cls = self.__class__
result = cls.__new__(cls)
for k, v in self.__dict__.items():
result[k] = v
return result
def __str__(self):
try:
return yaml.dump(self.as_dict(), indent=4)
except TypeError:
return str(self.as_dict())
def _update(self, config_dict, allow_new_keys=True):
"""Recursively update internal members."""
if not config_dict:
return
for k, v in config_dict.items():
if k not in self.__dict__:
if allow_new_keys:
self.__setattr__(k, v)
else:
raise KeyError('Key `{}` does not exist for overriding. '.format(k))
else:
if isinstance(self.__dict__[k], Config) and isinstance(v, dict):
self.__dict__[k]._update(v, allow_new_keys)
elif isinstance(self.__dict__[k], Config) and isinstance(v, Config):
self.__dict__[k]._update(v.as_dict(), allow_new_keys)
else:
self.__setattr__(k, v)
def get(self, k, default_value=None):
return self.__dict__.get(k, default_value)
def update(self, config_dict):
"""Update members while allowing new keys."""
self._update(config_dict, allow_new_keys=True)
def keys(self):
return self.__dict__.keys()
def override(self, config_dict_or_str, allow_new_keys=False):
"""Update members while disallowing new keys."""
if not config_dict_or_str:
return
if isinstance(config_dict_or_str, str):
if '=' in config_dict_or_str:
config_dict = self.parse_from_str(config_dict_or_str)
elif config_dict_or_str.endswith('.yaml'):
config_dict = self.parse_from_yaml(config_dict_or_str)
else:
raise ValueError(
'Invalid string {}, must end with .yaml or contains "=".'.format(
config_dict_or_str))
elif isinstance(config_dict_or_str, dict):
config_dict = config_dict_or_str
else:
raise ValueError('Unknown value type: {}'.format(config_dict_or_str))
self._update(config_dict, allow_new_keys)
def parse_from_yaml(self, yaml_file_path: Text) -> Dict[Any, Any]:
"""Parses a yaml file and returns a dictionary."""
with tf.io.gfile.GFile(yaml_file_path, 'r') as f:
config_dict = yaml.load(f, Loader=yaml.FullLoader)
return config_dict
def save_to_yaml(self, yaml_file_path):
"""Write a dictionary into a yaml file."""
with tf.io.gfile.GFile(yaml_file_path, 'w') as f:
yaml.dump(self.as_dict(), f, default_flow_style=False)
def parse_from_str(self, config_str: Text) -> Dict[Any, Any]:
"""Parse a string like 'x.y=1,x.z=2' to nested dict {x: {y: 1, z: 2}}."""
if not config_str:
return {}
config_dict = {}
try:
for kv_pair in config_str.split(','):
if not kv_pair: # skip empty string
continue
key_str, value_str = kv_pair.split('=')
key_str = key_str.strip()
def add_kv_recursive(k, v):
"""Recursively parse x.y.z=tt to {x: {y: {z: tt}}}."""
if '.' not in k:
return {k: eval_str_fn(v)}
pos = k.index('.')
return {k[:pos]: add_kv_recursive(k[pos + 1:], v)}
def merge_dict_recursive(target, src):
"""Recursively merge two nested dictionary."""
for k in src.keys():
if ((k in target and isinstance(target[k], dict) and
isinstance(src[k], collections.abc.Mapping))):
merge_dict_recursive(target[k], src[k])
else:
target[k] = src[k]
merge_dict_recursive(config_dict, add_kv_recursive(key_str, value_str))
return config_dict
except ValueError:
raise ValueError('Invalid config_str: {}'.format(config_str))
def as_dict(self):
"""Returns a dict representation."""
config_dict = {}
for k, v in self.__dict__.items():
if isinstance(v, Config):
config_dict[k] = v.as_dict()
elif isinstance(v, (list, tuple)):
config_dict[k] = [
i.as_dict() if isinstance(i, Config) else copy.deepcopy(i)
for i in v
]
else:
config_dict[k] = copy.deepcopy(v)
return config_dict
# pylint: enable=protected-access
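# The guarded snippet below is an illustrative, self-contained sketch (not part of the
# original module) of how the Config utility behaves. All names in it are made up for the
# example; it only runs when this file is executed directly.
if __name__ == '__main__':
    _example_cfg = Config({'train': {'lr_init': 0.008, 'batch_size': 32}, 'mode': 'train_and_eval'})
    # Nested keys can be overridden with 'x.y=value' strings; unknown keys are rejected
    # because override() calls _update with allow_new_keys=False.
    _example_cfg.override('train.lr_init=0.005,train.batch_size=64')
    assert _example_cfg.train.lr_init == 0.005 and _example_cfg.train.batch_size == 64
    # as_dict() recursively converts nested Config objects back into plain dicts.
    assert _example_cfg.as_dict()['train'] == {'lr_init': 0.005, 'batch_size': 64}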
registry_map = {}
def register(cls, prefix='effnet:'):
"""Register a function, mainly for config here."""
registry_map[prefix + cls.__name__.lower()] = cls
return cls
def lookup(name, prefix='effnet:') -> Any:
name = prefix + name.lower()
if name not in registry_map:
raise ValueError(f'{name} not registered: {registry_map.keys()}')
return registry_map[name]
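# Illustrative note (not part of the original module): register/lookup implement a small
# name -> class registry keyed by 'effnet:' + the lowercased class name. For example, a
# hypothetical `@register`-decorated `class EffnetV2S(Config): ...` would afterwards be
# retrievable with lookup('effnetv2s').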
# needed?
# --params_override
# --arch or model_name
base_config = Config(
# model related params.
model=Config(), # must be provided in full via model cfg files
# train related params.
train=Config(
img_size=224,
max_epochs=300,
steps_per_epoch=None,
batch_size=32, # renamed from train_batch_size
use_dali=0,
# optimizer
optimizer='rmsprop',
momentum=0.9, # rmsprop, momentum opt
        beta_1=0.0, # for adam, adamw
        beta_2=0.0, # for adam, adamw
nesterov=0, # for sgd, momentum opt
epsilon=.001, # for adamw, adam, rmsprop
decay=0.9, # for rmsprop
# While the original implementation used a weight decay of 1e-5,
# tf.nn.l2_loss divides it by 2, so we halve this to compensate in Keras
weight_decay=5e-6, # for adamw or can be used in learnable layers as L2 reg.
label_smoothing=0.1,
# The optimizer iteratively updates two sets of weights: the search directions for weights
# are chosen by the inner optimizer, while the "slow weights" are updated each k steps
# based on the directions of the "fast weights" and the two sets of weights are
# synchronized. This method improves the learning stability and lowers the variance of
# its inner optimizer.
lookahead=0, # binary
# Empirically it has been found that using the moving average of the trained parameters
# of a deep network is better than using its trained parameters directly. This optimizer
# allows you to compute this moving average and swap the variables at save time so that
# any code outside of the training loop will use by default the average values instead
# of the original ones.
moving_average_decay=0.0,
# model evaluation during training can be done using the original weights
        # or using EMA weights. The latter takes place if moving_average_decay > 0 and intratrain_eval_using_ema is True.
intratrain_eval_using_ema=True,
# to simulate a large batch size
grad_accum_steps=1,
# grad clipping is used in the custom train_step, which is called when grad_accum_steps > 1
grad_clip_norm=0,
# to optimize grad reducing across all workers
hvd_fp16_compression = True,
create_SavedModel=False,
#lr schedule
lr_decay='exponential',
lr_init=0.008,
lr_decay_epochs=2.4,
lr_decay_rate=0.97,
lr_warmup_epochs=5,
# metrics
metrics = ['accuracy', 'top_5'], # used in tr and eval
# load and save ckpt
resume_checkpoint=1, # binary
save_checkpoint_freq=5,
# progressive training (active when n_stages>1)
n_stages=1, # progressive tr
base_img_size=128,
base_mixup=0,
base_cutmix=0,
base_randaug_mag=5,
#callbacks
enable_checkpoint_and_export=1, # binary
enable_tensorboard=0, # binary
tb_write_model_weights=0, # tb: tensorboard, binary
),
eval=Config(
skip_eval=0, # binary
num_epochs_between_eval=1,
use_dali=0, # binary, renamed from use_dali_eval
batch_size=100, # for accurate eval, it should divide the number of validation samples
img_size=224,
export=0
),
predict=Config(
ckpt=None, # renamed from inference_checkpoint
img_dir='/infer_data/', # renamed from to_predict
batch_size=32, # renamed from predict_batch_size
img_size=224,
benchmark=0,
),
# data related params.
data=Config(
dataset='ImageNet',
augmenter_name='autoaugment',
#Rand-augment params
raug_num_layers=None,
raug_magnitude=None,
cutout_const=None,
mixup_alpha=0.,
cutmix_alpha=0.,
defer_img_mixing=True,
translate_const=None,
#Auto-augment params
autoaugmentation_name=None,
# used in dali
index_file='',
#dataset and split
data_dir='/data/',
num_classes=1000, # must match the one in model config
train_num_examples=1281167,
eval_num_examples=50000,
# image normalization
mean_subtract_in_dpipe=False,
standardize_in_dpipe=False,
# Set to False for 1-GPU training
map_parallelization=True
),
runtime=Config(
use_amp=1, # binary
log_steps=100,
        mode='train_and_eval',
time_history=1, # binary
use_xla=1, # binary
intraop_threads='',
interop_threads='',
model_dir='/results/', # ckpts
log_filename='log.json',
display_every=10,
seed=None,
data_format='channels_first',
run_eagerly=0, # binary
memory_limit=None, ##set max memory that can be allocated by TF to avoid hanging
)) | DeepLearningExamples-master | TensorFlow2/Classification/ConvNets/config/defaults.py |
import tensorflow as tf
# NOTE: this config file can be further overridden by user-defined params provided at the command line
config = dict(
path_to_impl='model.efficientnet_model_v2',
#data-related model params
num_classes=1000, # must be the same as data.num_classes
input_channels= 3,
rescale_input= 1, # binary
mean_rgb=(0.485 * 255, 0.456 * 255, 0.406 * 255), # used when rescale_input=True
std_rgb=(0.229 * 255, 0.224 * 255, 0.225 * 255), # used when rescale_input=True
    dtype= tf.float32, # used for input image normalization/casting; options: tf.float32, tf.float16, tf.bfloat16
# GUIDE
# width depth resolution dropout
# efficientnet_v2-s 1.0 1.0 300 0.2
width_coefficient= 1.0,
depth_coefficient= 1.0,
dropout_rate= 0.2, # used in the cls head
# image resolution must be set in tr/eval/predict configs below
drop_connect_rate= 0.2, # used in residual for stochastic depth
conv_dropout= None, # used in pre-SE, but never used
stem_base_filters= 24, # effnetv2
top_base_filters= 1280,
activation= 'swish', # same as silu
depth_divisor= 8,
min_depth=8,
# use_se= True, # No longer global: blocks may or may not have it.
batch_norm= 'syncbn',
bn_momentum= 0.99, # google uses 0.9
bn_epsilon= 1e-3,
weight_init= 'fan_out', # google uses untruncated
# NEW
# gn_groups=8, # group normalization
# local_pooling=0, # as opposed global pooling for SE
# headbias=None, # bias for cls head
blocks= (
# (input_filters, output_filters, kernel_size, num_repeat,expand_ratio, strides, se_ratio)
# pylint: disable=bad-whitespace
dict(input_filters=24, output_filters=24, kernel_size=3, num_repeat=2, expand_ratio=1, strides=(1, 1), se_ratio=None,id_skip=True,fused_conv=True,conv_type=None),
dict(input_filters=24, output_filters=48, kernel_size=3, num_repeat=4, expand_ratio=4, strides=(2, 2), se_ratio=None,id_skip=True,fused_conv=True,conv_type=None),
dict(input_filters=48, output_filters=64, kernel_size=3, num_repeat=4, expand_ratio=4, strides=(2, 2), se_ratio=None,id_skip=True,fused_conv=True,conv_type=None),
dict(input_filters=64, output_filters=128, kernel_size=3, num_repeat=6, expand_ratio=4, strides=(2, 2), se_ratio=0.25,id_skip=True,fused_conv=False,conv_type='depthwise'),
dict(input_filters=128, output_filters=160, kernel_size=3, num_repeat=9, expand_ratio=6, strides=(1, 1), se_ratio=0.25,id_skip=True,fused_conv=False,conv_type='depthwise'),
dict(input_filters=160, output_filters=256, kernel_size=3, num_repeat=15, expand_ratio=6, strides=(2, 2), se_ratio=0.25,id_skip=True,fused_conv=False,conv_type='depthwise'),
# pylint: enable=bad-whitespace
),
) | DeepLearningExamples-master | TensorFlow2/Classification/ConvNets/config/efficientnet_v2/s_cfg.py |
DeepLearningExamples-master | TensorFlow2/Classification/ConvNets/config/efficientnet_v2/__init__.py |
|
import tensorflow as tf
from config.defaults import Config
# NOTE: this config file can be further overridden by user-defined params provided at the command line
config = dict(
path_to_impl='model.efficientnet_model_v1',
#data-related model params
num_classes=1000, # must be the same as data.num_classes
input_channels= 3,
rescale_input= 1, # binary,
mean_rgb=(0.485 * 255, 0.456 * 255, 0.406 * 255), # used when rescale_input=True
std_rgb=(0.229 * 255, 0.224 * 255, 0.225 * 255), # used when rescale_input=True
    dtype= tf.float32, # used for input image normalization/casting; options: tf.float32, tf.float16, tf.bfloat16
# GUIDE
    #                     width  depth  resolution  dropout
    # efficientnet_v1-b0   1.0    1.0      224        0.2
    # efficientnet_v1-b1   1.0    1.1      240        0.2
    # efficientnet_v1-b2   1.1    1.2      260        0.3
    # efficientnet_v1-b3   1.2    1.4      300        0.3
    # efficientnet_v1-b4   1.4    1.8      380        0.4
    # efficientnet_v1-b5   1.6    2.2      456        0.4
    # efficientnet_v1-b6   1.8    2.6      528        0.5
    # efficientnet_v1-b7   2.0    3.1      600        0.5
    # efficientnet_v1-b8   2.2    3.6      672        0.5
    # efficientnet_v1-l2   4.3    5.3      800        0.5
width_coefficient= 1.0,
depth_coefficient= 1.0,
dropout_rate= 0.2,
# image resolution must be set in tr/eval/predict configs below
drop_connect_rate= 0.2,
stem_base_filters= 32,
top_base_filters= 1280,
activation= 'swish',
depth_divisor= 8,
min_depth= None,
use_se= 1, # binary
batch_norm= 'syncbn',
bn_momentum= 0.99,
bn_epsilon= 1e-3,
weight_init= 'fan_out',
blocks= (
# (input_filters, output_filters, kernel_size, num_repeat,expand_ratio, strides, se_ratio)
# pylint: disable=bad-whitespace
dict(input_filters=32, output_filters=16, kernel_size=3, num_repeat=1, expand_ratio=1, strides=(1, 1), se_ratio=0.25,id_skip=True,fused_conv=False,conv_type='depthwise'),
dict(input_filters=16, output_filters=24, kernel_size=3, num_repeat=2, expand_ratio=6, strides=(2, 2), se_ratio=0.25,id_skip=True,fused_conv=False,conv_type='depthwise'),
dict(input_filters=24, output_filters=40, kernel_size=5, num_repeat=2, expand_ratio=6, strides=(2, 2), se_ratio=0.25,id_skip=True,fused_conv=False,conv_type='depthwise'),
dict(input_filters=40, output_filters=80, kernel_size=3, num_repeat=3, expand_ratio=6, strides=(2, 2), se_ratio=0.25,id_skip=True,fused_conv=False,conv_type='depthwise'),
dict(input_filters=80, output_filters=112, kernel_size=5, num_repeat=3, expand_ratio=6, strides=(1, 1), se_ratio=0.25,id_skip=True,fused_conv=False,conv_type='depthwise'),
dict(input_filters=112, output_filters=192, kernel_size=5, num_repeat=4, expand_ratio=6, strides=(2, 2), se_ratio=0.25,id_skip=True,fused_conv=False,conv_type='depthwise'),
dict(input_filters=192, output_filters=320, kernel_size=3, num_repeat=1, expand_ratio=6, strides=(1, 1), se_ratio=0.25,id_skip=True,fused_conv=False,conv_type='depthwise'),
# pylint: enable=bad-whitespace
),
)
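# The GUIDE table above lists the per-variant width/depth coefficients. As an illustrative
# sketch only (an assumption based on the standard EfficientNet scaling rule, not code taken
# from this repository), channel counts are typically scaled and rounded like this:
#
#   def _round_filters(filters, width_coefficient=1.0, depth_divisor=8, min_depth=None):
#       min_depth = min_depth or depth_divisor
#       filters *= width_coefficient
#       new_filters = max(min_depth, int(filters + depth_divisor / 2) // depth_divisor * depth_divisor)
#       if new_filters < 0.9 * filters:  # never round down by more than 10%
#           new_filters += depth_divisor
#       return int(new_filters)
#
#   # e.g. with width_coefficient=1.4 (the b4 variant): _round_filters(32, 1.4) -> 48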
# train_config = dict(lr_decay='cosine',
#
# max_epochs=500,
# img_size=224,
# batch_size=256,
# save_checkpoint_freq=5,
# lr_init=0.005,
# weight_decay=5e-6,
# epsilon=0.001,
# resume_checkpoint=1,
# enable_tensorboard=0
# )
#
# eval_config = dict(img_size=224,
# batch_size=256)
#
# data_config = dict(
# data_dir='/data/',
# augmenter_name='autoaugment',
# mixup_alpha=0.0,
#
#
# )
# runtime_config = dict(mode='train_and_eval',
# model_dir='./output/',
# use_amp=1,
# use_xla=1,
# log_steps=100
# )
#
# config = dict(model=model_config,
# train=train_config,
# eval=eval_config,
# data=data_config,
# runtime=runtime_config,
# )
| DeepLearningExamples-master | TensorFlow2/Classification/ConvNets/config/efficientnet_v1/b0_cfg.py |
DeepLearningExamples-master | TensorFlow2/Classification/ConvNets/config/efficientnet_v1/__init__.py |
|
import tensorflow as tf
from config.defaults import Config
# NOTE: this config file can be further overridden by user-defined params provided at the command line
config = dict(
path_to_impl='model.efficientnet_model_v1',
#data-related model params
num_classes=1000, # must be the same as data.num_classes
input_channels= 3,
rescale_input= 1, # binary,
mean_rgb=(0.485 * 255, 0.456 * 255, 0.406 * 255), # used when rescale_input=True
std_rgb=(0.229 * 255, 0.224 * 255, 0.225 * 255), # used when rescale_input=True
    dtype= tf.float32, # used for input image normalization/casting; options: tf.float32, tf.float16, tf.bfloat16
# GUIDE
    #                     width  depth  resolution  dropout
    # efficientnet_v1-b0   1.0    1.0      224        0.2
    # efficientnet_v1-b1   1.0    1.1      240        0.2
    # efficientnet_v1-b2   1.1    1.2      260        0.3
    # efficientnet_v1-b3   1.2    1.4      300        0.3
    # efficientnet_v1-b4   1.4    1.8      380        0.4
    # efficientnet_v1-b5   1.6    2.2      456        0.4
    # efficientnet_v1-b6   1.8    2.6      528        0.5
    # efficientnet_v1-b7   2.0    3.1      600        0.5
    # efficientnet_v1-b8   2.2    3.6      672        0.5
    # efficientnet_v1-l2   4.3    5.3      800        0.5
width_coefficient= 1.4,
depth_coefficient= 1.8,
dropout_rate= 0.4,
# image resolution must be set in tr/eval/predict configs below
drop_connect_rate= 0.2,
stem_base_filters= 32,
top_base_filters= 1280,
activation= 'swish',
depth_divisor= 8,
min_depth= None,
use_se= 1, # binary
batch_norm= 'syncbn',
bn_momentum= 0.99,
bn_epsilon= 1e-3,
weight_init= 'fan_out',
blocks= (
# (input_filters, output_filters, kernel_size, num_repeat,expand_ratio, strides, se_ratio)
# pylint: disable=bad-whitespace
dict(input_filters=32, output_filters=16, kernel_size=3, num_repeat=1, expand_ratio=1, strides=(1, 1), se_ratio=0.25,id_skip=True,fused_conv=False,conv_type='depthwise'),
dict(input_filters=16, output_filters=24, kernel_size=3, num_repeat=2, expand_ratio=6, strides=(2, 2), se_ratio=0.25,id_skip=True,fused_conv=False,conv_type='depthwise'),
dict(input_filters=24, output_filters=40, kernel_size=5, num_repeat=2, expand_ratio=6, strides=(2, 2), se_ratio=0.25,id_skip=True,fused_conv=False,conv_type='depthwise'),
dict(input_filters=40, output_filters=80, kernel_size=3, num_repeat=3, expand_ratio=6, strides=(2, 2), se_ratio=0.25,id_skip=True,fused_conv=False,conv_type='depthwise'),
dict(input_filters=80, output_filters=112, kernel_size=5, num_repeat=3, expand_ratio=6, strides=(1, 1), se_ratio=0.25,id_skip=True,fused_conv=False,conv_type='depthwise'),
dict(input_filters=112, output_filters=192, kernel_size=5, num_repeat=4, expand_ratio=6, strides=(2, 2), se_ratio=0.25,id_skip=True,fused_conv=False,conv_type='depthwise'),
dict(input_filters=192, output_filters=320, kernel_size=3, num_repeat=1, expand_ratio=6, strides=(1, 1), se_ratio=0.25,id_skip=True,fused_conv=False,conv_type='depthwise'),
# pylint: enable=bad-whitespace
),
)
# train_config = dict(lr_decay='cosine',
#
# max_epochs=500,
# img_size=380,
# batch_size=256,
# save_checkpoint_freq=5,
# lr_init=0.005,
# weight_decay=5e-6,
# epsilon=0.001,
# resume_checkpoint=1,
# enable_tensorboard=0
# )
#
# eval_config = dict(img_size=380,
# batch_size=256)
#
# data_config = dict(
# data_dir='/data/',
# augmenter_name='autoaugment',
# mixup_alpha=0.0,
#
#
# )
# runtime_config = dict(mode='train_and_eval',
# model_dir='./output/',
# use_amp=1,
# use_xla=1,
# log_steps=100
# )
#
# config = dict(model=model_config,
# train=train_config,
# eval=eval_config,
# data=data_config,
# runtime=runtime_config,
# )
| DeepLearningExamples-master | TensorFlow2/Classification/ConvNets/config/efficientnet_v1/b4_cfg.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import multiprocessing
import warnings
from tensorflow.python.ops.gen_array_ops import deep_copy
import yaml
import time
import tensorflow as tf
import numpy as np
import horovod.tensorflow.keras as hvd
import tensorflow_addons as tfa
from tensorflow.python.platform import gfile
from importlib import import_module
from utils import hvd_utils, optimizer_factory
from utils import callbacks as custom_callbacks
from runtime.runner_utils import get_optimizer_params
from runtime.runner_utils import get_metrics
from runtime.runner_utils import get_learning_rate_params
from runtime.runner_utils import get_dataset_builders
from runtime.runner_utils import build_stats
from runtime.runner_utils import preprocess_image_files
from utils.tf_utils import get_num_flops
from utils.tf_utils import get_num_params
from runtime.runner_utils import train_step
__all__ = [
'Runner',
]
class Runner(object):
def __init__(self, flags, logger):
self.params = flags
self.logger = logger
if hvd.rank() == 0:
self.serialize_config()
self.one_hot = self.params.label_smoothing and self.params.label_smoothing > 0
self.initialize()
self.metrics = self.params.metrics
# =================================================
# Define Model
# =================================================
Model = import_module(self.params.mparams.path_to_impl).Model
# we use custom train_step if gradient accumulation is requested
if self.params.grad_accum_steps > 1:
Model.train_step = train_step # monkey patching
self.model = Model(self.params)
try:
# complexity analysis
img_size = 171
params = get_num_params(self.model)
flops = get_num_flops(self.model,(img_size, img_size, 3))
print('''
# ======================================================#
# #params: {:.4f}M #Flops: {:.4f}G [{}x{}x3]
# ======================================================#
'''.format(params,flops, img_size,img_size))
        except Exception:
print('''
# ======================================================#
# Skip complexity analysis
# ======================================================#
''')
def initialize(self):
"""Initializes backend related initializations."""
if self.params.data_format:
tf.keras.backend.set_image_data_format(self.params.data_format)
if self.params.run_eagerly:
# Enable eager execution to allow step-by-step debugging
tf.config.experimental_run_functions_eagerly(True)
def load_model_weights(self, model_dir_or_fn, expect_partial=False):
"""
Resumes from the latest checkpoint, if possible.
Loads the model weights and optimizer settings from a checkpoint.
This function should be used in case of preemption recovery.
        Args:
            model_dir_or_fn: Directory to search for the latest checkpoint, or a path to a specific checkpoint file.
            expect_partial: If True, silence warnings about variables that are not restored (used for partial loading).
Returns:
The iteration of the latest checkpoint, or 0 if not restoring.
"""
if gfile.IsDirectory(model_dir_or_fn):
latest_checkpoint = tf.train.latest_checkpoint(model_dir_or_fn)
if not latest_checkpoint:
return 0
else:
latest_checkpoint = model_dir_or_fn
if expect_partial:
self.model.load_weights(latest_checkpoint).expect_partial()
else:
self.model.load_weights(latest_checkpoint)
if self.model.optimizer:
return int(self.model.optimizer.iterations)
else:
# optimizer has not been compiled (predict mode)
return 0
def serialize_config(self):
"""Serializes and saves the experiment config."""
save_dir=self.params.log_dir if self.params.log_dir is not None else self.params.model_dir
mode=self.params.mode
if mode in ["train", "train_and_eval", "training_benchmark"]:
params_save_path = os.path.join(save_dir, 'params.yaml')
else:
# to avoid overwriting the training config file that may exist in the same dir
params_save_path = os.path.join(save_dir, 'eval_params.yaml')
self.params.save_to_yaml(params_save_path)
def train(self):
# get global batch size, #epochs, and equivalent #steps
global_batch_size_tr = self.params.train_batch_size * hvd.size() * self.params.grad_accum_steps
train_epochs = self.params.max_epochs
assert train_epochs >= self.params.n_stages, "each training stage requires at least 1 training epoch"
train_steps = self.params.steps_per_epoch if self.params.steps_per_epoch else self.params.train_num_examples // global_batch_size_tr * self.params.grad_accum_steps
train_iterations = train_steps // self.params.grad_accum_steps
global_batch_size_eval = self.params.eval_batch_size * hvd.size()
validation_steps = self.params.eval_num_examples //global_batch_size_eval if "eval" in self.params.mode else None
# set up lr schedule
learning_rate = optimizer_factory.build_learning_rate(
params=get_learning_rate_params(name=self.params.lr_decay,
initial_lr=self.params.lr_init,
decay_epochs=self.params.lr_decay_epochs,
decay_rate=self.params.lr_decay_rate,
warmup_epochs=self.params.lr_warmup_epochs),
batch_size=global_batch_size_tr, # updates are iteration based not batch-index based
train_steps=train_iterations,
max_epochs=train_epochs)
# set up optimizer
optimizer = optimizer_factory.build_optimizer(
optimizer_name=self.params.optimizer,
base_learning_rate=learning_rate,
params=get_optimizer_params(name=self.params.optimizer,
decay=self.params.decay,
epsilon=self.params.opt_epsilon,
momentum=self.params.momentum,
moving_average_decay=self.params.moving_average_decay,
nesterov=self.params.nesterov,
beta_1=self.params.beta_1,
beta_2=self.params.beta_2)
)
if self.params.grad_accum_steps > 1:
# we use custom train_step when self.params.grad_accum_steps > 1
if self.params.use_amp:
# in which case we must manually wrap the optimizer with LossScaleOptimizer
optimizer = tf.keras.mixed_precision.experimental.LossScaleOptimizer(optimizer, "dynamic")
# Horovod allreduce across devices takes place in the custom train_step
else:
# Without custom train_step, AMP optimizer is automatically taken care of.
# Without custom train_step, we need to wrap the optimizer to enable Horovod
optimizer = hvd.DistributedOptimizer(optimizer, compression=hvd.Compression.fp16 if self.params.hvd_fp16_compression else hvd.Compression.none)
# define metrics depending on target labels (1-hot vs. sparse)
metrics_map = get_metrics(self.one_hot)
metrics = [metrics_map[metric] for metric in self.metrics]
# define loss functions depending on target labels (1-hot vs. sparse)
if self.one_hot:
loss_obj = tf.keras.losses.CategoricalCrossentropy(
label_smoothing=self.params.label_smoothing)
else:
loss_obj = tf.keras.losses.SparseCategoricalCrossentropy()
# model compilation
self.model.compile(optimizer=optimizer,
loss=loss_obj,
metrics=metrics,
run_eagerly=self.params.run_eagerly,
)
resumed_iterations = resumed_epoch = 0
if self.params.resume_checkpoint:
print('# ==================MODEL LOADING BEGINS=====================#')
resumed_iterations = self.load_model_weights(self.params.model_dir)
resumed_epoch = resumed_iterations // train_iterations
if resumed_iterations > 0:
print('''
# =======================================
ckpt at iteration {} loaded!
# ======================================='''.format(resumed_iterations))
#Define Callbacks (TODO)
callbacks=[hvd.callbacks.BroadcastGlobalVariablesCallback(0)]
callbacks += custom_callbacks.get_callbacks(
model_checkpoint=self.params.enable_checkpoint_saving,
include_tensorboard=self.params.enable_tensorboard,
time_history=self.params.time_history,
track_lr=True,
write_model_weights=self.params.tb_write_model_weights,
initial_step=resumed_epoch * train_steps,
batch_size=global_batch_size_tr / self.params.grad_accum_steps, # throughput calc: 1 batch is 1 step
log_steps=self.params.log_steps,
model_dir=self.params.model_dir,
save_checkpoint_freq=train_steps * self.params.save_checkpoint_freq, # conditioned on batch index
ema_decay=self.params.moving_average_decay,
intratrain_eval_using_ema=self.params.intratrain_eval_using_ema,
logger=self.logger)
n_stages = self.params.n_stages
if not n_stages or n_stages == 1:
# =================================================
# Define Datasets
# =================================================
builders = get_dataset_builders(self.params, self.one_hot)
datasets = [builder.build() if builder else None for builder in builders]
self.train_dataset, self.validation_dataset = datasets
self.train_builder, self.validation_builder = builders
# set model validation args
if "eval" not in self.params.mode:
validation_kwargs = {}
else:
validation_kwargs = {
'validation_data': self.validation_dataset,
'validation_steps': validation_steps,
'validation_freq': self.params.num_epochs_between_eval,
}
history = self.model.fit(
self.train_dataset,
epochs=train_epochs,
steps_per_epoch=train_steps,
initial_epoch=resumed_epoch,
callbacks=callbacks,
verbose=2,
**validation_kwargs)
else: # progressive training
# determine regularization schedule
base_img_size=self.params.base_img_size
base_mixup=self.params.base_mixup
base_cutmix=self.params.base_cutmix
base_randaug_mag=self.params.base_randaug_mag
ram_list = np.linspace(base_randaug_mag, self.params.raug_magnitude, n_stages)
mixup_list = np.linspace(base_mixup, self.params.mixup_alpha, n_stages)
cutmix_list = np.linspace(base_cutmix, self.params.cutmix_alpha, n_stages)
target_img_size = self.params.train_img_size
epochs_per_stage = train_epochs // n_stages
resumed_stage = min(resumed_epoch // epochs_per_stage, n_stages-1)
for stage in range(resumed_stage, n_stages):
epoch_st = stage * epochs_per_stage
epoch_end = (epoch_st + epochs_per_stage) if stage < n_stages-1 else train_epochs
epoch_curr = epoch_st if epoch_st >= resumed_epoch else resumed_epoch
ratio = float(stage + 1) / float(n_stages)
image_size = int(base_img_size + (target_img_size - base_img_size) * ratio)
# reassign new param vals
self.params.raug_magnitude = ram_list[stage]
self.params.mixup_alpha = mixup_list[stage]
self.params.cutmix_alpha = cutmix_list[stage]
self.params.train_img_size = image_size
# =================================================
# Define Datasets
# =================================================
builders = get_dataset_builders(self.params, self.one_hot)
datasets = [builder.build() if builder else None for builder in builders]
self.train_dataset, self.validation_dataset = datasets
self.train_builder, self.validation_builder = builders
# set model validation args
if "eval" not in self.params.mode:
validation_kwargs = {}
else:
validation_kwargs = {
'validation_data': self.validation_dataset,
'validation_steps': validation_steps,
'validation_freq': self.params.num_epochs_between_eval,
}
print('''
# ===============================================
Training stage: {}
Epochs: {}-{}: starting at {}
batch size: {}
grad accum steps: {}
image size: {}
cutmix_alpha: {}
mixup_alpha:{}
raug_magnitude: {}
# ==============================================='''.format(stage,
epoch_st,
epoch_end,
epoch_curr,
self.params.train_batch_size,
self.params.grad_accum_steps,
self.params.train_img_size,
self.params.cutmix_alpha,
self.params.mixup_alpha,
self.params.raug_magnitude,
))
history = self.model.fit(
self.train_dataset,
epochs=epoch_end,
steps_per_epoch=train_steps,
initial_epoch=epoch_curr,
callbacks=callbacks,
verbose=2,
**validation_kwargs)
# we perform final evaluation using 1 GPU (hvd_size=1)
builders = get_dataset_builders(self.params, self.one_hot, hvd_size=1)
datasets = [builder.build() if builder else None for builder in builders]
_, self.validation_dataset = datasets
_, self.validation_builder = builders
validation_output = None
eval_callbacks = []
if not self.params.skip_eval and self.validation_builder is not None:
eval_callbacks.append(custom_callbacks.EvalTimeHistory(batch_size=self.params.eval_batch_size, logger=self.logger))
validation_output = self.model.evaluate(
self.validation_dataset, callbacks=eval_callbacks, verbose=2)
# stats are printed regardless of whether eval is requested or not
build_stats(history, validation_output, callbacks, eval_callbacks, self.logger, comment="eval using original weights")
else:
build_stats(history, validation_output, callbacks, eval_callbacks, self.logger, comment="eval not requested")
if self.params.moving_average_decay > 0:
ema_validation_output = None
eval_callbacks = []
if not self.params.skip_eval and self.validation_builder is not None:
eval_callbacks.append(custom_callbacks.EvalTimeHistory(batch_size=self.params.eval_batch_size, logger=self.logger))
eval_callbacks.append(custom_callbacks.MovingAverageCallback(intratrain_eval_using_ema=True))
ema_validation_output = self.model.evaluate(
self.validation_dataset, callbacks=eval_callbacks, verbose=2)
# we print stats again if eval using EMA weights requested
build_stats(history, ema_validation_output, callbacks, eval_callbacks, self.logger, comment="eval using EMA weights")
if hvd.rank() == 0 and self.params.export_SavedModel:
if not self.params.skip_eval and self.validation_builder is not None:
# with the availability of eval stats and EMA weights, we choose the better weights for saving
if self.params.moving_average_decay > 0 and float(ema_validation_output[1]) > float(validation_output[1]):
self.ema_opt = optimizer_factory.fetch_optimizer(self.model, optimizer_factory.MovingAverage)
self.ema_opt.swap_weights()
self.model.save(self.params.model_dir + '/savedmodel', include_optimizer=True, save_format='tf')
def evaluate(self):
metrics_map = get_metrics(self.one_hot)
metrics = [metrics_map[metric] for metric in self.metrics]
if self.one_hot:
loss_obj = tf.keras.losses.CategoricalCrossentropy(
label_smoothing=self.params.label_smoothing)
else:
loss_obj = tf.keras.losses.SparseCategoricalCrossentropy()
# set up optimizer
optimizer = optimizer_factory.build_optimizer(
optimizer_name=self.params.optimizer,
base_learning_rate=0.1,
params=get_optimizer_params(name=self.params.optimizer,
decay=self.params.decay,
epsilon=self.params.opt_epsilon,
momentum=self.params.momentum,
moving_average_decay=self.params.moving_average_decay,
nesterov=self.params.nesterov,
beta_1=self.params.beta_1,
beta_2=self.params.beta_2)
)
self.model.compile(optimizer=optimizer,
loss=loss_obj,
metrics=metrics,
run_eagerly=self.params.run_eagerly)
if self.params.weights_format == 'saved_model':
self.model = tf.keras.models.load_model(self.params.model_dir + '/savedmodel', custom_objects = {"HvdMovingAverage":optimizer_factory.HvdMovingAverage})
#self.model.set_weights(loaded_model.get_weights())
print('''
# =======================================
Saved_model loaded successfully!
# =======================================''')
else:
# we allow for partial loading
resumed_step = self.load_model_weights(self.params.model_dir, expect_partial=True)
if resumed_step > 0:
print('''
# =======================================
ckpt at iteration {} loaded!
# ======================================='''.format(resumed_step))
# Ckpt format contains both original weights and EMA weights. However, saved_model format only stores the better performing
# weights between the original and EMA weights. As such, saved_model format doesn't allow for evaluation using EMA weights,
# because we simply don't know which weights have ended up being saved in this format.
if self.params.moving_average_decay > 0 and self.params.weights_format != 'saved_model':
# =================================================
# Define Datasets
# =================================================
builders = get_dataset_builders(self.params, self.one_hot)
datasets = [builder.build() if builder else None for builder in builders]
_, self.validation_dataset = datasets
_, self.validation_builder = builders
eval_callbacks = []
if not self.params.skip_eval and self.validation_builder is not None:
eval_callbacks.append(custom_callbacks.EvalTimeHistory(batch_size=self.params.eval_batch_size, logger=self.logger))
eval_callbacks.append(custom_callbacks.MovingAverageCallback(intratrain_eval_using_ema=True))
ema_worker_validation_output = self.model.evaluate(
self.validation_dataset, callbacks=eval_callbacks, verbose=1)
build_stats(None, ema_worker_validation_output, None, eval_callbacks, self.logger, comment="eval using EMA weights")
for round_num in range(self.params.n_repeat_eval):
# =================================================
# Define Datasets
# =================================================
builders = get_dataset_builders(self.params, self.one_hot)
datasets = [builder.build() if builder else None for builder in builders]
_, self.validation_dataset = datasets
_, self.validation_builder = builders
eval_callbacks = []
if not self.params.skip_eval and self.validation_builder is not None:
eval_callbacks.append(custom_callbacks.EvalTimeHistory(batch_size=self.params.eval_batch_size, logger=self.logger))
worker_validation_output = self.model.evaluate(
self.validation_dataset, callbacks=eval_callbacks, verbose=1)
build_stats(None, worker_validation_output, None, eval_callbacks, self.logger, comment="eval using original weights: Round {} ".format(round_num))
if self.params.export_SavedModel and self.params.weights_format != 'saved_model':
if self.params.moving_average_decay > 0 and float(ema_worker_validation_output[1]) > float(worker_validation_output[1]):
self.ema_opt = optimizer_factory.fetch_optimizer(self.model, optimizer_factory.MovingAverage)
self.ema_opt.swap_weights()
self.model.save(self.params.model_dir + '/savedmodel' , include_optimizer=True, save_format='tf')
def predict(self, img_dir, checkpoint_path=None, print_results=True):
# verify checkpoint_name validity
# if not, we use rnd weights
# if so, load the model conditioned on the format
# load the weights if ckpt exists
if checkpoint_path and os.path.exists(checkpoint_path):
if self.params.weights_format == 'saved_model':
loaded_model = tf.keras.models.load_model(checkpoint_path, custom_objects = {"HvdMovingAverage":optimizer_factory.HvdMovingAverage})
self.model.set_weights(loaded_model.get_weights())
elif self.params.weights_format == 'ckpt':
self.load_model_weights(checkpoint_path, expect_partial=True)
else:
print('***ckpt not found! predicting using random weights...')
try:
tf.keras.backend.set_learning_phase(0)
dtype = self.params.mparams.dtype
images = preprocess_image_files(img_dir, self.params.predict_img_size, self.params.predict_batch_size, dtype)
nb_samples = len(images)
file_names = images.filenames
num_files = len(file_names)
REPEAT=50 if self.params.benchmark else 1
print_results = not self.params.benchmark
# start_time = time.time()
# inference_results = self.model.predict(images, verbose=1, steps=nb_samples)
# total_time = time.time() - start_time
# score = tf.nn.softmax(inference_results, axis=1)
num_files = num_files * REPEAT
batch_times = []
for i in range(nb_samples*REPEAT):
start_time = time.time()
image = images.next()
batch_result = np.asarray(self.model(image),dtype='float32')
batch_times.append(time.time() - start_time)
if not i:
inference_results = batch_result
else:
inference_results = np.vstack((inference_results,batch_result))
total_time = np.sum(batch_times)
score = tf.nn.softmax(tf.convert_to_tensor(inference_results, dtype = tf.float32), axis=1)
#
if print_results:
for i, name in enumerate(file_names):
print(
"This {} image most likely belongs to {} class with a {} percent confidence."
.format(name, tf.math.argmax(score[i]), 100 * tf.math.reduce_max(score[i]))
)
print("Total time to infer {} images :: {}".format(num_files, total_time))
print("Inference Throughput {}".format(num_files/total_time))
print("Inference Latency {}".format(total_time/num_files))
except KeyboardInterrupt:
print("Keyboard interrupt")
print('Ending Inference ...')
| DeepLearningExamples-master | TensorFlow2/Classification/ConvNets/runtime/runner.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import math
import tensorflow as tf
import horovod.tensorflow as hvd
from utils import hvd_utils
from utils import callbacks
from dataloader import dataset_factory
# Only names actually defined in this module are exported.
__all__ = ['get_optimizer_params', 'get_metrics', 'get_learning_rate_params', 'build_augmenter_params',
           'get_dataset_builders', 'build_stats', 'parse_inference_input', 'preprocess_image_files', 'train_step']
def get_optimizer_params(name,
decay,
epsilon,
momentum,
moving_average_decay,
nesterov,
beta_1,
beta_2):
return {
'name': name,
'decay': decay,
'epsilon': epsilon,
'momentum': momentum,
'moving_average_decay': moving_average_decay,
'nesterov': nesterov,
'beta_1': beta_1,
'beta_2': beta_2
}
def get_metrics(one_hot: bool):
"""Get a dict of available metrics to track."""
if one_hot:
return {
# (name, metric_fn)
'acc': tf.keras.metrics.CategoricalAccuracy(name='accuracy'),
'accuracy': tf.keras.metrics.CategoricalAccuracy(name='accuracy'),
'top_1': tf.keras.metrics.CategoricalAccuracy(name='accuracy'),
'top_5': tf.keras.metrics.TopKCategoricalAccuracy(
k=5,
name='top_5_accuracy'),
}
else:
return {
# (name, metric_fn)
'acc': tf.keras.metrics.SparseCategoricalAccuracy(name='accuracy'),
'accuracy': tf.keras.metrics.SparseCategoricalAccuracy(name='accuracy'),
'top_1': tf.keras.metrics.SparseCategoricalAccuracy(name='accuracy'),
'top_5': tf.keras.metrics.SparseTopKCategoricalAccuracy(
k=5,
name='top_5_accuracy'),
}
def get_learning_rate_params(name,
initial_lr,
decay_epochs,
decay_rate,
warmup_epochs):
return {
'name':name,
'initial_lr': initial_lr,
'decay_epochs': decay_epochs,
'decay_rate': decay_rate,
'warmup_epochs': warmup_epochs,
'examples_per_epoch': None,
'boundaries': None,
'multipliers': None,
'scale_by_batch_size': 1./128.,
'staircase': True
}
def build_augmenter_params(augmenter_name, cutout_const, translate_const, num_layers, magnitude, autoaugmentation_name):
if augmenter_name is None or augmenter_name not in ['randaugment', 'autoaugment']:
return {}
augmenter_params = {}
if cutout_const is not None:
augmenter_params['cutout_const'] = cutout_const
if translate_const is not None:
augmenter_params['translate_const'] = translate_const
if augmenter_name == 'randaugment':
if num_layers is not None:
augmenter_params['num_layers'] = num_layers
if magnitude is not None:
augmenter_params['magnitude'] = magnitude
if augmenter_name == 'autoaugment':
if autoaugmentation_name is not None:
augmenter_params['autoaugmentation_name'] = autoaugmentation_name
return augmenter_params
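# Illustrative example (not part of the original module): calling
#   build_augmenter_params('randaugment', cutout_const=40, translate_const=100,
#                          num_layers=2, magnitude=15, autoaugmentation_name=None)
# returns {'cutout_const': 40, 'translate_const': 100, 'num_layers': 2, 'magnitude': 15},
# which get_dataset_builders passes to dataset_factory.Dataset as `augmenter_params`.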
# def get_image_size_from_model(arch):
# """If the given model has a preferred image size, return it."""
# if 'efficientnet_v1' in arch:
# if arch in efficientnet_model_v1.MODEL_CONFIGS:
# return efficientnet_model_v1.MODEL_CONFIGS[arch]['resolution']
# elif 'efficientnet_v2' in arch:
# if arch in efficientnet_model_v2.MODEL_CONFIGS:
# return efficientnet_model_v2.MODEL_CONFIGS[arch]['resolution']
# return None
def get_dataset_builders(params, one_hot, hvd_size=None):
"""Create and return train and validation dataset builders."""
builders = []
validation_dataset_builder = None
train_dataset_builder = None
if "train" in params.mode:
img_size = params.train_img_size
print("Image size {} used for training".format(img_size))
print("Train batch size {}".format(params.train_batch_size))
train_dataset_builder = dataset_factory.Dataset(data_dir=params.data_dir,
index_file_dir=params.index_file,
split='train',
num_classes=params.num_classes,
image_size=img_size,
batch_size=params.train_batch_size,
one_hot=one_hot,
use_dali=params.train_use_dali,
augmenter=params.augmenter_name,
augmenter_params=build_augmenter_params(params.augmenter_name,
params.cutout_const,
params.translate_const,
params.raug_num_layers,
params.raug_magnitude,
params.autoaugmentation_name),
mixup_alpha=params.mixup_alpha,
cutmix_alpha=params.cutmix_alpha,
defer_img_mixing=params.defer_img_mixing,
mean_subtract=params.mean_subtract_in_dpipe,
standardize=params.standardize_in_dpipe,
hvd_size=hvd_size,
disable_map_parallelization=params.disable_map_parallelization
)
if "eval" in params.mode:
img_size = params.eval_img_size
print("Image size {} used for evaluation".format(img_size))
validation_dataset_builder = dataset_factory.Dataset(data_dir=params.data_dir,
index_file_dir=params.index_file,
split='validation',
num_classes=params.num_classes,
image_size=img_size,
batch_size=params.eval_batch_size,
one_hot=one_hot,
use_dali=params.eval_use_dali,
hvd_size=hvd_size)
builders.append(train_dataset_builder)
builders.append(validation_dataset_builder)
return builders
def build_stats(history, validation_output, train_callbacks, eval_callbacks, logger, comment=''):
stats = {}
stats['comment'] = comment
if validation_output:
stats['eval_loss'] = float(validation_output[0])
stats['eval_accuracy_top_1'] = float(validation_output[1])
stats['eval_accuracy_top_5'] = float(validation_output[2])
#This part is train loss on GPU_0
if history and history.history:
train_hist = history.history
#Gets final loss from training.
stats['training_loss'] = float(hvd.allreduce(tf.constant(train_hist['loss'][-1], dtype=tf.float32), average=True))
# Gets top_1 training accuracy.
if 'categorical_accuracy' in train_hist:
stats['training_accuracy_top_1'] = float(hvd.allreduce(tf.constant(train_hist['categorical_accuracy'][-1], dtype=tf.float32), average=True))
elif 'sparse_categorical_accuracy' in train_hist:
stats['training_accuracy_top_1'] = float(hvd.allreduce(tf.constant(train_hist['sparse_categorical_accuracy'][-1], dtype=tf.float32), average=True))
elif 'accuracy' in train_hist:
stats['training_accuracy_top_1'] = float(hvd.allreduce(tf.constant(train_hist['accuracy'][-1], dtype=tf.float32), average=True))
stats['training_accuracy_top_5'] = float(hvd.allreduce(tf.constant(train_hist['top_5_accuracy'][-1], dtype=tf.float32), average=True))
# Look for the time history callback which was used during keras.fit
if train_callbacks:
for callback in train_callbacks:
if isinstance(callback, callbacks.TimeHistory):
if callback.epoch_runtime_log:
stats['avg_exp_per_second_training'] = callback.average_examples_per_second
stats['avg_exp_per_second_training_per_GPU'] = callback.average_examples_per_second / hvd.size()
if eval_callbacks:
for eval_callback in eval_callbacks:
if not isinstance(eval_callback, callbacks.EvalTimeHistory):
continue
            stats['avg_exp_per_second_eval'] = float(eval_callback.average_examples_per_second) # * hvd.size(), performing one-gpu evaluation now
stats['avg_exp_per_second_eval_per_GPU'] = float(eval_callback.average_examples_per_second)
stats['avg_time_per_exp_eval'] = 1000./stats['avg_exp_per_second_eval']
batch_time = eval_callback.batch_time
batch_time.sort()
latency_pct_per_batch = sum( batch_time[:-1] ) / int( len(batch_time) - 1 )
stats['latency_pct'] = 1000.0 * latency_pct_per_batch
latency_90pct_per_batch = sum( batch_time[:int( 0.9 * len(batch_time) )] ) / int( 0.9 * len(batch_time) )
stats['latency_90pct'] = 1000.0 * latency_90pct_per_batch
latency_95pct_per_batch = sum( batch_time[:int( 0.95 * len(batch_time) )] ) / int( 0.95 * len(batch_time) )
stats['latency_95pct'] = 1000.0 * latency_95pct_per_batch
latency_99pct_per_batch = sum( batch_time[:int( 0.99 * len(batch_time) )] ) / int( 0.99 * len(batch_time) )
stats['latency_99pct'] = 1000.0 * latency_99pct_per_batch
if not hvd_utils.is_using_hvd() or hvd.rank() == 0:
logger.log(step=(), data=stats)
def preprocess_image_files(directory_name, img_size, batch_size, dtype):
# data format should always be channels_last. If need be, it will be adjusted in the model module.
data_format = "channels_last"
datagen = tf.keras.preprocessing.image.ImageDataGenerator(data_format=data_format, dtype=dtype)
images = datagen.flow_from_directory(directory_name, class_mode=None, batch_size=batch_size, target_size=(img_size, img_size), shuffle=False)
return images
def parse_inference_input(to_predict):
filenames = []
image_formats = ['.jpg', '.jpeg', '.JPEG', '.JPG', '.png', '.PNG']
if os.path.isdir(to_predict):
filenames = [f for f in os.listdir(to_predict)
if os.path.isfile(os.path.join(to_predict, f))
and os.path.splitext(f)[1] in image_formats]
elif os.path.isfile(to_predict):
filenames.append(to_predict)
return filenames
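# NOTE (illustrative sketch, not part of the original module): the custom train_step below
# assumes the model exposes `self.grad_accumulator`, `self.local_step` and `self.gradients_gnorm`,
# which are created by the model implementation and are not defined in this file. A minimal
# accumulator compatible with the calls made below (call, .gradients, .reset()) could look
# roughly like the hypothetical helper sketched here:
class _ExampleGradientAccumulator:
    """Keeps a running per-variable sum of gradients between (simulated large-batch) updates."""

    def __init__(self):
        self.gradients = []  # one non-trainable tf.Variable buffer per model gradient

    def __call__(self, gradients):
        if not self.gradients:
            # Lazily create one buffer per gradient on the first call (first trace only);
            # gradients that are None (unused variables) get no buffer.
            self.gradients = [tf.Variable(tf.zeros_like(g), trainable=False) if g is not None else None
                              for g in gradients]
        for buffer, grad in zip(self.gradients, gradients):
            if buffer is not None and grad is not None:
                buffer.assign_add(grad)

    def reset(self):
        for buffer in self.gradients:
            if buffer is not None:
                buffer.assign(tf.zeros_like(buffer))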
@tf.function
def train_step(self, data):
"""[summary]
custom training step, which is used in case the user requests gradient accumulation.
"""
# params
use_amp = self.config.use_amp
grad_accum_steps = self.config.grad_accum_steps
hvd_fp16_compression = self.config.hvd_fp16_compression
grad_clip_norm = self.config.grad_clip_norm
#Forward and Backward pass
x,y = data
with tf.GradientTape() as tape:
y_pred = self(x, training=True)
loss = self.compiled_loss(y, y_pred, regularization_losses=self.losses)
if use_amp:
loss = self.optimizer.get_scaled_loss(loss)
# Update metrics (includes the metric that tracks the loss)
self.compiled_metrics.update_state(y, y_pred)
#Backprop gradients
# tape = hvd.DistributedGradientTape(tape, compression=hvd.Compression.fp16 if use_amp and hvd_fp16_compression else hvd.Compression.none)
gradients = tape.gradient(loss, self.trainable_variables)
#Get unscaled gradients if AMP
if use_amp:
gradients = self.optimizer.get_unscaled_gradients(gradients)
#Accumulate gradients
self.grad_accumulator(gradients)
if self.local_step % grad_accum_steps == 0:
gradients = [None if g is None else hvd.allreduce(g / tf.cast(grad_accum_steps, g.dtype),
compression=hvd.Compression.fp16 if use_amp and hvd_fp16_compression else hvd.Compression.none)
for g in self.grad_accumulator.gradients]
if grad_clip_norm > 0:
(gradients, gradients_gnorm) = tf.clip_by_global_norm(gradients, clip_norm=grad_clip_norm)
self.gradients_gnorm.assign(gradients_gnorm) # this will later appear on tensorboard
#Weight update & iteration update
self.optimizer.apply_gradients(zip(gradients, self.trainable_variables))
self.grad_accumulator.reset()
# update local counter
self.local_step.assign_add(1)
# Return a dict mapping metric names to current value
return {m.name: m.result() for m in self.metrics} | DeepLearningExamples-master | TensorFlow2/Classification/ConvNets/runtime/runner_utils.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from runtime.runner import Runner | DeepLearningExamples-master | TensorFlow2/Classification/ConvNets/runtime/__init__.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import yaml
def _add_bool_argument(parser, name=None, default=False, required=False, help=None):
    if not isinstance(default, bool):
        raise ValueError('`default` must be a bool, got {!r}'.format(default))
    feature_parser = parser.add_mutually_exclusive_group(required=required)
    feature_parser.add_argument('--' + name, dest=name, action='store_true', help=help, default=default)
    feature_parser.add_argument('--no' + name, dest=name, action='store_false')
    # The destination name must be passed dynamically; `set_defaults(name=default)` would
    # literally set an attribute called "name" instead of the requested flag.
    feature_parser.set_defaults(**{name: default})
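# Illustrative usage (not part of the original module): _add_bool_argument(p, 'use_dali', default=False,
# help='...') would register the mutually exclusive pair `--use_dali` / `--nouse_dali`; when neither
# flag is passed, the provided default is used.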
def parse_cmdline():
    p = argparse.ArgumentParser(description="JoC-Efficientnet-v2-TF, full list of general hyperparameters. "
                                            "Model-specific hyperparameters must be provided via a config file (see --cfg). "
                                            "User config can be provided in a separate config file like config/efficientnet_v2/s_cfg.py. "
                                            "Use True/False or 1/0 for boolean flags.")
p.add_argument(
'--cfg',
type=str,
default=None,
required=True,
        help=('Path to the config file that contains the hyperparameters of your model. '
              'The path must be relative to the current dir. The user can override model hyperparameters '
              'on the command line (see --mparams).'))
p.add_argument(
'--mparams',
type=str,
default=None,
required=False,
        help=('A comma-separated list of key=val pairs, where each key is a model hyperparameter and val is its new value. '
              'This flag is handy when you want to override the model hyperparameters placed in the model config (see --cfg).'))
#######################runtime-related hparams##########################
p.add_argument( '--log_steps', type=int, default=100, help='The interval of steps between logging of batch level stats.')
p.add_argument( '--mode', type=str, default='train_and_eval', required=False, help='Mode to run: `train`, `eval`, `train_and_eval`, `training_benchmark` or `predict`.')
p.add_argument( '--time_history', type=str, default=1, help='Enable a callback to log the time for training steps.')
p.add_argument( '--use_xla', action='store_true', default=False, help='Have this flag to enable xla')
p.add_argument( '--use_amp', action='store_true', default=False, help='Have this flag to enable training with automated mixed precision (AMP)')
p.add_argument( '--intraop_threads', type=str, default=None, help='intra thread should match the number of CPU cores')
p.add_argument( '--interop_threads', type=str, default=None, help='inter thread should match the number of CPU sockets')
    p.add_argument( '--model_dir', type=str, default='/results/', help=('The directory where the model and training/evaluation summaries '
                                                                        'are stored. When resuming from a previous checkpoint, '
                                                                        'all necessary files should be placed in this directory.'))
    p.add_argument('--log_dir', type=str, default=None,
                   help=('The directory where the model and training/evaluation summaries '
                        'are stored.'))
p.add_argument( '--log_filename', type=str, default='log.json', help="Name of the JSON file to which write the training log")
p.add_argument('--seed', type=int, default=None, required=False, help="Random seed.")
# Tensor format used for the computation.
p.add_argument('--data_format', choices=['channels_first', 'channels_last'], type=str, default='channels_first', required=False, help=argparse.SUPPRESS)
p.add_argument('--run_eagerly', type=str, default=0, help="Set this flag 1/0 to run/disable eager execution mode.")
    p.add_argument('--memory_limit', type=int, default=None, help="Set the maximum GPU memory (MB) that can be allocated by TensorFlow. Sometimes TensorFlow "
                    "allocates more GPU memory than it actually needs, which results in OOM or a halt without stopping. Setting this to be "
                    "slightly less than the full GPU memory will help prevent this. For example, on an A100 80GB GPU, this value can be set to 81000.")
p.add_argument('--weights_format', type=str, default='ckpt', required=False, help="Whether to read pretrained weights from a ckpt or SavedModel format")
#######################train-related hparams##########################
# Tensor format used for the computation.
p.add_argument('--train_img_size', default=224, type=int, required=False, help="Image size used for training dataset.")
p.add_argument( '--max_epochs', default=300, type=int, required=False, help="Number of epochs of training.")
p.add_argument( '--steps_per_epoch', default=None, type=int, required=False, help="Manually set training steps that will be executed in each epoch, leave blank to iter over the whole training dataset every epoch." )
p.add_argument('--train_batch_size', type=int, default=32, required=False, help="Training batch size per GPU.")
    p.add_argument('--train_use_dali', action='store_true', default=False, help='Have this flag to use DALI for data loading and preprocessing of the dataset. Note: DALI does not support auto-augmentation or image mixup.')
##optimizer##
p.add_argument(
'--optimizer', type=str, default='rmsprop', required=False, help="Optimizer to be used.")
p.add_argument(
'--momentum', type=float, default=0.9, required=False, help="The value of Momentum used when optimizer name is `rmsprop` or `momentum`.")
p.add_argument(
'--beta_1', type=float, default=0.0, required=False, help="beta1 for Adam/AdamW.")
p.add_argument(
        '--beta_2', type=float, default=0.0, required=False, help="beta2 for Adam/AdamW.")
p.add_argument(
'--nesterov', action='store_true', default=False, required=False, help="nesterov bool for momentum SGD.")
p.add_argument(
'--opt_epsilon', type=float, default=0.001, required=False, help="The value of Epsilon for optimizer, required for `adamw`, `adam` and `rmsprop`.")
p.add_argument(
'--decay', type=float, default=0.9, required=False, help="The value of decay for `rmsprop`.")
p.add_argument(
'--weight_decay', default=5e-6, type=float, required=False, help="Weight Decay scale factor, for adamw or can be used in layers as L2 reg.")
p.add_argument(
'--label_smoothing', type=float, default=0.1, required=False, help="The value of label smoothing.")
p.add_argument(
        '--moving_average_decay', type=float, default=0.0, required=False, help="Empirically it has been found that using the moving average of the trained parameters "
        "of a deep network is better than using its trained parameters directly. This optimizer "
        "allows you to compute this moving average and swap the variables at save time so that "
        "any code outside of the training loop will use by default the average values instead "
        "of the original ones.")
p.add_argument(
        '--lookahead', action='store_true', default=False, required=False, help="Have this flag to enable lookahead. The optimizer iteratively updates two sets of weights: the search directions for weights "
        "are chosen by the inner optimizer, while the `slow weights` are updated every k steps "
        "based on the directions of the `fast weights` and the two sets of weights are "
        "synchronized. This method improves the learning stability and lowers the variance of "
        "its inner optimizer.")
p.add_argument(
        '--intratrain_eval_using_ema', action='store_true', default=True, required=False, help="Model evaluation during training can be done using the original weights, "
        "or using EMA weights. The latter takes place if moving_average_decay > 0 and intratrain_eval_using_ema is requested.")
p.add_argument(
'--grad_accum_steps', type=int, default=1, required=False, help="Use multiple steps to simulate a large batch size")
p.add_argument(
'--grad_clip_norm', type=float, default=0, required=False,
help="grad clipping is used in the custom train_step, which is called when grad_accum_steps > 1. Any non-zero value activates grad clipping")
p.add_argument(
'--hvd_fp16_compression', action='store_true', default=True, required=False, help="Optimize grad reducing across all workers")
p.add_argument(
        '--export_SavedModel', action='store_true', default=False, required=False, help='Set this flag to export the trained model in SavedModel format after training is complete. When `moving_average_decay` > 0, '
        'it will store the set of weights with the better accuracy between the original and EMA weights. This flag also has the effect of exporting the model as SavedModel at the end of evaluation.')
##lr schedule##
    p.add_argument('--lr_init', default=0.008, type=float, required=False, help="Initial value for the learning rate without scaling; the final learning rate is scaled by "
                   "lr_init * global_batch_size / 128.")
p.add_argument('--lr_decay', choices=['exponential', 'piecewise_constant_with_warmup', 'cosine', 'linearcosine'], type=str, default='exponential', required=False, help="Choose from the supported decay types")
p.add_argument('--lr_decay_rate', default=0.97, type=float, required=False, help="LR Decay rate for exponential decay.")
p.add_argument('--lr_decay_epochs', default=2.4, type=float, required=False, help="LR Decay epoch for exponential decay.")
p.add_argument('--lr_warmup_epochs', default=5, type=int, required=False, help="Number of warmup epochs for learning rate schedule.")
p.add_argument('--metrics', default=['accuracy', 'top_5'], nargs='+', action='extend', required=False, help="Metrics used to evaluate the model")
p.add_argument('--resume_checkpoint', action='store_true', default=True, required=False, help="Resume from a checkpoint in the model_dir.")
p.add_argument('--save_checkpoint_freq', type=int, default=5, required=False, help='Number of epochs to save checkpoint.')
##progressive training##
p.add_argument('--n_stages', type=int, default=1, required=False, help='Number of stages for progressive training in efficientnet_v2.')
p.add_argument('--base_img_size', type=int, default=128, required=False, help='Used to determine image size for stage 1 in progressive training. Image size will then be scaled linearly until it reaches train_img_size in the last stage of training.')##Nima
p.add_argument('--base_mixup', type=float, default=0, required=False, help='Mixup alpha for stage 1 in progressive training. Will then be scaled linearly until it reaches mixup_alpha in the last stage of training.')##Nima
p.add_argument('--base_cutmix', type=float, default=0, required=False, help='Cutmix alpha for stage 1 in progressive training. Will then be scaled linearly until it reaches cutmix_alpha in the last stage of training.')##Nima
p.add_argument('--base_randaug_mag', type=float, default=5, required=False, help='Strength of random augmentation for stage 1 in progressive training. Will then be scaled linearly until it reaches raug_magnitude in the last stage of training.')##Nima
##callbacks##
    p.add_argument('--enable_checkpoint_saving', action='store_true', default=True, required=False, help="Saves model checkpoints during training at desired intervals.")
p.add_argument('--enable_tensorboard', action='store_true', default=False, required=False, help=argparse.SUPPRESS)
p.add_argument('--tb_write_model_weights', action='store_true', default=False, required=False, help=argparse.SUPPRESS)
#######################eval-related hparams##########################
p.add_argument('--skip_eval', action='store_true', default=False, required=False, help="Skip eval at the end of training.")
    p.add_argument('--n_repeat_eval', type=int, default=1, required=False, help="Number of times to repeat evaluation. Useful to check variations in throughput.")
    p.add_argument('--num_epochs_between_eval', type=int, default=1, required=False, help="Number of training epochs between evaluations.")
p.add_argument('--eval_use_dali', action='store_true', default=False, help='Use dali for data loading and preprocessing of eval dataset.')
p.add_argument('--eval_batch_size', type=int, default=100, required=False, help="Evaluation batch size per GPU.")
p.add_argument('--eval_img_size', default=224, type=int, required=False, help="Image size used for validation dataset.")
#######################predict mode related hparams##########################
    p.add_argument('--predict_img_dir', type=str, required=False, default='/infer_data', help="Path to the directory of images to run inference on.")
p.add_argument('--predict_ckpt', type=str, required=False, default=None, help="Path to checkpoint to do inference on.")
p.add_argument('--predict_img_size', default=224, type=int, required=False,help="Image size used for inference.")
p.add_argument('--predict_batch_size', type=int, default=32, required=False, help="Predict batch size per GPU.")
p.add_argument('--benchmark', action='store_true', default=False, required=False, help="Benchmarking or not. Available in the predict mode.")
####################### data related hparams##########################
p.add_argument('--dataset', type=str, default='ImageNet', required=False, help='The name of the dataset, e.g. ImageNet, etc.')
    p.add_argument('--augmenter_name', type=str, default='autoaugment', required=False, help="Type of augmentation applied during preprocessing (training only).")
##Rand-augment params##
p.add_argument('--raug_num_layers', type=int, default=None, required=False, help="Rand Augmentation parameter.")
p.add_argument('--raug_magnitude', type=float, default=None, required=False, help="Rand Augmentation parameter.")
p.add_argument('--cutout_const', type=float, default=None, required=False, help="Rand/Auto Augmentation parameter.")
p.add_argument('--mixup_alpha', type=float, default=0., required=False, help="Mix up alpha")
p.add_argument('--cutmix_alpha', type=float, default=0., required=False, help="Cut mix alpha")
p.add_argument('--defer_img_mixing', action='store_true', default=False, required=False, help="Have this flag to perform image mixing in the compute graph")
p.add_argument('--translate_const', type=float, default=None, required=False, help="Rand/Auto Augmentation parameter.")
p.add_argument('--disable_map_parallelization', action='store_true', default=False, required=False, help="Have this flag to disable map parallelization of tf.Dataset. While this flag will hurt the throughput of multi-GPU/node sessions, it can prevent OOM errors during 1-GPU training sessions.")###Must add to scripts
##Auto-augment params
p.add_argument('--autoaugmentation_name', type=str, default=None, required=False, help="Auto-Augmentation parameter.")
##Dali usage
p.add_argument('--index_file', type=str, default=None, required=False, help="Path to index file required for dali.")
# dataset and split
p.add_argument('--data_dir', type=str, default='/data/', required=False, help='The location of the input data. Files should be named `train-*` and `validation-*`.')
p.add_argument('--num_classes', type=int, default=1000, required=False, help="Number of classes to train on.")
p.add_argument('--train_num_examples', type=int, default=1281167, required=False, help="Training number of examples.")
p.add_argument('--eval_num_examples', type=int, default=50000, required=False, help="Evaluation number of examples")
p.add_argument('--mean_subtract_in_dpipe', action='store_true', default=False, required=False, help="Whether to perform mean image subtraction in the data pipeline (dpipe) or not. If set to False, you can implement this in the compute graph.")##Nima
p.add_argument('--standardize_in_dpipe', action='store_true', default=False, required=False, help="Whether to perform image standardization in the data pipeline (dpipe) or not. If set to False, you can implement this in the compute graph.")##Nima
FLAGS, unknown_args = p.parse_known_args()
if len(unknown_args) > 0:
for bad_arg in unknown_args:
print("ERROR: Unknown command line arg: %s" % bad_arg)
raise ValueError("Invalid command line arg(s)")
return FLAGS
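# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original file): a worked example of the
# learning-rate scaling rule described in the `--lr_init` help string above.
# The numbers below (per-GPU batch size, number of workers) are made up.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    lr_init = 0.008            # --lr_init
    train_batch_size = 32      # --train_batch_size (per GPU)
    num_workers = 8            # hypothetical number of GPUs/workers
    global_batch_size = train_batch_size * num_workers
    effective_lr = lr_init * global_batch_size / 128
    print("effective initial learning rate:", effective_lr)  # 0.016 for these values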
| DeepLearningExamples-master | TensorFlow2/Classification/ConvNets/utils/cmdline_helper.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import horovod.tensorflow as hvd
__all__ = [
'is_using_hvd',
]
def is_using_hvd():
return hvd.size() > 1 | DeepLearningExamples-master | TensorFlow2/Classification/ConvNets/utils/hvd_utils.py |
# Lint as: python3
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Learning rate utilities for vision tasks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Any, List, Mapping
import tensorflow as tf
BASE_LEARNING_RATE = 0.1
__all__ = [ 'WarmupDecaySchedule', 'PiecewiseConstantDecayWithWarmup' ]
@tf.keras.utils.register_keras_serializable(package='Custom')
class WarmupDecaySchedule(tf.keras.optimizers.schedules.LearningRateSchedule):
"""A wrapper for LearningRateSchedule that includes warmup steps."""
def __init__(
self,
lr_schedule: tf.keras.optimizers.schedules.LearningRateSchedule,
warmup_steps: int,
**kwargs):
"""Add warmup decay to a learning rate schedule.
Args:
lr_schedule: base learning rate scheduler
warmup_steps: number of warmup steps
"""
super(WarmupDecaySchedule, self).__init__()
self._lr_schedule = lr_schedule
self._warmup_steps = warmup_steps
def __call__(self, step: int):
lr = self._lr_schedule(step)
if self._warmup_steps:
step_decay = step - self._warmup_steps
lr = self._lr_schedule(step_decay)
initial_learning_rate = tf.convert_to_tensor(
self._lr_schedule.initial_learning_rate, name="initial_learning_rate")
dtype = initial_learning_rate.dtype
global_step_recomp = tf.cast(step, dtype)
warmup_steps = tf.cast(self._warmup_steps, dtype)
warmup_lr = initial_learning_rate * global_step_recomp / warmup_steps
lr = tf.cond(global_step_recomp < warmup_steps,
lambda: warmup_lr,
lambda: lr)
return lr
def get_config(self) -> Mapping[str, Any]:
config = self._lr_schedule.get_config()
config.update({
"warmup_steps": self._warmup_steps,
})
config.update({
"lr_schedule": self._lr_schedule,
})
return config
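# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original file): wrapping an
# exponential-decay schedule with warmup. The step counts and rates below are
# made up for illustration only.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    base_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
        initial_learning_rate=0.1, decay_steps=1000, decay_rate=0.97)
    warmup_schedule = WarmupDecaySchedule(base_schedule, warmup_steps=500)
    for step in (0, 250, 500, 2000):
        print(step, float(warmup_schedule(step)))  # linear ramp, then base decay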
# TODO(b/149030439) - refactor this with
# tf.keras.optimizers.schedules.PiecewiseConstantDecay + WarmupDecaySchedule.
class PiecewiseConstantDecayWithWarmup(
tf.keras.optimizers.schedules.LearningRateSchedule):
"""Piecewise constant decay with warmup schedule."""
def __init__(self,
batch_size: int,
epoch_size: int,
warmup_epochs: int,
boundaries: List[int],
multipliers: List[float]):
"""Piecewise constant decay with warmup.
Args:
batch_size: The training batch size used in the experiment.
epoch_size: The size of an epoch, or the number of examples in an epoch.
warmup_epochs: The number of warmup epochs to apply.
boundaries: The list of floats with strictly increasing entries.
multipliers: The list of multipliers/learning rates to use for the
piecewise portion. The length must be 1 less than that of boundaries.
"""
super(PiecewiseConstantDecayWithWarmup, self).__init__()
if len(boundaries) != len(multipliers) - 1:
raise ValueError("The length of boundaries must be 1 less than the "
"length of multipliers")
base_lr_batch_size = 256
steps_per_epoch = epoch_size // batch_size
self._rescaled_lr = BASE_LEARNING_RATE * batch_size / base_lr_batch_size
self._step_boundaries = [float(steps_per_epoch) * x for x in boundaries]
self._lr_values = [self._rescaled_lr * m for m in multipliers]
self._warmup_steps = warmup_epochs * steps_per_epoch
def __call__(self, step: int):
"""Compute learning rate at given step."""
def warmup_lr():
return self._rescaled_lr * (
step / tf.cast(self._warmup_steps, tf.float32))
def piecewise_lr():
return tf.compat.v1.train.piecewise_constant(
tf.cast(step, tf.float32), self._step_boundaries, self._lr_values)
return tf.cond(step < self._warmup_steps, warmup_lr, piecewise_lr)
def get_config(self) -> Mapping[str, Any]:
return {
"rescaled_lr": self._rescaled_lr,
"step_boundaries": self._step_boundaries,
"lr_values": self._lr_values,
"warmup_steps": self._warmup_steps,
}
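# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original file): a piecewise
# constant schedule with warmup. Batch size, epoch size, boundaries, and
# multipliers below are made up (roughly ImageNet-like) for illustration.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    schedule = PiecewiseConstantDecayWithWarmup(
        batch_size=256,
        epoch_size=1281167,
        warmup_epochs=5,
        boundaries=[30, 60, 80],             # epochs at which the LR drops
        multipliers=[1.0, 0.1, 0.01, 0.001])
    for step in (0, 10000, 200000):
        print(step, float(schedule(tf.constant(step, tf.float32))))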
| DeepLearningExamples-master | TensorFlow2/Classification/ConvNets/utils/learning_rate.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
import tensorflow as tf
import horovod.tensorflow as hvd
def set_flags(params):
# os.environ['CUDA_CACHE_DISABLE'] = '1'
os.environ['HOROVOD_GPU_ALLREDUCE'] = 'NCCL'
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
# os.environ['TF_GPU_THREAD_MODE'] = 'gpu_private'
# os.environ['TF_USE_CUDNN_BATCHNORM_SPATIAL_PERSISTENT'] = '0'
os.environ['TF_ADJUST_HUE_FUSED'] = '1'
os.environ['TF_ADJUST_SATURATION_FUSED'] = '1'
os.environ['TF_ENABLE_WINOGRAD_NONFUSED'] = '1'
# os.environ['TF_SYNC_ON_FINISH'] = '0'
os.environ['TF_AUTOTUNE_THRESHOLD'] = '2'
os.environ['HOROVOD_CACHE_CAPACITY'] = "0"
os.environ['HOROVOD_CYCLE_TIME'] = "1.0"
if params.intraop_threads:
os.environ['TF_NUM_INTRAOP_THREADS'] = str(params.intraop_threads)
if params.interop_threads:
os.environ['TF_NUM_INTEROP_THREADS'] = str(params.interop_threads)
if params.use_xla:
# it turns out tf_xla_enable_lazy_compilation is used before running main.py, so setting this flag
# in the current function would have no effect. Thus, this flag is already set in Dockerfile. The
# remaining XLA flags are set here.
TF_XLA_FLAGS = os.environ['TF_XLA_FLAGS'] # contains tf_xla_enable_lazy_compilation
# we set tf_xla_async_io_level=0 for 2 reasons: 1) It turns out that XLA doesn't like
# hvd.allreduce ops used in the custom train_step. Because of this issue, training never started.
# 2) XLA doesn't like the tf.cond used in conditional mixing (model module).
# remove async flag since it's obsolete
#os.environ['TF_XLA_FLAGS'] = TF_XLA_FLAGS + " --tf_xla_auto_jit=1 --tf_xla_async_io_level=0"
os.environ['TF_XLA_FLAGS'] = TF_XLA_FLAGS + " --tf_xla_auto_jit=1"
os.environ['TF_EXTRA_PTXAS_OPTIONS'] = "-sw200428197=true"
tf.keras.backend.clear_session()
tf.config.optimizer.set_jit(True)
gpus = tf.config.experimental.list_physical_devices('GPU')
tf.config.experimental.set_visible_devices(gpus, 'GPU')
if params.memory_limit:
for gpu in gpus:
tf.config.experimental.set_virtual_device_configuration(gpu, [
tf.config.experimental.VirtualDeviceConfiguration(memory_limit=params.memory_limit)])
else:
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
assert tf.config.experimental.get_memory_growth(gpu)
if gpus:
tf.config.experimental.set_visible_devices(gpus[hvd.local_rank()], 'GPU')
np.random.seed(params.seed)
tf.random.set_seed(params.seed)
if params.use_amp:
# Model.compile will automatically wrap an optimizer with a tf.keras.mixed_precision.LossScaleOptimizer
# if you use the 'mixed_float16' policy. If you use a custom training loop instead of calling Model.compile,
# you should explicitly use a tf.keras.mixed_precision.LossScaleOptimizer to avoid numeric underflow with float16.
policy = tf.keras.mixed_precision.experimental.Policy('mixed_float16', loss_scale='dynamic')
tf.keras.mixed_precision.experimental.set_policy(policy)
else:
os.environ['TF_ENABLE_AUTO_MIXED_PRECISION'] = '0'
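# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original file): calling
# set_flags() with a hypothetical params object. The attribute names mirror
# the fields read above; the values are made up and deliberately conservative
# (no XLA, no AMP) so the sketch does not depend on TF_XLA_FLAGS being set.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from types import SimpleNamespace
    hvd.init()
    example_params = SimpleNamespace(
        intraop_threads=None,   # keep TF defaults
        interop_threads=None,
        use_xla=False,
        memory_limit=None,      # use memory growth instead of a hard limit
        seed=42,
        use_amp=False)
    set_flags(example_params)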
| DeepLearningExamples-master | TensorFlow2/Classification/ConvNets/utils/setup.py |
# Lint as: python3
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Common modules for callbacks."""
from __future__ import absolute_import
from __future__ import division
# from __future__ import google_type_annotations
from __future__ import print_function
import os
import horovod.tensorflow as hvd
import tensorflow as tf
import tensorflow_addons as tfa
import time
from typing import Any, List, MutableMapping, Text
from tensorflow import keras
from utils import optimizer_factory
def get_callbacks(model_checkpoint: bool = True,
include_tensorboard: bool = True,
time_history: bool = True,
track_lr: bool = True,
write_model_weights: bool = True,
initial_step: int = 0,
batch_size: int = 0,
log_steps: int = 100,
model_dir: str = None,
save_checkpoint_freq: int = 0,
ema_decay=0,
intratrain_eval_using_ema=False,
logger = None) -> List[tf.keras.callbacks.Callback]:
"""Get all callbacks."""
model_dir = model_dir or ''
callbacks = []
if model_checkpoint and hvd.rank() == 0:
ckpt_full_path = os.path.join(model_dir, 'model.ckpt-{epoch:04d}')
# if ema_decay > 0:
# # save average weights in the ckpt
# ckpt_callback = AverageModelCheckpoint(update_weights=False,
# filepath=ckpt_full_path,
# verbose=1,
# save_weights_only=True,
# save_freq=save_checkpoint_freq)
# else:
ckpt_callback = tf.keras.callbacks.ModelCheckpoint(ckpt_full_path,
save_weights_only=True,
verbose=1,
save_freq=save_checkpoint_freq)
callbacks.append(ckpt_callback)
if ema_decay > 0:
# swaps model weights with the average weights during on-the-fly evaluation.
    # once evaluation is completed, the original weights are restored and training is resumed.
callbacks.append(MovingAverageCallback(intratrain_eval_using_ema=intratrain_eval_using_ema))
if time_history and logger is not None and hvd.rank() == 0:
callbacks.append(
TimeHistory(
batch_size,
log_steps,
logdir=model_dir if include_tensorboard else None,
logger=logger))
# Adding hvd.rank() == 0 to the following if condition halts multi-GPU training at the onset!
if include_tensorboard:
callbacks.append(
CustomTensorBoard(
log_dir=model_dir,
track_lr=track_lr,
initial_step=initial_step,
write_images=write_model_weights))
# ProgbarLogger is in charge of printing metrics at the end of each epoch.
  # By default, the ProgBar callback is inserted at the beginning of the callback list by Keras.
# The downside is that if the callbacks invoked after ProgBar want to add a new metric, they won't be
# reflected in the printed metrics because ProgBar is already called. To override this behavior,
# we append this callback explicitly here at the end. If this line is commented, the learning rate,
# which is added to the metrics by CustomTensorboard, won't be printed.
callbacks.append(tf.keras.callbacks.ProgbarLogger())
return callbacks
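# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original file): assembling a
# minimal callback list for a Horovod run. The directory and checkpoint
# frequency below are made up; TensorBoard, timing, and EMA callbacks are
# deliberately turned off so the sketch stays self-contained.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    hvd.init()
    example_callbacks = get_callbacks(
        model_checkpoint=True,
        include_tensorboard=False,
        time_history=False,
        model_dir="/tmp/example_model_dir",
        save_checkpoint_freq=1,
        ema_decay=0,
        logger=None)
    print([type(cb).__name__ for cb in example_callbacks])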
def get_scalar_from_tensor(t: tf.Tensor) -> int:
"""Utility function to convert a Tensor to a scalar."""
t = tf.keras.backend.get_value(t)
if callable(t):
return t()
else:
return t
class CustomTensorBoard(tf.keras.callbacks.TensorBoard):
"""A customized TensorBoard callback that tracks additional datapoints.
Metrics tracked:
- Global learning rate
Attributes:
log_dir: the path of the directory where to save the log files to be parsed
by TensorBoard.
track_lr: `bool`, whether or not to track the global learning rate.
initial_step: the initial step, used for preemption recovery.
**kwargs: Additional arguments for backwards compatibility. Possible key is
`period`.
"""
# TODO(b/146499062): track params, flops, log lr, l2 loss,
# classification loss
def __init__(self,
log_dir: str,
track_lr: bool = False,
initial_step: int = 0,
**kwargs):
super(CustomTensorBoard, self).__init__(log_dir=log_dir, **kwargs)
self.step = initial_step
self._track_lr = track_lr
def on_batch_begin(self,
epoch: int,
logs: MutableMapping[str, Any] = None) -> None:
self.step += 1
if logs is None:
logs = {}
logs.update(self._calculate_metrics())
super(CustomTensorBoard, self).on_batch_begin(epoch, logs)
def on_epoch_begin(self,
epoch: int,
logs: MutableMapping[str, Any] = None) -> None:
if logs is None:
logs = {}
metrics = self._calculate_metrics()
logs.update(metrics)
super(CustomTensorBoard, self).on_epoch_begin(epoch, logs)
def on_epoch_end(self,
epoch: int,
logs: MutableMapping[str, Any] = None) -> None:
if logs is None:
logs = {}
metrics = self._calculate_metrics()
logs.update(metrics)
super(CustomTensorBoard, self).on_epoch_end(epoch, logs)
def _calculate_metrics(self) -> MutableMapping[str, Any]:
logs = {}
# TODO(b/149030439): disable LR reporting.
if self._track_lr:
logs['learning_rate'] = self._calculate_lr()
return logs
def _calculate_lr(self) -> int:
"""Calculates the learning rate given the current step."""
return get_scalar_from_tensor(
self._get_base_optimizer()._decayed_lr(var_dtype=tf.float32)) # pylint:disable=protected-access
def _get_base_optimizer(self) -> tf.keras.optimizers.Optimizer:
"""Get the base optimizer used by the current model."""
optimizer = self.model.optimizer
# The optimizer might be wrapped by another class, so unwrap it
while hasattr(optimizer, '_optimizer'):
optimizer = optimizer._optimizer # pylint:disable=protected-access
return optimizer
class MovingAverageCallback(tf.keras.callbacks.Callback):
"""A Callback to be used with a `MovingAverage` optimizer.
Applies moving average weights to the model during validation time to test
and predict on the averaged weights rather than the current model weights.
Once training is complete, the model weights will be overwritten with the
averaged weights (by default).
Attributes:
overwrite_weights_on_train_end: Whether to overwrite the current model
weights with the averaged weights from the moving average optimizer.
**kwargs: Any additional callback arguments.
"""
def __init__(self,
intratrain_eval_using_ema: bool = False,
overwrite_weights_on_train_end: bool = False,
**kwargs):
super(MovingAverageCallback, self).__init__(**kwargs)
self.intratrain_eval_using_ema = intratrain_eval_using_ema
self.overwrite_weights_on_train_end = overwrite_weights_on_train_end
self.ema_opt = None
def set_model(self, model: tf.keras.Model):
super(MovingAverageCallback, self).set_model(model)
self.ema_opt = optimizer_factory.fetch_optimizer(model, optimizer_factory.MovingAverage)
self.ema_opt.shadow_copy(model.weights)
def on_test_begin(self, logs: MutableMapping[Text, Any] = None):
if self.intratrain_eval_using_ema:
self.ema_opt.swap_weights()
def on_test_end(self, logs: MutableMapping[Text, Any] = None):
if self.intratrain_eval_using_ema:
self.ema_opt.swap_weights()
def on_train_end(self, logs: MutableMapping[Text, Any] = None):
if self.overwrite_weights_on_train_end:
self.ema_opt.assign_average_vars(self.model.variables)
class AverageModelCheckpoint(tf.keras.callbacks.ModelCheckpoint):
"""Saves and, optionally, assigns the averaged weights.
Taken from tfa.callbacks.AverageModelCheckpoint [original class].
NOTE1: The original class has a type check decorator, which prevents passing non-string save_freq (fix: removed)
NOTE2: The original class may not properly handle layered (nested) optimizer objects (fix: use fetch_optimizer)
Attributes:
update_weights: If True, assign the moving average weights
to the model, and save them. If False, keep the old
non-averaged weights, but the saved model uses the
average weights.
See `tf.keras.callbacks.ModelCheckpoint` for the other args.
"""
def __init__(
self,
update_weights: bool,
filepath: str,
monitor: str = 'val_loss',
verbose: int = 0,
save_best_only: bool = False,
save_weights_only: bool = False,
mode: str = 'auto',
save_freq: str = 'epoch',
**kwargs):
super().__init__(
filepath,
monitor,
verbose,
save_best_only,
save_weights_only,
mode,
save_freq,
**kwargs)
self.update_weights = update_weights
self.ema_opt = None
def set_model(self, model):
self.ema_opt = optimizer_factory.fetch_optimizer(model, optimizer_factory.MovingAverage)
return super().set_model(model)
def _save_model(self, epoch, logs):
assert isinstance(self.ema_opt, optimizer_factory.MovingAverage)
if self.update_weights:
self.ema_opt.assign_average_vars(self.model.variables)
return super()._save_model(epoch, logs)
else:
# Note: `model.get_weights()` gives us the weights (non-ref)
# whereas `model.variables` returns references to the variables.
non_avg_weights = self.model.get_weights()
self.ema_opt.assign_average_vars(self.model.variables)
# result is currently None, since `super._save_model` doesn't
# return anything, but this may change in the future.
result = super()._save_model(epoch, logs)
self.model.set_weights(non_avg_weights)
return result
class BatchTimestamp(object):
"""A structure to store batch time stamp."""
def __init__(self, batch_index, timestamp):
self.batch_index = batch_index
self.timestamp = timestamp
def __repr__(self):
return "'BatchTimestamp<batch_index: {}, timestamp: {}>'".format(
self.batch_index, self.timestamp)
class TimeHistory(tf.keras.callbacks.Callback):
"""Callback for Keras models."""
def __init__(self, batch_size, log_steps, logger, logdir=None):
"""Callback for logging performance.
Args:
batch_size: Total batch size.
      log_steps: Interval of steps between logging of batch level stats.
      logger: Logger object used to emit timing and throughput records.
logdir: Optional directory to write TensorBoard summaries.
"""
# TODO(wcromar): remove this parameter and rely on `logs` parameter of
# on_train_batch_end()
self.batch_size = batch_size
super(TimeHistory, self).__init__()
self.log_steps = log_steps
self.last_log_step = 0
self.steps_before_epoch = 0
self.steps_in_epoch = 0
self.start_time = None
self.logger = logger
self.step_per_epoch = 0
if logdir:
self.summary_writer = tf.summary.create_file_writer(logdir)
else:
self.summary_writer = None
# Logs start of step 1 then end of each step based on log_steps interval.
self.timestamp_log = []
# Records the time each epoch takes to run from start to finish of epoch.
self.epoch_runtime_log = []
self.throughput = []
@property
def global_steps(self):
"""The current 1-indexed global step."""
return self.steps_before_epoch + self.steps_in_epoch
@property
def average_steps_per_second(self):
"""The average training steps per second across all epochs."""
return (self.global_steps - self.step_per_epoch) / sum(self.epoch_runtime_log[1:])
@property
def average_examples_per_second(self):
"""The average number of training examples per second across all epochs."""
# return self.average_steps_per_second * self.batch_size
if not self.throughput:
return 0
if len(self.throughput) == 1:
return self.throughput[0] # this throughput is inaccurate because the first step is warmup
ind = max(int(0.1*len(self.throughput)), 1) # ensures exclusion of the first step (warmup step)
return sum(self.throughput[ind:])/(len(self.throughput[ind:])) # removed +1 from denominator
def on_train_end(self, logs=None):
self.train_finish_time = time.time()
if self.summary_writer:
self.summary_writer.flush()
def on_epoch_begin(self, epoch, logs=None):
self.epoch_start = time.time()
def on_batch_begin(self, batch, logs=None):
# tf.print('+++++++++++',self.model.optimizer.iterations,batch)
if not self.start_time:
self.start_time = time.time()
# Record the timestamp of the first global step
if not self.timestamp_log:
self.timestamp_log.append(BatchTimestamp(self.global_steps,
self.start_time))
def on_batch_end(self, batch, logs=None):
"""Records elapse time of the batch and calculates examples per second."""
self.steps_in_epoch = batch + 1
steps_since_last_log = self.global_steps - self.last_log_step
if steps_since_last_log >= self.log_steps:
now = time.time()
elapsed_time = now - self.start_time
steps_per_second = steps_since_last_log / elapsed_time
examples_per_second = steps_per_second * self.batch_size
self.timestamp_log.append(BatchTimestamp(self.global_steps, now))
elapsed_time_str='{:.2f} seconds'.format(elapsed_time)
self.logger.log(step='PARAMETER', data={'TimeHistory': elapsed_time_str, 'examples/second': examples_per_second, 'steps': (self.last_log_step, self.global_steps)})
if self.summary_writer:
with self.summary_writer.as_default():
tf.summary.scalar('global_step/sec', steps_per_second,
self.global_steps)
tf.summary.scalar('examples/sec', examples_per_second,
self.global_steps)
# tf.summary.scalar('grad global norm',
# self.model.gradients_gnorm,
# self.global_steps)
self.last_log_step = self.global_steps
self.start_time = None
self.throughput.append(examples_per_second)
def on_epoch_end(self, epoch, logs=None):
if epoch == 0:
self.step_per_epoch = self.steps_in_epoch
epoch_run_time = time.time() - self.epoch_start
self.epoch_runtime_log.append(epoch_run_time)
self.steps_before_epoch += self.steps_in_epoch
self.steps_in_epoch = 0
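# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original file): attaching
# TimeHistory to a tiny Keras fit() loop. The model, the data, and the
# stand-in logger below are made up; the real training script passes a
# dllogger-style logger object instead.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    class _PrintLogger:
        def log(self, step, data):
            print(step, data)
    toy_model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(8,))])
    toy_model.compile(optimizer="sgd", loss="mse")
    x = tf.random.uniform((64, 8))
    y = tf.random.uniform((64, 1))
    timing = TimeHistory(batch_size=16, log_steps=2, logger=_PrintLogger())
    toy_model.fit(x, y, batch_size=16, epochs=1, verbose=0, callbacks=[timing])
    print("average examples/sec:", timing.average_examples_per_second)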
class EvalTimeHistory(tf.keras.callbacks.Callback):
"""Callback for Keras models."""
def __init__(self, batch_size, logger, logdir=None):
"""Callback for logging performance.
Args:
batch_size: Total batch size.
      logger: Logger object (kept for interface symmetry with TimeHistory).
logdir: Optional directory to write TensorBoard summaries.
"""
# TODO(wcromar): remove this parameter and rely on `logs` parameter of
# on_train_batch_end()
self.batch_size = batch_size
self.global_steps = 0
self.batch_time = []
self.eval_time = 0
super(EvalTimeHistory, self).__init__()
self.logger = logger
@property
def average_steps_per_second(self):
"""The average training steps per second across all epochs."""
return (self.global_steps - 1) / self.eval_time
@property
def average_examples_per_second(self):
"""The average number of training examples per second across all epochs."""
return self.average_steps_per_second * self.batch_size
def on_test_batch_end(self, batch, logs=None):
self.global_steps += 1
self.batch_time.append(time.time() - self.test_begin)
def on_test_batch_begin(self, epoch, logs=None):
self.test_begin = time.time()
def on_test_end(self, epoch, logs=None):
self.eval_time = sum(self.batch_time) - self.batch_time[0]
| DeepLearningExamples-master | TensorFlow2/Classification/ConvNets/utils/callbacks.py |
import tensorflow as tf
import numpy as np
from tensorflow.python.profiler.model_analyzer import profile
from tensorflow.python.profiler.option_builder import ProfileOptionBuilder
def get_num_params(model, readable_format=True):
"""Return number of parameters and flops."""
nparams = np.sum([
np.prod(v.get_shape().as_list())
for v in model.trainable_weights
])
if readable_format:
nparams = float(nparams) * 1e-6
return nparams
def get_num_flops(model, input_shape, readable_format=True):
if hasattr(model,'model'):
model = model.model
forward_pass = tf.function(model.call, input_signature=[tf.TensorSpec(shape=(1,) + input_shape)])
graph_info = profile(forward_pass.get_concrete_function().graph,
options=ProfileOptionBuilder.float_operation())
# The //2 is necessary since `profile` counts multiply and accumulate
# as two flops, here we report the total number of multiply accumulate ops
flops = graph_info.total_float_ops // 2
if readable_format:
flops = float(flops) * 1e-9
return flops | DeepLearningExamples-master | TensorFlow2/Classification/ConvNets/utils/tf_utils.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Optimizer factory for vision tasks."""
from __future__ import absolute_import
from __future__ import division
# from __future__ import google_type_annotations
from __future__ import print_function
import tensorflow as tf
import tensorflow_addons as tfa
from typing import Any, Dict, Text, List
from tensorflow import keras
from tensorflow_addons.optimizers import MovingAverage
# pylint: disable=protected-access
from utils import learning_rate
def fetch_optimizer(model,opt_type) -> tf.keras.optimizers.Optimizer:
"""Get the base optimizer used by the current model."""
# this is the case where our target optimizer is not wrapped by any other optimizer(s)
if isinstance(model.optimizer,opt_type):
return model.optimizer
# Dive into nested optimizer object until we reach the target opt
opt = model.optimizer
while hasattr(opt, '_optimizer'):
opt = opt._optimizer
if isinstance(opt,opt_type):
return opt
raise TypeError(f'Failed to find {opt_type} in the nested optimizer object')
# Inspired from https://github.com/OpenNMT/OpenNMT-tf/blob/master/opennmt/optimizers/utils.py
class GradientAccumulator(object):
"""Distribution strategies-aware gradient accumulation utility."""
def __init__(self):
"""Initializes the accumulator."""
self._gradients = []
self._accum_steps = tf.Variable(
initial_value=0, dtype=tf.int64, trainable=False, aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA
)
@property
def step(self):
"""Number of accumulated steps."""
return self._accum_steps.value()
@property
def gradients(self):
"""The accumulated gradients."""
return list(
gradient.value() if gradient is not None else gradient for gradient in self._get_replica_gradients()
)
def __call__(self, gradients):
"""Accumulates :obj:`gradients`."""
if not self._gradients:
self._gradients.extend(
[
tf.Variable(tf.zeros_like(gradient), trainable=False) if gradient is not None else gradient
for gradient in gradients
]
)
if len(gradients) != len(self._gradients):
raise ValueError("Expected %s gradients, but got %d" % (len(self._gradients), len(gradients)))
for accum_gradient, gradient in zip(self._get_replica_gradients(), gradients):
if accum_gradient is not None and gradient is not None:
accum_gradient.assign_add(gradient)
self._accum_steps.assign_add(1)
def reset(self):
"""Resets the accumulated gradients."""
if self._gradients:
self._accum_steps.assign(0)
for gradient in self._get_replica_gradients():
if gradient is not None:
gradient.assign(tf.zeros_like(gradient))
def normalize(self):
"""Normalizes the accumulated gradients."""
for gradient in self._get_replica_gradients():
if gradient is not None:
gradient.assign(gradient*tf.cast(1/self._accum_steps, gradient.dtype))
def _get_replica_gradients(self):
if tf.distribute.has_strategy():
# In a replica context, we want to accumulate gradients on each replica
# without synchronization, so we directly assign the value of the
# current replica.
replica_context = tf.distribute.get_replica_context()
if replica_context is None or tf.distribute.get_strategy().num_replicas_in_sync == 1:
return self._gradients
return (
gradient.device_map.select_for_current_replica(gradient.values, replica_context)
for gradient in self._gradients
if gradient is not None
)
else:
return self._gradients
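# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original file): the
# accumulate -> normalize -> apply -> reset cycle on a toy variable. The
# variable, loss, and optimizer below are made up for illustration; the real
# training loop does this inside a custom train_step when grad_accum_steps > 1.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    var = tf.Variable([1.0, 2.0])
    sgd = tf.keras.optimizers.SGD(learning_rate=0.1)
    accumulator = GradientAccumulator()
    for _ in range(4):                              # four micro-batches
        with tf.GradientTape() as tape:
            loss = tf.reduce_sum(var * var)
        accumulator(tape.gradient(loss, [var]))     # accumulate this micro-batch
    accumulator.normalize()                         # average over the 4 steps
    sgd.apply_gradients(zip(accumulator.gradients, [var]))
    accumulator.reset()
    print("updated variable:", var.numpy())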
class HvdMovingAverage(MovingAverage):
def swap_weights(self):
"""Swap the average and moving weights.
The original function in the parent class assumes a cross replica
context, which fails for single GPU training. It also failed in the case of
multi-GPU training with Horovod.
"""
self._swap_weights()
def _create_slots(self, var_list):
"""[summary]
The original function in the parent class, in addition to calling
_create_slots() of the base optimizer, reassigns trainable tensors to
self._average_weights and self._model_weights, which has the effect of
removing non-trainable tensors (e.g., moving means and variances) from EMA.
By overriding it, we simply keep the part that calls _create_slots of the base
optimizer. To make up for the removed part of the code, we call shadow_copy, which
assigns both trainable and non-trainable tensors to self._average_weights and
self._model_weights.
Args:
            var_list: the list of model variables to create optimizer slots for.
"""
self._optimizer._create_slots(var_list=var_list)
def apply_gradients(self, grads_and_vars, name=None, experimental_aggregate_gradients=True):
self._optimizer._iterations = self.iterations
result = super().apply_gradients(grads_and_vars, name)
# update EMA weights after the weights are updated
self.update_average(self._optimizer.iterations)
return result
def _resource_apply_dense(self, grad, var):
"""[summary]
We must override this function, eliminating the part that performs
EMA updates for trainable variables. The reasons is that we use our custom
self.update_average(), called in apply_gradients, which performs EMA updates
for both trainable and non-trainable variables. If we don't override this
function, in each iteration, EMA of trainable variables get updated twice
(once here and once in apply_gradient) while EMA of non-trainable variables get
updated only once in apply_gradients.
"""
return self._optimizer._resource_apply_dense(grad, var)
def _resource_apply_sparse(self, grad, var, indices):
"""[summary]
We must override this function, eliminating the part that performs
EMA updates for trainable variables. The reasons is that we use our custom
self.update_average(), called in apply_gradients, which performs EMA updates
for both trainable and non-trainable variables. If we don't override this
function, in each iteration, EMA of trainable variables get updated twice
(once here and once in apply_gradient) while EMA of non-trainable variables get
updated only once in apply_gradients.
"""
return self._optimizer._resource_apply_sparse(grad, var, indices)
def _resource_apply_sparse_duplicate_indices(self, grad, var, indices):
"""[summary]
We must override this function, eliminating the part that performs
EMA updates for trainable variables. The reasons is that we use our custom
self.update_average(), called in apply_gradients, which performs EMA updates
for both trainable and non-trainable variables. If we don't override this
function, in each iteration, EMA of trainable variables get updated twice
(once here and once in apply_gradient) while EMA of non-trainable variables get
updated only once in apply_gradients.
"""
return self._optimizer._resource_apply_sparse_duplicate_indices(
grad, var, indices)
@tf.function
def update_average(self, step: tf.Tensor):
step = tf.cast(step, tf.float32)
average_decay = self._get_hyper("average_decay", tf.dtypes.float32)
if step < self._start_step:
decay = tf.constant(0., tf.float32)
elif self._dynamic_decay:
decay = step - self._start_step
decay = tf.minimum(average_decay, (1. + decay) / (10. + decay))
else:
decay = average_decay
def _apply_moving(v_moving, v_normal):
diff = v_moving - v_normal
v_moving.assign_sub(tf.cast(1. - decay, v_moving.dtype) * diff)
return v_moving
def _update(strategy, v_moving_and_v_normal):
for v_moving, v_normal in v_moving_and_v_normal:
strategy.extended.update(v_moving, _apply_moving, args=(v_normal,))
ctx = tf.distribute.get_replica_context()
return ctx.merge_call(_update, args=(zip(self._average_weights,
self._model_weights),))
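    # Illustrative note (not part of the original file): with dynamic decay and
    # start_step = 0, the effective decay is min(average_decay, (1 + k) / (10 + k))
    # where k = step - start_step. For average_decay = 0.999 this gives roughly
    # 0.53 at k = 9 and 0.991 at k = 990, reaching the 0.999 cap around k ~ 9000.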
@classmethod
def from_config(cls, config, custom_objects=None):
optimizer = tf.keras.optimizers.deserialize(
config.pop('optimizer'),
custom_objects=custom_objects,
)
# For some reason, it is necessary to pass the optimizer as a keyword arg
return cls(optimizer=optimizer, **config)
def build_optimizer(
optimizer_name: Text,
base_learning_rate: tf.keras.optimizers.schedules.LearningRateSchedule,
params: Dict[Text, Any]):
"""Build the optimizer based on name.
Args:
optimizer_name: String representation of the optimizer name. Examples:
sgd, momentum, rmsprop.
base_learning_rate: `tf.keras.optimizers.schedules.LearningRateSchedule`
base learning rate.
params: String -> Any dictionary representing the optimizer params.
This should contain optimizer specific parameters such as
`base_learning_rate`, `decay`, etc.
Returns:
A tf.keras.Optimizer.
Raises:
ValueError if the provided optimizer_name is not supported.
"""
optimizer_name = optimizer_name.lower()
if optimizer_name == 'sgd':
nesterov = params.get('nesterov', False)
optimizer = tf.keras.optimizers.SGD(learning_rate=base_learning_rate,
nesterov=nesterov)
elif optimizer_name == 'momentum':
nesterov = params.get('nesterov', False)
optimizer = tf.keras.optimizers.SGD(learning_rate=base_learning_rate,
momentum=params['momentum'],
nesterov=nesterov)
elif optimizer_name == 'rmsprop':
rho = params.get('decay', None) or params.get('rho', 0.9)
momentum = params.get('momentum', 0.9)
epsilon = params.get('epsilon', 1e-07)
optimizer = tf.keras.optimizers.RMSprop(learning_rate=base_learning_rate,
rho=rho,
momentum=momentum,
epsilon=epsilon)
elif optimizer_name == 'adam':
beta_1 = params.get('beta_1', 0.9)
beta_2 = params.get('beta_2', 0.999)
epsilon = params.get('epsilon', 1e-07)
optimizer = tf.keras.optimizers.Adam(learning_rate=base_learning_rate,
beta_1=beta_1,
beta_2=beta_2,
epsilon=epsilon)
elif optimizer_name == 'adamw':
weight_decay = params.get('weight_decay', 0.01)
beta_1 = params.get('beta_1', 0.9)
beta_2 = params.get('beta_2', 0.999)
epsilon = params.get('epsilon', 1e-07)
optimizer = tfa.optimizers.AdamW(weight_decay=weight_decay,
learning_rate=base_learning_rate,
beta_1=beta_1,
beta_2=beta_2,
epsilon=epsilon)
else:
raise ValueError('Unknown optimizer %s' % optimizer_name)
if params.get('lookahead', None):
optimizer = tfa.optimizers.Lookahead(optimizer)
# Moving average should be applied last, as it's applied at test time
moving_average_decay = params.get('moving_average_decay', 0.)
if moving_average_decay is not None and moving_average_decay > 0.:
optimizer = HvdMovingAverage(# tfa.optimizers.MovingAverage
optimizer,
average_decay=moving_average_decay,
dynamic_decay=True)
return optimizer
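# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original file): building an
# RMSprop optimizer wrapped with EMA weights, then recovering the inner
# MovingAverage object with fetch_optimizer(). The hyper-parameters and the
# tiny model below are made up for illustration.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    example_params = {
        'momentum': 0.9,
        'decay': 0.9,
        'epsilon': 0.001,
        'moving_average_decay': 0.999,
    }
    opt = build_optimizer('rmsprop', base_learning_rate=0.01, params=example_params)
    toy_model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(4,))])
    toy_model.compile(optimizer=opt, loss="mse")
    print(type(fetch_optimizer(toy_model, MovingAverage)).__name__)  # HvdMovingAverage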
def build_learning_rate(params: Dict[Text, Any],
batch_size: int = None,
train_steps: int = None,
max_epochs: int = None):
"""Build the learning rate given the provided configuration."""
decay_type = params['name']
base_lr = params['initial_lr']
decay_rate = params['decay_rate']
if params['decay_epochs'] is not None:
decay_steps = params['decay_epochs'] * train_steps
else:
decay_steps = 0
if params['warmup_epochs'] is not None:
warmup_steps = params['warmup_epochs'] * train_steps
else:
warmup_steps = 0
lr_multiplier = params['scale_by_batch_size']
if lr_multiplier and lr_multiplier > 0:
# Scale the learning rate based on the batch size and a multiplier
base_lr *= lr_multiplier * batch_size
if decay_type == 'exponential':
lr = tf.keras.optimizers.schedules.ExponentialDecay(
initial_learning_rate=base_lr,
decay_steps=decay_steps,
decay_rate=decay_rate,
staircase=params['staircase'])
elif decay_type == 'piecewise_constant_with_warmup':
lr = learning_rate.PiecewiseConstantDecayWithWarmup(
batch_size=batch_size,
epoch_size=params['examples_per_epoch'],
warmup_epochs=params['warmup_epochs'],
boundaries=params['boundaries'],
multipliers=params['multipliers'])
elif decay_type == 'cosine':
decay_steps = (max_epochs - params['warmup_epochs']) * train_steps
lr = tf.keras.experimental.CosineDecay(
initial_learning_rate=base_lr,
decay_steps=decay_steps,
alpha=0.0
)
elif decay_type == 'linearcosine':
decay_steps = (max_epochs - params['warmup_epochs']) * train_steps
lr = tf.keras.experimental.NoisyLinearCosineDecay(
initial_learning_rate=base_lr,
decay_steps=decay_steps,
initial_variance=0.5,
variance_decay=0.55,
num_periods=0.5, alpha=0.0, beta=0.001
)
if warmup_steps > 0:
if decay_type != 'piecewise_constant_with_warmup':
lr = learning_rate.WarmupDecaySchedule(lr, warmup_steps)
return lr
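# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original file): building a cosine
# schedule with warmup from a params dict. The keys mirror the ones read
# above; every value is made up for illustration.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    lr_params = {
        'name': 'cosine',
        'initial_lr': 0.005,
        'decay_rate': None,
        'decay_epochs': None,
        'warmup_epochs': 5,
        'scale_by_batch_size': 1.0 / 128,
    }
    schedule = build_learning_rate(lr_params, batch_size=256,
                                   train_steps=5000, max_epochs=300)
    print(float(schedule(0)), float(schedule(10000)))  # warmup ramp values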
| DeepLearningExamples-master | TensorFlow2/Classification/ConvNets/utils/optimizer_factory.py |
# Lint as: python3
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains definitions for EfficientNet v2 model.
[1] Mingxing Tan, Quoc V. Le
EfficientNetV2: Smaller Models and Faster Training.
xxxx, https://arxiv.org/pdf/2104.00298v2.pdf
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import os
from typing import Any, Dict, Optional, List, Text, Tuple
import copy
import tensorflow as tf
import horovod.tensorflow as hvd
from tensorflow import keras
from utils.optimizer_factory import GradientAccumulator
from model.layers import simple_swish, hard_swish, identity, gelu, get_activation
from model.blocks import conv2d_block, mb_conv_block, fused_mb_conv_block
from model.common_modules import round_filters, round_repeats, load_weights
from dataloader import preprocessing
from dataloader.dataset_factory import mixing_lite
DENSE_KERNEL_INITIALIZER = {
'class_name': 'VarianceScaling',
'config': {
'scale': 1 / 3.0,
'mode': 'fan_in',
'distribution': 'uniform'
}
}
@tf.keras.utils.register_keras_serializable(package='Vision')
class Model(tf.keras.Model):
"""Wrapper class for an EfficientNet v2 Keras model.
Contains helper methods to build, manage, and save metadata about the model.
"""
def __init__(self,
config: Dict[Text, Any] = None):
"""Create an EfficientNet v2 model.
Args:
config: (optional) the main model parameters to create the model
"""
super().__init__()
self.config = config
if self.config.grad_accum_steps > 1:
self.grad_accumulator = GradientAccumulator()
self.gradients_gnorm = tf.Variable(0, trainable=False, dtype=tf.float32)
self.local_step = tf.Variable(initial_value=0, dtype=tf.int64, trainable=False, aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA)
input_channels = config.mparams.input_channels
# Consistent with channels-last format. will be permuted in _build, if channels first requested.
input_shape = (None, None, input_channels) # Should handle any image size
image_input = tf.keras.layers.Input(shape=input_shape)
is_predict ="predict" in config.mode
if not is_predict:
mixup_input = tf.keras.layers.Input(shape=(1, 1, 1))
cutmix_input = tf.keras.layers.Input(shape=(None, None, 1))
is_tr_split = tf.keras.layers.Input(shape=(1)) # indicates whether we use tr or eval data loader
inputs = [image_input,mixup_input,cutmix_input,is_tr_split]
else:
inputs = [image_input]
output = self._build(inputs)
# Cast to float32 in case we have a different model dtype
output = tf.cast(output, tf.float32)
self.model = tf.keras.Model(inputs=inputs,outputs=output)
def call(self,data):
is_predict ="predict" in self.config.mode
if not is_predict:
x=data['image']
mixup_weights = data['mixup_weight']
cutmix_masks = data['cutmix_mask']
is_tr_split = data['is_tr_split']
return self.model([x,mixup_weights,cutmix_masks,is_tr_split])
else:
return self.model([data])
def _build(self,
input: List[tf.keras.layers.Input]):
"""Creates an EfficientNet v2 graph given the model parameters.
This function is wrapped by the `EfficientNet_v2` class to make a tf.keras.Model.
Args:
image_input: the input batch of images
Returns:
the output of efficientnet v2
"""
config = self.config
depth_coefficient = config.mparams.depth_coefficient
blocks = config.mparams.blocks
stem_base_filters = config.mparams.stem_base_filters
top_base_filters = config.mparams.top_base_filters
activation = get_activation(config.mparams.activation)
dropout_rate = config.mparams.dropout_rate
drop_connect_rate = config.mparams.drop_connect_rate
num_classes = config.mparams.num_classes
input_channels = config.mparams.input_channels
rescale_input = config.mparams.rescale_input
data_format = tf.keras.backend.image_data_format()
dtype = config.mparams.dtype
weight_decay = config.weight_decay
weight_init = config.mparams.weight_init
train_batch_size = config.train_batch_size
do_mixup = config.mixup_alpha > 0
do_cutmix = config.cutmix_alpha > 0
def cond_mixing(args):
images,mixup_weights,cutmix_masks,is_tr_split = args
return tf.cond(tf.keras.backend.equal(is_tr_split[0],0),
lambda: images, # eval phase
lambda: mixing_lite(images,mixup_weights,cutmix_masks,train_batch_size, do_mixup, do_cutmix)) # tr phase
images = input[0]
x = images
if len(input) > 1:
# we get here only during train or train_and_eval modes
if self.config.defer_img_mixing:
# we get here only if we chose not to perform image mixing in the data loader
        # image mixing on device further accelerates training
mixup_weights = input[1]
cutmix_masks = input[2]
is_tr_split = input[3]
x = tf.keras.layers.Lambda(cond_mixing)([images,mixup_weights,cutmix_masks,is_tr_split])
# data loader outputs data in the channels last format
if data_format == 'channels_first':
# Happens on GPU/TPU if available.
x = tf.keras.layers.Permute((3, 1, 2))(x)
if rescale_input:
# x-mean/std
x = preprocessing.normalize_images(x,
mean_rgb=config.mparams.mean_rgb,
stddev_rgb=config.mparams.std_rgb,
num_channels=input_channels,
dtype=dtype,
data_format=data_format)
outputs = dict()
# Build stem
x = conv2d_block(x,
round_filters(stem_base_filters, config),
config,
kernel_size=[3, 3], # OK
strides=[2, 2], # OK
activation=activation,
name='stem')
outputs['stem'] = x
# Build blocks
num_blocks_total = sum(
round_repeats(block['num_repeat'], depth_coefficient) for block in blocks)
block_num = 0
for stack_idx, block in enumerate(blocks):
assert block['num_repeat'] > 0
# Update block input and output filters based on depth multiplier
block.update({
'input_filters':round_filters(block['input_filters'], config),
'output_filters':round_filters(block['output_filters'], config),
'num_repeat':round_repeats(block['num_repeat'], depth_coefficient)})
# The first block needs to take care of stride and filter size increase
conv_block = fused_mb_conv_block if block['fused_conv'] else mb_conv_block
drop_rate = drop_connect_rate * float(block_num) / num_blocks_total
config.mparams.update({'drop_connect_rate': drop_rate}) # TODO(Sugh) replace
block_prefix = 'stack_{}/block_0/'.format(stack_idx)
x = conv_block(x, block, config, block_prefix)
outputs[f'b_{block_num}'] = x
block_num += 1
if block['num_repeat'] > 1:
block.update({
'input_filters':block['output_filters'],
'strides':(1, 1)
})
for block_idx in range(block['num_repeat'] - 1):
drop_rate = drop_connect_rate * float(block_num) / num_blocks_total
config.mparams.update({'drop_connect_rate': drop_rate})
block_prefix = 'stack_{}/block_{}/'.format(stack_idx, block_idx + 1)
x = conv_block(x, block, config, prefix=block_prefix)
outputs[f'b_{block_num}'] = x
block_num += 1
# Build top
x = conv2d_block(x,
round_filters(top_base_filters, config),
config,
activation=activation,
name='top')
# Build classifier
DENSE_KERNEL_INITIALIZER['config']['mode'] = weight_init
x = tf.keras.layers.GlobalAveragePooling2D(name='top_pool',data_format=data_format)(x)
if dropout_rate and dropout_rate > 0:
x = tf.keras.layers.Dropout(dropout_rate, name='top_dropout')(x)
x = tf.keras.layers.Dense(
num_classes,
kernel_initializer=DENSE_KERNEL_INITIALIZER,
kernel_regularizer=tf.keras.regularizers.l2(weight_decay),
bias_regularizer=tf.keras.regularizers.l2(weight_decay),
name='logits')(x)
x = tf.keras.layers.Activation('softmax', name='probs', dtype=tf.float32)(x)
return x
| DeepLearningExamples-master | TensorFlow2/Classification/ConvNets/model/efficientnet_model_v2.py |
DeepLearningExamples-master | TensorFlow2/Classification/ConvNets/model/__init__.py |
|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Common modeling utilities."""
from __future__ import absolute_import
from __future__ import division
# from __future__ import google_type_annotations
from __future__ import print_function
import numpy as np
import math
import tensorflow as tf
from typing import Text, Optional
__all__ = ['count_params', 'load_weights', 'round_filters', 'round_repeats']
def count_params(model, trainable_only=True):
"""Returns the count of all model parameters, or just trainable ones."""
if not trainable_only:
return model.count_params()
else:
return int(np.sum([tf.keras.backend.count_params(p)
for p in model.trainable_weights]))
def load_weights(model: tf.keras.Model,
model_weights_path: Text,
weights_format: Text = 'saved_model'):
"""Load model weights from the given file path.
Args:
model: the model to load weights into
model_weights_path: the path of the model weights
weights_format: the model weights format. One of 'saved_model', 'h5',
or 'checkpoint'.
"""
if weights_format == 'saved_model':
loaded_model = tf.keras.models.load_model(model_weights_path)
# The weight values should be passed in the order they are created by the layer.
# Note that the layer's weights must be instantiated before calling this function, by calling the layer.
model.set_weights(loaded_model.get_weights()) # list to list assignment (order matters)
else:
model.load_weights(model_weights_path)
def round_filters(filters: int,
config: dict) -> int:
"""Round number of filters based on width coefficient."""
width_coefficient = config.mparams.width_coefficient
min_depth = config.mparams.min_depth
divisor = config.mparams.depth_divisor
orig_filters = filters
if not width_coefficient:
return filters
filters *= width_coefficient
min_depth = min_depth or divisor
new_filters = max(min_depth, int(filters + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_filters < 0.9 * filters:
new_filters += divisor
return int(new_filters)
def round_repeats(repeats: int, depth_coefficient: float) -> int:
"""Round number of repeats based on depth coefficient."""
return int(math.ceil(depth_coefficient * repeats))
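# ---------------------------------------------------------------------------
# Illustrative worked example (not part of the original file). The stand-in
# config below carries only the fields round_filters() reads; the coefficients
# are made up for illustration.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from types import SimpleNamespace
    example_config = SimpleNamespace(mparams=SimpleNamespace(
        width_coefficient=1.1, min_depth=None, depth_divisor=8))
    print(round_filters(32, example_config))          # 32 * 1.1 = 35.2 -> 32 (multiple of 8, within 10%)
    print(round_repeats(4, depth_coefficient=1.2))    # ceil(4 * 1.2) = 5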
| DeepLearningExamples-master | TensorFlow2/Classification/ConvNets/model/common_modules.py |
# Lint as: python3
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains definitions for EfficientNet v1 model.
[1] Mingxing Tan, Quoc V. Le
EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks.
ICML'19, https://arxiv.org/abs/1905.11946
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import os
from typing import Any, Dict, Optional, List, Text, Tuple
import copy
import tensorflow as tf
import tensorflow.keras.backend as K
import horovod.tensorflow as hvd
from utils.optimizer_factory import GradientAccumulator
from model.layers import simple_swish, hard_swish, identity, gelu, get_activation
from model.blocks import conv2d_block, mb_conv_block
from model.common_modules import round_filters, round_repeats, load_weights
from dataloader import preprocessing
from dataloader.dataset_factory import mixing_lite
DENSE_KERNEL_INITIALIZER = {
'class_name': 'VarianceScaling',
'config': {
'scale': 1 / 3.0,
'mode': 'fan_in',
'distribution': 'uniform'
}
}
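# With a uniform distribution, VarianceScaling samples from U(-limit, limit) where
# limit = sqrt(3 * scale / fan); with scale = 1/3 this reduces to 1 / sqrt(fan).
# The 'mode' entry is overwritten from config.mparams.weight_init in _build() below.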
@tf.keras.utils.register_keras_serializable(package='Vision')
class Model(tf.keras.Model):
"""Wrapper class for an EfficientNet v1 Keras model.
Contains helper methods to build, manage, and save metadata about the model.
"""
def __init__(self,
config: Dict[Text, Any] = None):
"""Create an EfficientNet v1 model.
Args:
config: (optional) the main model parameters used to create the model
"""
super().__init__()
self.config = config
if self.config.grad_accum_steps > 1:
self.grad_accumulator = GradientAccumulator()
self.gradients_gnorm = tf.Variable(0, trainable=False, dtype=tf.float32)
self.local_step = tf.Variable(initial_value=0, dtype=tf.int64, trainable=False, aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA)
input_channels = config.mparams.input_channels
# Consistent with the channels-last format; permuted in _build if channels-first is requested.
input_shape = (None, None, input_channels) # Should handle any image size
image_input = tf.keras.layers.Input(shape=input_shape)
is_training ="predict" not in config.mode
if is_training:
mixup_input = tf.keras.layers.Input(shape=(1, 1, 1))
cutmix_input = tf.keras.layers.Input(shape=(None, None, 1))
is_tr_split = tf.keras.layers.Input(shape=(1)) # indicates whether we use tr or eval data loader
inputs = [image_input,mixup_input,cutmix_input,is_tr_split]
else:
inputs = [image_input]
output = self._build(inputs)
# Cast to float32 in case we have a different model dtype
output = tf.cast(output, tf.float32)
# defining a Model object within another Model object is not the best design idea,
# but I wanted to make use of existing functional API code from Subhankar
self.model = tf.keras.Model(inputs=inputs,outputs=output)
def call(self,data):
is_predict ="predict" in self.config.mode
if not is_predict:
x=data['image']
mixup_weights = data['mixup_weight']
cutmix_masks = data['cutmix_mask']
is_tr_split = data['is_tr_split']
return self.model([x,mixup_weights,cutmix_masks,is_tr_split])
else:
return self.model([data])
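# Expected input structure, sketched from the branches above (the actual tensors
# are produced by dataloader.dataset_factory):
#   train / train_and_eval modes: data = {
#       'image':        float tensor [batch, H, W, C],
#       'mixup_weight': float tensor [batch, 1, 1, 1],
#       'cutmix_mask':  float tensor [batch, H, W, 1],
#       'is_tr_split':  float tensor [batch, 1], 0 for the eval split, nonzero for train,
#   }
#   predict mode: data is just the image batch itself.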
def _build(self,
input: List[tf.keras.layers.Input]):
"""Creates an EfficientNet v1 graph given the model parameters.
This function is wrapped by the `Model` class above to make a tf.keras.Model.
Args:
input: the list of Keras input tensors (the image batch, plus the mixing inputs in training modes)
Returns:
the output of efficientnet v1
"""
config = self.config
depth_coefficient = config.mparams.depth_coefficient
blocks = config.mparams.blocks
stem_base_filters = config.mparams.stem_base_filters
top_base_filters = config.mparams.top_base_filters
activation = get_activation(config.mparams.activation)
dropout_rate = config.mparams.dropout_rate
drop_connect_rate = config.mparams.drop_connect_rate
num_classes = config.mparams.num_classes
input_channels = config.mparams.input_channels
rescale_input = config.mparams.rescale_input
data_format = tf.keras.backend.image_data_format()
dtype = config.mparams.dtype
weight_decay = config.weight_decay
weight_init = config.mparams.weight_init
train_batch_size = config.train_batch_size
do_mixup = config.mixup_alpha > 0
do_cutmix = config.cutmix_alpha > 0
def cond_mixing(args):
images,mixup_weights,cutmix_masks,is_tr_split = args
return tf.cond(tf.keras.backend.equal(is_tr_split[0],0),
lambda: images, # eval phase
lambda: mixing_lite(images,mixup_weights,cutmix_masks, train_batch_size, do_mixup, do_cutmix)) # tr phase
images = input[0]
x = images
if len(input) > 1:
# we get here only during train or train_and_eval modes
if self.config.defer_img_mixing:
# we get here only if we chose not to perform image mixing in the data loader
# image mixing on the device further accelerates training
mixup_weights = input[1]
cutmix_masks = input[2]
is_tr_split = input[3]
x = tf.keras.layers.Lambda(cond_mixing)([images,mixup_weights,cutmix_masks,is_tr_split])
# data loader outputs data in the channels last format
if data_format == 'channels_first':
# Happens on GPU/TPU if available.
x = tf.keras.layers.Permute((3, 1, 2))(x)
if rescale_input:
# x-mean/std
x = preprocessing.normalize_images(x,
mean_rgb=config.mparams.mean_rgb,
stddev_rgb=config.mparams.std_rgb,
num_channels=input_channels,
dtype=dtype,
data_format=data_format)
# Build stem
x = conv2d_block(x,
round_filters(stem_base_filters, config),
config,
kernel_size=[3, 3],
strides=[2, 2],
activation=activation,
name='stem')
# Build blocks
num_blocks_total = sum(
round_repeats(block['num_repeat'], depth_coefficient) for block in blocks)
block_num = 0
for stack_idx, block in enumerate(blocks):
assert block['num_repeat'] > 0
# Update block input and output filters based on depth multiplier
block.update({
'input_filters':round_filters(block['input_filters'], config),
'output_filters':round_filters(block['output_filters'], config),
'num_repeat':round_repeats(block['num_repeat'], depth_coefficient)})
# The first block needs to take care of stride and filter size increase
drop_rate = drop_connect_rate * float(block_num) / num_blocks_total
config.mparams.update({'drop_connect_rate': drop_rate}) # TODO(Sugh) replace
block_prefix = 'stack_{}/block_0/'.format(stack_idx)
x = mb_conv_block(x, block, config, block_prefix)
block_num += 1
if block['num_repeat'] > 1:
block.update({
'input_filters':block['output_filters'],
'strides':(1, 1)
})
for block_idx in range(block['num_repeat'] - 1):
drop_rate = drop_connect_rate * float(block_num) / num_blocks_total
config.mparams.update({'drop_connect_rate': drop_rate})
block_prefix = 'stack_{}/block_{}/'.format(stack_idx, block_idx + 1)
x = mb_conv_block(x, block, config, prefix=block_prefix)
block_num += 1
# Build top
x = conv2d_block(x,
round_filters(top_base_filters, config),
config,
activation=activation,
name='top')
# Build classifier
DENSE_KERNEL_INITIALIZER['config']['mode'] = weight_init
x = tf.keras.layers.GlobalAveragePooling2D(name='top_pool')(x)
if dropout_rate and dropout_rate > 0:
x = tf.keras.layers.Dropout(dropout_rate, name='top_dropout')(x)
x = tf.keras.layers.Dense(
num_classes,
kernel_initializer=DENSE_KERNEL_INITIALIZER,
kernel_regularizer=tf.keras.regularizers.l2(weight_decay),
bias_regularizer=tf.keras.regularizers.l2(weight_decay),
name='logits')(x)
x = tf.keras.layers.Activation('softmax', name='probs', dtype=tf.float32)(x)
return x
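# Instantiation sketch (hypothetical; in this repo the config object is assembled by
# the config system and handed in by the training script):
#   model = Model(config)
#   probs = model({'image': images, 'mixup_weight': mixup_w,
#                  'cutmix_mask': cutmix_m, 'is_tr_split': split_flag})
# The output is the class probability vector (softmax over num_classes), cast to
# float32 regardless of the compute dtype.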
| DeepLearningExamples-master | TensorFlow2/Classification/ConvNets/model/efficientnet_model_v1.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from typing import Any, Dict, Optional, Text, Tuple
from model.layers import get_activation
from model.blocks import conv2d_block
__all__ = ['fused_mb_conv_block']
def fused_mb_conv_block(inputs: tf.Tensor,
block: dict,
config: dict,
prefix: Text = None):
"""Mobile Inverted Residual Bottleneck.
Args:
inputs: the Keras input to the block
block: BlockConfig, arguments to create a Block
config: ModelConfig, a set of model parameters
prefix: prefix for naming all layers
Returns:
the output of the block
"""
# use_se = config.mparams.use_se
activation = get_activation(config.mparams.activation)
drop_connect_rate = config.mparams.drop_connect_rate
conv_dropout = config.mparams.conv_dropout
data_format = tf.keras.backend.image_data_format()
# use_depthwise = block['conv_type'] != 'no_depthwise'
prefix = prefix or ''
filters = block['input_filters'] * block['expand_ratio']
x = inputs
if block['expand_ratio'] != 1:
# Expansion phase
x = conv2d_block(x,
filters,
config,
kernel_size= block['kernel_size'],
strides= block['strides'],
activation=activation,
name=prefix + 'expand')
if conv_dropout and conv_dropout > 0 and block['expand_ratio'] > 1:
x = tf.keras.layers.Dropout(conv_dropout)(x) # training unset
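# Note on the "fused" design: the expansion above is a single full k x k convolution
# that also carries the block's stride, replacing the 1x1 expand + k x k depthwise
# pair used in mb_conv_block; the projection below is then a plain 1x1 convolution.
# When expand_ratio == 1, the k x k convolution and the stride move to the projection step.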
# Squeeze and Excitation phase
if block['se_ratio'] is not None:
assert 0 < block['se_ratio'] <= 1
num_reduced_filters = max(1, int(
block['input_filters'] * block['se_ratio']
))
if data_format == 'channels_first':
se_shape = (filters, 1, 1)
else:
se_shape = (1, 1, filters)
se = tf.keras.layers.GlobalAveragePooling2D(name=prefix + 'se_squeeze',data_format=data_format)(x)
se = tf.keras.layers.Reshape(se_shape, name=prefix + 'se_reshape')(se)
se = conv2d_block(se,
num_reduced_filters,
config,
use_bias=True,
use_batch_norm=False,
activation=activation,
name=prefix + 'se_reduce')
se = conv2d_block(se,
filters,
config,
use_bias=True,
use_batch_norm=False,
activation='sigmoid',
name=prefix + 'se_expand')
x = tf.keras.layers.multiply([x, se], name=prefix + 'se_excite')
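# Shape walk-through (channels_last, illustrative numbers): with input_filters=16,
# expand_ratio=6 and se_ratio=0.25, filters=96 and num_reduced_filters=4, so
#   x: (N, H, W, 96) -> GAP: (N, 96) -> Reshape: (N, 1, 1, 96)
#   -> se_reduce 1x1 conv: (N, 1, 1, 4) -> se_expand 1x1 conv + sigmoid: (N, 1, 1, 96)
# and the resulting per-channel gate is broadcast-multiplied over all spatial positions.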
# Output phase
x = conv2d_block(x,
block['output_filters'],
config,
kernel_size=1 if block['expand_ratio'] != 1 else block['kernel_size'],
strides=1 if block['expand_ratio'] != 1 else block['strides'],
activation=None,
name=prefix + 'project')
# Apply the activation here only when there was no expansion phase; note the identity activation added below.
if block['expand_ratio'] == 1:
x = tf.keras.layers.Activation(activation,
name=prefix + 'project_activation')(x)
# Add identity so that quantization-aware training can insert quantization
# ops correctly.
x = tf.keras.layers.Activation(get_activation('identity'),
name=prefix + 'id')(x)
if (block['id_skip']
and all(s == 1 for s in block['strides'])
and block['input_filters'] == block['output_filters']):
if drop_connect_rate and drop_connect_rate > 0:
# Apply dropconnect
# The only difference between dropout and dropconnect in TF is scaling by
# drop_connect_rate during training. See:
# https://github.com/keras-team/keras/pull/9898#issuecomment-380577612
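# With noise_shape=(None, 1, 1, 1) a single keep/drop decision is drawn per example
# and broadcast over H, W and channels, so the whole residual branch is either kept
# (scaled by 1 / (1 - rate)) or zeroed, which is exactly stochastic depth.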
x = tf.keras.layers.Dropout(drop_connect_rate,
noise_shape=(None, 1, 1, 1),
name=prefix + 'drop')(x)
x = tf.keras.layers.add([x, inputs], name=prefix + 'add')
return x | DeepLearningExamples-master | TensorFlow2/Classification/ConvNets/model/blocks/fused_mb_conv_block.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from model.blocks.conv2d_block import conv2d_block
from model.blocks.mb_conv_block import mb_conv_block
from model.blocks.fused_mb_conv_block import fused_mb_conv_block
__all__ = ['conv2d_block', 'mb_conv_block','fused_mb_conv_block'] | DeepLearningExamples-master | TensorFlow2/Classification/ConvNets/model/blocks/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from typing import Any, Dict, Optional, Text, Tuple
from model.layers import get_batch_norm
__all__ = ['conv2d_block']
CONV_KERNEL_INITIALIZER = {
'class_name': 'VarianceScaling',
'config': {
'scale': 2.0,
'mode': 'fan_in',
# Note: this is a truncated normal distribution
'distribution': 'normal'
}
}
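# With 'distribution': 'normal', VarianceScaling draws from a truncated normal with
# stddev = sqrt(scale / fan); scale = 2.0 together with 'fan_in' is He initialization.
# The 'mode' entry is overwritten from config.mparams.weight_init inside conv2d_block().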
def conv2d_block(inputs: tf.Tensor,
conv_filters: Optional[int],
config: dict,
kernel_size: Any = (1, 1),
strides: Any = (1, 1),
use_batch_norm: bool = True,
use_bias: bool = False,
activation: Any = None,
depthwise: bool = False,
name: Text = None):
"""A conv2d followed by batch norm and an activation."""
batch_norm = get_batch_norm(config.mparams.batch_norm)
bn_momentum = config.mparams.bn_momentum
bn_epsilon = config.mparams.bn_epsilon
data_format = tf.keras.backend.image_data_format()
weight_decay = config.weight_decay
name = name or ''
# Collect args based on what kind of conv2d block is desired
init_kwargs = {
'kernel_size': kernel_size,
'strides': strides,
'use_bias': use_bias,
'padding': 'same',
'name': name + '_conv2d',
'kernel_regularizer': tf.keras.regularizers.l2(weight_decay),
'bias_regularizer': tf.keras.regularizers.l2(weight_decay),
'data_format':data_format
}
CONV_KERNEL_INITIALIZER['config']['mode'] = config.mparams.weight_init
if depthwise:
conv2d = tf.keras.layers.DepthwiseConv2D
init_kwargs.update({'depthwise_initializer': CONV_KERNEL_INITIALIZER})
else:
conv2d = tf.keras.layers.Conv2D
init_kwargs.update({'filters': conv_filters,
'kernel_initializer': CONV_KERNEL_INITIALIZER})
x = conv2d(**init_kwargs)(inputs)
if use_batch_norm:
bn_axis = 1 if data_format == 'channels_first' else -1
x = batch_norm(axis=bn_axis,
momentum=bn_momentum,
epsilon=bn_epsilon,
name=name + '_bn')(x)
if activation is not None:
x = tf.keras.layers.Activation(activation,
name=name + '_activation')(x)
return x | DeepLearningExamples-master | TensorFlow2/Classification/ConvNets/model/blocks/conv2d_block.py |