python_code | repo_name | file_path |
---|---|---|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Re-Identification inference module."""
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/re_identification/inference/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Original source taken from https://github.com/michuanhaohao/reid-strong-baseline
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Builds Transforms based on training and validation."""
import torchvision.transforms as T
import random
import math
def build_transforms(model_config, is_train=True):
"""Return transforms for images based on the training and validation context.
This function generates different sets of transformation operations for training and validation processes.
For training, the operations include resizing, horizontal flip, padding, random crop, normalization and random erasing.
For validation, the operations include only resizing and normalization.
Args:
model_config (dict): A dictionary containing the model and dataset configurations.
is_train (bool): Indicates if the transformations are for training. If False, transformations are for validation.
Defaults to True.
Returns:
torchvision.transforms.Compose: A compose object that contains the list of transformations to be applied on an image.
"""
normalize_transform = T.Normalize(mean=model_config['dataset']['pixel_mean'], std=model_config['dataset']['pixel_std'])
if is_train:
transform = T.Compose([
T.Resize([model_config['model']['input_height'], model_config['model']['input_width']]),
T.RandomHorizontalFlip(p=model_config['dataset']['prob']),
T.Pad(model_config['dataset']['padding']),
T.RandomCrop([model_config['model']['input_height'], model_config['model']['input_width']]),
T.ToTensor(),
normalize_transform,
RandomErasing(probability=model_config['dataset']['re_prob'], mean=model_config['dataset']['pixel_mean'])
])
else:
transform = T.Compose([
T.Resize([model_config['model']['input_height'], model_config['model']['input_width']]),
T.ToTensor(),
normalize_transform
])
return transform
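# Usage sketch (not part of the original file): the nested dict below is a
# hypothetical stand-in for the experiment spec and only mirrors the keys that
# build_transforms reads above.
#
#   >>> example_config = {
#   ...     "model": {"input_height": 256, "input_width": 128},
#   ...     "dataset": {"pixel_mean": [0.485, 0.456, 0.406],
#   ...                 "pixel_std": [0.229, 0.224, 0.225],
#   ...                 "prob": 0.5, "padding": 10, "re_prob": 0.5}}
#   >>> train_transform = build_transforms(example_config, is_train=True)
#   >>> val_transform = build_transforms(example_config, is_train=False)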
class RandomErasing(object):
"""A data augmentation technique that randomly selects a rectangle region in an image and erases its pixels.
This technique can help in the training process by introducing a form of noise. The target area is computed based on
a set of pre-defined probability and aspect ratio parameters. The pixel values of the erased region are replaced
by the mean pixel values of the image.
Args:
probability (float, optional): The probability that the random erasing operation will be performed. Defaults to 0.5.
sl (float, optional): The lower bound of the range from which the area of the erase region is randomly sampled. Defaults to 0.02.
sh (float, optional): The upper bound of the range from which the area of the erase region is randomly sampled. Defaults to 0.4.
r1 (float, optional): The lower bound of the range from which the aspect ratio of the erase region is randomly sampled. Defaults to 0.3.
mean (tuple, optional): The pixel mean values for each channel. Defaults to (0.4914, 0.4822, 0.4465).
Methods:
__call__(img): Performs the random erasing operation on the input image tensor.
"""
def __init__(self, probability=0.5, sl=0.02, sh=0.4, r1=0.3, mean=(0.4914, 0.4822, 0.4465)):
"""
Constructor to initialize random erasing technique for augmentation.
Args:
probability (float): Probability that the random erasing operation is applied.
sl (float): Lower bound of the uniform distribution for the target erase area.
sh (float): Upper bound of the uniform distribution for the target erase area.
r1 (float): Lower bound of the uniform distribution for the aspect ratio.
mean (tuple): Per-channel pixel values used to fill the erased region.
"""
self.probability = probability
self.mean = mean
self.sl = sl
self.sh = sh
self.r1 = r1
def __call__(self, img):
"""Perform the random erasing operation on the input image tensor.
The function computes a target erase region on the image based on the initialized parameters.
If the region is valid, the pixel values in this region will be replaced by the initialized mean values.
Args:
img (torch.Tensor): The input image tensor, expected in (C, H, W) format.
Returns:
torch.Tensor: The image tensor after random erasing operation.
"""
if random.uniform(0, 1) >= self.probability:
return img
for _ in range(100):
area = img.size()[1] * img.size()[2]
target_area = random.uniform(self.sl, self.sh) * area
aspect_ratio = random.uniform(self.r1, 1 / self.r1)
h = int(round(math.sqrt(target_area * aspect_ratio)))
w = int(round(math.sqrt(target_area / aspect_ratio)))
if w < img.size()[2] and h < img.size()[1]:
x1 = random.randint(0, img.size()[1] - h)
y1 = random.randint(0, img.size()[2] - w)
if img.size()[0] == 3:
img[0, x1:x1 + h, y1:y1 + w] = self.mean[0]
img[1, x1:x1 + h, y1:y1 + w] = self.mean[1]
img[2, x1:x1 + h, y1:y1 + w] = self.mean[2]
else:
img[0, x1:x1 + h, y1:y1 + w] = self.mean[0]
return img
return img
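# Usage sketch (illustrative, not from the original source): RandomErasing acts
# on a (C, H, W) tensor, i.e. after ToTensor/Normalize in the pipeline above.
#
#   >>> import torch
#   >>> eraser = RandomErasing(probability=1.0)   # always erase, for the demo
#   >>> img = torch.rand(3, 256, 128)             # C x H x W
#   >>> out = eraser(img)                         # a random patch now holds the channel means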
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/re_identification/dataloader/transforms.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Re-Identification dataloader module."""
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/re_identification/dataloader/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Build torch data loader."""
import os
import torch
from torch.utils.data import DataLoader
from nvidia_tao_pytorch.cv.re_identification.dataloader.datasets.market1501 import Market1501
from nvidia_tao_pytorch.cv.re_identification.dataloader.sampler import RandomIdentitySampler
from nvidia_tao_pytorch.cv.re_identification.dataloader.datasets.bases import ImageDataset
from nvidia_tao_pytorch.cv.re_identification.dataloader.transforms import build_transforms
def list_dataset(top_dir):
"""
Return a dictionary keyed by image file name.
This function iterates over the given directory, treating every file as an image,
and stores each file name in a dictionary with a value of 1 to mark that
the image exists.
Args:
top_dir (str): Path to the top-level directory containing images.
Returns:
dict: A dictionary where keys are image file names and values are all 1s,
indicating the existence of the corresponding images.
"""
sample_dict = {}
for img in os.listdir(top_dir):
sample_dict[img] = 1
return sample_dict
def train_collate_fn(batch):
"""
Returns a processed batch of images for training.
This function takes a batch of image data, unpacks the images and person IDs,
stacks the images into a tensor, and returns both the image tensor and the person IDs.
Args:
batch (list): A list of (image, pid, camid, img_path) tuples produced by ImageDataset.
Returns:
tuple: A tuple containing the tensor of stacked images and the tensor of person IDs.
"""
imgs, pids, _, _ = zip(*batch)
pids = torch.tensor(pids, dtype=torch.int64)
return torch.stack(imgs, dim=0), pids
def val_collate_fn(batch):
"""
Returns a processed batch of images for validation & testing.
This function takes a batch of image data, unpacks the images, person IDs, camera IDs,
and image paths, stacks the images into a tensor, and returns the image tensor, person IDs,
camera IDs, and image paths.
Args:
batch (list): A list of (image, pid, camid, img_path) tuples produced by ImageDataset.
Returns:
tuple: A tuple containing the tensor of stacked images, person IDs, camera IDs, and image paths.
"""
imgs, pids, camids, img_paths = zip(*batch)
return torch.stack(imgs, dim=0), pids, camids, img_paths
def build_dataloader(cfg, is_train):
"""
Builds a PyTorch DataLoader object for training or validation/testing.
The DataLoader is created based on whether the process is for training or validation.
For the training process, a RandomIdentitySampler is used to order the data and the
function 'train_collate_fn' is used to process the data. For the validation process,
the function 'val_collate_fn' is used to process the data.
Args:
cfg (DictConfig): Configuration file specifying the parameters for the DataLoader.
is_train (bool): If True, the DataLoader is for training; otherwise, it's for validation/testing.
Returns:
DataLoader: The DataLoader object for training if 'is_train' is True.
DataLoader: The DataLoader object for validation/testing if 'is_train' is False.
int: The number of query samples.
int: The number of classes in the dataset.
"""
val_transforms = build_transforms(cfg, is_train=False)
num_gpus = len(cfg["train"]["gpu_ids"])
num_workers = cfg["dataset"]["num_workers"] * num_gpus
dataset = Market1501(cfg, is_train)
train_loader, val_loader = None, None
if is_train:
train_transforms = build_transforms(cfg, is_train=True)
num_classes = dataset.num_train_pids
train_dataset = ImageDataset(dataset.train, train_transforms)
train_loader = DataLoader(
train_dataset, batch_size=cfg["dataset"]["batch_size"] * num_gpus,
sampler=RandomIdentitySampler(dataset.train, cfg["dataset"]["batch_size"] * num_gpus,
cfg["dataset"]["num_instances"] * num_gpus),
num_workers=num_workers, collate_fn=train_collate_fn
)
else:
num_classes = dataset.num_gallery_pids
val_dataset = ImageDataset(dataset.query + dataset.gallery, val_transforms)
val_loader = DataLoader(
val_dataset, batch_size=cfg["dataset"]["val_batch_size"] * num_gpus, shuffle=False,
num_workers=num_workers, collate_fn=val_collate_fn
)
return train_loader, val_loader, len(dataset.query), num_classes
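# Usage sketch (hypothetical values): the paths below are placeholders and the
# directories must contain Market-1501 style images for this to run end to end.
#
#   >>> cfg = {"train": {"gpu_ids": [0]},
#   ...        "model": {"input_height": 256, "input_width": 128},
#   ...        "dataset": {"num_workers": 4, "batch_size": 64, "val_batch_size": 128,
#   ...                    "num_instances": 4,
#   ...                    "train_dataset_dir": "/data/market1501/bounding_box_train",
#   ...                    "query_dataset_dir": "/data/market1501/query",
#   ...                    "test_dataset_dir": "/data/market1501/bounding_box_test",
#   ...                    "pixel_mean": [0.485, 0.456, 0.406],
#   ...                    "pixel_std": [0.229, 0.224, 0.225],
#   ...                    "prob": 0.5, "padding": 10, "re_prob": 0.5}}
#   >>> train_loader, _, num_query, num_classes = build_dataloader(cfg, is_train=True)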
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/re_identification/dataloader/build_data_loader.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Original source taken from https://github.com/michuanhaohao/reid-strong-baseline
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Sampler Module for Re-Identification."""
from torch.utils.data.sampler import Sampler
from collections import defaultdict
import copy
import random
import numpy as np
class RandomIdentitySampler(Sampler):
"""Randomly samples N identities, then for each identity, randomly samples K instances, therefore batch size is N*K.
RandomIdentitySampler is a subclass of torch.utils.data.Sampler. It overrides the __iter__ and __len__ methods based
on the batch_size, num_instances and num_pids_per_batch. It ensures that for each sampled identity, K consecutive
instances are yielded.
Args:
data_source (list): A list of tuples, where each tuple contains (img_path, pid, camid).
batch_size (int): Number of examples in a batch.
num_instances (int): Number of instances per identity in a batch.
Attributes:
data_source (list): The list of data provided as input.
batch_size (int): The number of examples per batch.
num_instances (int): The number of instances per identity.
num_pids_per_batch (int): The number of unique identities per batch.
index_dic (defaultdict): A dictionary where the keys are unique identities (pid) and the values are
lists of indices corresponding to the identities in the data_source.
pids (list): A list of unique identities (pid) in the data.
length (int): The estimated number of examples in an epoch.
"""
def __init__(self, data_source, batch_size, num_instances):
"""Initialize the sampler with the data, batch size, and number of instances.
Args:
data_source (list): The list of data.
batch_size (int): The size of each batch of data.
num_instances (int): The number of instances per identity.
"""
self.data_source = data_source
self.batch_size = batch_size
self.num_instances = num_instances
self.num_pids_per_batch = self.batch_size // self.num_instances
self.index_dic = defaultdict(list) # dict with list value
# {783: [0, 5, 116, 876, 1554, 2041],...,}
for index, (_, pid, _) in enumerate(self.data_source):
self.index_dic[pid].append(index)
self.pids = list(self.index_dic.keys())
# estimate number of examples in an epoch
self.length = 0
for pid in self.pids:
idxs = self.index_dic[pid]
num = len(idxs)
if num < self.num_instances:
num = self.num_instances
self.length += num - num % self.num_instances
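# Worked example (illustrative): with num_instances = 4, a pid with 10 indices
# contributes 10 - 10 % 4 = 8 to self.length, while a pid with only 2 indices is
# padded up to num_instances and contributes 4.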
def __iter__(self):
"""Create an iterator for the sampler.
Returns:
final_idxs (iterator): An iterator over the indices of the images to be included in the batch.
"""
batch_idxs_dict = defaultdict(list)
for pid in self.pids:
idxs = copy.deepcopy(self.index_dic[pid])
if len(idxs) < self.num_instances:
idxs = np.random.choice(idxs, size=self.num_instances, replace=True)
random.shuffle(idxs)
batch_idxs = []
for idx in idxs:
batch_idxs.append(idx)
if len(batch_idxs) == self.num_instances:
batch_idxs_dict[pid].append(batch_idxs)
batch_idxs = []
avai_pids = copy.deepcopy(self.pids)
final_idxs = []
while len(avai_pids) >= self.num_pids_per_batch:
selected_pids = random.sample(avai_pids, self.num_pids_per_batch)
for pid in selected_pids:
batch_idxs = batch_idxs_dict[pid].pop(0)
final_idxs.extend(batch_idxs)
if len(batch_idxs_dict[pid]) == 0:
avai_pids.remove(pid)
self.length = len(final_idxs)
return iter(final_idxs)
def __len__(self):
"""Return the length of the sampler.
Returns:
length (int): The total number of images to be included in the epoch.
"""
return self.length
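# Usage sketch (hypothetical data): each data_source entry follows the
# (img_path, pid, camid) layout produced by the Market1501 dataset class.
#
#   >>> data = [("img_%d.jpg" % i, i % 4, 0) for i in range(32)]   # 4 pids x 8 images
#   >>> sampler = RandomIdentitySampler(data, batch_size=8, num_instances=2)
#   >>> idxs = list(iter(sampler))    # 4 pids per batch, 2 instances per pid
#   >>> len(idxs) == len(sampler)
#   True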
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/re_identification/dataloader/sampler.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Original source taken from https://github.com/michuanhaohao/reid-strong-baseline
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Custom class for Market1501 dataset."""
import glob
import re
import os.path as osp
from nvidia_tao_pytorch.cv.re_identification.dataloader.datasets.bases import BaseImageDataset
class Market1501(BaseImageDataset):
"""Custom class for the Market1501 dataset.
This class provides an interface to the Market1501 dataset and inherits from the BaseImageDataset class.
"""
def __init__(self, experiment_spec, prepare_for_training, verbose=False):
"""Initialize the Market1501 dataset.
Args:
experiment_spec (dict): Specification of the experiment.
prepare_for_training (bool): If True, prepare the dataset for training.
verbose (bool, optional): If True, print verbose information. Defaults to False.
"""
super(Market1501, self).__init__()
self.prepare_for_training = prepare_for_training
if self.prepare_for_training:
self.train_dir = experiment_spec["dataset"]["train_dataset_dir"]
self.query_dir = experiment_spec["dataset"]["query_dataset_dir"]
self.gallery_dir = experiment_spec["dataset"]["test_dataset_dir"]
elif experiment_spec["inference"]["query_dataset"] and experiment_spec["inference"]["test_dataset"]:
self.query_dir = experiment_spec["inference"]["query_dataset"]
self.gallery_dir = experiment_spec["inference"]["test_dataset"]
elif experiment_spec["evaluate"]["query_dataset"] and experiment_spec["evaluate"]["test_dataset"]:
self.query_dir = experiment_spec["evaluate"]["query_dataset"]
self.gallery_dir = experiment_spec["evaluate"]["test_dataset"]
self._check_before_run()
query = self._process_dir(self.query_dir, relabel=False)
gallery = self._process_dir(self.gallery_dir, relabel=False)
if self.prepare_for_training:
train = self._process_dir(self.train_dir, relabel=True)
self.print_dataset_statistics(train, query, gallery)
else:
self.print_dataset_statistics(query, gallery)
if self.prepare_for_training:
self.train = train
self.query = query
self.gallery = gallery
self.transform = None
if self.prepare_for_training:
self.num_train_pids, self.num_train_imgs, self.num_train_cams = self.get_imagedata_info(self.train)
self.num_query_pids, self.num_query_imgs, self.num_query_cams = self.get_imagedata_info(self.query)
self.num_gallery_pids, self.num_gallery_imgs, self.num_gallery_cams = self.get_imagedata_info(self.gallery)
def _check_before_run(self):
"""Check if all files are available before going deeper."""
if self.prepare_for_training and not osp.exists(self.train_dir):
raise FileNotFoundError("'{}' is not available".format(self.train_dir))
if not osp.exists(self.query_dir):
raise FileNotFoundError("'{}' is not available".format(self.query_dir))
if not osp.exists(self.gallery_dir):
raise FileNotFoundError("'{}' is not available".format(self.gallery_dir))
def _process_dir(self, dir_path, relabel=False):
"""Check the directory and return a dataset.
Args:
dir_path (str): Path to the directory.
relabel (bool, optional): If True, relabel the data. Defaults to False.
Returns:
list: A list of tuples containing the image path, person ID, and camera ID.
"""
img_paths = glob.glob(osp.join(dir_path, '*.jpg'))
pattern = re.compile(r'([-\d]+)_c(\d)')
pid_container = set()
for img_path in img_paths:
pid, _ = map(int, pattern.search(img_path).groups())
if pid == -1:
continue # junk images are just ignored
pid_container.add(pid)
pid2label = {pid: label for label, pid in enumerate(pid_container)}
dataset = []
for img_path in img_paths:
pid, camid = map(int, pattern.search(img_path).groups())
if pid == -1:
continue # junk images are just ignored
# assert 0 <= pid <= 1501, "The number of person IDs should be between 0 and 1501."
# assert 1 <= camid <= 6, "The camera ID should be between 1 and 6."
camid -= 1 # index starts from 0
if relabel:
pid = pid2label[pid]
dataset.append((img_path, pid, camid))
return dataset
def __len__(self):
"""Return the length of the training dataset if it's prepared for training, otherwise None."""
if self.prepare_for_training:
return self.num_train_pids
return None
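# Illustrative note: Market-1501 file names encode the person ID and camera ID,
# which the regex in _process_dir extracts. The file name below is a sample of
# that naming convention, not a file shipped with this repository.
#
#   >>> import re
#   >>> pid, camid = map(int, re.compile(r'([-\d]+)_c(\d)').search('0002_c1s1_000451_03.jpg').groups())
#   >>> pid, camid
#   (2, 1)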
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/re_identification/dataloader/datasets/market1501.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Re-Identification datasets module."""
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/re_identification/dataloader/datasets/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Original source taken from https://github.com/michuanhaohao/reid-strong-baseline
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base Module for all datasets."""
from tabulate import tabulate
from nvidia_tao_pytorch.cv.re_identification.utils.common_utils import read_image
from torch.utils.data import Dataset
class BaseDataset(object):
"""Base class for all datasets.
This class serves as the base class for all re-identification datasets.
It provides methods for retrieving metadata from the images.
"""
def get_imagedata_info(self, data):
"""Return metadata from the images.
Args:
data (list): A list of tuples containing image data.
Returns:
tuple: A tuple containing the number of unique person IDs, the total number of images,
and the number of unique camera IDs.
"""
pids, cams = [], []
for _, pid, camid in data:
pids += [pid]
cams += [camid]
pids = set(pids)
cams = set(cams)
num_pids = len(pids)
num_cams = len(cams)
num_imgs = len(data)
return num_pids, num_imgs, num_cams
def print_dataset_statistics(self):
"""Base class for image re-identification datasets.
This class inherits from BaseDataset and provides a method to print dataset statistics.
"""
raise NotImplementedError("Printing dataset statistics is not implemented.")
class BaseImageDataset(BaseDataset):
"""Base class for image re-identification datasets.
This class inherits from BaseDataset and provides a method to print dataset statistics.
"""
def print_dataset_statistics(self, *args):
"""Print the dataset statistics.
This method prints the number of person IDs, number of images, and number of cameras
for each subset of the dataset.
Args:
*args: Variable length argument list of datasets.
"""
table = []
if len(args) == 3:
dataset_type = ["Train", "Query", "Gallery"]
elif len(args) == 2:
dataset_type = ["Query", "Gallery"]
for index, dataset in enumerate(args):
num_pids, num_imgs, num_cams = self.get_imagedata_info(dataset)
table.append([dataset_type[index], num_pids, num_imgs, num_cams])
print(tabulate(table, headers=["Subset", "# IDs", "# Images", "# Cameras"], floatfmt=".4f", tablefmt="fancy_grid"))
class ImageDataset(Dataset):
"""Dataset class for images.
This class stores images, object IDs, camera IDs, and image paths.
"""
def __init__(self, dataset, transform=None):
"""Initialize the ImageDataset.
Args:
dataset (list): A list of tuples containing image data.
transform (callable, optional): A function/transform to apply to the images. Defaults to None.
"""
self.dataset = dataset
self.transform = transform
def __len__(self):
"""Return the length of the dataset."""
return len(self.dataset)
def __getitem__(self, index):
"""Return the image, person ID, camera ID, and image path for a given index.
Args:
index (int): Index of the item to retrieve.
Returns:
tuple: A tuple containing the image, person ID, camera ID, and image path.
"""
img_path, pid, camid = self.dataset[index]
img = read_image(img_path)
if self.transform is not None:
img = self.transform(img)
return img, pid, camid, img_path
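# Usage sketch (hypothetical path): ImageDataset pairs (img_path, pid, camid)
# tuples with an optional torchvision transform; the image file must exist on disk.
#
#   >>> import torchvision.transforms as T
#   >>> samples = [("/data/market1501/query/0002_c1s1_000451_03.jpg", 2, 0)]
#   >>> ds = ImageDataset(samples, transform=T.ToTensor())
#   >>> img, pid, camid, path = ds[0]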
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/re_identification/dataloader/datasets/bases.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Action recognition root module."""
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/action_recognition/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Action recognition config module."""
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/action_recognition/config/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Default config file"""
from typing import Optional, List, Dict
from dataclasses import dataclass, field
from omegaconf import MISSING
@dataclass
class ARModelConfig:
"""Action recognition model config."""
model_type: str = "joint"
backbone: str = "resnet_18"
input_type: str = "2d"
of_seq_length: int = 10
of_pretrained_model_path: Optional[str] = None
of_pretrained_num_classes: int = 0 # 0 means the pretrained model has the same classes number
rgb_seq_length: int = 3
rgb_pretrained_model_path: Optional[str] = None
rgb_pretrained_num_classes: int = 0 # 0 means the pretrained model has the same classes number
num_fc: int = 64
joint_pretrained_model_path: Optional[str] = None
sample_strategy: str = "random_interval" # [random_interval, consecutive]
sample_rate: int = 1
imagenet_pretrained: bool = False  # Only for internal use; defaults to False for release.
# 0.0 for resnet18 2D on SHAD, 0.5 for I3D on HMDB51, 0.8 for ResNet3D on HMDB51
dropout_ratio: float = 0.5
input_width: int = 224
input_height: int = 224
@dataclass
class OptimConfig:
"""Optimizer config."""
lr: float = 5e-4
momentum: float = 0.9
weight_decay: float = 5e-4
lr_scheduler: str = "MultiStep" # {AutoReduce, MultiStep}
lr_monitor: str = "val_loss" # {val_loss, train_loss}
patience: int = 1
min_lr: float = 1e-4
lr_steps: List[int] = field(default_factory=lambda: [15, 25])
lr_decay: float = 0.1
@dataclass
class ARAugmentationConfig:
"""Augmentation config."""
train_crop_type: str = "random_crop" # [random_crop, multi_scale_crop, no_crop]
scales: List[float] = field(default_factory=lambda: [1])
horizontal_flip_prob: float = 0.5
rgb_input_mean: List[float] = field(default_factory=lambda: [0.485, 0.456, 0.406])
rgb_input_std: List[float] = field(default_factory=lambda: [0.229, 0.224, 0.225])
of_input_mean: List[float] = field(default_factory=lambda: [0.5])
of_input_std: List[float] = field(default_factory=lambda: [0.5])
val_center_crop: bool = False
crop_smaller_edge: int = 256
@dataclass
class ARDatasetConfig:
"""Dataset config."""
train_dataset_dir: Optional[str] = None
val_dataset_dir: Optional[str] = None
label_map: Optional[Dict[str, int]] = None
batch_size: int = 32
workers: int = 8
clips_per_video: int = 1
augmentation_config: ARAugmentationConfig = ARAugmentationConfig()
@dataclass
class ARTrainExpConfig:
"""Train experiment config."""
results_dir: Optional[str] = None
gpu_ids: List[int] = field(default_factory=lambda: [0])
resume_training_checkpoint_path: Optional[str] = None
optim: OptimConfig = OptimConfig()
num_epochs: int = 10
clip_grad_norm: float = 0.0
checkpoint_interval: int = 5
@dataclass
class ARInferenceExpConfig:
"""Inference experiment config."""
checkpoint: str = MISSING
results_dir: Optional[str] = None
gpu_id: int = 0
inference_dataset_dir: str = MISSING
batch_size: int = 1
video_inf_mode: str = "center" # [center, conv, all]
video_num_segments: int = 1
@dataclass
class AREvalExpConfig:
"""Evaluation experiment config."""
checkpoint: str = MISSING
results_dir: Optional[str] = None
gpu_id: int = 0
test_dataset_dir: str = MISSING
batch_size: int = 1
video_eval_mode: str = "center" # [center, conv, all]
video_num_segments: int = 10
@dataclass
class ARExportExpConfig:
"""Export experiment config."""
checkpoint: str = MISSING
results_dir: Optional[str] = None
onnx_file: Optional[str] = None
gpu_id: int = 0
batch_size: int = 1
@dataclass
class ExperimentConfig:
"""Experiment config."""
model: ARModelConfig = ARModelConfig()
dataset: ARDatasetConfig = ARDatasetConfig()
train: ARTrainExpConfig = ARTrainExpConfig()
evaluate: AREvalExpConfig = AREvalExpConfig()
export: ARExportExpConfig = ARExportExpConfig()
inference: ARInferenceExpConfig = ARInferenceExpConfig()
encryption_key: Optional[str] = None
results_dir: str = MISSING
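# Usage sketch: these dataclasses are normally materialized through Hydra, but a
# minimal way to build and override them directly with OmegaConf is shown below
# (the override values are placeholders).
#
#   >>> from omegaconf import OmegaConf
#   >>> cfg = OmegaConf.structured(ExperimentConfig)
#   >>> cfg = OmegaConf.merge(cfg, {"results_dir": "/tmp/results",
#   ...                             "train": {"num_epochs": 20}})
#   >>> cfg.train.optim.lr
#   0.0005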
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/action_recognition/config/default_config.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Group transformation for action recognition"""
import numpy as np
from PIL import Image
import random
import torch
class GroupWorker(object):
"""Wrapper for group transformation using torchvision."""
def __init__(self, worker):
"""Init worker."""
self.worker = worker
def __call__(self, img_group):
"""img_group: PIL Images list."""
return [self.worker(img) for img in img_group]
class GroupRandomCrop(object):
"""RandomCrop for the group of frames."""
def __init__(self, size):
"""Initialize GroupRandomCrop.
Args:
size (int): The crop size.
"""
self.size = size
def __call__(self, img_group):
"""img_group: PIL Images list."""
w, h = img_group[0].size
th, tw = self.size
out_images = []
x1 = random.randint(0, w - tw)
y1 = random.randint(0, h - th)
for img in img_group:
assert (img.size[0] == w and img.size[1] == h)
if w == tw and h == th:
out_images.append(img)
else:
out_images.append(img.crop((x1, y1, x1 + tw, y1 + th)))
return out_images
class MultiScaleCrop(object):
"""
Crop images with a list of randomly selected scales.
"""
def __init__(self,
input_size,
scales=[1, 0.875, 0.75, 0.66],
max_distort=0,
fix_crop=True,
more_fix_crop=True):
"""Initialize MultiScaleCrop.
Args:
input_size (int | tuple[int]): The (width, height) of the network input.
scales (list[float], optional): The width and height scales to be selected. Defaults to [1, 0.875, 0.75, 0.66].
max_distort (int, optional): The maximum distortion. Defaults to 0.
fix_crop (bool, optional): Whether to use fixed cropping. Defaults to True.
more_fix_crop (bool, optional): Whether to use more fixed cropping. Defaults to True.
"""
self.scales = scales
self.max_distort = max_distort
self.fix_crop = fix_crop
self.more_fix_crop = more_fix_crop
self.input_size = input_size
self.interpolation = Image.BILINEAR
def __call__(self, img_group):
"""img_group: PIL Images list."""
im_size = img_group[0].size
crop_w, crop_h, offset_w, offset_h = self._sample_crop_patch(im_size)
crop_img_group = [img.crop((offset_w, offset_h, offset_w + crop_w, offset_h + crop_h))
for img in img_group]
ret_img_group = [img.resize((self.input_size[0], self.input_size[1]), self.interpolation)
for img in crop_img_group]
return ret_img_group
def _fill_crop_size(self, img_w, img_h):
"""Generate crop size collections."""
base_size = min(img_w, img_h)
crop_sizes = [int(base_size * s) for s in self.scales]
crop_w = [self.input_size[0] if abs(x - self.input_size[0]) < 3 else x for x in crop_sizes]
crop_h = [self.input_size[1] if abs(x - self.input_size[1]) < 3 else x for x in crop_sizes]
candidate_sizes = []
for i, h in enumerate(crop_h):
for j, w in enumerate(crop_w):
if abs(i - j) <= self.max_distort:
candidate_sizes.append((w, h))
return candidate_sizes
def _fill_fix_offset(self, image_w, image_h, crop_w, crop_h):
"""Generate crop offset collections."""
w_step = (image_w - crop_w) // 4
h_step = (image_h - crop_h) // 4
ret = []
ret.append((0, 0)) # upper left
ret.append((4 * w_step, 0)) # upper right
ret.append((0, 4 * h_step)) # lower left
ret.append((4 * w_step, 4 * h_step)) # lower right
ret.append((2 * w_step, 2 * h_step)) # center
if self.more_fix_crop:
ret.append((0, 2 * h_step)) # center left
ret.append((4 * w_step, 2 * h_step)) # center right
ret.append((2 * w_step, 4 * h_step)) # lower center
ret.append((2 * w_step, 0 * h_step)) # upper center
ret.append((1 * w_step, 1 * h_step)) # upper left quarter
ret.append((3 * w_step, 1 * h_step)) # upper right quarter
ret.append((1 * w_step, 3 * h_step)) # lower left quarter
ret.append((3 * w_step, 3 * h_step)) # lower right quarter
return ret
def _sample_crop_patch(self, im_size):
"""Random choose crop patch."""
img_w, img_h = im_size
# find a crop size
candidate_sizes = self._fill_crop_size(img_w, img_h)
crop_width, crop_height = random.choice(candidate_sizes)
if not self.fix_crop:
w_offset = random.randint(0, img_w - crop_width)
h_offset = random.randint(0, img_h - crop_height)
else:
offsets = self._fill_fix_offset(img_w, img_h, crop_width, crop_height)
w_offset, h_offset = random.choice(offsets)
return crop_width, crop_height, w_offset, h_offset
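# Usage sketch (placeholder sizes): MultiScaleCrop takes a list of equally sized
# PIL Images and returns crops resized to input_size.
#
#   >>> from PIL import Image
#   >>> frames = [Image.new("RGB", (320, 240)) for _ in range(8)]
#   >>> cropper = MultiScaleCrop(input_size=(224, 224), scales=[1, 0.875, 0.75])
#   >>> out = cropper(frames)
#   >>> out[0].size
#   (224, 224)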
class GroupRandomHorizontalFlip(object):
"""Random horizontal flip group of frames."""
def __init__(self, flip_prob=0.5, is_flow=False):
"""Initialize GroupRandomHorizontalFlip.
Args:
flip_prob (float, optional): The probability of flipping the frames horizontally. Defaults to 0.5.
is_flow (bool, optional): Whether the input is optical flow. Defaults to False.
"""
self.flip_prob = flip_prob
self.is_flow = is_flow
def __call__(self, img_group):
"""img_group: PIL Images list."""
if random.random() < self.flip_prob:
ret = [img.transpose(Image.FLIP_LEFT_RIGHT) for img in img_group]
# @TODO(tylerz): figure out the right way to flip optical flow
else:
ret = img_group
return ret
class GroupNormalize(object):
"""Normalize the group of frames. substract mean -> divide std."""
def __init__(self, mean, std):
"""Initialize GroupNormalize.
Args:
mean (list[float]): The mean values for each channel.
std (list[float]): The standard deviation values for each channel.
"""
self.mean = mean
self.std = std
def __call__(self, tensor):
"""tensor: torch tensor CTHW."""
if len(self.mean) != 0 and len(self.std) != 0:
rep_mean = self.mean * (tensor.size()[0] // len(self.mean))
rep_std = self.std * (tensor.size()[0] // len(self.std))
# TODO: make efficient
for t, m, s in zip(tensor, rep_mean, rep_std):
t.sub_(m).div_(s)
elif len(self.mean) != 0 and len(self.std) == 0:
rep_mean = self.mean * (tensor.size()[0] // len(self.mean))
# TODO: make efficient
for t, m in zip(tensor, rep_mean):
t.sub_(m)
elif len(self.std) != 0 and len(self.mean) == 0:
rep_std = self.std * (tensor.size()[0] // len(self.std))
# TODO: make efficient
for t, s in zip(tensor, rep_std):
t.div_(s)
return tensor
class GroupThreeCrop(object):
"""Crop group of frames. Crop three parts of each frames."""
def __init__(self, size):
"""Init."""
self.size = size
def __call__(self, img_group):
"""img_group: PIL Images list."""
w, h = img_group[0].size
th, tw = self.size
assert th == h or tw == w
if th == h:
w_step = (w - tw) // 2
offsets = []
offsets.append((0, 0)) # left
offsets.append((2 * w_step, 0)) # right
offsets.append((w_step, 0)) # middle
elif tw == w:
h_step = (h - th) // 2
offsets = []
offsets.append((0, 0)) # top
offsets.append((0, 2 * h_step)) # down
offsets.append((0, h_step)) # middle
new_clips = []
for ow, oh in offsets:
for cur_img in img_group:
# crop_img = cur_img[oh:oh+th, ow:ow+tw, :]
crop_img = cur_img.crop((ow, oh, ow + tw, oh + th))
new_clips.append(crop_img)
return new_clips
class ToTorchFormatTensor(object):
""" Converts numpy.ndarray (T x H x W x C) in the range
[0, 255] to a torch.FloatTensor of shape (C x T x H x W) in the range [0.0, 1.0]
"""
def __init__(self, div=True):
"""Init."""
self.div = div
def __call__(self, pic):
"""pic: ndarray (THWC)"""
if isinstance(pic, np.ndarray):
# handle numpy array
# put it from THWC to CTHW format
imgs = torch.from_numpy(pic).permute(3, 0, 1, 2).contiguous()
else:
raise TypeError("pic should be numpy.ndarray")
return imgs.float().div(255) if self.div else imgs.float()
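# Usage sketch: ToNumpyNDArray (below) and ToTorchFormatTensor are typically
# chained so that a clip ends up as a CTHW float tensor in [0, 1]. The array
# shape below is illustrative.
#
#   >>> import numpy as np
#   >>> clip = np.zeros((16, 224, 224, 3), dtype=np.uint8)   # T x H x W x C
#   >>> tensor = ToTorchFormatTensor()(clip)
#   >>> tuple(tensor.shape)
#   (3, 16, 224, 224)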
class ToNumpyNDArray(object):
"""Convert PIL Images to nd array."""
def __call__(self, img_group):
"""img_group: PIL Images list."""
if img_group[0].mode == 'L':
return np.array([np.stack((np.array(img_group[x]), np.array(img_group[x + 1])), axis=-1)
for x in range(0, len(img_group), 2)])
if img_group[0].mode == 'RGB':
return np.array([np.array(x) for x in img_group])
return np.array([])
class GroupJointWorker(object):
"""Wrapper for joint group transformation using torchvision."""
def __init__(self, worker):
"""Init."""
self.worker = worker
def __call__(self, img_group):
"""img_group: two PIL Images lists for rgb and of respectively."""
rgb_group, of_group = img_group
rgb_group = [self.worker(img) for img in rgb_group]
of_group = [self.worker(img) for img in of_group]
return [rgb_group, of_group]
class JointWorker(object):
"""Wrapper for joint group transformation using other group op."""
def __init__(self, worker):
"""Init."""
self.worker = worker
def __call__(self, img_group):
"""img_group: two PIL Images lists or ndarray for rgb and of respectively."""
rgb_group, of_group = img_group
rgb_ret_group = self.worker(rgb_group)
of_ret_group = self.worker(of_group)
return [rgb_ret_group, of_ret_group]
class GroupJointRandomCrop(object):
"""Group random crop for joint training."""
def __init__(self, size):
"""init."""
self.size = size
def __call__(self, img_group):
"""img_group: two PIL Images lists for rgb and of respectively."""
rgb_group, of_group = img_group
w, h = rgb_group[0].size
th, tw = self.size
out_rgb_images = []
out_of_images = []
x1 = random.randint(0, w - tw)
y1 = random.randint(0, h - th)
for img in rgb_group:
assert (img.size[0] == w and img.size[1] == h)
if w == tw and h == th:
out_rgb_images.append(img)
else:
out_rgb_images.append(img.crop((x1, y1, x1 + tw, y1 + th)))
for img in of_group:
assert (img.size[0] == w and img.size[1] == h)
if w == tw and h == th:
out_of_images.append(img)
else:
out_of_images.append(img.crop((x1, y1, x1 + tw, y1 + th)))
return [out_rgb_images, out_of_images]
class JointMultiScaleCrop(MultiScaleCrop):
"""MultiScaleCrop for joint training."""
def __call__(self, img_group):
"""img_group: two PIL Images lists for rgb and of respectively."""
rgb_group, of_group = img_group
im_size = rgb_group[0].size
crop_w, crop_h, offset_w, offset_h = self._sample_crop_patch(im_size)
rgb_crop_img_group = [img.crop((offset_w, offset_h, offset_w + crop_w, offset_h + crop_h))
for img in rgb_group]
rgb_ret_img_group = [img.resize((self.input_size[0], self.input_size[1]),
self.interpolation) for img in rgb_crop_img_group]
of_crop_img_group = [img.crop((offset_w, offset_h, offset_w + crop_w, offset_h + crop_h))
for img in of_group]
of_ret_img_group = [img.resize((self.input_size[0], self.input_size[1]), self.interpolation)
for img in of_crop_img_group]
return [rgb_ret_img_group, of_ret_img_group]
class GroupJointRandomHorizontalFlip(object):
"""Group random horizontal flip for joint training."""
def __init__(self, flip_prob=0.5):
"""Init."""
self.flip_prob = flip_prob
def __call__(self, img_group):
"""img_group: two PIL Images lists for rgb and of respectively."""
rgb_group, of_group = img_group
if random.random() < self.flip_prob:
rgb_ret = [img.transpose(Image.FLIP_LEFT_RIGHT) for img in rgb_group]
of_ret = [img.transpose(Image.FLIP_LEFT_RIGHT) for img in of_group]
else:
rgb_ret = rgb_group
of_ret = of_group
return [rgb_ret, of_ret]
class GroupJointNormalize(object):
"""Group normalization for joint training."""
def __init__(self, rgb_input_mean, rgb_input_std,
of_input_mean, of_input_std):
"""Init"""
self.rgb_normalize = GroupNormalize(rgb_input_mean,
rgb_input_std)
self.of_normalize = GroupNormalize(of_input_mean,
of_input_std)
def __call__(self, img_group):
"""img_group: two torch tensors for rgb and of respectively."""
rgb_group, of_group = img_group
rgb_ret_group = self.rgb_normalize(rgb_group)
of_ret_group = self.of_normalize(of_group)
return [rgb_ret_group, of_ret_group]
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/action_recognition/utils/group_transforms.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Action recognition utils module."""
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/action_recognition/utils/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utils for action recognition"""
import os
import csv
import torch
import shutil
import struct
from eff.core.codec import encrypt_stream
from nvidia_tao_pytorch.core.connectors.checkpoint_connector import decrypt_checkpoint
def patch_decrypt_checkpoint(checkpoint, key):
"""Decrypt checkpoint to work when using a multi-GPU trained model in a single-GPU environment.
Args:
checkpoint (dict): The encrypted checkpoint.
key (str): The decryption key.
Returns:
dict: The patched decrypted checkpoint.
"""
from functools import partial
legacy_load = torch.load
torch.load = partial(legacy_load, map_location="cpu")
checkpoint = decrypt_checkpoint(checkpoint, key)
torch.load = legacy_load
# set the encrypted status to be False when it is decrypted
checkpoint["state_dict_encrypted"] = False
return checkpoint
def encrypt_onnx(tmp_file_name, output_file_name, key):
"""Encrypt the onnx model"""
with open(tmp_file_name, "rb") as open_temp_file, open(output_file_name,
"wb") as open_encoded_file:
# set the input name magic number
open_encoded_file.write(struct.pack("<i", 0))
encrypt_stream(
input_stream=open_temp_file, output_stream=open_encoded_file,
passphrase=key, encryption=True
)
def check_and_create(d):
"""Create a directory."""
if not os.path.isdir(d):
os.makedirs(d)
def data_to_device(data):
"""Transfer data to GPU."""
if isinstance(data, list):
cuda_data = []
for item in data:
cuda_item = item.cuda(non_blocking=True)
cuda_data.append(cuda_item)
else:
cuda_data = data.cuda(non_blocking=True)
return cuda_data
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
"""Init"""
self.reset()
def reset(self):
"""reset parameters."""
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
"""update accuracy."""
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.reshape(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].reshape(-1).float().sum(0)
res.append(correct_k.mul_(100.0 / batch_size))
return res
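# Usage sketch with dummy logits: AverageMeter accumulates the per-batch top-k
# accuracies returned by accuracy().
#
#   >>> import torch
#   >>> logits = torch.tensor([[2.0, 1.0, 0.1], [0.2, 3.0, 0.5]])
#   >>> targets = torch.tensor([0, 1])
#   >>> top1, = accuracy(logits, targets, topk=(1,))
#   >>> meter = AverageMeter()
#   >>> meter.update(top1.item(), n=targets.size(0))
#   >>> meter.avg
#   100.0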
def save_checkpoint(state, is_best, checkpoint, model_best):
"""Naive checkpoint saver."""
torch.save(state, checkpoint)
if is_best:
shutil.copyfile(checkpoint, model_best)
def record_train_info(info, filename):
"""Naive log information."""
str_log = "train_loss: {} val_loss: {} train_acc@1: {} val_acc@1: {} lr: {}".format(
info['train_loss'],
info['val_loss'],
info['train_acc@1'],
info['val_acc@1'],
info['lr'])
print(str_log)
column_names = ['epoch', 'train_loss', 'val_loss', 'train_acc@1', 'val_acc@1', 'lr']
if not os.path.isfile(filename):
with open(filename, "w") as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=column_names)
writer.writeheader()
writer.writerow(info)
else: # else it exists so append without writing the header
with open(filename, "a") as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=column_names)
writer.writerow(info)
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/action_recognition/utils/common_utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Action recognition scripts module."""
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/action_recognition/scripts/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Export action recognition model to ONNX."""
import os
import torch
from nvidia_tao_pytorch.core.hydra.hydra_runner import hydra_runner
import nvidia_tao_pytorch.core.loggers.api_logging as status_logging
from nvidia_tao_pytorch.cv.action_recognition.config.default_config import ExperimentConfig
from nvidia_tao_pytorch.cv.action_recognition.model.pl_ar_model import ActionRecognitionModel
from nvidia_tao_pytorch.cv.action_recognition.utils.common_utils import check_and_create
from nvidia_tao_pytorch.core.cookbooks.tlt_pytorch_cookbook import TLTPyTorchCookbook
spec_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Load experiment specification, additionally using schema for validation/retrieving the default values.
# --config_path and --config_name will be provided by the entrypoint script.
@hydra_runner(
config_path=os.path.join(spec_root, "experiment_specs"), config_name="experiment", schema=ExperimentConfig
)
def main(cfg: ExperimentConfig) -> None:
"""CLI wrapper to run export.
This function parses the Hydra configuration for export, instantiates the respective
exporter and serializes the trained model to an ONNX file.
Args:
cfg (ExperimentConfig): Hydra configuration for the experiment.
Returns:
No explicit returns.
"""
try:
if cfg.export.results_dir is not None:
results_dir = cfg.export.results_dir
else:
results_dir = os.path.join(cfg.results_dir, "export")
run_export(cfg, output_dir=results_dir)
status_logging.get_status_logger().write(
status_level=status_logging.Status.SUCCESS,
message="Export finished successfully."
)
except (KeyboardInterrupt, SystemExit):
status_logging.get_status_logger().write(
message="Export was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
except Exception as e:
status_logging.get_status_logger().write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
raise e
def run_export(args, output_dir):
"""Wrapper to run export of tlt models.
Args:
args (dict): Dictionary of parsed arguments to run export.
output_dir (str): Directory where the exported model and status logs are written.
Returns:
No explicit returns.
"""
check_and_create(output_dir)
# Set status logging
status_file = os.path.join(output_dir, "status.json")
status_logging.set_status_logger(status_logging.StatusLogger(filename=status_file, append=True))
status_logging.get_status_logger().write(status_level=status_logging.Status.STARTED, message="Starting Action recognition export")
gpu_id = args.export.gpu_id
torch.cuda.set_device(gpu_id)
# Parsing command line arguments.
model_path = args["export"]['checkpoint']
key = args['encryption_key']
# set the encryption key:
TLTPyTorchCookbook.set_passphrase(key)
# data_type = args['data_type']
output_file = args["export"]['onnx_file']
experiment_config = args
# Set default output filename if the filename
# isn't provided over the command line.
if output_file is None:
split_name = os.path.splitext(model_path)[0]
output_file = "{}.onnx".format(split_name)
# Warn the user if an exported file already exists.
assert not os.path.exists(output_file), "Default output file {} already "\
"exists".format(output_file)
# Make an output directory if necessary.
output_root = os.path.dirname(os.path.realpath(output_file))
if not os.path.exists(output_root):
os.makedirs(output_root)
# load model
pl_model = ActionRecognitionModel.load_from_checkpoint(model_path,
map_location="cpu",
experiment_spec=experiment_config,
export=True)
model = pl_model.model
model.eval()
model.cuda()
model_type = experiment_config['model']['model_type']
if model_type == "of":
input_names = ["input_of"]
elif model_type == "rgb":
input_names = ["input_rgb"]
elif model_type == "joint":
input_names = ["input_rgb", "input_of"]
else:
raise ValueError("Wrong model type in the config")
output_names = ["fc_pred"]
# create dummy input
output_shape = [experiment_config["model"]["input_height"],
experiment_config["model"]["input_width"]]
rgb_seq_length = experiment_config['model']['rgb_seq_length']
of_seq_length = experiment_config['model']['of_seq_length']
input_type = experiment_config['model']['input_type']
if input_type == "2d":
if model_type == "of":
dummy_input = torch.randn(1, 2 * of_seq_length,
output_shape[0], output_shape[1]).cuda()
dynamic_axes = {"input_of": {0: "batch"}, "fc_pred": {0: "batch"}}
elif model_type == "rgb":
dummy_input = torch.randn(1, 3 * rgb_seq_length,
output_shape[0], output_shape[1]).cuda()
dynamic_axes = {"input_rgb": {0: "batch"}, "fc_pred": {0: "batch"}}
elif model_type == "joint":
dummy_input = (torch.randn(1, 3 * rgb_seq_length,
output_shape[0], output_shape[1]).cuda(),
torch.randn(1, 2 * of_seq_length,
output_shape[0], output_shape[1]).cuda())
dynamic_axes = {"input_rgb": {0: "batch"}, "input_of": {0: "batch"},
"fc_pred": {0: "batch"}}
else:
raise ValueError("Wrong model type in the config")
elif input_type == "3d":
if model_type == "of":
dummy_input = torch.randn(1, 2, of_seq_length,
output_shape[0], output_shape[1]).cuda()
dynamic_axes = {"input_of": {0: "batch"}, "fc_pred": {0: "batch"}}
elif model_type == "rgb":
dummy_input = torch.randn(1, 3, rgb_seq_length,
output_shape[0], output_shape[1]).cuda()
dynamic_axes = {"input_rgb": {0: "batch"}, "fc_pred": {0: "batch"}}
elif model_type == "joint":
dummy_input = (torch.randn(1, 3, rgb_seq_length,
output_shape[0], output_shape[1]).cuda(),
torch.randn(1, 2, of_seq_length,
output_shape[0], output_shape[1]).cuda())
dynamic_axes = {"input_rgb": {0: "batch"}, "input_of": {0: "batch"},
"fc_pred": {0: "batch"}}
else:
raise ValueError("Wrong model type in the config")
# export
torch.onnx.export(model,
dummy_input,
output_file,
input_names=input_names,
output_names=output_names,
dynamic_axes=dynamic_axes,
verbose=True)
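# Optional sanity check (sketch, not part of the original flow): the exported
# file can be reloaded with onnxruntime, assuming that package is installed.
#
#   >>> import onnxruntime as ort
#   >>> sess = ort.InferenceSession(output_file, providers=["CPUExecutionProvider"])
#   >>> [inp.name for inp in sess.get_inputs()]   # e.g. ['input_rgb']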
if __name__ == "__main__":
main()
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/action_recognition/scripts/export.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Train action recognition model."""
import os
import re
from nvidia_tao_pytorch.core.callbacks.loggers import TAOStatusLogger
from nvidia_tao_pytorch.core.connectors.checkpoint_connector import TLTCheckpointConnector
from nvidia_tao_pytorch.core.cookbooks.tlt_pytorch_cookbook import TLTPyTorchCookbook
from nvidia_tao_pytorch.core.hydra.hydra_runner import hydra_runner
import nvidia_tao_pytorch.core.loggers.api_logging as status_logging
from nvidia_tao_pytorch.cv.action_recognition.config.default_config import ExperimentConfig
from nvidia_tao_pytorch.cv.action_recognition.model.pl_ar_model import ActionRecognitionModel
from nvidia_tao_pytorch.cv.action_recognition.utils.common_utils import check_and_create
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import ModelCheckpoint
def run_experiment(experiment_config,
results_dir,
key):
"""Start the training."""
# set the encryption key:
TLTPyTorchCookbook.set_passphrase(key)
ar_model = ActionRecognitionModel(experiment_config)
total_epochs = experiment_config['train']['num_epochs']
check_and_create(results_dir)
status_logger_callback = TAOStatusLogger(results_dir, append=True, num_epochs=total_epochs)
status_logging.set_status_logger(status_logger_callback.logger)
clip_grad = experiment_config['train']['clip_grad_norm']
gpus_ids = experiment_config['train']["gpu_ids"]
acc_flag = None
if len(gpus_ids) > 1:
acc_flag = "ddp"
trainer = Trainer(gpus=gpus_ids,
max_epochs=total_epochs,
check_val_every_n_epoch=experiment_config['train']['checkpoint_interval'],
default_root_dir=results_dir,
accelerator='gpu',
strategy=acc_flag,
gradient_clip_val=clip_grad)
# Overload connector to enable intermediate ckpt encryption & decryption.
resume_ckpt = experiment_config['train']['resume_training_checkpoint_path']
if resume_ckpt is not None:
trainer._checkpoint_connector = TLTCheckpointConnector(trainer, resume_from_checkpoint=resume_ckpt)
else:
trainer._checkpoint_connector = TLTCheckpointConnector(trainer)
ckpt_inter = experiment_config['train']['checkpoint_interval']
# setup checkpointer:
ModelCheckpoint.FILE_EXTENSION = ".tlt"
checkpoint_callback = ModelCheckpoint(every_n_epochs=ckpt_inter,
dirpath=results_dir,
save_on_train_epoch_end=True,
monitor=None,
save_top_k=-1,
filename='ar_model_{epoch:03d}')
if resume_ckpt:
status_logging.get_status_logger().write(
message=f"Resuming training from checkpoint: {resume_ckpt}",
status_level=status_logging.Status.STARTED
)
resumed_epoch = re.search('epoch=(\\d+)', resume_ckpt)
if resumed_epoch:
resumed_epoch = int(resumed_epoch.group(1))
else:
resumed_epoch = 0
status_logger_callback.epoch_counter = resumed_epoch + 1 # make sure callback epoch matches resumed epoch
trainer.callbacks.append(status_logger_callback)
trainer.callbacks.append(checkpoint_callback)
trainer.fit(ar_model)
spec_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Load experiment specification, additionally using schema for validation/retrieving the default values.
# --config_path and --config_name will be provided by the entrypoint script.
@hydra_runner(
config_path=os.path.join(spec_root, "experiment_specs"), config_name="experiment", schema=ExperimentConfig
)
def main(cfg: ExperimentConfig) -> None:
"""Run the training process."""
try:
if cfg.train.results_dir is not None:
results_dir = cfg.train.results_dir
else:
results_dir = os.path.join(cfg.results_dir, "train")
run_experiment(experiment_config=cfg,
results_dir=results_dir,
key=cfg.encryption_key)
status_logging.get_status_logger().write(
status_level=status_logging.Status.SUCCESS,
message="Training finished successfully."
)
except (KeyboardInterrupt, SystemExit):
status_logging.get_status_logger().write(
message="Training was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
except Exception as e:
status_logging.get_status_logger().write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
raise e
if __name__ == "__main__":
main()
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/action_recognition/scripts/train.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Inference on single patch.
"""
import os
import torch
from tqdm import tqdm
from nvidia_tao_pytorch.core.hydra.hydra_runner import hydra_runner
import nvidia_tao_pytorch.core.loggers.api_logging as status_logging
from nvidia_tao_pytorch.cv.action_recognition.config.default_config import ExperimentConfig
from nvidia_tao_pytorch.cv.action_recognition.dataloader.build_data_loader import build_dataloader
from nvidia_tao_pytorch.cv.action_recognition.inference.inferencer import Inferencer
from nvidia_tao_pytorch.cv.action_recognition.model.pl_ar_model import ActionRecognitionModel
from nvidia_tao_pytorch.cv.action_recognition.utils.common_utils import check_and_create
from nvidia_tao_pytorch.core.cookbooks.tlt_pytorch_cookbook import TLTPyTorchCookbook
def run_experiment(experiment_config, model_path, key, output_dir,
batch_size=1, inference_dataset_dir=None):
"""Start the inference."""
check_and_create(output_dir)
# Set status logging
status_file = os.path.join(output_dir, "status.json")
status_logging.set_status_logger(status_logging.StatusLogger(filename=status_file, append=True))
status_logging.get_status_logger().write(
status_level=status_logging.Status.STARTED,
message="Starting Action recognition inference"
)
gpu_id = experiment_config.inference.gpu_id
torch.cuda.set_device(gpu_id)
# set the encryption key:
TLTPyTorchCookbook.set_passphrase(key)
# build dataloader
model_config = experiment_config["model"]
label_map = experiment_config["dataset"]["label_map"]
output_shape = [experiment_config["model"]["input_height"],
experiment_config["model"]["input_width"]]
sample_dict = {}
for sample_id in os.listdir(inference_dataset_dir):
sample_path = os.path.join(inference_dataset_dir, sample_id)
sample_dict[sample_path] = "unknown"
aug_config = experiment_config["dataset"]["augmentation_config"]
dataloader = build_dataloader(sample_dict=sample_dict,
model_config=model_config,
dataset_mode="inf",
output_shape=output_shape,
input_type=model_config["input_type"],
label_map=label_map,
batch_size=batch_size,
workers=experiment_config["dataset"]["workers"],
eval_mode=experiment_config["inference"]["video_inf_mode"],
augmentation_config=aug_config,
num_segments=experiment_config["inference"]["video_num_segments"])
# build inferencer @TODO TRT support
model = ActionRecognitionModel.load_from_checkpoint(model_path,
map_location="cpu",
experiment_spec=experiment_config)
infer = Inferencer(model, ret_prob=False)
# do inference
progress = tqdm(dataloader)
id2name = {v: k for k, v in label_map.items()}
sample_result_dict = {}
with torch.no_grad():
for sample_path, data in progress:
batch_size = len(sample_path)
pred_id = infer.inference(data)
pred_name = []
for label_idx in pred_id:
pred_name.append(id2name[label_idx])
for idx in range(batch_size):
if sample_path[idx] not in sample_result_dict:
sample_result_dict[sample_path[idx]] = [pred_name[idx]]
else:
sample_result_dict[sample_path[idx]].append(pred_name[idx])
# save the output and visualize
for k, v in sample_result_dict.items():
print("{} : {}".format(k, v))
spec_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Load experiment specification, additionally using schema for validation/retrieving the default values.
# --config_path and --config_name will be provided by the entrypoint script.
@hydra_runner(
config_path=os.path.join(spec_root, "experiment_specs"), config_name="experiment", schema=ExperimentConfig
)
def main(cfg: ExperimentConfig) -> None:
"""Run the training process."""
try:
if cfg.inference.results_dir is not None:
results_dir = cfg.inference.results_dir
else:
results_dir = os.path.join(cfg.results_dir, "inference")
run_experiment(experiment_config=cfg,
key=cfg.encryption_key,
output_dir=results_dir,
model_path=cfg.inference.checkpoint,
batch_size=cfg.inference.batch_size,
inference_dataset_dir=cfg.inference.inference_dataset_dir)
status_logging.get_status_logger().write(
status_level=status_logging.Status.SUCCESS,
message="Inference finished successfully."
)
except (KeyboardInterrupt, SystemExit):
status_logging.get_status_logger().write(
message="Inference was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
except Exception as e:
status_logging.get_status_logger().write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
raise e
if __name__ == "__main__":
main()
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/action_recognition/scripts/inference.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Evaluate a trained action recognition model."""
import csv
import os
import numpy as np
import torch
from tqdm import tqdm
from nvidia_tao_pytorch.core.hydra.hydra_runner import hydra_runner
import nvidia_tao_pytorch.core.loggers.api_logging as status_logging
from nvidia_tao_pytorch.cv.action_recognition.config.default_config import ExperimentConfig
from nvidia_tao_pytorch.cv.action_recognition.model.pl_ar_model import ActionRecognitionModel
from nvidia_tao_pytorch.cv.action_recognition.dataloader.build_data_loader import build_dataloader
from nvidia_tao_pytorch.cv.action_recognition.inference.inferencer import Inferencer
from nvidia_tao_pytorch.cv.action_recognition.utils.common_utils import check_and_create
from nvidia_tao_pytorch.core.cookbooks.tlt_pytorch_cookbook import TLTPyTorchCookbook
def compute_metrics(confusion_matrix):
"""Computes evaluation metrics.
Args:
confusion_matrix (numpy.ndarray): The confusion matrix.
    Returns:
        tuple: The row-normalized percentage confusion matrix, the overall
        accuracy, and the average per-class accuracy.
    """
row_sum = np.sum(confusion_matrix, axis=1)
_shape = confusion_matrix.shape
percentage_confusion_matrix = np.zeros(
_shape, dtype=np.float32)
for x in range(_shape[0]):
for y in range(_shape[1]):
if not row_sum[x] == 0:
percentage_confusion_matrix[x][y] = np.float32(confusion_matrix[x][y]) / \
row_sum[x] * 100.0
trace = np.trace(confusion_matrix)
percent_trace = np.trace(percentage_confusion_matrix)
accuracy = float(trace) / np.sum(confusion_matrix) * 100.0
m_accuracy = percent_trace / _shape[0]
return percentage_confusion_matrix, accuracy, m_accuracy
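# Quick sanity check for compute_metrics (illustrative values, not executed here):
#   cm = np.array([[8, 2], [1, 9]], dtype=np.int32)
#   pct, acc, m_acc = compute_metrics(cm)
#   # pct   -> [[80., 20.], [10., 90.]]   (row-normalized percentages)
#   # acc   -> 85.0                       (17 correct out of 20 samples)
#   # m_acc -> 85.0                       ((80 + 90) / 2 classes)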
def dump_cm(csv_path, cm, id2name):
"""Dumps the confusion matrix to a CSV file.
Args:
csv_path (str): The path to the CSV file.
cm (numpy.ndarray): The confusion matrix.
id2name (dict): A dictionary mapping class IDs to class names.
"""
n_class = len(id2name.keys())
with open(csv_path, "w") as f:
writer = csv.writer(f)
label_list = ["class"]
for idx in range(n_class):
label_list.append(id2name[idx])
writer.writerow(label_list)
for row_id in range(n_class):
row = [id2name[row_id]]
for col_id in range(n_class):
row.append(cm[row_id][col_id])
writer.writerow(row)
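# For a hypothetical two-class label map {"walk": 0, "run": 1} and a percentage
# confusion matrix, dump_cm would produce a CSV laid out like this (illustrative):
#   class,walk,run
#   walk,80.0,20.0
#   run,10.0,90.0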
def run_experiment(experiment_config, model_path, key, output_dir,
batch_size=1, test_dataset_dir=None):
"""Run experiment."""
check_and_create(output_dir)
# Set status logging
status_file = os.path.join(output_dir, "status.json")
status_logging.set_status_logger(status_logging.StatusLogger(filename=status_file, append=True))
status_logging.get_status_logger().write(
status_level=status_logging.Status.STARTED,
message="Starting Action recognition evaluation"
)
# set the encryption key:
TLTPyTorchCookbook.set_passphrase(key)
# build dataloader
model_config = experiment_config["model"]
label_map = experiment_config["dataset"]["label_map"]
gpu_id = experiment_config.evaluate.gpu_id
torch.cuda.set_device(gpu_id)
num_classes = len(label_map.keys())
confusion_matrix = np.zeros((num_classes, num_classes), dtype=np.int32)
output_shape = [experiment_config["model"]["input_height"],
experiment_config["model"]["input_width"]]
action_set = os.listdir(test_dataset_dir)
sample_dict = {}
for action in action_set:
action_root_path = os.path.join(test_dataset_dir, action)
for video in os.listdir(action_root_path):
video_path = os.path.join(action_root_path, video)
sample_dict[video_path] = action
aug_config = experiment_config["dataset"]["augmentation_config"]
dataloader = build_dataloader(sample_dict=sample_dict,
model_config=model_config,
dataset_mode="val",
output_shape=output_shape,
input_type=model_config["input_type"],
label_map=label_map,
batch_size=batch_size,
workers=experiment_config["dataset"]["workers"],
eval_mode=experiment_config["evaluate"]["video_eval_mode"],
augmentation_config=aug_config,
num_segments=experiment_config["evaluate"]["video_num_segments"])
model = ActionRecognitionModel.load_from_checkpoint(model_path,
map_location="cpu",
experiment_spec=experiment_config)
# build inferencer @TODO TRT support
eval_mode_flag = experiment_config["evaluate"]["video_eval_mode"] == "conv"
if eval_mode_flag:
infer = Inferencer(model, ret_prob=True)
else:
infer = Inferencer(model)
# do evaluation
progress = tqdm(dataloader)
sample_pred_dict = {}
with torch.no_grad():
if eval_mode_flag:
for sample_path, data, action_label in progress:
batch_size = len(sample_path)
prob = infer.inference(data)
for idx in range(batch_size):
                    if sample_path[idx] not in sample_pred_dict:
                        sample_pred_dict[sample_path[idx]] = prob[idx]
                    else:
                        sample_pred_dict[sample_path[idx]] += prob[idx]
for k, v in sample_pred_dict.items():
pred_id = np.argmax(v)
action = sample_dict[k]
confusion_matrix[label_map[action], pred_id] += 1
else:
for sample_path, data, action_label in progress:
batch_size = len(sample_path)
pred_id = infer.inference(data)
for idx in range(batch_size):
confusion_matrix[action_label[idx], pred_id[idx]] += 1
percentage_confusion_matrix, accuracy, m_accuracy = compute_metrics(confusion_matrix)
id2name = {v: k for k, v in label_map.items()}
print("*******************************")
for idx in range(len(label_map)):
cls_acc = percentage_confusion_matrix[idx][idx]
print("{:<14}{:.4}".format(
id2name[idx], cls_acc))
print("*******************************")
print("Total accuracy: {}".format(round(accuracy, 3)))
print("Average class accuracy: {}".format(round(m_accuracy, 3)))
status_logging.get_status_logger().kpi = {"accuracy": round(accuracy, 3),
"m_accuracy": round(m_accuracy, 3)}
status_logging.get_status_logger().write(
message="Evaluation metrics generated.",
status_level=status_logging.Status.RUNNING
)
spec_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Load experiment specification, additionally using schema for validation/retrieving the default values.
# --config_path and --config_name will be provided by the entrypoint script.
@hydra_runner(
config_path=os.path.join(spec_root, "experiment_specs"), config_name="experiment", schema=ExperimentConfig
)
def main(cfg: ExperimentConfig) -> None:
"""Run the training process."""
try:
if cfg.evaluate.results_dir is not None:
results_dir = cfg.evaluate.results_dir
else:
results_dir = os.path.join(cfg.results_dir, "evaluate")
run_experiment(experiment_config=cfg,
output_dir=results_dir,
key=cfg.encryption_key,
model_path=cfg.evaluate.checkpoint,
batch_size=cfg.evaluate.batch_size,
test_dataset_dir=cfg.evaluate.test_dataset_dir)
status_logging.get_status_logger().write(
status_level=status_logging.Status.SUCCESS,
message="Evaluation finished successfully."
)
except (KeyboardInterrupt, SystemExit):
status_logging.get_status_logger().write(
message="Evaluation was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
except Exception as e:
status_logging.get_status_logger().write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
raise e
if __name__ == "__main__":
main()
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/action_recognition/scripts/evaluate.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Entrypoint script for the action recognition task."""
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/action_recognition/entrypoint/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""'Entry point' script running subtasks related to action recognition.
"""
import importlib
import os
import pkgutil
import argparse
import subprocess
import sys
from time import time
from nvidia_tao_pytorch.core.tlt_logging import logging
from nvidia_tao_pytorch.core.telemetry.nvml_utils import get_device_details
from nvidia_tao_pytorch.core.telemetry.telemetry import send_telemetry_data
import nvidia_tao_pytorch.cv.action_recognition.scripts as scripts
def get_subtasks(package):
"""Get supported subtasks for a given task.
    This function lists the tasks in the .scripts folder.
    Returns:
        subtasks (dict): Dictionary mapping subtask names to their module details.
"""
module_path = package.__path__
modules = {}
# Collect modules dynamically.
for _, task, is_package in pkgutil.walk_packages(module_path):
if is_package:
continue
module_name = package.__name__ + '.' + task
module_details = {
"module_name": module_name,
"runner_path": os.path.abspath(importlib.import_module(module_name).__file__),
}
modules[task] = module_details
return modules
def launch(parser, subtasks, network=None):
"""CLI function that executes subtasks.
Args:
parser: Created parser object for a given task.
subtasks: list of subtasks for a given task.
network (str): Name of the network.
"""
# Subtasks for a given model.
parser.add_argument(
'subtask', default='train', choices=subtasks.keys(), help="Subtask for a given task/model.",
)
# Add standard TLT arguments.
parser.add_argument(
"-r",
"--results_dir",
help="Path to a folder where the experiment outputs should be written.",
default=None,
required=False,
)
parser.add_argument("-k", "--key", help="User specific encoding key to save or load a .tlt model.")
parser.add_argument("-e", "--experiment_spec_file", help="Path to the experiment spec file.", default=None)
# Parse the arguments.
args, unknown_args = parser.parse_known_args()
script_args = ""
# Process spec file for all commands except the one for getting spec files ;)
# Make sure the user provides spec file.
if args.experiment_spec_file is None:
logging.error("The subtask `{}` requires the following argument: -e/--experiment_spec_file".format(args.subtask))
exit(1)
# Make sure the file exists!
if not os.path.exists(args.experiment_spec_file):
logging.error("The indicated experiment spec file `{}` doesn't exist!".format(args.experiment_spec_file))
exit(1)
# Split spec file_path into config path and config name.
path, name = os.path.split(args.experiment_spec_file)
if path != '':
script_args += " --config-path " + os.path.realpath(path)
script_args += " --config-name " + name
# And add other params AFTERWARDS!
if args.subtask in ["train"]:
if args.results_dir:
script_args += " results_dir=" + args.results_dir
# Add encryption key.
if args.subtask in ["train", "evaluate", "inference", "export"]:
if args.key is not None:
script_args += " encryption_key=" + args.key
# Find relevant module and pass args.
script = subtasks[args.subtask]["runner_path"]
# Pass unknown args to call
unknown_args_as_str = " ".join(unknown_args)
# Create a system call.
call = "python " + script + script_args + " " + unknown_args_as_str
process_passed = True
start = time()
try:
# Run the script.
subprocess.check_call(call, shell=True, stdout=sys.stdout, stderr=sys.stdout)
except (KeyboardInterrupt, SystemExit):
logging.info("Command was interrupted.")
process_passed = True
except subprocess.CalledProcessError as e:
if e.output is not None:
print(e.output)
process_passed = False
end = time()
time_lapsed = int(end - start)
try:
gpu_data = list()
for device in get_device_details():
gpu_data.append(device.get_config())
send_telemetry_data(
network,
args.subtask,
gpu_data,
num_gpus=1,
time_lapsed=time_lapsed,
pass_status=process_passed
)
except Exception as e:
logging.info("Telemetry data couldn't be sent, but the command ran successfully.")
print(f"[WARNING]: {e}")
pass
if not process_passed:
logging.error("Execution status: FAIL")
exit(1) # returning non zero return code from the process.
logging.info("Execution status: PASS")
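# Example invocations (a hedged sketch; paths and the key value are placeholders):
#   action_recognition train -e /path/to/experiment.yaml -r /path/to/results -k $KEY
#   action_recognition evaluate -e /path/to/experiment.yaml -k $KEY
# Any additional "param=value" overrides after these flags are passed through to the
# underlying Hydra-based subtask script.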
def main():
"""Main entrypoint wrapper."""
# Create parser for a given task.
parser = argparse.ArgumentParser(
"action_recognition", add_help=True, description="Transfer Learning Toolkit"
)
# Build list of subtasks by inspecting the package.
subtasks = get_subtasks(scripts)
# Parse the arguments and launch the subtask.
launch(parser, subtasks, network="action_recognition")
if __name__ == '__main__':
main()
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/action_recognition/entrypoint/action_recognition.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Resnet3D backbones for action recognition."""
import torch.nn as nn
from torchvision._internally_replaced_utils import load_state_dict_from_url
model_urls = {
'resnet_18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet_34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet_50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet_101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet_152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
'resnext50_32x4d': 'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth',
'resnext101_32x8d': 'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth',
'wide_resnet50_2': 'https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth',
'wide_resnet101_2': 'https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth',
}
def conv3x3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
"""3x3x3 convolution with padding.
This function constructs a 3x3x3 convolutional layer with the specified number of input planes, output planes, stride,
    groups, and dilation; the layer is created without a bias term. It returns the constructed convolutional layer.
Args:
in_planes (int): The number of input planes.
out_planes (int): The number of output planes.
stride (int or tuple, optional): The stride of the convolution. Defaults to 1.
groups (int, optional): The number of groups. Defaults to 1.
dilation (int, optional): The dilation of the convolution. Defaults to 1.
Returns:
nn.Conv3d: The constructed convolutional layer.
"""
if isinstance(stride, int):
stride = (1, stride, stride)
return nn.Conv3d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=dilation, dilation=dilation, groups=groups, bias=False)
def conv1x1x1(in_planes, out_planes, stride=1):
"""1x1x1 convolution.
This function constructs a 1x1x1 convolutional layer with the specified number of input planes, output planes, and stride.
It returns the constructed convolutional layer.
Args:
in_planes (int): The number of input planes.
out_planes (int): The number of output planes.
stride (int or tuple, optional): The stride of the convolution. Defaults to 1.
Returns:
nn.Conv3d: The constructed convolutional layer.
"""
if isinstance(stride, int):
stride = (1, stride, stride)
return nn.Conv3d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class BasicBlock3d(nn.Module):
"""Basic block for ResNet3D.
This class defines a basic block for ResNet3D, which consists of two 3x3x3 convolutional layers with batch normalization
and ReLU activation, and a residual connection. The block downsamples the input when the stride is not equal to 1.
"""
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None):
"""Initializes the basic block.
This method initializes the basic block by defining the two 3x3x3 convolutional layers with batch normalization
and ReLU activation, and the residual connection. The block downsamples the input when the stride is not equal to 1.
Args:
inplanes (int): The number of input planes.
planes (int): The number of output planes.
stride (int, optional): The stride of the block. Defaults to 1.
downsample (nn.Module, optional): The downsampling layer. Defaults to None.
groups (int, optional): The number of groups. Defaults to 1.
base_width (int, optional): The base width. Defaults to 64.
dilation (int, optional): The dilation of the convolution. Defaults to 1.
norm_layer (nn.Module, optional): The normalization layer. Defaults to None.
Raises:
ValueError: If groups is not equal to 1 or base_width is not equal to 64.
NotImplementedError: If dilation is greater than 1.
"""
super(BasicBlock3d, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm3d
if groups != 1 or base_width != 64:
raise ValueError('BasicBlock only supports groups=1 and base_width=64')
if dilation > 1:
raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
# Both self.conv1 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv3x3x3(inplanes, planes, stride)
self.bn1 = norm_layer(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3x3(planes, planes)
self.bn2 = norm_layer(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
"""Forward"""
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck3d(nn.Module):
"""Bottleneck module for ResNet3D.
This class defines a bottleneck module for ResNet3D, which consists of three convolutional layers with batch normalization
and ReLU activation, and a residual connection. The module downsamples the input when the stride is not equal to 1.
"""
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None):
"""Initializes the bottleneck module.
This method initializes the bottleneck module by defining the three convolutional layers with batch normalization
and ReLU activation, and the residual connection. The module downsamples the input when the stride is not equal to 1.
Args:
inplanes (int): The number of input planes.
planes (int): The number of output planes.
stride (int, optional): The stride of the module. Defaults to 1.
downsample (nn.Module, optional): The downsampling layer. Defaults to None.
groups (int, optional): The number of groups. Defaults to 1.
base_width (int, optional): The base width. Defaults to 64.
dilation (int, optional): The dilation of the convolution. Defaults to 1.
norm_layer (nn.Module, optional): The normalization layer. Defaults to None.
"""
super(Bottleneck3d, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm3d
width = int(planes * (base_width / 64.)) * groups
# Both self.conv1 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv1x1x1(inplanes, width)
self.bn1 = norm_layer(width)
self.conv2 = conv3x3x3(width, width, stride, groups, dilation)
self.bn2 = norm_layer(width)
self.conv3 = conv1x1x1(width, planes * self.expansion)
self.bn3 = norm_layer(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
"""Forward"""
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class ResNet3d(nn.Module):
"""ResNet3D module"""
def __init__(self, block, layers, nb_classes, zero_init_residual=False,
groups=1, width_per_group=64, replace_stride_with_dilation=None,
norm_layer=None, modality='rgb', dropout_ratio=0.8):
"""Initializes a ResNet3D module.
Args:
block (nn.Module): The block type.
layers (list of int): The number of layers in each block.
nb_classes (int): The number of output classes.
zero_init_residual (bool, optional): Whether to zero-initialize the last batch normalization layer in each
residual branch. Defaults to False.
groups (int, optional): The number of groups. Defaults to 1.
width_per_group (int, optional): The base width. Defaults to 64.
replace_stride_with_dilation (list of bool, optional): Whether to replace the 2x2x2 stride with a dilated
convolution instead. Defaults to None.
norm_layer (nn.Module, optional): The normalization layer. Defaults to None.
modality (str, optional): The modality, either "rgb" or "of". Defaults to 'rgb'.
dropout_ratio (float, optional): The dropout ratio. Defaults to 0.8.
"""
super(ResNet3d, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm3d
self._norm_layer = norm_layer
self.modality = modality
self.inplanes = 64
self.dilation = 1
if replace_stride_with_dilation is None:
# each element in the tuple indicates if we should replace
# the 2x2x2 stride with a dilated convolution instead
replace_stride_with_dilation = [False, False, False]
if len(replace_stride_with_dilation) != 3:
raise ValueError("replace_stride_with_dilation should be None "
"or a 3-element tuple, got {}".format(replace_stride_with_dilation))
self.groups = groups
self.base_width = width_per_group
self._make_stem_layer()
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2,
dilate=replace_stride_with_dilation[0])
self.layer3 = self._make_layer(block, 256, layers[2], stride=2,
dilate=replace_stride_with_dilation[1])
self.layer4 = self._make_layer(block, 512, layers[3], stride=2,
dilate=replace_stride_with_dilation[2])
for m in self.modules(): # self.modules() --> Depth-First-Search the Net
if isinstance(m, nn.Conv3d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm3d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# Zero-initialize the last BN in each residual branch,
# so that the residual branch starts with zeros, and each residual block behaves like an identity.
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck3d):
nn.init.constant_(m.bn3.weight, 0)
elif isinstance(m, BasicBlock3d):
nn.init.constant_(m.bn2.weight, 0)
if dropout_ratio > 0.0:
self.dropout = nn.Dropout(p=dropout_ratio)
else:
self.dropout = None
self.avg_pool = nn.AdaptiveAvgPool3d((1, 1, 1))
self.fc_cls = nn.Linear(512 * block.expansion, nb_classes)
self.block_expansion = block.expansion
def replace_logits(self, nb_classes):
"""Replace the final logits with new class.
Args:
nb_classes (int): number of new classes.
"""
self.fc_cls = nn.Linear(512 * self.block_expansion, nb_classes)
def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
"""Make module layer.
Args:
block (nn.Module): The block type.
planes (int): The number of output planes.
blocks (int): The number of blocks in the layer.
stride (int, optional): The stride of the convolution. Defaults to 1.
dilate (bool, optional): Whether to use dilated convolution. Defaults to False.
Returns:
nn.Sequential: The module layer.
"""
norm_layer = self._norm_layer
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1x1(self.inplanes, planes * block.expansion, stride),
norm_layer(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
self.base_width, previous_dilation, norm_layer))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes, groups=self.groups,
base_width=self.base_width, dilation=self.dilation,
norm_layer=norm_layer))
return nn.Sequential(*layers)
def _make_stem_layer(self):
"""Construct the stem layers consists of a conv+norm+act module and a
pooling layer.
"""
if self.modality == 'rgb':
inchannels = 3
elif self.modality == 'of':
inchannels = 2
else:
raise ValueError('Unknown modality: {}'.format(self.modality))
self.conv1 = nn.Conv3d(inchannels, self.inplanes, kernel_size=(5, 7, 7),
stride=2, padding=(2, 3, 3), bias=False)
self.bn1 = self._norm_layer(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool3d(kernel_size=(1, 3, 3), stride=2,
padding=(0, 1, 1)) # kernel_size=(2, 3, 3)
def _forward_impl(self, x):
"""Forward implementation."""
# See note [TorchScript super()]
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avg_pool(x)
# [N, in_channels, 1, 1, 1]
if self.dropout is not None:
x = self.dropout(x)
# [N, in_channels, 1, 1, 1]
x = x.view(x.shape[0], -1)
# [N, in_channels]
cls_score = self.fc_cls(x)
# [N, num_classes]
return cls_score
def forward(self, x):
"""Forward."""
return self._forward_impl(x)
def _inflate_conv_params(self, conv3d, state_dict_2d, module_name_2d,
inflated_param_names):
"""Inflate a conv module from 2d to 3d.
Args:
conv3d (nn.Module): The destination conv3d module.
state_dict_2d (OrderedDict): The state dict of pretrained 2d models.
module_name_2d (str): The name of corresponding conv module in the
2d models.
inflated_param_names (list[str]): List of parameters that have been
inflated.
"""
weight_2d_name = module_name_2d + '.weight'
conv2d_weight = state_dict_2d[weight_2d_name]
kernel_t = conv3d.weight.data.shape[2]
new_weight = conv2d_weight.data.unsqueeze(2).expand_as(conv3d.weight) / kernel_t
conv3d.weight.data.copy_(new_weight)
inflated_param_names.append(weight_2d_name)
if getattr(conv3d, 'bias') is not None:
bias_2d_name = module_name_2d + '.bias'
conv3d.bias.data.copy_(state_dict_2d[bias_2d_name])
inflated_param_names.append(bias_2d_name)
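    # Inflation in a nutshell (illustrative shapes): a 2d conv weight of shape
    # [64, 3, 7, 7] is unsqueezed to [64, 3, 1, 7, 7], expanded along the new
    # temporal axis to e.g. [64, 3, 5, 7, 7], and divided by the temporal kernel
    # size (5), so the inflated filter responds like the 2d filter on a
    # temporally constant input.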
def _inflate_bn_params(self, bn3d, state_dict_2d, module_name_2d,
inflated_param_names):
"""Inflate a norm module from 2d to 3d.
Args:
bn3d (nn.Module): The destination bn3d module.
state_dict_2d (OrderedDict): The state dict of pretrained 2d models.
module_name_2d (str): The name of corresponding bn module in the
2d models.
inflated_param_names (list[str]): List of parameters that have been
inflated.
"""
for param_name, param in bn3d.named_parameters():
param_2d_name = f'{module_name_2d}.{param_name}'
param_2d = state_dict_2d[param_2d_name]
param.data.copy_(param_2d)
inflated_param_names.append(param_2d_name)
for param_name, param in bn3d.named_buffers():
param_2d_name = f'{module_name_2d}.{param_name}'
# some buffers like num_batches_tracked may not exist in old
# checkpoints
if param_2d_name in state_dict_2d:
param_2d = state_dict_2d[param_2d_name]
param.data.copy_(param_2d)
inflated_param_names.append(param_2d_name)
def inflate_weights(self, state_dict_r2d):
"""Inflate the resnet2d parameters to resnet3d.
        The difference between resnet3d and resnet2d mainly lies in the extra
        temporal axis of the conv kernels. To reuse pretrained 2d parameters,
        the conv2d weights are inflated to fit the shapes of their 3d
        counterparts.
"""
inflated_param_names = []
for name, module in self.named_modules():
if isinstance(module, nn.Conv3d) or isinstance(module, nn.BatchNorm3d): # pylint:disable=R1701
if name + '.weight' not in state_dict_r2d:
                    print(f'Module does not exist in the state_dict_r2d: {name}')
else:
shape_2d = state_dict_r2d[name + '.weight'].shape
shape_3d = module.weight.data.shape
if shape_2d != shape_3d[:2] + shape_3d[3:]:
                        print(f'Weight shape mismatch for: {name}; '
f'3d weight shape: {shape_3d}; '
f'2d weight shape: {shape_2d}. ')
else:
if isinstance(module, nn.Conv3d):
self._inflate_conv_params(module, state_dict_r2d, name, inflated_param_names)
else:
self._inflate_bn_params(module, state_dict_r2d, name, inflated_param_names)
# check if any parameters in the 2d checkpoint are not loaded
remaining_names = set(state_dict_r2d.keys()) - set(inflated_param_names)
if remaining_names:
print(f'These parameters in the 2d checkpoint are not loaded: {remaining_names}')
def resnet3d(arch, nb_classes, progress=True, modality='rgb', pretrained2d=True,
pretrained_weights=None, **kwargs):
"""
Args:
arch (str): The architecture of resnet.
modality (str): The modality of input, 'RGB' or 'Flow'.
progress (bool): If True, displays a progress bar of the download to stderr.
pretrained2d (bool): If True, utilize the pretrained parameters in 2d models.
pretrained_weights (dict): torch pretrained weights.
"""
arch_settings = {
'resnet_18': (BasicBlock3d, (2, 2, 2, 2)),
'resnet_34': (BasicBlock3d, (3, 4, 6, 3)),
'resnet_50': (Bottleneck3d, (3, 4, 6, 3)),
'resnet_101': (Bottleneck3d, (3, 4, 23, 3)),
'resnet_152': (Bottleneck3d, (3, 8, 36, 3))
}
model = ResNet3d(*arch_settings[arch], modality=modality, nb_classes=nb_classes, **kwargs)
if pretrained2d:
state_dict = load_state_dict_from_url(model_urls[arch], progress=progress)
model.inflate_weights(state_dict)
if pretrained_weights:
pretrain_dict = pretrained_weights
model.load_state_dict(pretrain_dict)
return model
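if __name__ == "__main__":
    # Minimal smoke test (illustrative only): build a small RGB ResNet3D without
    # inflating 2d ImageNet weights and push a dummy 16-frame clip through it.
    # The 5-class head and the 224x224 input size are arbitrary choices here.
    import torch
    net = resnet3d("resnet_18", nb_classes=5, pretrained2d=False, modality="rgb")
    clip = torch.randn(1, 3, 16, 224, 224)  # [batch, channels, frames, height, width]
    with torch.no_grad():
        print(net(clip).shape)  # expected: torch.Size([1, 5])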
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/action_recognition/model/resnet3d.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Action recognition dataloader module."""
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/action_recognition/model/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Main PTL model file for action recognition """
from typing import Any, Dict, Optional
import pytorch_lightning as pl
import torch
import torch.nn.functional as F
from torch.optim.lr_scheduler import ReduceLROnPlateau, MultiStepLR
import torchmetrics
from nvidia_tao_pytorch.cv.action_recognition.dataloader.build_data_loader import build_dataloader, list_dataset
from nvidia_tao_pytorch.cv.action_recognition.model.build_nn_model import build_ar_model
from nvidia_tao_pytorch.cv.action_recognition.utils.common_utils import patch_decrypt_checkpoint
from nvidia_tao_pytorch.core.cookbooks.tlt_pytorch_cookbook import TLTPyTorchCookbook
import nvidia_tao_pytorch.core.loggers.api_logging as status_logging
# pylint:disable=too-many-ancestors
class ActionRecognitionModel(pl.LightningModule):
""" PTL module for action recognition model."""
def __init__(self, experiment_spec, export=False):
"""Init training for 2D/3D action recognition model.
Args:
experiment_spec (dict): The experiment specification.
export (bool, optional): Whether to build the model that can be exported to ONNX format. Defaults to False.
"""
super().__init__()
self.experiment_spec = experiment_spec
self.dataset_config = experiment_spec["dataset"]
self.model_config = experiment_spec["model"]
self.data_shape = [self.model_config.input_height, self.model_config.input_width]
# init the model
self._build_model(experiment_spec, export)
self.train_accuracy = torchmetrics.Accuracy()
self.val_accuracy = torchmetrics.Accuracy()
self.status_logging_dict = {"train_loss": 0.0,
"train_acc": 0.0,
"val_loss": 0.0,
"val_acc": 0.0}
def _build_model(self, experiment_spec, export):
"""Internal function to build the model.
        This method constructs the model from the experiment specification and export flag and stores it in self.model.
Args:
experiment_spec (dict): The experiment specification.
export (bool): Whether to build the model that can be exported to ONNX format.
"""
self.model = build_ar_model(experiment_config=experiment_spec,
export=export)
print(self.model)
def setup(self, stage: Optional[str] = None):
""" Set up the dataset for train and val"""
train_top_dir = self.dataset_config["train_dataset_dir"]
val_top_dir = self.dataset_config["val_dataset_dir"]
if train_top_dir is not None:
self.train_dict = list_dataset(train_top_dir)
else:
raise ValueError("Please set the train dataset in the spec file")
if val_top_dir is not None:
self.val_dict = list_dataset(val_top_dir)
else:
self.val_dict = {}
print("Train dataset samples: {}".format(len(self.train_dict)))
print("Validation dataset samples: {}".format(len(self.val_dict)))
def train_dataloader(self):
"""Build the dataloader for training."""
train_loader = \
build_dataloader(sample_dict=self.train_dict,
model_config=self.model_config,
output_shape=self.data_shape,
label_map=self.dataset_config["label_map"],
dataset_mode="train",
batch_size=self.dataset_config["batch_size"],
workers=self.dataset_config["workers"],
input_type=self.model_config["input_type"],
shuffle=True,
pin_mem=True,
clips_per_video=self.dataset_config["clips_per_video"],
augmentation_config=self.dataset_config["augmentation_config"])
return train_loader
def val_dataloader(self):
"""Build the dataloader for validation."""
val_loader = build_dataloader(sample_dict=self.val_dict,
model_config=self.model_config,
output_shape=self.data_shape,
label_map=self.dataset_config["label_map"],
dataset_mode="val",
batch_size=self.dataset_config["batch_size"],
workers=self.dataset_config["workers"],
input_type=self.model_config["input_type"],
clips_per_video=1,
augmentation_config=self.dataset_config["augmentation_config"]
)
return val_loader
def configure_optimizers(self):
"""Configure optimizers for training"""
self.train_config = self.experiment_spec["train"]
optim_dict = {}
optim = torch.optim.SGD(params=self.parameters(),
lr=self.train_config['optim']['lr'],
momentum=self.train_config['optim']['momentum'],
weight_decay=self.train_config['optim']['weight_decay'])
optim_dict["optimizer"] = optim
scheduler_type = self.train_config['optim']['lr_scheduler']
if scheduler_type == "AutoReduce":
lr_scheduler = ReduceLROnPlateau(optim, 'min',
patience=self.train_config['optim']['patience'],
min_lr=self.train_config['optim']['min_lr'],
factor=self.train_config['optim']["lr_decay"],
verbose=True)
elif scheduler_type == "MultiStep":
lr_scheduler = MultiStepLR(optimizer=optim,
milestones=self.train_config['optim']["lr_steps"],
gamma=self.train_config['optim']["lr_decay"],
verbose=True)
else:
raise ValueError("Only [AutoReduce, MultiStep] scheduler is supported")
optim_dict["lr_scheduler"] = lr_scheduler
optim_dict['monitor'] = self.train_config['optim']['lr_monitor']
return optim_dict
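    # Sketch of the corresponding "train.optim" section of the spec file (keys as
    # read above; the values are placeholders, not recommended settings):
    #   optim:
    #     lr: 0.01
    #     momentum: 0.9
    #     weight_decay: 0.0001
    #     lr_scheduler: MultiStep   # or AutoReduce
    #     lr_steps: [20, 40]        # MultiStep only
    #     lr_decay: 0.1
    #     lr_monitor: val_loss      # monitored metric, used with AutoReduce
    #     patience: 1               # AutoReduce only
    #     min_lr: 0.0001            # AutoReduce only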
def training_step(self, batch, batch_idx):
"""Training step."""
data, label = batch
output = self.model(data)
loss = F.cross_entropy(output, label)
self.train_accuracy.update(output, label)
self.log("train_loss", loss, on_step=False, on_epoch=True, prog_bar=True, sync_dist=True)
self.log("train_acc_1", self.train_accuracy, on_step=False, on_epoch=True, prog_bar=True, sync_dist=True)
return loss
def training_epoch_end(self, training_step_outputs):
"""Log Training metrics to status.json"""
average_train_loss = 0.0
for out in training_step_outputs:
average_train_loss += out['loss'].item()
average_train_loss /= len(training_step_outputs)
self.status_logging_dict["train_loss"] = average_train_loss
self.status_logging_dict["train_acc"] = self.train_accuracy.compute().item()
status_logging.get_status_logger().kpi = self.status_logging_dict
status_logging.get_status_logger().write(
message="Train and Val metrics generated.",
status_level=status_logging.Status.RUNNING
)
def validation_step(self, batch, batch_idx):
"""Validation step."""
_, data, label = batch
output = self.model(data)
loss = F.cross_entropy(output, label)
self.val_accuracy.update(output, label)
self.log("val_loss", loss, on_step=False, on_epoch=True, prog_bar=True, sync_dist=True)
self.log("val_acc_1", self.val_accuracy, on_step=False, on_epoch=True, prog_bar=True, sync_dist=True)
return loss
def validation_epoch_end(self, validation_step_outputs):
"""Log Validation metrics to status.json"""
average_val_loss = 0.0
for out in validation_step_outputs:
average_val_loss += out.item()
average_val_loss /= len(validation_step_outputs)
self.status_logging_dict["val_loss"] = average_val_loss
self.status_logging_dict["val_acc"] = self.val_accuracy.compute().item()
def forward(self, x):
"""Forward of the action recognition model."""
output = self.model(x)
return output
# @rank_zero_only
# def training_epoch_end(self, outputs: List[Any]) -> None:
# pass
# @rank_zero_only
# def validation_epoch_end(self, outputs: List[Any]) -> None:
# pass
def on_save_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
"""Encrpyt the checkpoint. The encryption is done in TLTCheckpointConnector."""
pass
def on_load_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
"""Decrpyt the checkpoint"""
if checkpoint.get("state_dict_encrypted", False):
# Retrieve encryption key from TLTPyTorchCookbook.
key = TLTPyTorchCookbook.get_passphrase()
if key is None:
raise PermissionError("Cannot access model state dict without the encryption key")
checkpoint = patch_decrypt_checkpoint(checkpoint, key)
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/action_recognition/model/pl_ar_model.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Resnet2D backbones for action recognition."""
import math
import torch
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
model_urls = {
'resnet_18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet_34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet_50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet_101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet_152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding.
This function constructs a 3x3 convolutional layer with padding=1 using the specified input planes, output planes,
and stride. It returns the convolutional layer.
Args:
in_planes (int): The number of input planes.
out_planes (int): The number of output planes.
stride (int, optional): The stride of the convolution. Defaults to 1.
Returns:
torch.nn.Conv2d: The constructed convolutional layer.
"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class BasicBlock(nn.Module):
"""Basic block for ResNet.
This class defines a basic block for ResNet that inherits from the `nn.Module` class. It constructs a basic block
using the specified input planes, output planes, stride, and downsample. It defines a `forward` method that applies
the basic block to the input tensor and returns the output tensor.
"""
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
"""Initialize the BasicBlock.
Args:
inplanes (int): The number of input planes.
planes (int): The number of output planes.
stride (int, optional): The stride of the convolution. Defaults to 1.
downsample (nn.Module, optional): The downsampling layer. Defaults to None.
"""
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
"""forward"""
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
"""Bottleneck block for ResNet.
This class defines a bottleneck block for ResNet that inherits from the `nn.Module` class
"""
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
"""Initialize Bottleneck.
Args:
inplanes (int): The number of input planes.
planes (int): The number of output planes.
stride (int, optional): The stride of the convolution. Defaults to 1.
downsample (nn.Module, optional): The downsampling layer. Defaults to None.
"""
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
"""forward"""
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNet(nn.Module):
"""ResNet2D module.
This class defines a ResNet2D model that inherits from the `nn.Module` class
"""
def __init__(self, block, layers, nb_classes=101, channel=20, dropout_ratio=0.0):
"""Initialize the ResNet
Args:
block (nn.Module): The block to use in the ResNet2D model.
layers (list of int): The number of layers in each block.
nb_classes (int, optional): The number of classes. Defaults to 101.
channel (int, optional): The number of input channels. Defaults to 20.
dropout_ratio (float, optional): The dropout ratio. Defaults to 0.0.
"""
self.inplanes = 64
super(ResNet, self).__init__()
self.conv1 = nn.Conv2d(channel, 64, kernel_size=7,
stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
# self.avgpool = nn.AvgPool2d(7)
self.block_expansion = block.expansion
self.fc_cls = nn.Linear(512 * block.expansion, nb_classes)
if dropout_ratio > 0.0:
self.dropout = nn.Dropout(p=dropout_ratio)
else:
self.dropout = None
        # Initialize conv/bn weights; these are overwritten if pretrained weights are loaded later.
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1):
"""Construct the layers with module template.
Args:
block (nn.Module): The block template to use in the layer.
planes (int): The number of output planes.
blocks (int): The number of blocks in the layer.
stride (int, optional): The stride of the convolution. Defaults to 1.
Returns:
nn.Sequential: The constructed layer.
"""
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def replace_logits(self, nb_classes):
"""Replace final logits.
This method replaces the final logits layer of the ResNet2D model with a new linear layer that has the specified number
of output classes.
Args:
nb_classes (int): The number of output classes.
"""
self.fc_cls = nn.Linear(512 * self.block_expansion, nb_classes)
def forward(self, x):
"""forward"""
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
if self.dropout is not None:
x = self.dropout(x)
x = x.view(x.size(0), -1)
out = self.fc_cls(x)
return out
def resnet2d(backbone,
pretrained_weights=None,
channel=3,
nb_classes=5,
imagenet_pretrained=False, # @TODO(tylerz)Internal test option
**kwargs):
"""
ResNet2D.
This function constructs a ResNet2D model using the specified backbone, pretrained weights, number of input channels,
number of classes, and additional keyword arguments. It returns the constructed ResNet2D model.
Args:
backbone (str): The backbone to use in the ResNet2D model.
pretrained_weights (dict, optional): The pretrained weights. Defaults to None.
channel (int, optional): The number of input channels. Defaults to 3.
nb_classes (int, optional): The number of classes. Defaults to 5.
imagenet_pretrained (bool, optional): Whether to use pretrained weights from ImageNet. Defaults to False.
**kwargs: Additional keyword arguments.
Returns:
nn.Module: The constructed ResNet2D model.
"""
arch_settings = {
'resnet_18': (BasicBlock, [2, 2, 2, 2]),
'resnet_34': (BasicBlock, [3, 4, 6, 3]),
'resnet_50': (Bottleneck, [3, 4, 6, 3]),
'resnet_101': (Bottleneck, [3, 4, 23, 3]),
'resnet_152': (Bottleneck, [3, 8, 36, 3])
}
model = ResNet(arch_settings[backbone][0], arch_settings[backbone][1],
nb_classes=nb_classes, channel=channel, **kwargs)
model_dict = model.state_dict()
if pretrained_weights:
pretrain_dict = pretrained_weights
model_dict = weight_transform(model_dict, pretrain_dict, channel)
model.load_state_dict(model_dict)
else:
if imagenet_pretrained: # @TODO(tylerz) Internal test option
pretrain_dict = model_zoo.load_url(
model_urls[backbone], 'tmp/')
model_dict = model.state_dict()
model_dict = weight_transform(model_dict, pretrain_dict, channel)
model.load_state_dict(model_dict)
return model
def cross_modality_pretrain(conv1_weight, orig_channel, target_channel):
"""Compute weights for cross modality.
This function computes the weights for cross modality by transforming the original channel weight to the target channel.
It returns the new convolutional weight tensor.
Args:
conv1_weight (torch.Tensor): The original convolutional weight tensor.
orig_channel (int): The number of original channels.
target_channel (int): The number of target channels.
Returns:
torch.Tensor: The new convolutional weight tensor.
"""
# transform the original channel weight to target channel
S = 0
for i in range(orig_channel):
S += conv1_weight[:, i, :, :]
avg = S / orig_channel
new_conv1_weight = torch.FloatTensor(64, target_channel, 7, 7)
for i in range(target_channel):
new_conv1_weight[:, i, :, :] = avg.data
return new_conv1_weight
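# Illustrative example: when going from a 3-channel RGB stem to a 2-channel optical-flow
# stem, each of the 2 target channels receives the mean of the 3 RGB kernels, so a
# [64, 3, 7, 7] conv1 weight becomes a [64, 2, 7, 7] weight of channel-wise averages.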
def weight_transform(model_dict, pretrain_dict, target_channel):
"""Weight transform.
This function transforms the weights of the first convolutional layer of a model using the specified pretrained weights
and target channel. It returns the transformed model weights.
Args:
model_dict (dict): The model weights.
pretrain_dict (dict): The pretrained weights.
target_channel (int): The number of target channels.
Returns:
dict: The transformed model weights.
"""
weight_dict = {k: v for k, v in pretrain_dict.items() if k in model_dict}
wo = pretrain_dict[list(pretrain_dict.keys())[0]]
orig_channel = wo.shape[1]
if target_channel == orig_channel:
wt = wo
else:
print(list(pretrain_dict.keys())[0])
print("orig_channel: {} VS target_channel: {}".format(orig_channel, target_channel))
wt = cross_modality_pretrain(wo, orig_channel, target_channel)
weight_dict['conv1.weight'] = wt
model_dict.update(weight_dict)
return model_dict
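# Illustrative sketch (not part of the training flow): demonstrates the channel-averaging
# trick that weight_transform() above relies on when the pretrained conv1 kernel and the
# target input (e.g. stacked optical flow) have a different channel count. The shapes are
# assumptions chosen to mirror the hard-coded (64, C, 7, 7) conv1 layout used above.
if __name__ == "__main__":
    dummy_conv1 = torch.rand(64, 3, 7, 7)          # pretend RGB-pretrained conv1 weight
    flow_conv1 = cross_modality_pretrain(dummy_conv1, orig_channel=3, target_channel=20)
    print(flow_conv1.shape)                        # torch.Size([64, 20, 7, 7])
    # every target channel holds the average of the original 3 channels
    print(torch.allclose(flow_conv1[:, 0], dummy_conv1.mean(dim=1)))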
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/action_recognition/model/resnet.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""I3D backbones for action recognition."""
import torch
import torch.nn as nn
import torch.nn.functional as F
class MaxPool3dSamePadding(nn.MaxPool3d):
"""3D MaxPool with padding to same output shape"""
def compute_pad(self, dim, s):
"""Compute padding for same output shape.
Args:
dim (int): the dimension index.
s (int): the size of the dimension.
Returns:
int: The padding required.
"""
if s % self.stride[dim] == 0:
return max(self.kernel_size[dim] - self.stride[dim], 0)
return max(self.kernel_size[dim] - (s % self.stride[dim]), 0)
def forward(self, x):
"""Dynamic same padding forward."""
# compute 'same' padding
(_, _, t, h, w) = x.size()
pad_t = self.compute_pad(0, t)
pad_h = self.compute_pad(1, h)
pad_w = self.compute_pad(2, w)
pad_t_f = pad_t // 2
pad_t_b = pad_t - pad_t_f
pad_h_f = pad_h // 2
pad_h_b = pad_h - pad_h_f
pad_w_f = pad_w // 2
pad_w_b = pad_w - pad_w_f
pad = (pad_w_f, pad_w_b, pad_h_f, pad_h_b, pad_t_f, pad_t_b)
x = F.pad(x, pad)
return super(MaxPool3dSamePadding, self).forward(x)
class Unit3D(nn.Module):
"""3D convolutional unit module.
This class defines a 3D convolutional unit module that inherits from the `nn.Module` class.
"""
def __init__(self, in_channels,
output_channels,
kernel_shape=(1, 1, 1),
stride=(1, 1, 1),
padding=0,
activation_fn=F.relu,
use_batch_norm=True,
use_bias=False,
name='unit_3d'):
"""Initializes Unit3D module.
Args:
in_channels (int): The number of input channels.
output_channels (int): The number of output channels.
kernel_shape (tuple, optional): The kernel shape. Defaults to (1, 1, 1).
stride (tuple, optional): The stride. Defaults to (1, 1, 1).
padding (int, optional): The padding. Defaults to 0.
activation_fn (function, optional): The activation function. Defaults to F.relu.
use_batch_norm (bool, optional): Whether to use batch normalization. Defaults to True.
use_bias (bool, optional): Whether to use bias. Defaults to False.
name (str, optional): The name of the module. Defaults to 'unit_3d'.
"""
super(Unit3D, self).__init__()
self._output_channels = output_channels
self._kernel_shape = kernel_shape
self._stride = stride
self._use_batch_norm = use_batch_norm
self._activation_fn = activation_fn
self._use_bias = use_bias
self.name = name
self.padding = padding
self.conv3d = nn.Conv3d(in_channels=in_channels,
out_channels=self._output_channels,
kernel_size=self._kernel_shape,
stride=self._stride,
padding=0, # we always want padding to be 0 here. We will dynamically pad based on input size in forward function
bias=self._use_bias)
if self._use_batch_norm:
self.bn = nn.BatchNorm3d(self._output_channels, eps=0.001, momentum=0.01)
def compute_pad(self, dim, s):
"""Compute padding for same."""
if s % self._stride[dim] == 0:
return max(self._kernel_shape[dim] - self._stride[dim], 0)
return max(self._kernel_shape[dim] - (s % self._stride[dim]), 0)
def forward(self, x):
"""Dynamic same padding forward.
This method applies the dynamic same padding to the input tensor, passes it through the 3D convolutional layer,
batch normalization layer, and activation function if specified, and returns the output tensor.
Args:
x (torch.Tensor): The input tensor.
Returns:
torch.Tensor: The output tensor.
"""
# compute 'same' padding
(_, _, t, h, w) = x.size()
pad_t = self.compute_pad(0, t)
pad_h = self.compute_pad(1, h)
pad_w = self.compute_pad(2, w)
pad_t_f = pad_t // 2
pad_t_b = pad_t - pad_t_f
pad_h_f = pad_h // 2
pad_h_b = pad_h - pad_h_f
pad_w_f = pad_w // 2
pad_w_b = pad_w - pad_w_f
pad = (pad_w_f, pad_w_b, pad_h_f, pad_h_b, pad_t_f, pad_t_b)
x = F.pad(x, pad)
x = self.conv3d(x)
if self._use_batch_norm:
x = self.bn(x)
if self._activation_fn is not None:
x = self._activation_fn(x)
return x
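# Illustrative sketch: a quick shape check for the TF-style dynamic "same" padding that
# Unit3D implements above. With stride 2 in every dimension the output size is
# ceil(input / 2) regardless of the 7x7x7 kernel. Call it manually; the model itself
# never uses this helper.
def _unit3d_same_padding_example():
    unit = Unit3D(in_channels=3, output_channels=64,
                  kernel_shape=[7, 7, 7], stride=(2, 2, 2)).eval()
    with torch.no_grad():
        out = unit(torch.rand(1, 3, 16, 112, 112))
    print(out.shape)  # expected: torch.Size([1, 64, 8, 56, 56])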
class InceptionModule(nn.Module):
"""Inception module.
This class defines an Inception module that inherits from the `nn.Module` class. It constructs an Inception
module using the specified input channels, output channels, and name. It constructs four 3D convolutional units
and a 3D max pooling layer, and concatenates their outputs along the channel dimension
"""
def __init__(self, in_channels, out_channels, name):
"""Initialize InceptionModule.
Args:
in_channels (int): The number of input channels.
out_channels (list): The number of output channels for each of the four 3D convolutional units and the 3D max
pooling layer.
name (str): The name of the module.
"""
super(InceptionModule, self).__init__()
self.b0 = Unit3D(in_channels=in_channels, output_channels=out_channels[0], kernel_shape=[1, 1, 1], padding=0,
name=name + '/Branch_0/Conv3d_0a_1x1')
self.b1a = Unit3D(in_channels=in_channels, output_channels=out_channels[1], kernel_shape=[1, 1, 1], padding=0,
name=name + '/Branch_1/Conv3d_0a_1x1')
self.b1b = Unit3D(in_channels=out_channels[1], output_channels=out_channels[2], kernel_shape=[3, 3, 3],
name=name + '/Branch_1/Conv3d_0b_3x3')
self.b2a = Unit3D(in_channels=in_channels, output_channels=out_channels[3], kernel_shape=[1, 1, 1], padding=0,
name=name + '/Branch_2/Conv3d_0a_1x1')
self.b2b = Unit3D(in_channels=out_channels[3], output_channels=out_channels[4], kernel_shape=[3, 3, 3],
name=name + '/Branch_2/Conv3d_0b_3x3')
self.b3a = MaxPool3dSamePadding(kernel_size=[3, 3, 3],
stride=(1, 1, 1), padding=0)
self.b3b = Unit3D(in_channels=in_channels, output_channels=out_channels[5], kernel_shape=[1, 1, 1], padding=0,
name=name + '/Branch_3/Conv3d_0b_1x1')
self.name = name
def forward(self, x):
"""forward"""
b0 = self.b0(x)
b1 = self.b1b(self.b1a(x))
b2 = self.b2b(self.b2a(x))
b3 = self.b3b(self.b3a(x))
return torch.cat([b0, b1, b2, b3], dim=1)
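# Illustrative sketch: the channel bookkeeping behind the InceptionModule wiring used by
# InceptionI3d below. For Mixed_3b the four branches emit 64 + 128 + 32 + 32 feature maps,
# i.e. the 256 input channels that Mixed_3c expects. Call it manually if needed.
def _inception_module_channel_example():
    mixed_3b = InceptionModule(192, [64, 96, 128, 16, 32, 32], "Mixed_3b").eval()
    with torch.no_grad():
        out = mixed_3b(torch.rand(1, 192, 8, 28, 28))
    print(out.shape)  # expected: torch.Size([1, 256, 8, 28, 28])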
# pylint: disable=R0911
class InceptionI3d(nn.Module):
"""Inception-v1 I3D architecture.
The model is introduced in:
Quo Vadis, Action Recognition? A New Model and the Kinetics Dataset
Joao Carreira, Andrew Zisserman
https://arxiv.org/pdf/1705.07750v1.pdf.
See also the Inception architecture, introduced in:
Going deeper with convolutions
Christian Szegedy, Wei Liu, Yangqing Jia, Pierre Sermanet, Scott Reed,
Dragomir Anguelov, Dumitru Erhan, Vincent Vanhoucke, Andrew Rabinovich.
http://arxiv.org/pdf/1409.4842v1.pdf.
"""
# Endpoints of the model in order. During construction, all the endpoints up
# to a designated `final_endpoint` are returned in a dictionary as the
# second return value.
VALID_ENDPOINTS = (
'Conv3d_1a_7x7',
'MaxPool3d_2a_3x3',
'Conv3d_2b_1x1',
'Conv3d_2c_3x3',
'MaxPool3d_3a_3x3',
'Mixed_3b',
'Mixed_3c',
'MaxPool3d_4a_3x3',
'Mixed_4b',
'Mixed_4c',
'Mixed_4d',
'Mixed_4e',
'Mixed_4f',
'MaxPool3d_5a_2x2',
'Mixed_5b',
'Mixed_5c',
'Logits',
'Predictions',
)
def __init__(self, num_classes=400, spatial_squeeze=True,
final_endpoint='Logits', name='inception_i3d', in_channels=3, dropout_keep_prob=0.5):
"""Initializes I3D model instance.
Args:
num_classes: The number of outputs in the logit layer (default 400, which
matches the Kinetics dataset).
spatial_squeeze: Whether to squeeze the spatial dimensions for the logits
before returning (default True).
final_endpoint: The model contains many possible endpoints.
`final_endpoint` specifies the last endpoint for the model to be built
up to. In addition to the output at `final_endpoint`, all the outputs
at endpoints up to `final_endpoint` will also be returned, in a
dictionary. `final_endpoint` must be one of
InceptionI3d.VALID_ENDPOINTS (default 'Logits').
name: A string (optional). The name of this module.
Raises:
ValueError: if `final_endpoint` is not recognized.
"""
if final_endpoint not in self.VALID_ENDPOINTS:
raise ValueError('Unknown final endpoint %s' % final_endpoint)
super(InceptionI3d, self).__init__()
self._num_classes = num_classes
self._spatial_squeeze = spatial_squeeze
self._final_endpoint = final_endpoint
self.logits = None
if self._final_endpoint not in self.VALID_ENDPOINTS:
raise ValueError('Unknown final endpoint %s' % self._final_endpoint)
self.end_points = {}
end_point = 'Conv3d_1a_7x7'
self.end_points[end_point] = Unit3D(in_channels=in_channels, output_channels=64, kernel_shape=[7, 7, 7],
stride=(2, 2, 2), padding=(3, 3, 3), name=name + end_point)
if self._final_endpoint == end_point:
return
end_point = 'MaxPool3d_2a_3x3'
self.end_points[end_point] = MaxPool3dSamePadding(kernel_size=[1, 3, 3], stride=(1, 2, 2), padding=0)
if self._final_endpoint == end_point:
return
end_point = 'Conv3d_2b_1x1'
self.end_points[end_point] = Unit3D(in_channels=64, output_channels=64, kernel_shape=[1, 1, 1], padding=0, name=name + end_point)
if self._final_endpoint == end_point:
return
end_point = 'Conv3d_2c_3x3'
self.end_points[end_point] = Unit3D(in_channels=64, output_channels=192, kernel_shape=[3, 3, 3], padding=1, name=name + end_point)
if self._final_endpoint == end_point:
return
end_point = 'MaxPool3d_3a_3x3'
self.end_points[end_point] = MaxPool3dSamePadding(kernel_size=[1, 3, 3], stride=(1, 2, 2), padding=0)
if self._final_endpoint == end_point:
return
end_point = 'Mixed_3b'
self.end_points[end_point] = InceptionModule(192, [64, 96, 128, 16, 32, 32], name + end_point)
if self._final_endpoint == end_point:
return
end_point = 'Mixed_3c'
self.end_points[end_point] = InceptionModule(256, [128, 128, 192, 32, 96, 64], name + end_point)
if self._final_endpoint == end_point:
return
end_point = 'MaxPool3d_4a_3x3'
self.end_points[end_point] = MaxPool3dSamePadding(kernel_size=[3, 3, 3], stride=(2, 2, 2), padding=0)
if self._final_endpoint == end_point:
return
end_point = 'Mixed_4b'
self.end_points[end_point] = InceptionModule(128 + 192 + 96 + 64, [192, 96, 208, 16, 48, 64], name + end_point)
if self._final_endpoint == end_point:
return
end_point = 'Mixed_4c'
self.end_points[end_point] = InceptionModule(192 + 208 + 48 + 64, [160, 112, 224, 24, 64, 64], name + end_point)
if self._final_endpoint == end_point:
return
end_point = 'Mixed_4d'
self.end_points[end_point] = InceptionModule(160 + 224 + 64 + 64, [128, 128, 256, 24, 64, 64], name + end_point)
if self._final_endpoint == end_point:
return
end_point = 'Mixed_4e'
self.end_points[end_point] = InceptionModule(128 + 256 + 64 + 64, [112, 144, 288, 32, 64, 64], name + end_point)
if self._final_endpoint == end_point:
return
end_point = 'Mixed_4f'
self.end_points[end_point] = InceptionModule(112 + 288 + 64 + 64, [256, 160, 320, 32, 128, 128], name + end_point)
if self._final_endpoint == end_point:
return
end_point = 'MaxPool3d_5a_2x2'
self.end_points[end_point] = MaxPool3dSamePadding(kernel_size=[2, 2, 2], stride=(2, 2, 2), padding=0)
if self._final_endpoint == end_point:
return
end_point = 'Mixed_5b'
self.end_points[end_point] = InceptionModule(256 + 320 + 128 + 128, [256, 160, 320, 32, 128, 128], name + end_point)
if self._final_endpoint == end_point:
return
end_point = 'Mixed_5c'
self.end_points[end_point] = InceptionModule(256 + 320 + 128 + 128, [384, 192, 384, 48, 128, 128], name + end_point)
if self._final_endpoint == end_point:
return
end_point = 'Logits'
self.avg_pool = nn.AvgPool3d(kernel_size=[2, 7, 7],
stride=(1, 1, 1))
self.dropout = nn.Dropout(dropout_keep_prob)
self.logits = Unit3D(in_channels=384 + 384 + 128 + 128, output_channels=self._num_classes,
kernel_shape=[1, 1, 1],
padding=0,
activation_fn=None,
use_batch_norm=False,
use_bias=True,
name='logits')
self.build()
def replace_logits(self, num_classes):
"""Replace final logits.
Args:
num_classes (int): number of classess for new logits layer
"""
self._num_classes = num_classes
self.logits = Unit3D(in_channels=384 + 384 + 128 + 128, output_channels=self._num_classes,
kernel_shape=[1, 1, 1],
padding=0,
activation_fn=None,
use_batch_norm=False,
use_bias=True,
name='logits')
def build(self):
"""Build model"""
for k in self.end_points.keys():
self.add_module(k, self.end_points[k])
def forward(self, x):
"""Forward"""
for end_point in self.VALID_ENDPOINTS:
if end_point in self.end_points:
x = self._modules[end_point](x) # use _modules to work with dataparallel
x = self.logits(self.dropout(self.avg_pool(x)))
if self._spatial_squeeze:
logits = x.squeeze(3).squeeze(3)
# logits is batch X time X classes, which is what we want to work with
logits = torch.mean(logits, dim=2)
return logits
def extract_features(self, x):
"""Extract features"""
for end_point in self.VALID_ENDPOINTS:
if end_point in self.end_points:
x = self._modules[end_point](x)
return self.avg_pool(x)
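# Illustrative sketch: build the full backbone and push one dummy 64-frame, 224x224 RGB
# clip through it. The temporal scores are averaged inside forward(), so the result is a
# single (batch, num_classes) tensor; extract_features() returns the pooled Mixed_5c map.
if __name__ == "__main__":
    i3d = InceptionI3d(num_classes=5, in_channels=3).eval()
    with torch.no_grad():
        scores = i3d(torch.rand(1, 3, 64, 224, 224))
        feats = i3d.extract_features(torch.rand(1, 3, 64, 224, 224))
    print(scores.shape)  # expected: torch.Size([1, 5])
    print(feats.shape)   # expected: torch.Size([1, 1024, 7, 1, 1])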
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/action_recognition/model/i3d.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model builder interface and joint model."""
import torch
import torch.nn as nn
from .resnet import resnet2d
from .resnet3d import resnet3d
from .i3d import InceptionI3d
import torch.nn.functional as F
from nvidia_tao_pytorch.cv.action_recognition.utils.common_utils import patch_decrypt_checkpoint
from nvidia_tao_pytorch.core.cookbooks.tlt_pytorch_cookbook import TLTPyTorchCookbook
def load_pretrained_weights(pretrained_backbone_path):
"""Load pretrained weights for a PyTorch model.
This function takes the path to the pretrained weights file as input. It loads the weights using PyTorch's
`torch.load` function and applies a patch to decrypt the checkpoint state_dict if it is encrypted. It then
extracts the state_dict from the loaded weights and converts the keys to match the format expected by the
PyTorch model. Finally, it returns the state_dict.
Args:
pretrained_backbone_path (str): The path to the pretrained weights file.
Returns:
dict: The state_dict for the PyTorch model.
"""
temp = torch.load(pretrained_backbone_path,
map_location="cpu")
if temp.get("state_dict_encrypted", False):
# Retrieve encryption key from TLTPyTorchCookbook.
key = TLTPyTorchCookbook.get_passphrase()
if key is None:
raise PermissionError("Cannot access model state dict without the encryption key")
temp = patch_decrypt_checkpoint(temp, key)
# for loading pretrained I3D weights released on
# https://github.com/piergiaj/pytorch-i3d
if "state_dict" not in temp:
return temp
state_dict = {}
for key, value in list(temp["state_dict"].items()):
if "model" in key:
new_key = ".".join(key.split(".")[1:])
state_dict[new_key] = value
else:
state_dict[key] = value
return state_dict
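# Illustrative sketch of the key renaming above: checkpoints whose parameter names carry a
# leading "model." prefix (as wrapper-module checkpoints typically do) are rewritten so the
# keys match the bare backbone. The temporary file name is just a placeholder for the demo.
def _load_pretrained_weights_example():
    import os
    import tempfile
    ckpt = {"state_dict": {"model.conv1.weight": torch.zeros(1),
                           "model.fc_cls.bias": torch.zeros(1)}}
    path = os.path.join(tempfile.gettempdir(), "ar_demo_ckpt.pth")
    torch.save(ckpt, path)
    print(sorted(load_pretrained_weights(path).keys()))
    # expected: ['conv1.weight', 'fc_cls.bias']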
# @TODO(tylerz): imagenet_pretrained is an internal option for verification
def get_basemodel(backbone,
input_channel,
nb_classes,
imagenet_pretrained=False,
pretrained_backbone_path=None,
dropout_ratio=0.5,
pretrained_class_num=0):
"""Get backbone model for 2D input.
This function takes the backbone architecture, input channel, number of classes, imagenet pretrained flag,
pretrained backbone path, dropout ratio, and pretrained class number as input. It loads pretrained weights if
specified, sets the number of classes to load based on the pretrained class number or number of classes, and
constructs a 2D backbone model using the specified architecture, input channel, number of classes, imagenet
pretrained flag, pretrained weights, and dropout ratio. If the pretrained class number is not zero, it replaces
the logits layer of the model with a new one that outputs the specified number of classes. Finally, it returns
the constructed model.
Args:
backbone (str): The backbone architecture.
input_channel (int): The input channel.
nb_classes (int): The number of classes.
imagenet_pretrained (bool, optional): Whether to use imagenet pretrained weights. Defaults to False.
pretrained_backbone_path (str, optional): The path to the pretrained backbone weights file. Defaults to None.
dropout_ratio (float, optional): The dropout ratio. Defaults to 0.5.
pretrained_class_num (int, optional): The number of classes in the pretrained backbone. Defaults to 0.
Returns:
torch.nn.Module: The constructed 2D backbone model.
"""
if pretrained_backbone_path:
print("loading trained weights from {}".format(
pretrained_backbone_path))
pretrained_weights = load_pretrained_weights(pretrained_backbone_path)
else:
pretrained_weights = None
if pretrained_class_num != 0:
load_n_classes = pretrained_class_num
else:
load_n_classes = nb_classes
if "resnet" in backbone:
model = resnet2d(backbone=backbone,
pretrained_weights=pretrained_weights,
channel=input_channel,
nb_classes=load_n_classes,
imagenet_pretrained=imagenet_pretrained,
dropout_ratio=dropout_ratio)
if pretrained_class_num != 0:
model.replace_logits(nb_classes)
return model
def get_basemodel3d(backbone,
nb_classes,
modality="rgb",
pretrained_backbone_path=None,
imagenet_pretrained=False,
dropout_ratio=0.5,
pretrained_class_num=0):
"""Get backbone model for 3D input.
This function takes the backbone architecture, number of classes, modality, pretrained backbone path, imagenet
pretrained flag, dropout ratio, and pretrained class number as input. It loads pretrained weights if specified,
sets the number of classes to load based on the pretrained class number or number of classes, and constructs a
3D backbone model using the specified architecture, modality, number of classes, pretrained weights, and dropout
ratio. If the backbone architecture is "i3d" and the modality is "rgb" or "of", it constructs an InceptionI3d
model with the specified number of classes and input channels. If pretrained weights are specified, it loads
them into the model. If the pretrained class number is not zero, it replaces the logits layer of the model with
a new one that outputs the specified number of classes. Finally, it returns the constructed model.
Args:
backbone (str): The backbone architecture.
nb_classes (int): The number of classes.
modality (str, optional): The modality. Defaults to "rgb".
pretrained_backbone_path (str, optional): The path to the pretrained backbone weights file. Defaults to None.
imagenet_pretrained (bool, optional): Whether to use imagenet pretrained weights. Defaults to False.
dropout_ratio (float, optional): The dropout ratio. Defaults to 0.5.
pretrained_class_num (int, optional): The number of classes in the pretrained backbone. Defaults to 0.
Returns:
torch.nn.Module: The constructed 3D backbone model.
"""
if pretrained_backbone_path:
print("loading trained weights from {}".format(
pretrained_backbone_path))
pretrained_weights = load_pretrained_weights(pretrained_backbone_path)
else:
pretrained_weights = None
if pretrained_class_num != 0:
load_n_classes = pretrained_class_num
else:
load_n_classes = nb_classes
if 'resnet' in backbone:
model = resnet3d(backbone,
modality=modality,
nb_classes=load_n_classes,
pretrained_weights=pretrained_weights,
dropout_ratio=dropout_ratio,
pretrained2d=imagenet_pretrained)
elif backbone == "i3d":
        if modality == "rgb":
            channels = 3
        elif modality == "of":
            channels = 2
        else:
            raise ValueError("Only rgb/of modalities are supported for the i3d backbone.")
model = InceptionI3d(num_classes=load_n_classes, in_channels=channels,
dropout_keep_prob=dropout_ratio)
if pretrained_weights is not None:
model.load_state_dict(pretrained_weights)
# Replace final FC layer to match dataset
if pretrained_class_num != 0:
model.replace_logits(nb_classes)
return model
class JointModel(nn.Module):
"""Joint model module.
This class defines a joint model module that takes two inputs, an RGB sequence and an optical flow sequence, and
outputs a prediction for the action class
"""
def __init__(self,
of_seq_length,
rgb_seq_length,
nb_classes,
num_fc=64,
backbone='resnet_18',
input_type="2d",
pretrain_of_model=None,
pretrain_rgb_model=None,
imagenet_pretrained=False,
dropout_ratio=0.5):
"""Initialize the JointModel
Args:
of_seq_length (int): The length of the optical flow sequence.
rgb_seq_length (int): The length of the RGB sequence.
nb_classes (int): The number of classes.
num_fc (int, optional): The number of hidden units for the first fully connected layer. Defaults to 64.
backbone (str, optional): The backbone architecture. Defaults to "resnet_18".
input_type (str, optional): The input type, either "2d" or "3d". Defaults to "2d".
pretrain_of_model (str, optional): The path to the pretrained optical flow backbone weights file. Defaults to None.
pretrain_rgb_model (str, optional): The path to the pretrained RGB backbone weights file. Defaults to None.
imagenet_pretrained (bool, optional): Whether to use imagenet pretrained weights. Defaults to False.
dropout_ratio (float, optional): The dropout ratio. Defaults to 0.5.
"""
super(__class__, self).__init__() # pylint:disable=undefined-variable
if input_type == "2d":
self.model_rgb = get_basemodel(backbone=backbone,
input_channel=rgb_seq_length * 3,
nb_classes=nb_classes,
imagenet_pretrained=imagenet_pretrained,
pretrained_backbone_path=pretrain_rgb_model,
dropout_ratio=dropout_ratio)
self.model_of = get_basemodel(backbone=backbone,
input_channel=of_seq_length * 2,
nb_classes=nb_classes,
imagenet_pretrained=imagenet_pretrained,
pretrained_backbone_path=pretrain_of_model,
dropout_ratio=dropout_ratio)
elif input_type == "3d":
self.model_rgb = get_basemodel3d(backbone=backbone,
nb_classes=nb_classes,
modality="rgb",
pretrained_backbone_path=pretrain_rgb_model,
imagenet_pretrained=imagenet_pretrained,
dropout_ratio=dropout_ratio)
self.model_of = get_basemodel3d(backbone=backbone,
nb_classes=nb_classes,
modality="of",
pretrained_backbone_path=pretrain_of_model,
imagenet_pretrained=imagenet_pretrained,
dropout_ratio=dropout_ratio)
self.fc1 = nn.Linear(2 * nb_classes, num_fc)
self.fc2 = nn.Linear(num_fc, nb_classes)
def forward(self, x):
"""Joint forward.
This method takes two input sequences, an RGB sequence and an optical flow sequence, and passes them through
the two backbone models to obtain their output features. It then concatenates the output features and passes
them through two fully connected layers to output the final prediction.
Args:
x (tuple): A tuple containing the RGB sequence and optical flow sequence.
Returns:
torch.Tensor: The predicted action class probabilities.
"""
x_rgb, x_of = x
x_rgb = self.model_rgb(x_rgb)
x_of = self.model_of(x_of)
# x = (x_rgb + x_of)
x = torch.cat((x_rgb, x_of), dim=1)
x = F.relu(self.fc1(x))
x = self.fc2(x)
return x
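# Illustrative sketch: a joint RGB + optical-flow forward pass with the 3D (I3D) backbones.
# The RGB stream sees (N, 3, T, H, W) and the flow stream (N, 2, T, H, W); the per-stream
# score vectors are concatenated and fused by the two FC layers. Clip length and class
# count below are arbitrary choices for the example.
def _joint_model_example():
    model = JointModel(of_seq_length=16, rgb_seq_length=16, nb_classes=5,
                       backbone="i3d", input_type="3d").eval()
    x_rgb = torch.rand(1, 3, 16, 224, 224)
    x_of = torch.rand(1, 2, 16, 224, 224)
    with torch.no_grad():
        out = model((x_rgb, x_of))
    print(out.shape)  # expected: torch.Size([1, 5])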
class JointModel_ONNX(JointModel):
"""Joint model module for export.
This class defines a joint model module that inherits from the `JointModel` class and adds support for exporting
the model to ONNX format.
"""
def __init__(self,
of_seq_length,
rgb_seq_length,
nb_classes,
num_fc=64,
backbone='resnet_18',
input_type="2d",
pretrain_of_model=None,
pretrain_rgb_model=None,
imagenet_pretrained=False,
dropout_ratio=0.5):
"""Initialize the JointModel for ONNX export
Args:
of_seq_length (int): The length of the optical flow sequence.
rgb_seq_length (int): The length of the RGB sequence.
nb_classes (int): The number of classes.
num_fc (int, optional): The number of hidden units for the first fully connected layer. Defaults to 64.
backbone (str, optional): The backbone architecture. Defaults to "resnet_18".
input_type (str, optional): The input type, either "2d" or "3d". Defaults to "2d".
pretrain_of_model (str, optional): The path to the pretrained optical flow backbone weights file. Defaults to None.
pretrain_rgb_model (str, optional): The path to the pretrained RGB backbone weights file. Defaults to None.
imagenet_pretrained (bool, optional): Whether to use imagenet pretrained weights. Defaults to False.
dropout_ratio (float, optional): The dropout ratio. Defaults to 0.5.
"""
super(__class__, self).__init__(of_seq_length=of_seq_length, # pylint:disable=undefined-variable
rgb_seq_length=rgb_seq_length,
nb_classes=nb_classes,
num_fc=num_fc,
backbone=backbone,
input_type=input_type,
pretrain_of_model=pretrain_of_model,
pretrain_rgb_model=pretrain_rgb_model,
imagenet_pretrained=imagenet_pretrained,
dropout_ratio=dropout_ratio)
def forward(self, x_rgb, x_of):
"""Joint model forward."""
x_rgb = self.model_rgb(x_rgb)
x_of = self.model_of(x_of)
x = torch.cat((x_rgb, x_of), dim=1)
x = F.relu(self.fc1(x))
x = self.fc2(x)
return x
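# Illustrative sketch: JointModel_ONNX takes the two streams as separate positional inputs,
# which is what torch.onnx.export() needs for a two-input graph. The file name, tensor
# names and opset below are placeholders, not values mandated by the toolkit.
if __name__ == "__main__":
    export_model = JointModel_ONNX(of_seq_length=16, rgb_seq_length=16, nb_classes=5,
                                   backbone="i3d", input_type="3d").eval()
    dummy_rgb = torch.rand(1, 3, 16, 224, 224)
    dummy_of = torch.rand(1, 2, 16, 224, 224)
    torch.onnx.export(export_model, (dummy_rgb, dummy_of), "ar_joint_demo.onnx",
                      input_names=["input_rgb", "input_of"],
                      output_names=["fc_pred"],
                      opset_version=12)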
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/action_recognition/model/ar_model.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The top model builder interface."""
import torch
from .ar_model import (get_basemodel, JointModel, JointModel_ONNX, get_basemodel3d,
load_pretrained_weights)
def build_ar_model(experiment_config,
imagenet_pretrained=True,
export=False):
"""
Build action recognition model according to config.
This function constructs an action recognition model according to the specified experiment configuration.
Args:
experiment_config (dict): The experiment configuration dictionary.
imagenet_pretrained (bool, optional): Whether to use imagenet pretrained weights. Defaults to True.
export (bool, optional): Whether to build the model that can be exported to ONNX format. Defaults to False.
Returns:
nn.Module: The action recognition model.
"""
model_config = experiment_config["model"]
dataset_config = experiment_config["dataset"]
nb_classes = len(dataset_config["label_map"].keys())
model_type = model_config["model_type"]
input_type = model_config["input_type"]
backbone = model_config["backbone"]
imagenet_pretrained = model_config["imagenet_pretrained"]
dropout = model_config["dropout_ratio"]
if input_type == "2d":
if model_type == "of":
model = get_basemodel(backbone=backbone,
input_channel=model_config['of_seq_length'] * 2,
nb_classes=nb_classes,
imagenet_pretrained=imagenet_pretrained,
pretrained_backbone_path=model_config["of_pretrained_model_path"],
pretrained_class_num=model_config["of_pretrained_num_classes"],
dropout_ratio=dropout)
elif model_type == "rgb":
model = get_basemodel(backbone=backbone,
input_channel=model_config['rgb_seq_length'] * 3,
nb_classes=nb_classes,
imagenet_pretrained=imagenet_pretrained,
pretrained_backbone_path=model_config["rgb_pretrained_model_path"],
pretrained_class_num=model_config["rgb_pretrained_num_classes"],
dropout_ratio=dropout)
elif model_type == "joint":
if export:
model = JointModel_ONNX(backbone=backbone,
input_type="2d",
of_seq_length=model_config['of_seq_length'],
rgb_seq_length=model_config['rgb_seq_length'],
nb_classes=nb_classes,
num_fc=model_config['num_fc'],
pretrain_of_model=model_config["of_pretrained_model_path"],
pretrain_rgb_model=model_config["rgb_pretrained_model_path"],
imagenet_pretrained=imagenet_pretrained,
dropout_ratio=dropout)
else:
model = JointModel(backbone=backbone,
input_type="2d",
of_seq_length=model_config['of_seq_length'],
rgb_seq_length=model_config['rgb_seq_length'],
nb_classes=nb_classes,
num_fc=model_config['num_fc'],
pretrain_of_model=model_config["of_pretrained_model_path"],
pretrain_rgb_model=model_config["rgb_pretrained_model_path"],
imagenet_pretrained=imagenet_pretrained,
dropout_ratio=dropout)
if model_config["joint_pretrained_model_path"]:
temp = torch.load(model_config["joint_pretrained_model_path"])
model.load_state_dict(temp["state_dict"])
else:
            raise ValueError("Only model types in [of, rgb, joint] are supported")
elif input_type == "3d":
if model_type in ("of", "rgb"):
if model_type == "of":
pretrained_backbone_path = model_config["of_pretrained_model_path"]
pretrained_class_num = model_config["of_pretrained_num_classes"]
elif model_type == "rgb":
pretrained_backbone_path = model_config["rgb_pretrained_model_path"]
pretrained_class_num = model_config["rgb_pretrained_num_classes"]
model = get_basemodel3d(backbone=backbone,
nb_classes=nb_classes,
modality=model_type,
pretrained_backbone_path=pretrained_backbone_path,
pretrained_class_num=pretrained_class_num,
imagenet_pretrained=imagenet_pretrained,
dropout_ratio=dropout)
elif model_type == "joint":
if export:
model = JointModel_ONNX(backbone=backbone,
of_seq_length=model_config['of_seq_length'],
rgb_seq_length=model_config['rgb_seq_length'],
nb_classes=nb_classes,
num_fc=model_config['num_fc'],
pretrain_of_model=model_config["of_pretrained_model_path"],
pretrain_rgb_model=model_config["rgb_pretrained_model_path"],
imagenet_pretrained=imagenet_pretrained,
input_type=input_type,
dropout_ratio=dropout)
else:
model = JointModel(backbone=backbone,
of_seq_length=model_config['of_seq_length'],
rgb_seq_length=model_config['rgb_seq_length'],
nb_classes=nb_classes,
num_fc=model_config['num_fc'],
pretrain_of_model=model_config["of_pretrained_model_path"],
pretrain_rgb_model=model_config["rgb_pretrained_model_path"],
imagenet_pretrained=imagenet_pretrained,
input_type=input_type,
dropout_ratio=dropout)
if model_config["joint_pretrained_model_path"]:
pretrained_weights = \
load_pretrained_weights(model_config["joint_pretrained_model_path"])
model.load_state_dict(pretrained_weights)
else:
            raise ValueError("Only model types in [of, rgb, joint] are supported")
return model
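# Illustrative sketch: a minimal configuration dictionary that exercises the single-stream
# 3D path above. In a real run this structure comes from the experiment spec; the keys
# shown are only the ones build_ar_model reads for model_type="rgb" / input_type="3d",
# and the label map is a made-up example.
if __name__ == "__main__":
    demo_config = {
        "model": {
            "model_type": "rgb",
            "input_type": "3d",
            "backbone": "i3d",
            "imagenet_pretrained": False,
            "dropout_ratio": 0.5,
            "rgb_seq_length": 16,
            "rgb_pretrained_model_path": None,
            "rgb_pretrained_num_classes": 0,
        },
        "dataset": {"label_map": {"walk": 0, "ride_bike": 1, "fall_floor": 2}},
    }
    ar_model = build_ar_model(demo_config, imagenet_pretrained=False)
    print(type(ar_model).__name__)  # expected: InceptionI3d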
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/action_recognition/model/build_nn_model.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Inferencer"""
import torch
from nvidia_tao_pytorch.cv.action_recognition.utils.common_utils import data_to_device
class Inferencer():
"""PyTorch model inferencer.
This class takes a PyTorch model and a boolean flag indicating whether to return probabilities as input.
It initializes the model, sets it to evaluation mode, and moves it to the GPU. It also provides a method for
doing inference on input data and returning predicted class IDs or probabilities.
"""
def __init__(self, model, ret_prob=False):
"""Initialize the inferencer with a PyTorch model.
Args:
model (torch.nn.Module): The PyTorch model.
ret_prob (bool, optional): Whether to return probabilities. Defaults to False.
"""
self.model = model
self.model.eval()
self.model.cuda()
self.ret_prob = ret_prob
def inference(self, data):
"""Do inference on input data and return predicted class IDs or probabilities.
Args:
data (torch.Tensor): The input data.
Returns:
numpy.ndarray or int: The predicted class IDs or probabilities.
"""
cuda_data = data_to_device(data)
cls_scores = self.model(cuda_data)
if self.ret_prob:
prob = torch.softmax(cls_scores, dim=1)
prob = prob.detach().cpu().numpy()
return prob
pred_id = torch.argmax(cls_scores, dim=1)
pred_id = pred_id.cpu().numpy()
return pred_id
# @TODO(tylerz): TRT inference
class TRTInferencer():
"""TRT engine inferencer."""
pass
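# Illustrative sketch: wrap a stand-in classifier in the Inferencer. This assumes a
# CUDA-capable GPU is available (Inferencer moves the model to the GPU and data_to_device
# is expected to move the batch alongside it); the toy module and shapes are not part of
# the action recognition pipeline.
if __name__ == "__main__":
    import torch.nn as nn
    toy_model = nn.Sequential(nn.Flatten(), nn.Linear(3 * 8 * 8, 4))
    demo_inferencer = Inferencer(toy_model, ret_prob=True)
    probs = demo_inferencer.inference(torch.rand(2, 3, 8, 8))
    print(probs.shape)  # expected: (2, 4)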
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/action_recognition/inference/inferencer.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Action recognition inference module."""
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/action_recognition/inference/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Action recognition dataloader module."""
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/action_recognition/dataloader/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Video frames sampler."""
import pytest
import random
import numpy as np
def random_interval_sample(max_sample_cnt, seq_length):
"""Randomly sample frames.
This function takes the maximum sample count and sequence length as input. It randomly samples frames based on
the specified parameters and returns the sampled image IDs as a sorted numpy array.
Args:
max_sample_cnt (int): The maximum sample count.
seq_length (int): The sequence length.
Returns:
numpy.ndarray: The sampled image IDs as a sorted numpy array.
"""
if max_sample_cnt < seq_length:
        # clip is shorter than seq_length: sample frame ids with replacement (frames may repeat)
img_ids = np.sort(np.random.randint(max_sample_cnt, size=seq_length))
else:
seq_interval = max_sample_cnt // seq_length
img_ids = np.sort((np.arange(seq_length) * seq_interval +
np.random.randint(seq_interval, size=seq_length)))
return img_ids
def random_consecutive_sample(max_sample_cnt, seq_length, sample_rate=1):
"""Randomly choose a start frame and pick up the continuous frames.
This function takes the maximum sample count, sequence length, and sample rate as input. It randomly chooses
a start frame and picks up the continuous frames based on the specified parameters. The function returns the
selected image IDs as a numpy array.
Args:
max_sample_cnt (int): The maximum sample count.
seq_length (int): The sequence length.
sample_rate (int, optional): The sample rate. Defaults to 1.
Returns:
numpy.ndarray: The selected image IDs as a numpy array.
"""
total_frames_req = seq_length * sample_rate
average_duration = max_sample_cnt - total_frames_req + 1
if average_duration > 0:
start_idx = random.randint(0, average_duration)
else:
start_idx = 0
img_ids = start_idx + np.arange(seq_length) * sample_rate
# loop the video to form sequence:
img_ids = np.mod(img_ids, max_sample_cnt)
return img_ids
@pytest.mark.skip(reason="Not a unit test.")
def test_interval_sample(max_sample_cnt, seq_length):
"""Choose the middle frames of each clip with interval.
It chooses the middle frames of each clip based on the specified parameters
and returns the selected image IDs as a numpy array.
Args:
max_sample_cnt (int): The maximum sample count.
seq_length (int): The sequence length.
Returns:
numpy.ndarray: The selected image IDs as a numpy array.
"""
clip_interval = max_sample_cnt / float(seq_length)
img_ids = np.array([int(clip_interval / 2.0 + clip_interval * x) for x in range(seq_length)])
return img_ids
@pytest.mark.skip(reason="Not a unit test.")
def test_consecutive_sample(max_sample_cnt, seq_length, sample_rate=1, all_frames_3d=False):
"""Choose the middle consecutive frames of each video.
This function takes the maximum sample count, sequence length, sample rate, and all frames 3D flag as input.
It chooses the middle consecutive frames of each video based on the specified parameters and returns the
selected image IDs as a numpy array.
Args:
max_sample_cnt (int): The maximum sample count.
seq_length (int): The sequence length.
sample_rate (int, optional): The sample rate. Defaults to 1.
all_frames_3d (bool, optional): Use all frames for 3D model. Defaults to False.
Returns:
numpy.ndarray: The selected image IDs as a numpy array.
"""
if all_frames_3d:
# inference on all frames for 3D model
img_ids = np.arange(max_sample_cnt)
else:
total_frames_req = seq_length * sample_rate
average_duration = max_sample_cnt - total_frames_req + 1
if average_duration > 0:
start_idx = int(average_duration / 2.0)
else:
start_idx = 0
img_ids = start_idx + np.arange(seq_length) * sample_rate
# loop the video to form sequence:
img_ids = np.mod(img_ids, max_sample_cnt)
return img_ids
def segment_sample(id_list, seq_length):
"""Randomly choose frames out of an averagely segmented frames list.
This function takes a list of image IDs and a sequence length as input. It randomly chooses frames out of an
averagely segmented frames list based on the specified parameters and returns the selected image IDs as a list.
Args:
id_list (list): The list of image IDs.
seq_length (int): The sequence length.
Returns:
list: The selected image IDs as a list.
"""
candidate_id_list = []
max_sample_cnt = len(id_list)
start_idx = 0
seg_length = max_sample_cnt // seq_length
for _ in range(seq_length - 1):
end_idx = start_idx + seg_length - 1
img_idx = random.randint(start_idx, end_idx)
start_idx = start_idx + seg_length
candidate_id_list.append(id_list[img_idx])
end_idx = max_sample_cnt - 1
img_idx = random.randint(start_idx, end_idx)
candidate_id_list.append(id_list[img_idx])
return candidate_id_list
def joint_random_interval_sample(max_sample_cnt, rgb_seq_length, of_seq_length):
"""Randomly choose RGB and optical flow images for joint model training with random interval.
This function takes the maximum sample count, RGB sequence length, and optical flow sequence length as input.
It randomly chooses RGB and optical flow images for joint model training based on the specified parameters
and returns the selected image IDs as a tuple.
Args:
max_sample_cnt (int): The maximum sample count.
rgb_seq_length (int): The RGB sequence length.
of_seq_length (int): The optical flow sequence length.
Returns:
tuple: The selected RGB and optical flow image IDs as a tuple.
"""
if of_seq_length > rgb_seq_length:
of_ids = random_interval_sample(max_sample_cnt, of_seq_length)
rgb_ids = segment_sample(of_ids, rgb_seq_length)
elif of_seq_length < rgb_seq_length:
rgb_ids = random_interval_sample(max_sample_cnt, rgb_seq_length)
of_ids = segment_sample(rgb_ids, of_seq_length)
else:
rgb_ids = random_interval_sample(max_sample_cnt, rgb_seq_length)
of_ids = rgb_ids
return rgb_ids, of_ids
def joint_random_consecutive_sample(max_sample_cnt, rgb_seq_length, of_seq_length,
sample_rate=1):
"""Randomly choose consecutive RGB and optical flow images for joint model training.
This function takes the maximum sample count, RGB sequence length, optical flow sequence length, and sample rate
as input. It randomly chooses RGB and optical flow images for joint model training based on the specified
parameters and returns the selected image IDs as a tuple.
Args:
max_sample_cnt (int): The maximum sample count.
rgb_seq_length (int): The RGB sequence length.
of_seq_length (int): The optical flow sequence length.
sample_rate (int, optional): The sample rate. Defaults to 1.
Returns:
tuple: The selected RGB and optical flow image IDs as a tuple.
"""
if of_seq_length > rgb_seq_length:
of_ids = random_consecutive_sample(max_sample_cnt, of_seq_length, sample_rate)
rgb_ids = []
can_idx = test_consecutive_sample(len(of_ids), rgb_seq_length, sample_rate)
for idx in can_idx:
rgb_ids.append(of_ids[idx])
elif of_seq_length < rgb_seq_length:
rgb_ids = random_consecutive_sample(max_sample_cnt, rgb_seq_length, sample_rate)
of_ids = []
can_idx = test_consecutive_sample(len(rgb_ids), of_seq_length, sample_rate)
for idx in can_idx:
of_ids.append(rgb_ids[idx])
else:
rgb_ids = random_consecutive_sample(max_sample_cnt, rgb_seq_length, sample_rate)
of_ids = rgb_ids
return rgb_ids, of_ids
def joint_test_interval_sample(max_sample_cnt, rgb_seq_length, of_seq_length):
"""Choose RGB and optical flow images for joint model test with consistent interval.
This function takes the maximum sample count, RGB sequence length, and optical flow sequence length as input.
It chooses RGB and optical flow images for joint model test with consistent interval based on the specified
parameters and returns the selected image IDs as a tuple.
Args:
max_sample_cnt (int): The maximum sample count.
rgb_seq_length (int): The RGB sequence length.
of_seq_length (int): The optical flow sequence length.
Returns:
tuple: The selected RGB and optical flow image IDs as a tuple.
"""
if of_seq_length > rgb_seq_length:
of_ids = test_interval_sample(max_sample_cnt, of_seq_length)
rgb_ids = []
can_idx = test_interval_sample(len(of_ids), rgb_seq_length)
for idx in can_idx:
rgb_ids.append(of_ids[idx])
elif of_seq_length < rgb_seq_length:
rgb_ids = test_interval_sample(max_sample_cnt, rgb_seq_length)
of_ids = []
can_idx = test_interval_sample(len(rgb_ids), of_seq_length)
for idx in can_idx:
of_ids.append(rgb_ids[idx])
else:
rgb_ids = test_interval_sample(max_sample_cnt, rgb_seq_length)
of_ids = rgb_ids
return rgb_ids, of_ids
def joint_test_consecutive_sample(max_sample_cnt, rgb_seq_length, of_seq_length,
sample_rate=1, all_frames_3d=False):
"""Choose consecutive RGB and optical flow images for joint model test.
This function takes the maximum sample count, RGB sequence length, optical flow sequence length, sample rate,
and all_frames_3d as input. It chooses consecutive RGB and optical flow images for joint model test based on the
specified parameters and returns the selected image IDs as a tuple.
Args:
max_sample_cnt (int): The maximum sample count.
rgb_seq_length (int): The RGB sequence length.
of_seq_length (int): The optical flow sequence length.
sample_rate (int, optional): The sample rate. Defaults to 1.
all_frames_3d (bool, optional): Whether to choose all frames for 3D model. Defaults to False.
Returns:
tuple: The selected RGB and optical flow image IDs as a tuple.
"""
if all_frames_3d:
rgb_ids = np.arange(max_sample_cnt)
of_ids = rgb_ids
return rgb_ids, of_ids
if of_seq_length > rgb_seq_length:
of_ids = test_consecutive_sample(max_sample_cnt, of_seq_length, sample_rate)
rgb_ids = []
can_idx = test_consecutive_sample(len(of_ids), rgb_seq_length, sample_rate)
for idx in can_idx:
rgb_ids.append(of_ids[idx])
elif of_seq_length < rgb_seq_length:
rgb_ids = test_consecutive_sample(max_sample_cnt, rgb_seq_length, sample_rate)
of_ids = []
can_idx = test_consecutive_sample(len(rgb_ids), of_seq_length, sample_rate)
for idx in can_idx:
of_ids.append(rgb_ids[idx])
else:
        rgb_ids = test_consecutive_sample(max_sample_cnt, rgb_seq_length, sample_rate)
of_ids = rgb_ids
return rgb_ids, of_ids
if __name__ == "__main__":
max_sample_cnt = 58
seq_length = 64
sample_rate = 1
print(random_consecutive_sample(max_sample_cnt, seq_length, sample_rate))
print(test_consecutive_sample(max_sample_cnt, seq_length, sample_rate))
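    # A few more illustrative calls: the deterministic samplers are easy to check by
    # hand, e.g. splitting 10 frames into 4 equal clips picks the middle indices
    # [1, 3, 6, 8]. The joint sampler reuses the same helpers for the two streams.
    print(test_interval_sample(10, 4))
    print(segment_sample(list(range(10)), 4))
    print(joint_test_consecutive_sample(max_sample_cnt, 8, 16, sample_rate))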
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/action_recognition/dataloader/frame_sampler.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Action recognition dataset."""
import os
from PIL import Image
from torch.utils.data import Dataset
from nvidia_tao_pytorch.cv.action_recognition.dataloader.frame_sampler import test_interval_sample
def load_image(img_path):
"""Load an image from the specified file path and handle corrupted images.
    This function attempts to load an image from the given file path and forces the
    pixel data to be read immediately, so that corrupted files are caught here rather
    than later in the pipeline.
    Args:
        img_path (str): The path to the image file.
    Returns:
        Image: The loaded PIL image.
    Raises:
        OSError: If the image file is corrupted or cannot be read.
"""
img = Image.open(img_path)
try:
img.load()
except OSError:
raise OSError("Corrupted image: {}".format(img_path))
return img
class MotionDataset(Dataset):
"""A custom dataset class for loading and processing optical flow vector data.
This dataset class is designed to handle optical flow vectors only, and it provides
functionality for loading, preprocessing, and sampling the data. It can be used with
PyTorch's DataLoader for efficient data loading and batching.
"""
def __init__(self, sample_dict, output_shape,
seq_length, label_map, train_sampler,
test_sampler, input_type="2d",
mode="train", full_clip=False, transform=None,
clips_per_video=1, sample_clips_path=None,
sample_clips_ids=None):
"""Initialize the custom dataset object with the given parameters.
Args:
sample_dict (dict): A dictionary containing video patch paths as keys and action labels as values.
output_shape (tuple): The desired output shape of the samples.
seq_length (int): The sequence length of the samples.
label_map (dict): A dictionary mapping labels to their corresponding indices.
train_sampler (Sampler): A sampler object for the training set.
test_sampler (Sampler): A sampler object for the test set.
input_type (str, optional): The type of input data, either "2d" or "3d". Defaults to "2d".
mode (str, optional): The mode of the dataset, either "train" or "val". Defaults to "train".
full_clip (bool, optional): Whether to use the full clip or not. Defaults to False.
transform (callable, optional): Optional transform to be applied on a sample. Defaults to None.
clips_per_video (int, optional): The number of clips to be extracted from each video. Defaults to 1.
            sample_clips_path (list, optional): A list of sample clip paths, used when full_clip is True. Defaults to None.
            sample_clips_ids (list, optional): A list of sample clip IDs, used when full_clip is True. Defaults to None.
Raises:
ValueError: If the input_type is not "2d" or "3d".
"""
self.sample_name_list = list(sample_dict.keys())
self.sample_dict = sample_dict
self.output_height = output_shape[0]
self.output_width = output_shape[1]
self.label_map = label_map
self.seq_length = seq_length
self.mode = mode
self.input_type = input_type
self.clips_per_video = clips_per_video
self.train_sampler = train_sampler
self.test_sampler = test_sampler
self.full_clip = full_clip
if self.full_clip:
assert (sample_clips_path is not None) and (sample_clips_ids is not None)
self.sample_clips_path = sample_clips_path
self.sample_clips_ids = sample_clips_ids
self.transform = transform
def __len__(self):
"""Return the length of the dataset"""
if self.full_clip:
return len(self.sample_clips_ids)
return len(self.sample_name_list) * self.clips_per_video
def stack_of(self, sample_path, img_ids):
"""Stack the u and v images of optical flow data.
This method takes the file path of the sample and a list of image IDs as input.
It loads the corresponding u and v images (horizontal and vertical components of
the optical flow) and stacks them together to create a single stacked image.
This stacked image can be used for further processing or analysis.
Args:
sample_path (str): The file path of the sample containing the u and v images.
img_ids (list): A list of image IDs corresponding to the u and v images to be stacked.
Returns:
list: A list containing the u and v components of the optical flow data.
Raises:
FileNotFoundError: If the specified file path does not exist or the images cannot be found.
"""
# video base path will contain u, v and rgb
u = os.path.join(sample_path, "u")
raw_u_list = sorted(os.listdir(u))
v = os.path.join(sample_path, "v")
raw_v_list = sorted(os.listdir(v))
flow = []
for _, idx in enumerate(img_ids):
frame_name = raw_u_list[idx]
assert raw_u_list[idx] == raw_v_list[idx]
h_image = os.path.join(u, frame_name)
v_image = os.path.join(v, frame_name)
imgH = load_image(h_image)
imgV = load_image(v_image)
flow.append(imgH)
flow.append(imgV)
return flow
def get_raw_frames(self, sample_path, img_ids):
"""Get raw frames of optical flow data for joint training.
This method takes the file path of the sample and a list of image IDs as input.
It loads the corresponding raw frames of optical flow data, which can be used
for joint training with other modalities, such as RGB images.
The raw frames are not preprocessed or transformed, allowing for flexibility
in the subsequent processing pipeline.
Args:
sample_path (str): The file path of the sample containing the raw optical flow frames.
img_ids (list): A list of image IDs corresponding to the raw optical flow frames to be loaded.
Returns:
            list: A list of raw optical flow frames, where each frame is an Image object.
Raises:
FileNotFoundError: If the specified file path does not exist or the images cannot be found.
"""
if self.mode in ["train", "val"]:
action_label = self.label_map[self.sample_dict[sample_path]]
data = self.stack_of(sample_path, img_ids)
if self.mode == "train":
return data, action_label
if self.mode == "val":
return sample_path, data, action_label
return sample_path, data
def get_frames(self, sample_path, img_ids):
"""Get transformed frames of optical flow data.
This method takes the file path of the sample and a list of image IDs as input.
It loads the corresponding frames of optical flow data and applies any specified
transformations to them.
Args:
sample_path (str): The file path of the sample containing the optical flow frames.
img_ids (list): A list of image IDs corresponding to the optical flow frames to be loaded and transformed.
Returns:
Torch.tensor: A tensor of transformed optical flow frames
Raises:
FileNotFoundError: If the specified file path does not exist or the images cannot be found.
"""
if self.mode in ["train", "val"]:
action_label = self.label_map[self.sample_dict[sample_path]]
data = self.stack_of(sample_path, img_ids)
data = self.transform(data)
if self.input_type == "2d":
# data = data.permute(1, 0, 2, 3) # CTHW -> TCHW
data = data.reshape([2 * self.seq_length, self.output_height, self.output_width])
elif self.input_type == "3d":
pass
else:
raise ValueError("Only 2d/3d input types are supported.")
if self.mode == "train":
return data, action_label
if self.mode == "val":
return sample_path, data, action_label
return sample_path, data
def __getitem__(self, idx):
"""__getitem__"""
if self.full_clip:
sample_path = self.sample_clips_path[idx]
img_ids = self.sample_clips_ids[idx]
else:
idx = idx % len(self.sample_name_list)
sample_path = self.sample_name_list[idx]
max_sample_cnt = len(os.listdir(os.path.join(sample_path, "v")))
if self.mode == "train":
img_ids = self.train_sampler(max_sample_cnt, self.seq_length)
else:
img_ids = self.test_sampler(max_sample_cnt, self.seq_length)
return self.get_frames(sample_path, img_ids)
class SpatialDataset(Dataset):
"""Dataset for RGB frames only"""
def __init__(self, sample_dict, output_shape,
seq_length, label_map, transform,
train_sampler, test_sampler, input_type="2d",
mode="train", full_clip=False,
clips_per_video=1, sample_clips_path=None,
sample_clips_ids=None):
"""Initialize the SpatialDataset with the given parameters.
Args:
sample_dict (dict): A dictionary containing video patch paths as keys and action labels as values.
output_shape (tuple): A tuple containing the output height and width of the frames.
seq_length (int): The sequence length of the frames.
label_map (dict): A dictionary mapping action labels to their corresponding integer values.
transform (callable): A callable object for transforming the frames.
train_sampler (function): A function for sampling frames during training.
test_sampler (function): A function for sampling frames during testing.
input_type (str, optional): The input type of the frames, either "2d" or "3d". Defaults to "2d".
            mode (str, optional): The mode of the dataset, either "train" or "val". Defaults to "train".
full_clip (bool, optional): Whether to use full clips or not. Defaults to False.
clips_per_video (int, optional): The number of clips to be extracted per video. Defaults to 1.
sample_clips_path (list, optional): A list of sample clip paths when using full clips. Defaults to None.
sample_clips_ids (list, optional): A list of sample clip IDs when using full clips. Defaults to None.
"""
self.sample_name_list = list(sample_dict.keys())
self.sample_dict = sample_dict
self.output_height = output_shape[0]
self.output_width = output_shape[1]
self.seq_length = seq_length
self.label_map = label_map
self.mode = mode
self.input_type = input_type
self.full_clip = full_clip
self.clips_per_video = clips_per_video
self.train_sampler = train_sampler
self.test_sampler = test_sampler
if self.full_clip:
assert (sample_clips_path is not None) and (sample_clips_ids is not None)
self.sample_clips_path = sample_clips_path
self.sample_clips_ids = sample_clips_ids
self.transform = transform
def __len__(self):
"""Return the length of the dataset"""
if self.full_clip:
return len(self.sample_clips_ids)
return len(self.sample_name_list) * self.clips_per_video
def get_raw_frames(self, sample_path, img_ids):
"""Get raw frames for joint training with other modalities.
This method takes the file path of the sample and a list of image IDs as input.
        It loads the corresponding raw frames of RGB data and returns them as a list of Image objects.
The raw frames can be used for joint training with other modalities, such as optical flow.
Args:
sample_path (str): The file path of the sample containing the RGB frames.
img_ids (list): A list of image IDs corresponding to the RGB frames to be loaded.
Returns:
            list: A list of raw RGB frames, where each frame is an Image object.
Raises:
FileNotFoundError: If the specified file path does not exist or the images cannot be found.
"""
if self.mode in ["train", "val"]:
action_label = self.label_map[self.sample_dict[sample_path]]
data = []
raw_imgs_list = sorted(os.listdir(os.path.join(sample_path, "rgb")))
for img_idx in img_ids:
img_name = raw_imgs_list[img_idx]
img_path = os.path.join(sample_path, "rgb",
img_name)
data.append(load_image(img_path))
if self.mode == "train":
return data, action_label
if self.mode == "val":
return sample_path, data, action_label
return sample_path, data
def get_frames(self, sample_path, img_ids):
"""Get transformed frames of RGB data.
This method takes the file path of the sample and a list of image IDs as input.
It loads the corresponding frames of RGB data and applies any specified
transformations to them.
Args:
sample_path (str): The file path of the sample containing the RGB frames.
img_ids (list): A list of image IDs corresponding to the RGB frames to be loaded and transformed.
Returns:
Torch.tensor: A tensor of transformed RGB frames
Raises:
FileNotFoundError: If the specified file path does not exist or the images cannot be found.
"""
if self.mode in ["train", "val"]:
action_label = self.label_map[self.sample_dict[sample_path]]
data = []
raw_imgs_list = sorted(os.listdir(os.path.join(sample_path, "rgb")))
for img_idx in img_ids:
img_name = raw_imgs_list[img_idx]
img_path = os.path.join(sample_path, "rgb",
img_name)
data.append(load_image(img_path))
data = self.transform(data)
if self.input_type == "2d":
data = data.reshape([3 * self.seq_length, self.output_height, self.output_width])
elif self.input_type == "3d":
pass
else:
raise ValueError("Only 2d/3d input types are supported.")
if self.mode == "train":
return data, action_label
if self.mode == "val":
return sample_path, data, action_label
return sample_path, data
def __getitem__(self, idx):
"""getitem."""
if self.full_clip:
sample_path = self.sample_clips_path[idx]
img_ids = self.sample_clips_ids[idx]
else:
idx = idx % len(self.sample_name_list)
sample_path = self.sample_name_list[idx]
max_sample_cnt = len(os.listdir(os.path.join(sample_path, "rgb")))
if self.mode == "train":
img_ids = self.train_sampler(max_sample_cnt,
self.seq_length)
else:
img_ids = self.test_sampler(max_sample_cnt,
self.seq_length)
return self.get_frames(sample_path, img_ids)
class FuseDataset(Dataset):
"""Dataset for RGB frames + Optical Flow"""
def __init__(self, sample_dict, output_shape,
of_seq_length, rgb_seq_length, label_map,
transform, train_sampler, test_sampler,
input_type="2d", mode="train", full_clip=False,
clips_per_video=1, sample_clips_path=None,
sample_clips_ids=None):
"""Initialize the FuseDataset with the given parameters.
Args:
sample_dict (dict): A dictionary containing video patch paths as keys and action labels as values.
output_shape (tuple): A tuple containing the output height and width of the frames.
of_seq_length (int): The sequence length of the optical flow data.
rgb_seq_length (int): The sequence length of the RGB frames.
label_map (dict): A dictionary mapping action labels to their corresponding integer values.
transform (callable): A callable object for transforming the frames.
train_sampler (function): A function for sampling frames during training.
test_sampler (function): A function for sampling frames during testing.
input_type (str, optional): The input type of the frames, either "2d" or "3d". Defaults to "2d".
            mode (str, optional): The mode of the dataset, one of "train", "val" or "inf". Defaults to "train".
full_clip (bool, optional): Whether to use full clips or not. Defaults to False.
clips_per_video (int, optional): The number of clips to be extracted per video. Defaults to 1.
sample_clips_path (list, optional): A list of sample clip paths when using full clips. Defaults to None.
sample_clips_ids (list, optional): A list of sample clip IDs when using full clips. Defaults to None.
"""
self.sample_name_list = list(sample_dict.keys())
self.sample_dict = sample_dict
self.output_height = output_shape[0]
self.output_width = output_shape[1]
self.label_map = label_map
self.mode = mode
self.of_seq_length = of_seq_length
self.rgb_seq_length = rgb_seq_length
self.full_clip = full_clip
self.clips_per_video = clips_per_video
self.train_sampler = train_sampler
self.test_sampler = test_sampler
self.input_type = input_type
if self.full_clip:
assert (sample_clips_path is not None) and (sample_clips_ids is not None)
self.sample_clips_path = sample_clips_path
self.sample_clips_ids = sample_clips_ids
self.motion_dataset = MotionDataset(sample_dict=sample_dict,
output_shape=output_shape,
seq_length=of_seq_length,
label_map=label_map,
input_type=input_type,
mode=mode,
transform=None,
train_sampler=None,
test_sampler=None)
self.spatial_dataset = SpatialDataset(sample_dict=sample_dict,
output_shape=output_shape,
seq_length=rgb_seq_length,
label_map=label_map,
input_type=input_type,
mode=mode,
transform=None,
train_sampler=None,
test_sampler=None)
self.transform = transform
def __len__(self):
"""Return the length of the dataset"""
if self.full_clip:
return len(self.sample_clips_ids)
return len(self.sample_name_list) * self.clips_per_video
def __getitem__(self, idx):
"""getitem"""
if self.full_clip:
sample_path = self.sample_clips_path[idx]
img_ids = self.sample_clips_ids[idx]
if self.of_seq_length > self.rgb_seq_length:
of_ids = img_ids
rgb_ids = []
can_idx = test_interval_sample(len(of_ids), self.rgb_seq_length)
for idx_ in can_idx:
rgb_ids.append(of_ids[idx_])
elif self.of_seq_length < self.rgb_seq_length:
rgb_ids = img_ids
of_ids = []
can_idx = test_interval_sample(len(rgb_ids), self.of_seq_length)
for idx_ in can_idx:
of_ids.append(rgb_ids[idx_])
else:
rgb_ids = img_ids
of_ids = img_ids
else:
idx_ = idx % len(self.sample_name_list)
sample_path = self.sample_name_list[idx_]
            # max_sample_cnt is taken from the optical-flow (u/v) frame directories
max_sample_cnt = len(os.listdir(os.path.join(sample_path, "u")))
if self.mode == "train":
rgb_ids, of_ids = self.train_sampler(max_sample_cnt,
self.rgb_seq_length,
self.of_seq_length)
else:
rgb_ids, of_ids = self.test_sampler(max_sample_cnt,
self.rgb_seq_length,
self.of_seq_length)
if self.mode in ["train", "val"]:
action_label = self.label_map[self.sample_dict[sample_path]]
# generate RGB frames patch + OF vectors
if self.mode == "train":
of_data, of_action_label = self.motion_dataset.get_raw_frames(sample_path, of_ids)
rgb_data, rgb_action_label = self.spatial_dataset.get_raw_frames(sample_path, rgb_ids)
rgb_data, of_data = self.transform([rgb_data, of_data])
if self.input_type == "2d":
rgb_data = rgb_data.reshape([3 * self.rgb_seq_length,
self.output_height,
self.output_width])
of_data = of_data.reshape([2 * self.of_seq_length,
self.output_height,
self.output_width])
elif self.input_type == "3d":
pass
else:
raise ValueError("Only 2d/3d input types are supported.")
assert of_action_label == action_label
assert rgb_action_label == action_label
return [rgb_data, of_data], action_label
elif self.mode == "val":
of_sample_path, of_data, of_action_label = \
self.motion_dataset.get_raw_frames(sample_path, of_ids)
rgb_sample_path, rgb_data, rgb_action_label = \
self.spatial_dataset.get_raw_frames(sample_path, rgb_ids)
rgb_data, of_data = self.transform([rgb_data, of_data])
if self.input_type == "2d":
rgb_data = rgb_data.reshape([3 * self.rgb_seq_length,
self.output_height,
self.output_width])
of_data = of_data.reshape([2 * self.of_seq_length,
self.output_height,
self.output_width])
elif self.input_type == "3d":
pass
else:
raise ValueError("Only 2d/3d input types are supported.")
assert of_action_label == action_label
assert rgb_action_label == action_label
assert of_sample_path == sample_path
assert rgb_sample_path == sample_path
return sample_path, [rgb_data, of_data], action_label
elif self.mode == "inf":
of_sample_path, of_data = self.motion_dataset.get_raw_frames(sample_path, of_ids)
rgb_sample_path, rgb_data = self.spatial_dataset.get_raw_frames(sample_path, rgb_ids)
rgb_data, of_data = self.transform([rgb_data, of_data])
if self.input_type == "2d":
rgb_data = rgb_data.reshape([3 * self.rgb_seq_length,
self.output_height,
self.output_width])
of_data = of_data.reshape([2 * self.of_seq_length,
self.output_height,
self.output_width])
elif self.input_type == "3d":
pass
else:
raise ValueError("Only 2d/3d input types are supported.")
return sample_path, [rgb_data, of_data]
else:
            raise ValueError('Only train, val and inf modes are supported.')
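# Usage sketch (hypothetical paths and helper names): FuseDataset is normally constructed by
# build_dataloader() in build_data_loader.py, which wires up the joint samplers and the joint
# augmentation pipeline. A manual construction would look roughly like:
#
#   sample_dict = {"/data/ar/train/walk/video_0001": "walk"}   # clip dir containing rgb/, u/, v/
#   dataset = FuseDataset(sample_dict=sample_dict, output_shape=(224, 224),
#                         of_seq_length=10, rgb_seq_length=3, label_map={"walk": 0},
#                         transform=joint_transform,           # e.g. from build_joint_augmentation_pipeline
#                         train_sampler=joint_train_sampler,   # e.g. from build_joint_sampler
#                         test_sampler=joint_test_sampler,
#                         input_type="2d", mode="train")
#   (rgb_clip, of_clip), label = dataset[0]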
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/action_recognition/dataloader/ar_dataset.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Build torch data loader."""
import os
import numpy as np
import random
from functools import partial
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
from nvidia_tao_pytorch.cv.action_recognition.dataloader.ar_dataset import MotionDataset, SpatialDataset, FuseDataset
from nvidia_tao_pytorch.cv.action_recognition.dataloader.frame_sampler import (random_interval_sample,
random_consecutive_sample,
test_interval_sample,
test_consecutive_sample,
joint_random_interval_sample,
joint_random_consecutive_sample,
joint_test_interval_sample,
joint_test_consecutive_sample)
from nvidia_tao_pytorch.cv.action_recognition.utils.group_transforms import (GroupNormalize,
GroupWorker,
GroupRandomHorizontalFlip,
GroupRandomCrop,
MultiScaleCrop,
ToNumpyNDArray,
ToTorchFormatTensor,
GroupJointWorker,
GroupJointRandomCrop,
JointMultiScaleCrop,
GroupJointRandomHorizontalFlip,
JointWorker,
GroupJointNormalize)
def split_shad_dataset(top_dir, val_percent):
"""Randomly split the orginal SHAD train dataset to train/val.
And validation dataset takes val_ratio part of the whole dataset.
"""
action_set = os.listdir(top_dir)
sample_list = []
for action in action_set:
action_root_path = os.path.join(top_dir, action)
for video in os.listdir(action_root_path):
video_path = os.path.join(action_root_path, video)
for sample in os.listdir(video_path):
sample_path = os.path.join(video_path, sample)
sample_list.append((sample_path, action))
total_sample_cnt = len(sample_list)
index_list = list(range(total_sample_cnt))
random.shuffle(index_list)
val_sample_cnt = int(total_sample_cnt * val_percent)
train_samples = {}
val_samples = {}
for idx in index_list[:val_sample_cnt]:
sample_path, action_label = sample_list[idx]
val_samples[sample_path] = action_label
for idx in index_list[val_sample_cnt:total_sample_cnt]:
sample_path, action_label = sample_list[idx]
train_samples[sample_path] = action_label
return train_samples, val_samples
def split_dataset(top_dir, val_percent):
"""Randomly split the original train dataset into train and validation sets.
This function takes the top-level directory of the dataset and the validation percentage as input. It randomly
splits the original train dataset into train and validation sets, where the validation set takes up val_percent
of the whole dataset. The function returns the file paths of the train and validation sets as lists.
Args:
top_dir (str): The top-level directory of the dataset.
val_percent (float): The percentage of the dataset to be used for validation.
Returns:
tuple: A tuple containing the file paths of the train and validation sets as lists.
"""
action_set = os.listdir(top_dir)
sample_list = []
for action in action_set:
action_root_path = os.path.join(top_dir, action)
for video in os.listdir(action_root_path):
video_path = os.path.join(action_root_path, video)
sample_list.append((video_path, action))
total_sample_cnt = len(sample_list)
index_list = list(range(total_sample_cnt))
random.shuffle(index_list)
val_sample_cnt = int(total_sample_cnt * val_percent)
train_samples = {}
val_samples = {}
for idx in index_list[:val_sample_cnt]:
sample_path, action_label = sample_list[idx]
val_samples[sample_path] = action_label
for idx in index_list[val_sample_cnt:total_sample_cnt]:
sample_path, action_label = sample_list[idx]
train_samples[sample_path] = action_label
return train_samples, val_samples
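# Usage sketch (hypothetical directory layout): split_dataset expects <top_dir>/<action>/<video>
# folders (split_shad_dataset additionally expects a <sample> level below each video) and both
# return dicts mapping sample paths to action labels, e.g.:
#
#   train_samples, val_samples = split_dataset("/data/ar/train", val_percent=0.2)
#   # train_samples -> {"/data/ar/train/walk/video_0001": "walk", ...}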
def list_dataset(top_dir):
"""Generate the sample_dict from top_dir.
Args:
top_dir (str): The top-level directory of the dataset.
Returns:
dict: A dictionary containing video patch paths as keys and action labels as values.
"""
action_set = os.listdir(top_dir)
sample_dict = {}
for action in action_set:
action_root_path = os.path.join(top_dir, action)
for video in os.listdir(action_root_path):
video_path = os.path.join(action_root_path, video)
sample_dict[video_path] = action
return sample_dict
def get_clips_list(sample_dict, seq_len, eval_mode,
sampler_strategy="random_interval",
sample_rate=1, num_segments=1, dir_path="rgb"):
"""Get a list of clips covering all the frames in the dataset.
This function takes a sample dictionary, sequence length, evaluation mode, sampler strategy, sample rate,
number of segments, and directory path as input. It generates a list of clips covering all the frames in the
dataset based on the specified parameters. The function returns two lists: one containing the file paths of
the clips, and one containing the corresponding image IDs of the frames in the clips.
Args:
sample_dict (dict): A dictionary containing video patch paths as keys and action labels as values.
seq_len (int): The sequence length of the clips.
eval_mode (str): The evaluation mode, either "conv" or "all".
sampler_strategy (str, optional): The sampler strategy, either "random_interval" or "consecutive".
Defaults to "random_interval".
sample_rate (int, optional): The sample rate for the frames. Defaults to 1.
num_segments (int, optional): The number of segments for the clips when using conv mode. Defaults to 1.
dir_path (str, optional): The directory path of the frames. Defaults to "rgb".
Returns:
tuple: A tuple containing two lists: one containing the file paths of the clips, and one containing
the corresponding image IDs of the frames in the clips.
Raises:
ValueError: If the sampler strategy is not supported.
"""
sample_path_list = list(sample_dict.keys())
sample_clips_path = []
# tuple of image_ids
sample_clips_ids = []
if sampler_strategy == "random_interval":
for sample_path in sample_path_list:
total_frames = len(os.listdir(os.path.join(sample_path, dir_path)))
# interval
seq_interval = total_frames // seq_len
if seq_interval > 0:
for j in range(seq_interval):
sample_clips_path.append(sample_path)
sample_clips_ids.append(np.sort((np.arange(seq_len) * seq_interval + j)))
else:
sample_clips_path.append(sample_path)
img_ids = np.arange(seq_len)
img_ids = np.minimum(img_ids, total_frames - 1)
sample_clips_ids.append(img_ids)
    # num_segments and eval_mode only work for the consecutive sampler strategy
elif sampler_strategy == "consecutive":
if eval_mode == "conv":
for sample_path in sample_path_list:
total_frames = len(os.listdir(os.path.join(sample_path, dir_path)))
orig_len = seq_len * sample_rate
if total_frames > orig_len - 1:
tick = (total_frames - orig_len + 1) / float(num_segments)
offsets = np.array([int(tick / 2.0 + tick * x)
for x in range(num_segments)])
else:
offsets = np.zeros((1,))
for offset in offsets:
sample_clips_path.append(sample_path)
img_ids = offset + np.arange(seq_len) * sample_rate
# img_ids = np.minimum(img_ids, total_frames-1)
img_ids = np.mod(img_ids, total_frames)
img_ids = img_ids.astype(np.int32)
sample_clips_ids.append(img_ids)
elif eval_mode == "all":
for sample_path in sample_path_list:
total_frames = len(os.listdir(os.path.join(sample_path, dir_path)))
orig_len = seq_len * sample_rate
num_clips = total_frames // orig_len
if num_clips > 0:
for j in range(num_clips):
for i in range(sample_rate):
sample_clips_path.append(sample_path)
sample_clips_ids.append(list(range(j * orig_len + i, (j + 1) * orig_len, sample_rate)))
else:
sample_clips_path.append(sample_path)
img_ids = np.arange(seq_len) * sample_rate
# img_ids = np.minimum(img_ids, total_frames-1)
img_ids = np.mod(img_ids, total_frames)
sample_clips_ids.append(img_ids)
else:
raise ValueError("Only supports sample strategy [random_interval, consecutive].")
return sample_clips_path, sample_clips_ids
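# Illustrative sketch (assumed frame counts): with seq_len=4 and the "random_interval" strategy,
# a clip directory holding 10 frames gives seq_interval = 10 // 4 = 2, so two clips are emitted
# with frame ids [0, 2, 4, 6] and [1, 3, 5, 7]; a directory with fewer than seq_len frames emits
# a single clip whose ids are clamped to the last available frame.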
def build_joint_augmentation_pipeline(output_shape, augmentation_config,
dataset_mode="train"):
"""Build an augmentation pipeline for a joint model.
This function takes the output shape, augmentation configuration, and dataset mode as input. It builds an
augmentation pipeline for a joint model based on the specified parameters. The function returns the pipeline
as a `transforms.Compose` object.
Args:
output_shape (tuple): The output shape of the joint model.
augmentation_config (dict): The augmentation configuration for the joint model.
dataset_mode (str, optional): The dataset mode, either "train", "val", or "inf". Defaults to "train".
Returns:
transforms: The augmentation pipeline as a `transforms.Compose` object.
Raises:
ValueError: If the dataset mode is not supported.
"""
rgb_input_mean = list(augmentation_config["rgb_input_mean"])
rgb_input_std = list(augmentation_config["rgb_input_std"])
of_input_mean = list(augmentation_config["of_input_mean"])
of_input_std = list(augmentation_config["of_input_std"])
output_height, output_width = output_shape
smaller_edge = augmentation_config["crop_smaller_edge"]
transforms_list = []
if dataset_mode in ["val", "inf"]:
if augmentation_config["val_center_crop"]:
# resize to smaller size 256
transforms_list.append(GroupJointWorker(transforms.Resize(int(smaller_edge))))
# center crop :
transforms_list.append(GroupJointWorker(transforms.CenterCrop([output_height, output_width])))
# transforms_list.append(GroupThreeCrop([output_height, output_width])) # [3*64, 256, 256, 3]
else:
# simply resize to target size
transforms_list.append(GroupJointWorker(transforms.Resize([output_height, output_width])))
elif dataset_mode == "train":
if augmentation_config["train_crop_type"] == "multi_scale_crop":
scales = augmentation_config["scales"]
transforms_list.append(JointMultiScaleCrop([output_width, output_height],
scales))
elif augmentation_config["train_crop_type"] == "random_crop":
# @TODO(tylerz): enable joint training experiments with .png, remove 340 later
transforms_list.append(GroupJointWorker(transforms.Resize(int(smaller_edge))))
# transforms_list.append(GroupJointWorker(transforms.Resize((int(smaller_edge), 340))))
transforms_list.append(GroupJointRandomCrop((output_height, output_width)))
else:
transforms_list.append(GroupJointWorker(transforms.Resize([output_height, output_width])))
if augmentation_config["horizontal_flip_prob"] > 0.0:
prob = min(1.0, augmentation_config["horizontal_flip_prob"])
transforms_list.append(GroupJointRandomHorizontalFlip(prob))
else:
        raise ValueError('Only train, val and inf modes are supported.')
transforms_list.append(JointWorker(ToNumpyNDArray()))
transforms_list.append(JointWorker(ToTorchFormatTensor()))
if (len(rgb_input_mean) != 0 or len(rgb_input_std) != 0 or
len(of_input_mean) != 0 or len(of_input_std) != 0):
transforms_list.append(GroupJointNormalize(rgb_input_mean, rgb_input_std,
of_input_mean, of_input_std))
transform = transforms.Compose(transforms_list)
return transform
def build_single_augmentation_pipeline(output_shape, augmentation_config,
dataset_type="rgb", dataset_mode="train"):
"""Build a single stream augmentation pipeline.
This function takes the output shape, augmentation configuration, dataset type, and dataset mode as input. It
builds a single stream augmentation pipeline based on the specified parameters. The function returns the
pipeline as a `transforms.Compose` object.
Args:
output_shape (tuple): The output shape of the single stream model.
augmentation_config (dict): The augmentation configuration for the single stream model.
dataset_type (str, optional): The dataset type, either "rgb" or "flow". Defaults to "rgb".
dataset_mode (str, optional): The dataset mode, either "train", "val", or "inf". Defaults to "train".
Returns:
transforms.Compose: The augmentation pipeline as a `transforms.Compose` object.
Raises:
ValueError: If the dataset type or mode is not supported.
"""
if dataset_type == "rgb":
input_mean = list(augmentation_config["rgb_input_mean"])
input_std = list(augmentation_config["rgb_input_std"])
elif dataset_type == "of":
input_mean = list(augmentation_config["of_input_mean"])
input_std = list(augmentation_config["of_input_std"])
else:
ValueError(("Only the type in [of, rgb] is supported for single input pipeline"))
output_height, output_width = output_shape
smaller_edge = augmentation_config["crop_smaller_edge"]
transforms_list = []
if dataset_mode in ["val", "inf"]:
if augmentation_config["val_center_crop"]:
# resize to smaller size 256
# transforms_list.append(GroupWorker(transforms.Resize(int(smaller_edge * 256 / 224))))
transforms_list.append(GroupWorker(transforms.Resize(int(smaller_edge))))
# center crop :
transforms_list.append(GroupWorker(transforms.CenterCrop([output_height, output_width])))
# transforms_list.append(GroupThreeCrop([output_height, output_width])) # [3*64, 256, 256, 3]
else:
# simply resize to target size
transforms_list.append(GroupWorker(transforms.Resize([output_height, output_width])))
elif dataset_mode == "train":
if augmentation_config["train_crop_type"] == "multi_scale_crop":
transforms_list.append(GroupWorker(transforms.Resize(int(smaller_edge))))
scales = augmentation_config["scales"]
transforms_list.append(MultiScaleCrop([output_width, output_height],
scales))
elif augmentation_config["train_crop_type"] == "random_crop":
transforms_list.append(GroupWorker(transforms.Resize(int(smaller_edge))))
transforms_list.append(GroupRandomCrop((output_height, output_width)))
else:
transforms_list.append(GroupWorker(transforms.Resize([output_height, output_width])))
if augmentation_config["horizontal_flip_prob"] > 0.0:
prob = min(1.0, augmentation_config["horizontal_flip_prob"])
if dataset_type == "rgb":
transforms_list.append(GroupRandomHorizontalFlip(prob))
elif dataset_type == "of":
transforms_list.append(GroupRandomHorizontalFlip(flip_prob=prob, is_flow=True))
else:
raise ValueError("Single branch augmentation pipeline only supports rgb, of.")
else:
        raise ValueError('Only train, val and inf modes are supported.')
transforms_list.append(ToNumpyNDArray())
transforms_list.append(ToTorchFormatTensor())
if len(input_mean) != 0 or len(input_std) != 0:
transforms_list.append(GroupNormalize(input_mean, input_std))
transform = transforms.Compose(transforms_list)
return transform
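# Usage sketch (illustrative values; the keys mirror the augmentation_config entries accessed above):
#
#   aug_cfg = {"rgb_input_mean": [0.485, 0.456, 0.406], "rgb_input_std": [0.229, 0.224, 0.225],
#              "of_input_mean": [], "of_input_std": [], "crop_smaller_edge": 256,
#              "val_center_crop": True, "train_crop_type": "random_crop", "scales": [1.0],
#              "horizontal_flip_prob": 0.5}
#   train_tf = build_single_augmentation_pipeline((224, 224), aug_cfg,
#                                                 dataset_type="rgb", dataset_mode="train")
#   # train_tf consumes the list of PIL frames for one clip and returns a normalized torch tensor.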
def build_single_sampler(sampler_strategy, sample_rate=1, all_frames_3d=False):
"""Build a frames sampler for a single branch model.
This function takes the sampler strategy, sample rate, and all frames 3D flag as input. It builds a frames
sampler for a single branch model based on the specified parameters. The function returns two samplers: one
for training and one for testing.
Args:
sampler_strategy (str): The sampler strategy, either "random_interval" or "consecutive".
sample_rate (int, optional): The sample rate for the frames. Defaults to 1.
all_frames_3d (bool, optional): The flag indicating whether to take all frames for 3D model. Defaults to False.
Returns:
tuple: A tuple containing two samplers: one for training and one for testing.
Raises:
ValueError: If the sampler strategy is not supported.
"""
if sampler_strategy == "random_interval":
train_sampler = random_interval_sample
test_sampler = test_interval_sample
elif sampler_strategy == "consecutive":
train_sampler = partial(random_consecutive_sample, sample_rate=sample_rate)
test_sampler = partial(test_consecutive_sample,
sample_rate=sample_rate,
all_frames_3d=all_frames_3d)
else:
raise ValueError("Only supports [random_interval, consecutive] sample strategy")
return train_sampler, test_sampler
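# Illustrative sketch: the returned samplers are callables taking (frame_count, seq_length) and
# returning the frame indices for one clip (see the dataset __getitem__ in ar_dataset.py), e.g.:
#
#   train_sampler, test_sampler = build_single_sampler("consecutive", sample_rate=2)
#   img_ids = train_sampler(120, 16)   # indices of the 16 sampled frames out of 120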
def build_joint_sampler(sampler_strategy, sample_rate=1, all_frames_3d=False):
"""Build frames sampler for joint model."""
if sampler_strategy == "random_interval":
train_sampler = joint_random_interval_sample
test_sampler = joint_test_interval_sample
elif sampler_strategy == "consecutive":
train_sampler = partial(joint_random_consecutive_sample, sample_rate=sample_rate)
test_sampler = partial(joint_test_consecutive_sample,
sample_rate=sample_rate,
all_frames_3d=all_frames_3d)
else:
raise ValueError("Only supports [random_interval, consecutive] sample strategy")
return train_sampler, test_sampler
def build_dataloader(sample_dict, model_config,
output_shape, label_map, augmentation_config,
dataset_mode="inf", batch_size=1, workers=4,
input_type="2d", shuffle=False, pin_mem=False,
eval_mode="center", num_segments=1,
clips_per_video=1):
"""Build a torch dataloader.
This function takes the sample dictionary, model configuration, output shape, label map, augmentation
configuration, dataset mode, batch size, number of workers, input type, shuffle flag, pin memory flag,
evaluation mode, number of segments, and clips per video as input. It builds a torch dataloader based on
the specified parameters and returns it.
Args:
sample_dict (dict): A dictionary containing video patch paths as keys and action labels as values.
model_config (dict): The model configuration.
output_shape (tuple): The output shape.
label_map (dict): A dictionary mapping labels to their corresponding indices.
augmentation_config (dict): The augmentation configuration.
dataset_mode (str, optional): The dataset mode, could be "train", "val" or "inf". Defaults to "inf".
batch_size (int, optional): The batch size. Defaults to 1.
workers (int, optional): The number of workers. Defaults to 4.
input_type (str, optional): The input type, either "2d" or "3d". Defaults to "2d".
shuffle (bool, optional): The shuffle flag. Defaults to False.
pin_mem (bool, optional): The pin memory flag. Defaults to False.
eval_mode (str, optional): The evaluation mode for evaluation dataset, either "conv", "center" or "all". Defaults to "center".
num_segments (int, optional): The number of segments when using full clip. Defaults to 1.
clips_per_video (int, optional): The number of clips to be extracted from each video. Defaults to 1.
Returns:
torch.utils.data.DataLoader: The torch dataloader.
Raises:
ValueError: If the dataset type is not supported.
"""
dataset_type = model_config["model_type"]
train_sampler, test_sampler = build_single_sampler(model_config['sample_strategy'],
model_config['sample_rate'])
sample_clips_path = None
sample_clips_id = None
full_clip = False
if dataset_type == "of":
aug_transform = build_single_augmentation_pipeline(output_shape=output_shape,
augmentation_config=augmentation_config,
dataset_mode=dataset_mode,
dataset_type="of")
if eval_mode == "conv":
full_clip = True
sample_clips_path, sample_clips_id = \
get_clips_list(sample_dict,
model_config["of_seq_length"],
eval_mode=eval_mode,
sampler_strategy=model_config['sample_strategy'],
sample_rate=model_config['sample_rate'],
num_segments=num_segments,
dir_path="u")
elif eval_mode == "all":
if input_type == "2d":
full_clip = True
sample_clips_path, sample_clips_id = \
get_clips_list(sample_dict,
model_config["of_seq_length"],
eval_mode=eval_mode,
sampler_strategy=model_config['sample_strategy'],
sample_rate=model_config['sample_rate'],
dir_path="u")
elif input_type == "3d":
train_sampler, test_sampler = \
build_single_sampler(model_config['sample_strategy'],
model_config['sample_rate'],
all_frames_3d=True)
dataset = MotionDataset(sample_dict=sample_dict,
output_shape=output_shape,
seq_length=model_config["of_seq_length"],
input_type=input_type,
label_map=label_map,
mode=dataset_mode,
full_clip=full_clip,
clips_per_video=clips_per_video,
transform=aug_transform,
train_sampler=train_sampler,
test_sampler=test_sampler,
sample_clips_path=sample_clips_path,
sample_clips_ids=sample_clips_id)
elif dataset_type == "rgb":
aug_transform = build_single_augmentation_pipeline(output_shape=output_shape,
augmentation_config=augmentation_config,
dataset_mode=dataset_mode,
dataset_type="rgb")
if eval_mode == "conv":
full_clip = True
sample_clips_path, sample_clips_id = \
get_clips_list(sample_dict,
model_config["rgb_seq_length"],
eval_mode=eval_mode,
sampler_strategy=model_config['sample_strategy'],
sample_rate=model_config['sample_rate'],
num_segments=num_segments,
dir_path="rgb")
elif eval_mode == "all":
if input_type == "2d":
full_clip = True
sample_clips_path, sample_clips_id = \
get_clips_list(sample_dict,
model_config["rgb_seq_length"],
eval_mode=eval_mode,
sampler_strategy=model_config['sample_strategy'],
sample_rate=model_config['sample_rate'],
dir_path="rgb")
elif input_type == "3d":
train_sampler, test_sampler = \
build_single_sampler(model_config['sample_strategy'],
model_config['sample_rate'],
all_frames_3d=True)
dataset = SpatialDataset(sample_dict=sample_dict,
output_shape=output_shape,
seq_length=model_config["rgb_seq_length"],
input_type=input_type,
label_map=label_map,
mode=dataset_mode,
full_clip=full_clip,
clips_per_video=clips_per_video,
transform=aug_transform,
train_sampler=train_sampler,
test_sampler=test_sampler,
sample_clips_path=sample_clips_path,
sample_clips_ids=sample_clips_id)
elif dataset_type == "joint":
train_sampler, test_sampler = \
build_joint_sampler(model_config['sample_strategy'],
model_config['sample_rate'])
aug_transform = \
build_joint_augmentation_pipeline(output_shape=output_shape,
augmentation_config=augmentation_config,
dataset_mode=dataset_mode)
larger_seq = max(model_config["rgb_seq_length"],
model_config["of_seq_length"])
if eval_mode == "conv":
full_clip = True
sample_clips_path, sample_clips_id = \
get_clips_list(sample_dict,
larger_seq,
eval_mode=eval_mode,
sampler_strategy=model_config['sample_strategy'],
sample_rate=model_config['sample_rate'],
num_segments=num_segments,
dir_path="u")
elif eval_mode == "all":
if input_type == "2d":
full_clip = True
sample_clips_path, sample_clips_id = \
get_clips_list(sample_dict,
larger_seq,
eval_mode=eval_mode,
sampler_strategy=model_config['sample_strategy'],
sample_rate=model_config['sample_rate'],
dir_path="u")
elif input_type == "3d":
train_sampler, test_sampler = \
build_joint_sampler(model_config['sample_strategy'],
model_config['sample_rate'],
all_frames_3d=True)
dataset = FuseDataset(sample_dict=sample_dict,
output_shape=output_shape,
of_seq_length=model_config["of_seq_length"],
rgb_seq_length=model_config["rgb_seq_length"],
input_type=input_type,
label_map=label_map,
mode=dataset_mode,
full_clip=full_clip,
clips_per_video=clips_per_video,
transform=aug_transform,
train_sampler=train_sampler,
test_sampler=test_sampler,
sample_clips_path=sample_clips_path,
sample_clips_ids=sample_clips_id)
else:
raise ValueError("Only the type in [of, rgb, joint] is supported")
dataloader = DataLoader(dataset=dataset,
batch_size=batch_size,
shuffle=shuffle,
num_workers=workers,
pin_memory=pin_mem)
return dataloader
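# Usage sketch (hypothetical paths; model_config keys mirror those accessed above and
# augmentation_config follows the keys used by the augmentation pipeline builders):
#
#   sample_dict = list_dataset("/data/ar/test")
#   model_config = {"model_type": "rgb", "sample_strategy": "consecutive",
#                   "sample_rate": 1, "rgb_seq_length": 3, "of_seq_length": 10}
#   loader = build_dataloader(sample_dict, model_config, (224, 224), {"walk": 0, "run": 1},
#                             augmentation_config, dataset_mode="val",
#                             batch_size=8, eval_mode="center")
#   for sample_path, frames, label in loader:
#       ...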
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/action_recognition/dataloader/build_data_loader.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pose classification root module."""
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/pose_classification/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pose classification config module."""
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/pose_classification/config/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Default config file."""
from typing import Optional, List, Dict
from dataclasses import dataclass, field
@dataclass
class PCModelConfig:
"""Pose classification model config."""
model_type: str = "ST-GCN"
pretrained_model_path: Optional[str] = None
input_channels: int = 3
dropout: float = 0.5
graph_layout: str = "nvidia" # [nvidia, openpose, human3.6m, ntu-rgb+d, ntu_edge, coco]
graph_strategy: str = "spatial" # [uniform, distance, spatial]
edge_importance_weighting: bool = True
@dataclass
class OptimConfig:
"""Optimizer config."""
optimizer_type: str = "torch.optim.SGD"
lr: float = 0.1
momentum: float = 0.9
nesterov: bool = True
weight_decay: float = 0.0001
lr_scheduler: str = "MultiStep" # {AutoReduce, MultiStep}
lr_monitor: str = "val_loss" # {val_loss, train_loss}
patience: int = 1
min_lr: float = 1e-4
lr_steps: List[int] = field(default_factory=lambda: [10, 60])
lr_decay: float = 0.1
@dataclass
class SkeletonDatasetConfig:
"""Skeleton dataset config."""
data_path: Optional[str] = None
label_path: Optional[str] = None
@dataclass
class PCDatasetConfig:
"""Dataset config."""
train_dataset: SkeletonDatasetConfig = SkeletonDatasetConfig()
val_dataset: SkeletonDatasetConfig = SkeletonDatasetConfig()
num_classes: int = 6
label_map: Optional[Dict[str, int]] = None
random_choose: bool = False
random_move: bool = False
window_size: int = -1
batch_size: int = 64
num_workers: int = 1
@dataclass
class PCTrainExpConfig:
"""Train experiment config."""
results_dir: Optional[str] = None
gpu_ids: List[int] = field(default_factory=lambda: [0])
resume_training_checkpoint_path: Optional[str] = None
optim: OptimConfig = OptimConfig()
num_epochs: int = 70
checkpoint_interval: int = 5
grad_clip: float = 0.0
@dataclass
class PCInferenceExpConfig:
"""Inference experiment config."""
results_dir: Optional[str] = None
checkpoint: Optional[str] = None
output_file: Optional[str] = None
test_dataset: SkeletonDatasetConfig = SkeletonDatasetConfig()
gpu_id: int = 0
@dataclass
class PCEvalExpConfig:
"""Evaluation experiment config."""
results_dir: Optional[str] = None
checkpoint: Optional[str] = None
test_dataset: SkeletonDatasetConfig = SkeletonDatasetConfig()
gpu_id: int = 0
@dataclass
class PCExportExpConfig:
"""Export experiment config."""
results_dir: Optional[str] = None
checkpoint: Optional[str] = None
onnx_file: Optional[str] = None
gpu_id: int = 0
@dataclass
class PCDatasetConvertExpConfig:
"""Dataset conversion experiment config."""
results_dir: Optional[str] = None
data: Optional[str] = None
pose_type: str = "3dbp" # [3dbp, 25dbp, 2dbp]
num_joints: int = 34
input_width: int = 1920
input_height: int = 1080
focal_length: float = 1200.0
sequence_length_max: int = 300
sequence_length_min: int = 10
sequence_length: int = 100
sequence_overlap: float = 0.5
@dataclass
class ExperimentConfig:
"""Experiment config."""
results_dir: Optional[str] = None
encryption_key: Optional[str] = None
model: PCModelConfig = PCModelConfig()
dataset: PCDatasetConfig = PCDatasetConfig()
train: PCTrainExpConfig = PCTrainExpConfig()
inference: PCInferenceExpConfig = PCInferenceExpConfig()
evaluate: PCEvalExpConfig = PCEvalExpConfig()
export: PCExportExpConfig = PCExportExpConfig()
dataset_convert: PCDatasetConvertExpConfig = PCDatasetConvertExpConfig()
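# Illustrative sketch (hypothetical values): through the Hydra entrypoints these dataclasses map
# one-to-one onto the experiment spec YAML, so a minimal override file could look like:
#
#   model:
#     model_type: ST-GCN
#     graph_layout: nvidia
#   train:
#     num_epochs: 70
#     optim:
#       lr: 0.1
#       lr_steps: [10, 60]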
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/pose_classification/config/default_config.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pose classification utils module."""
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/pose_classification/utils/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utils for pose classification."""
import os
import torch
import struct
import json
import numpy as np
from eff.core.codec import encrypt_stream
from nvidia_tao_pytorch.core.connectors.checkpoint_connector import decrypt_checkpoint
def patch_decrypt_checkpoint(checkpoint, key):
"""
Decrypt the given checkpoint and adjust it for single-GPU usage.
This function temporarily modifies the torch.load function to ensure the checkpoint is loaded onto the CPU.
It decrypts the checkpoint using the provided key, and then resets torch.load back to its original state.
The 'state_dict_encrypted' field in the checkpoint is also set to False to indicate it has been decrypted.
Args:
checkpoint (dict): The checkpoint to decrypt.
key (str): The decryption key.
Returns:
dict: The decrypted checkpoint.
"""
from functools import partial
legacy_load = torch.load
torch.load = partial(legacy_load, map_location="cpu")
checkpoint = decrypt_checkpoint(checkpoint, key)
torch.load = legacy_load
# set the encrypted status to be False when it is decrypted
checkpoint["state_dict_encrypted"] = False
return checkpoint
def encrypt_onnx(tmp_file_name, output_file_name, key):
"""
Encrypt the onnx model.
The function reads an ONNX model from a file, encrypts it using the provided key,
and writes the encrypted model to a new file.
Args:
tmp_file_name (str): The path to the file containing the ONNX model to encrypt.
output_file_name (str): The path where the encrypted ONNX model should be written.
key (str): The encryption key.
"""
with open(tmp_file_name, "rb") as open_temp_file, open(output_file_name,
"wb") as open_encoded_file:
# set the input name magic number
open_encoded_file.write(struct.pack("<i", 0))
encrypt_stream(
input_stream=open_temp_file, output_stream=open_encoded_file,
passphrase=key, encryption=True
)
def check_and_create(d):
"""
Create a directory if it does not already exist.
Args:
d (str): The path of the directory to create.
"""
if not os.path.isdir(d):
os.makedirs(d)
def load_json_from_file(file_path):
"""
Load data from a JSON file.
Args:
file_path (str): The path of the JSON file to load data from.
Returns:
dict: The data loaded from the JSON file.
"""
with open(file_path, 'r') as f:
data = json.load(f)
return data
def write_np_to_file(file_path, data):
"""
Write a Numpy array to a file.
Args:
file_path (str): The path where the file should be written.
data (numpy.ndarray): The Numpy array to write to the file.
"""
np.save(file=file_path, arr=data, allow_pickle=False)
def data_to_device(data):
"""
Transfer data to GPU.
If the data is a list, each item in the list is moved to the GPU individually. Otherwise, the entire data
object is moved to the GPU.
Args:
data (torch.Tensor or list of torch.Tensor): The data to move to the GPU.
Returns:
torch.Tensor or list of torch.Tensor: The data on the GPU.
"""
if isinstance(data, list):
cuda_data = []
for item in data:
cuda_item = item.cuda(non_blocking=True)
cuda_data.append(cuda_item)
else:
cuda_data = data.cuda(non_blocking=True)
return cuda_data
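# Usage sketch: data_to_device mirrors tensor.cuda(non_blocking=True) for either a single tensor
# or a list of tensors, e.g.:
#
#   batch = [torch.randn(8, 3, 300, 34, 1), torch.randn(8)]
#   batch = data_to_device(batch)   # every tensor now lives on the current CUDA device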
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/pose_classification/utils/common_utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pose classification scripts module."""
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/pose_classification/scripts/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Export pose classification model to ONNX."""
import os
import torch
from nvidia_tao_pytorch.core.hydra.hydra_runner import hydra_runner
import nvidia_tao_pytorch.core.loggers.api_logging as status_logging
from nvidia_tao_pytorch.cv.pose_classification.config.default_config import ExperimentConfig
from nvidia_tao_pytorch.cv.pose_classification.model.pl_pc_model import PoseClassificationModel
from nvidia_tao_pytorch.cv.pose_classification.model.st_gcn import Graph
from nvidia_tao_pytorch.cv.pose_classification.utils.common_utils import check_and_create
from nvidia_tao_pytorch.core.cookbooks.tlt_pytorch_cookbook import TLTPyTorchCookbook
from nvidia_tao_pytorch.core.utilities import update_results_dir
spec_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Load experiment specification, additionally using schema for validation/retrieving the default values.
# --config_path and --config_name will be provided by the entrypoint script.
@hydra_runner(
config_path=os.path.join(spec_root, "experiment_specs"), config_name="experiment", schema=ExperimentConfig
)
def main(cfg: ExperimentConfig) -> None:
"""
Run the export process for pose classification model.
This function serves as the entry point for the export script.
It loads the experiment specification, updates the results directory, and calls the 'run_export' function.
Args:
cfg (ExperimentConfig): The experiment configuration retrieved from the Hydra configuration files.
"""
try:
cfg = update_results_dir(cfg, task="export")
run_export(cfg, results_dir=cfg.results_dir)
status_logging.get_status_logger().write(
status_level=status_logging.Status.SUCCESS,
message="Export finished successfully."
)
except (KeyboardInterrupt, SystemExit):
status_logging.get_status_logger().write(
message="Export was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
except Exception as e:
status_logging.get_status_logger().write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
raise e
def run_export(args, results_dir):
"""
Run the export of the pose classification model to ONNX.
This function handles the export process, including loading the model, creating dummy input,
and exporting the model to an ONNX file. It also performs encryption on the ONNX file.
Args:
args (dict): The parsed arguments for the export process.
results_dir (str): The directory to save the export results.
Raises:
AssertionError: If the default output file already exists.
Exception: If any error occurs during the export process.
"""
check_and_create(results_dir)
# Set status logging
status_file = os.path.join(results_dir, "status.json")
status_logging.set_status_logger(status_logging.StatusLogger(filename=status_file, append=True))
status_logging.get_status_logger().write(status_level=status_logging.Status.STARTED, message="Starting Pose Classification export")
gpu_id = args['export']['gpu_id']
torch.cuda.set_device(gpu_id)
# Parsing command line arguments.
model_path = args['export']['checkpoint']
key = args['encryption_key']
# set the encryption key:
TLTPyTorchCookbook.set_passphrase(key)
onnx_file = args['export']['onnx_file']
experiment_config = args
# Set default output filename if the filename
# isn't provided over the command line.
if onnx_file is None:
split_name = os.path.splitext(model_path)[0]
onnx_file = "{}.onnx".format(split_name)
# Warn the user if an exported file already exists.
assert not os.path.exists(onnx_file), "Default output file {} already "\
"exists.".format(onnx_file)
# Make an output directory if necessary.
output_root = os.path.dirname(os.path.realpath(onnx_file))
if not os.path.exists(output_root):
os.makedirs(output_root)
# load model
pl_model = PoseClassificationModel.load_from_checkpoint(model_path,
map_location="cpu",
experiment_spec=experiment_config,
export=True)
model = pl_model.model
model.eval()
model.cuda()
# create dummy input
input_channels = experiment_config["model"]["input_channels"]
graph_layout = experiment_config["model"]["graph_layout"]
graph_strategy = experiment_config["model"]["graph_strategy"]
graph = Graph(layout=graph_layout, strategy=graph_strategy)
num_node = graph.get_num_node()
num_person = graph.get_num_person()
seq_length = graph.get_seq_length()
dummy_input = torch.randn(1, input_channels, seq_length,
num_node, num_person).cuda()
input_names = ["input"]
output_names = ["fc_pred"]
dynamic_axes = {"input": {0: "batch"}, "fc_pred": {0: "batch"}}
# export
torch.onnx.export(model,
dummy_input,
onnx_file,
input_names=input_names,
output_names=output_names,
dynamic_axes=dynamic_axes,
verbose=True,
opset_version=12)
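# Optional sanity-check sketch (assumes the onnx package is available in the environment): the
# exported graph can be validated independently of downstream consumers, e.g.:
#
#   import onnx
#   onnx.checker.check_model(onnx.load(onnx_file))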
if __name__ == "__main__":
main()
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/pose_classification/scripts/export.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert pose data from deepstream-bodypose-3d to skeleton arrays."""
import os
import numpy as np
from nvidia_tao_pytorch.core.hydra.hydra_runner import hydra_runner
import nvidia_tao_pytorch.core.loggers.api_logging as status_logging
from nvidia_tao_pytorch.cv.pose_classification.config.default_config import ExperimentConfig
from nvidia_tao_pytorch.cv.pose_classification.utils.common_utils import check_and_create, load_json_from_file, write_np_to_file
from nvidia_tao_pytorch.core.cookbooks.tlt_pytorch_cookbook import TLTPyTorchCookbook
from nvidia_tao_pytorch.core.utilities import update_results_dir
def create_data_numpy(data_numpy, pose_sequence, frame_start, frame_end, pose_type, num_joints, sequence_length_max):
"""
Create a NumPy array for output.
This function takes a pose sequence and converts it into a NumPy array for output. The output array
has the shape (1, joint_dim, sequence_length_max, num_joints, 1), where joint_dim is 2 or 3 depending
on the pose type (2D or 3D), sequence_length_max is the maximum sequence length, and num_joints is the
number of joints in the pose.
Args:
data_numpy (numpy.ndarray or None): The existing NumPy array to concatenate the sequence with. If None,
a new array will be created.
pose_sequence (list): The pose sequence to convert.
frame_start (int): The starting frame index.
frame_end (int): The ending frame index.
pose_type (str): The type of pose data ("2dbp", "3dbp", "25dbp").
num_joints (int): The number of joints in the pose.
sequence_length_max (int): The maximum sequence length.
Returns:
numpy.ndarray: The NumPy array containing the converted pose sequence.
"""
joint_dim = 3
if pose_type == "2dbp":
joint_dim = 2
sequence = np.zeros((1, joint_dim, sequence_length_max, num_joints, 1), dtype="float32")
f = 0
for frame in range(frame_start, frame_end):
for j in range(num_joints):
for d in range(joint_dim):
sequence[0, d, f, j, 0] = pose_sequence[frame][j][d]
f += 1
if data_numpy is None:
data_numpy = sequence
else:
data_numpy = np.concatenate((data_numpy, sequence), axis=0)
return data_numpy
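# Illustrative sketch (assumed pose_type="3dbp", num_joints=34, sequence_length_max=300): each
# call appends one block of shape (1, 3, 300, 34, 1) along axis 0, so converting two windows of
# one object's sequence yields data_numpy.shape == (2, 3, 300, 34, 1); for "2dbp" the channel
# dimension is 2 instead of 3.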
def run_experiment(experiment_config, key, data_path, results_dir):
"""
Start the dataset conversion.
This function is responsible for the main dataset conversion process. It loads the pose data from
the deepstream-bodypose-3d JSON file, extracts the pose sequences, applies normalization and preprocessing,
and saves the resulting skeleton arrays as NumPy files.
Args:
experiment_config (dict): The experiment configuration.
key (str): The encryption key for data encryption.
data_path (str): The path to the deepstream-bodypose-3d JSON file.
results_dir (str): The directory to save the converted dataset.
Raises:
AssertionError: If the number of frames in a batch does not match the length of the batches.
KeyError: If the required pose data ("pose3d" or "pose25d") is not found in the input data.
NotImplementedError: If the pose type specified in the experiment configuration is not supported.
Exception: If any error occurs during the conversion process.
"""
# create output directory
check_and_create(results_dir)
# Set status logging
status_file = os.path.join(results_dir, "status.json")
status_logging.set_status_logger(status_logging.StatusLogger(filename=status_file, append=True))
status_logging.get_status_logger().write(status_level=status_logging.Status.STARTED, message="Starting Pose classification dataset convert")
# set the encryption key:
TLTPyTorchCookbook.set_passphrase(key)
# load pose data from deepstream-bodypose-3d
pose_data = load_json_from_file(data_path)
# extract sequences from pose data and apply normalization
pose_type = experiment_config["dataset_convert"]["pose_type"]
num_joints = experiment_config["dataset_convert"]["num_joints"]
input_width = float(experiment_config["dataset_convert"]["input_width"])
input_height = float(experiment_config["dataset_convert"]["input_height"])
focal_length = experiment_config["dataset_convert"]["focal_length"]
pose_sequences = {}
for batch in pose_data:
assert batch["num_frames_in_batch"] == len(batch["batches"]), f"batch[\"num_frames_in_batch\"] "\
f"{batch['num_frames_in_batch']} does not match len(batch[\"batches\"]) {len(batch['batches'])}."
for frame in batch["batches"]:
for person in frame["objects"]:
object_id = person["object_id"]
if object_id not in pose_sequences.keys():
pose_sequences[object_id] = []
poses = []
if pose_type == "3dbp":
if "pose3d" not in list(person.keys()):
raise KeyError("\"pose3d\" not found in input data. "
"Please run deepstream-bodypose-3d with \"--publish-pose pose3d\".")
assert num_joints == len(person["pose3d"]) // 4, f"The num_joints should be "\
f"{len(person['pose3d']) // 4}. Got {num_joints}."
for j in range(num_joints):
if person["pose3d"][j * 4 + 3] == 0.0:
poses.append([0.0, 0.0, 0.0])
continue
x = (person["pose3d"][j * 4 + 0] - person["pose3d"][0]) / focal_length
y = (person["pose3d"][j * 4 + 1] - person["pose3d"][1]) / focal_length
z = (person["pose3d"][j * 4 + 2] - person["pose3d"][2]) / focal_length
poses.append([x, y, z])
elif pose_type in ("25dbp", "2dbp"):
if "pose25d" not in list(person.keys()):
raise KeyError("\"pose25d\" not found in input data. "
"Please run deepstream-bodypose-3d with \"--publish-pose pose25d\".")
assert num_joints == len(person["pose25d"]) // 4, f"The num_joints should be "\
f"{len(person['pose25d']) // 4}. Got {num_joints}."
for j in range(num_joints):
if person["pose25d"][j * 4 + 3] == 0.0:
if pose_type == "25dbp":
poses.append([0.0, 0.0, 0.0])
else:
poses.append([0.0, 0.0])
continue
x = person["pose25d"][j * 4 + 0] / input_width - 0.5
y = person["pose25d"][j * 4 + 1] / input_height - 0.5
z = person["pose25d"][j * 4 + 2]
if pose_type == "25dbp":
poses.append([x, y, z])
else:
poses.append([x, y])
else:
raise NotImplementedError(f"Pose type {pose_type} is not supported.")
pose_sequences[object_id].append(poses)
print(f"Number of objects: {len(pose_sequences.keys())}")
status_logging.get_status_logger().kpi = {"Number of objects": len(pose_sequences.keys())}
status_logging.get_status_logger().write(
status_level=status_logging.Status.RUNNING,
)
# create output of pose arrays
sequence_length_max = experiment_config["dataset_convert"]["sequence_length_max"]
sequence_length_min = experiment_config["dataset_convert"]["sequence_length_min"]
sequence_length = experiment_config["dataset_convert"]["sequence_length"]
sequence_overlap = experiment_config["dataset_convert"]["sequence_overlap"]
step = int(sequence_length * sequence_overlap)
for object_id in pose_sequences.keys():
data_numpy = None
frame_start = 0
sequence_count = 0
while len(pose_sequences[object_id]) - frame_start >= sequence_length_min:
frame_end = frame_start + sequence_length
if len(pose_sequences[object_id]) - frame_start < sequence_length:
frame_end = len(pose_sequences[object_id])
data_numpy = create_data_numpy(data_numpy, pose_sequences[object_id], frame_start, frame_end,
pose_type, num_joints, sequence_length_max)
frame_start += step
sequence_count += 1
if sequence_count > 0:
results_path = os.path.join(results_dir, "object_" + str(object_id) + ".npy")
write_np_to_file(results_path, data_numpy)
print(f"Saved data {data_numpy.shape} for object {object_id} at {results_path}")
status_logging.get_status_logger().write(
message=f"Saved data {data_numpy.shape} for object {object_id} at {results_path}",
status_level=status_logging.Status.RUNNING
)
spec_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Load experiment specification, additionally using schema for validation/retrieving the default values.
# --config_path and --config_name will be provided by the entrypoint script.
@hydra_runner(
config_path=os.path.join(spec_root, "experiment_specs"), config_name="experiment", schema=ExperimentConfig
)
def main(cfg: ExperimentConfig) -> None:
"""
Run the dataset conversion process.
This function serves as the entry point for the dataset conversion script.
It loads the experiment specification, updates the results directory, and calls the 'run_experiment' function.
Args:
cfg (ExperimentConfig): The experiment configuration retrieved from the Hydra configuration files.
"""
try:
cfg = update_results_dir(cfg, task="dataset_convert")
run_experiment(experiment_config=cfg,
key=cfg.encryption_key,
results_dir=cfg.results_dir,
data_path=cfg.dataset_convert.data)
status_logging.get_status_logger().write(
status_level=status_logging.Status.SUCCESS,
message="Dataset convert finished successfully."
)
except (KeyboardInterrupt, SystemExit):
status_logging.get_status_logger().write(
message="Dataset convert was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
except Exception as e:
status_logging.get_status_logger().write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
raise e
if __name__ == "__main__":
main()
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/pose_classification/scripts/dataset_convert.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Train pose classification model."""
import os
import re
from nvidia_tao_pytorch.core.connectors.checkpoint_connector import TLTCheckpointConnector
from nvidia_tao_pytorch.core.cookbooks.tlt_pytorch_cookbook import TLTPyTorchCookbook
from nvidia_tao_pytorch.core.tlt_logging import obfuscate_logs
from nvidia_tao_pytorch.core.utilities import update_results_dir
from nvidia_tao_pytorch.core.callbacks.loggers import TAOStatusLogger
import nvidia_tao_pytorch.core.loggers.api_logging as status_logging
from nvidia_tao_pytorch.core.hydra.hydra_runner import hydra_runner
from nvidia_tao_pytorch.cv.pose_classification.config.default_config import ExperimentConfig
from nvidia_tao_pytorch.cv.pose_classification.model.pl_pc_model import PoseClassificationModel
from nvidia_tao_pytorch.cv.pose_classification.utils.common_utils import check_and_create
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import ModelCheckpoint
def run_experiment(experiment_config, key, results_dir):
"""
Start the training process.
This function initializes the pose classification model with the provided experiment configuration.
It sets up the necessary components such as the status logger and checkpoint callbacks.
The training is performed using the PyTorch Lightning Trainer.
Args:
experiment_config (dict): The experiment configuration containing the model, training, and other parameters.
key (str): The encryption key for intermediate checkpoints.
results_dir (str): The directory to save the trained model checkpoints and logs.
"""
# set the encryption key:
TLTPyTorchCookbook.set_passphrase(key)
pc_model = PoseClassificationModel(experiment_config)
check_and_create(results_dir)
num_epochs = experiment_config['train']['num_epochs']
checkpoint_interval = experiment_config['train']['checkpoint_interval']
assert checkpoint_interval <= num_epochs, (
f"Checkpoint interval {checkpoint_interval} > Number of epochs {num_epochs}. "
f"Please set experiment_config.train.checkpoint_interval < {num_epochs}"
)
status_logger_callback = TAOStatusLogger(results_dir, append=True, num_epochs=num_epochs)
status_logging.set_status_logger(status_logger_callback.logger)
grad_clip = experiment_config['train']['grad_clip']
gpus_ids = experiment_config['train']["gpu_ids"]
acc_flag = None
if len(gpus_ids) > 1:
acc_flag = "ddp"
trainer = Trainer(gpus=gpus_ids,
max_epochs=num_epochs,
check_val_every_n_epoch=checkpoint_interval,
default_root_dir=results_dir,
accelerator='gpu',
strategy=acc_flag,
gradient_clip_val=grad_clip)
# Overload connector to enable intermediate ckpt encryption & decryption.
resume_ckpt = experiment_config['train']['resume_training_checkpoint_path']
if resume_ckpt is not None:
trainer._checkpoint_connector = TLTCheckpointConnector(trainer, resume_from_checkpoint=resume_ckpt)
else:
trainer._checkpoint_connector = TLTCheckpointConnector(trainer)
if resume_ckpt is not None:
trainer._checkpoint_connector.resume_checkpoint_path = resume_ckpt
# setup checkpointer:
ModelCheckpoint.FILE_EXTENSION = ".tlt"
checkpoint_callback = ModelCheckpoint(every_n_epochs=checkpoint_interval,
dirpath=results_dir,
save_on_train_epoch_end=True,
monitor=None,
save_top_k=-1,
filename='pc_model_{epoch:03d}')
if resume_ckpt:
status_logging.get_status_logger().write(
message=f"Resuming training from checkpoint: {resume_ckpt}",
status_level=status_logging.Status.STARTED
)
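# Parse the epoch number out of the checkpoint filename (e.g. a name containing "epoch=003",
# matching the 'pc_model_{epoch:03d}' pattern above) so the status logger resumes counting
# from the right epoch.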
resumed_epoch = re.search('epoch=(\\d+)', resume_ckpt)
if resumed_epoch:
resumed_epoch = int(resumed_epoch.group(1))
else:
resumed_epoch = 0
status_logger_callback.epoch_counter = resumed_epoch + 1 # make sure callback epoch matches resumed epoch
trainer.callbacks.append(status_logger_callback)
trainer.callbacks.append(checkpoint_callback)
trainer.fit(pc_model)
spec_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Load experiment specification, additionally using schema for validation/retrieving the default values.
# --config_path and --config_name will be provided by the entrypoint script.
@hydra_runner(
config_path=os.path.join(spec_root, "experiment_specs"), config_name="experiment", schema=ExperimentConfig
)
def main(cfg: ExperimentConfig) -> None:
"""
Run the training process.
This function serves as the entry point for the training script.
It loads the experiment specification, obfuscates logs, updates the results directory, and calls the 'run_experiment' function.
Args:
cfg (ExperimentConfig): The experiment configuration retrieved from the Hydra configuration files.
"""
# Obfuscate logs.
try:
obfuscate_logs(cfg)
cfg = update_results_dir(cfg, task="train")
run_experiment(experiment_config=cfg,
key=cfg.encryption_key,
results_dir=cfg.results_dir)
status_logging.get_status_logger().write(
status_level=status_logging.Status.SUCCESS,
message="Training finished successfully."
)
except (KeyboardInterrupt, SystemExit):
status_logging.get_status_logger().write(
message="Training was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
except Exception as e:
status_logging.get_status_logger().write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
raise e
if __name__ == "__main__":
main()
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/pose_classification/scripts/train.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Inference on single patch."""
import os
import torch
from tqdm import tqdm
from nvidia_tao_pytorch.core.hydra.hydra_runner import hydra_runner
import nvidia_tao_pytorch.core.loggers.api_logging as status_logging
from nvidia_tao_pytorch.cv.pose_classification.config.default_config import ExperimentConfig
from nvidia_tao_pytorch.cv.pose_classification.dataloader.build_data_loader import build_dataloader
from nvidia_tao_pytorch.cv.pose_classification.inference.inferencer import Inferencer
from nvidia_tao_pytorch.cv.pose_classification.model.pl_pc_model import PoseClassificationModel
from nvidia_tao_pytorch.cv.pose_classification.utils.common_utils import check_and_create
from nvidia_tao_pytorch.core.cookbooks.tlt_pytorch_cookbook import TLTPyTorchCookbook
from nvidia_tao_pytorch.core.utilities import update_results_dir
def run_experiment(experiment_config, results_dir, key, model_path, data_path):
"""
Start the inference process.
This function initializes the necessary components for inference, including the model, data loader,
and inferencer. It performs inference on the provided data and saves the results in the specified output file.
Args:
experiment_config (dict): The experiment configuration containing the model and inference parameters.
results_dir (str): The directory to save the status and log files.
key (str): The encryption key for intermediate checkpoints.
model_path (str): The path to the pre-trained model checkpoint.
data_path (str): The path to the test dataset.
Raises:
Exception: If any error occurs during the inference process.
"""
check_and_create(results_dir)
# Set status logging
status_file = os.path.join(results_dir, "status.json")
status_logging.set_status_logger(status_logging.StatusLogger(filename=status_file, append=True))
status_logging.get_status_logger().write(
status_level=status_logging.Status.STARTED,
message="Starting Pose classification inference"
)
gpu_id = experiment_config.inference.gpu_id
torch.cuda.set_device(gpu_id)
# set the encryption key:
TLTPyTorchCookbook.set_passphrase(key)
# build dataloader
label_map = experiment_config["dataset"]["label_map"]
batch_size = experiment_config["dataset"]["batch_size"]
num_workers = experiment_config["dataset"]["num_workers"]
dataloader = build_dataloader(data_path=data_path,
label_map=label_map,
mmap=True,
batch_size=batch_size,
num_workers=num_workers)
# build inferencer
model = PoseClassificationModel.load_from_checkpoint(model_path,
map_location="cpu",
experiment_spec=experiment_config)
infer = Inferencer(model, ret_prob=False)
# do inference
progress = tqdm(dataloader)
id2name = {v: k for k, v in label_map.items()}
results = []
for data_label in progress:
data = data_label[0]
batch_size = len(data)
pred_id = infer.inference(data)
pred_name = []
for label_idx in pred_id:
pred_name.append(id2name[label_idx])
results.extend(pred_name)
# save the output
output_file = open(experiment_config["inference"]["output_file"], "w")
for idx in range(len(results)):
output_file.write("{}\n".format(results[idx]))
output_file.close()
spec_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Load experiment specification, additionally using schema for validation/retrieving the default values.
# --config_path and --config_name will be provided by the entrypoint script.
@hydra_runner(
config_path=os.path.join(spec_root, "experiment_specs"), config_name="experiment", schema=ExperimentConfig
)
def main(cfg: ExperimentConfig) -> None:
"""
Run the inference process.
This function serves as the entry point for the inference script.
It loads the experiment specification, obfuscates logs, updates the results directory, and calls the 'run_experiment' function.
Args:
cfg (ExperimentConfig): The experiment configuration retrieved from the Hydra configuration files.
"""
# Obfuscate logs.
try:
cfg = update_results_dir(cfg, task="inference")
run_experiment(experiment_config=cfg,
results_dir=cfg.results_dir,
key=cfg.encryption_key,
model_path=cfg.inference.checkpoint,
data_path=cfg.inference.test_dataset.data_path)
status_logging.get_status_logger().write(
status_level=status_logging.Status.SUCCESS,
message="Inference finished successfully."
)
except (KeyboardInterrupt, SystemExit):
status_logging.get_status_logger().write(
message="Inference was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
except Exception as e:
status_logging.get_status_logger().write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
raise e
if __name__ == "__main__":
main()
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/pose_classification/scripts/inference.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Evaluate a trained pose classification model."""
import csv
import os
import numpy as np
import torch
from tqdm import tqdm
from tabulate import tabulate
import nvidia_tao_pytorch.core.loggers.api_logging as status_logging
from nvidia_tao_pytorch.core.hydra.hydra_runner import hydra_runner
from nvidia_tao_pytorch.cv.pose_classification.config.default_config import ExperimentConfig
from nvidia_tao_pytorch.cv.pose_classification.model.pl_pc_model import PoseClassificationModel
from nvidia_tao_pytorch.cv.pose_classification.dataloader.build_data_loader import build_dataloader
from nvidia_tao_pytorch.cv.pose_classification.inference.inferencer import Inferencer
from nvidia_tao_pytorch.cv.pose_classification.utils.common_utils import check_and_create
from nvidia_tao_pytorch.core.cookbooks.tlt_pytorch_cookbook import TLTPyTorchCookbook
from nvidia_tao_pytorch.core.utilities import update_results_dir
def compute_metrics(confusion_matrix):
"""
Compute evaluation metrics based on the confusion matrix.
This function computes the percentage confusion matrix, accuracy, and average class accuracy
from the provided confusion matrix.
Args:
confusion_matrix (np.ndarray): The confusion matrix of shape (num_classes, num_classes).
Returns:
np.ndarray: The percentage confusion matrix of the same shape as the input matrix.
float: The overall accuracy.
float: The average class accuracy.
"""
row_sum = np.sum(confusion_matrix, axis=1)
_shape = confusion_matrix.shape
percentage_confusion_matrix = np.zeros(
_shape, dtype=np.float32)
for x in range(_shape[0]):
for y in range(_shape[1]):
if not row_sum[x] == 0:
percentage_confusion_matrix[x][y] = np.float32(confusion_matrix[x][y]) / \
row_sum[x] * 100.0
trace = np.trace(confusion_matrix)
percent_trace = np.trace(percentage_confusion_matrix)
accuracy = float(trace) / np.sum(confusion_matrix) * 100.0
m_accuracy = percent_trace / _shape[0]
return percentage_confusion_matrix, accuracy, m_accuracy
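# Illustrative sketch of compute_metrics on a hypothetical 2-class confusion matrix
# (values are made up for documentation; the helper below is not called anywhere).
def _example_compute_metrics():
    """Show the expected outputs of compute_metrics for a small, hand-made confusion matrix."""
    cm = np.array([[8, 2],
                   [1, 9]], dtype=np.int32)
    percentage_cm, accuracy, m_accuracy = compute_metrics(cm)
    # percentage_cm rows are row-normalized percentages: [80.0, 20.0] and [10.0, 90.0].
    # accuracy == (8 + 9) / 20 * 100 == 85.0; m_accuracy == (80.0 + 90.0) / 2 == 85.0.
    return percentage_cm, accuracy, m_accuracy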
def dump_cm(csv_path, cm, id2name):
"""
Dump the confusion matrix to a CSV file.
This function saves the confusion matrix to a CSV file, where each row and column represent a class,
and the cell values represent the counts.
Args:
csv_path (str): The path to the output CSV file.
cm (np.ndarray): The confusion matrix of shape (num_classes, num_classes).
id2name (dict): A dictionary mapping class IDs to class names.
"""
n_class = len(id2name.keys())
with open(csv_path, "w") as f:
writer = csv.writer(f)
label_list = ["class"]
for idx in range(n_class):
label_list.append(id2name[idx])
writer.writerow(label_list)
for row_id in range(n_class):
row = [id2name[row_id]]
for col_id in range(n_class):
row.append(cm[row_id][col_id])
writer.writerow(row)
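# For a hypothetical two-class label_map such as {"sitting": 0, "walking": 1}, dump_cm would
# write rows of the form:
#     class,sitting,walking
#     sitting,8,2
#     walking,1,9
# where each cell holds the raw count for the (true class, predicted class) pair.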
def run_experiment(experiment_config, results_dir, key, model_path, data_path, label_path):
"""
Run the evaluation process.
This function initializes the necessary components for evaluation, including the model, data loader,
and inferencer. It performs evaluation on the test dataset and computes evaluation metrics and the confusion matrix.
Args:
experiment_config (dict): The experiment configuration containing the model and evaluation parameters.
results_dir (str): The directory to save the evaluation results.
key (str): The encryption key for intermediate checkpoints.
model_path (str): The path to the trained model checkpoint.
data_path (str): The path to the test dataset.
label_path (str): The path to the label data.
Raises:
Exception: If any error occurs during the evaluation process.
"""
check_and_create(results_dir)
# Set status logging
status_file = os.path.join(results_dir, "status.json")
status_logging.set_status_logger(status_logging.StatusLogger(filename=status_file, append=True))
status_logging.get_status_logger().write(status_level=status_logging.Status.STARTED, message="Starting Pose classification evaluation")
gpu_id = experiment_config.evaluate.gpu_id
torch.cuda.set_device(gpu_id)
# set the encryption key:
TLTPyTorchCookbook.set_passphrase(key)
# build dataloader
label_map = experiment_config["dataset"]["label_map"]
batch_size = experiment_config["dataset"]["batch_size"]
num_workers = experiment_config["dataset"]["num_workers"]
dataloader = build_dataloader(data_path=data_path,
label_path=label_path,
label_map=label_map,
mmap=True,
batch_size=batch_size,
num_workers=num_workers)
# build inferencer
model = PoseClassificationModel.load_from_checkpoint(model_path,
map_location="cpu",
experiment_spec=experiment_config)
infer = Inferencer(model, ret_prob=False)
# do evaluation
num_classes = len(label_map.keys())
confusion_matrix = np.zeros((num_classes, num_classes), dtype=np.int32)
progress = tqdm(dataloader)
for data, label in progress:
batch_size = len(data)
pred_id = infer.inference(data)
for idx in range(batch_size):
confusion_matrix[label[idx].item(), pred_id[idx]] += 1
percentage_confusion_matrix, accuracy, m_accuracy = compute_metrics(confusion_matrix)
table = []
id2name = {v: k for k, v in label_map.items()}
for idx in range(len(label_map)):
cls_acc = percentage_confusion_matrix[idx][idx]
table.append(["Class accuracy: " + id2name[idx], cls_acc])
table.append(["Total accuracy", accuracy])
table.append(["Average class accuracy", m_accuracy])
status_logging.get_status_logger().kpi = {"accuracy": round(accuracy, 2), "avg_accuracy": round(m_accuracy, 2)}
status_logging.get_status_logger().write(message="Evaluation metrics generated.", status_level=status_logging.Status.RUNNING)
print(tabulate(table, headers=["Name", "Score"], floatfmt=".4f", tablefmt="fancy_grid"))
spec_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Load experiment specification, additionally using schema for validation/retrieving the default values.
# --config_path and --config_name will be provided by the entrypoint script.
@hydra_runner(
config_path=os.path.join(spec_root, "experiment_specs"), config_name="experiment", schema=ExperimentConfig
)
def main(cfg: ExperimentConfig) -> None:
"""
Run the evaluation process.
This function serves as the entry point for the evaluation script.
It loads the experiment specification, updates the results directory, and calls the 'run_experiment' function.
Args:
cfg (ExperimentConfig): The experiment configuration retrieved from the Hydra configuration files.
"""
try:
cfg = update_results_dir(cfg, task="evaluate")
run_experiment(experiment_config=cfg,
results_dir=cfg.results_dir,
key=cfg.encryption_key,
model_path=cfg.evaluate.checkpoint,
data_path=cfg.evaluate.test_dataset.data_path,
label_path=cfg.evaluate.test_dataset.label_path)
status_logging.get_status_logger().write(
status_level=status_logging.Status.SUCCESS,
message="Evaluation finished successfully."
)
except (KeyboardInterrupt, SystemExit):
status_logging.get_status_logger().write(
message="Evaluation was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
except Exception as e:
status_logging.get_status_logger().write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
raise e
if __name__ == "__main__":
main()
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/pose_classification/scripts/evaluate.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Entrypoint script for the pose classification task."""
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/pose_classification/entrypoint/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""'Entry point' script running subtasks related to pose classification."""
import importlib
import os
import pkgutil
import argparse
import subprocess
import sys
from time import time
import nvidia_tao_pytorch.cv.pose_classification.scripts as scripts
from nvidia_tao_pytorch.core.telemetry.nvml_utils import get_device_details
from nvidia_tao_pytorch.core.telemetry.telemetry import send_telemetry_data
def get_subtasks(package):
"""
Get supported subtasks for a given task.
This function dynamically collects all modules within a specified package. It is particularly useful
for gathering all scripts within a package for a specific task.
Args:
package (module): The package to gather subtask modules from.
Returns:
dict: A dictionary where the keys are the names of the subtasks and the values are dictionaries containing
the full module name and absolute path of the subtask module.
"""
module_path = package.__path__
modules = {}
# Collect modules dynamically.
for _, task, is_package in pkgutil.walk_packages(module_path):
if is_package:
continue
module_name = package.__name__ + '.' + task
module_details = {
"module_name": module_name,
"runner_path": os.path.abspath(importlib.import_module(module_name).__file__),
}
modules[task] = module_details
return modules
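# For this package, get_subtasks(scripts) returns entries shaped roughly like the following
# (the runner paths shown here are illustrative placeholders):
#     {"train": {"module_name": "nvidia_tao_pytorch.cv.pose_classification.scripts.train",
#                "runner_path": "/.../pose_classification/scripts/train.py"},
#      "evaluate": {...}, "inference": {...}, "export": {...}, "dataset_convert": {...}}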
def launch(parser, subtasks, network=None):
"""
CLI function that executes subtasks.
This function uses the argparse module to parse command line arguments for a given task. It processes these arguments
and then runs the corresponding subtask script with these arguments. It also collects telemetry data and sends it.
Args:
parser (argparse.ArgumentParser): An ArgumentParser object for parsing command line arguments.
subtasks (dict): A dictionary where the keys are the names of the subtasks and the values are dictionaries containing
the full module name and absolute path of the subtask module.
network (str, optional): The name of the network running training. Defaults to None.
"""
# Subtasks for a given model.
parser.add_argument(
'subtask', default='train', choices=subtasks.keys(), help="Subtask for a given task/model.",
)
# Add standard TLT arguments.
parser.add_argument(
"-r",
"--results_dir",
help="Path to a folder where the experiment outputs should be written.",
default=None,
required=False,
)
parser.add_argument("-k", "--key", help="User specific encoding key to save or load a .tlt model.")
parser.add_argument("-e", "--experiment_spec_file", help="Path to the experiment spec file.", default=None)
# Parse the arguments.
args, unknown_args = parser.parse_known_args()
script_args = ""
# Process spec file for all commands except the one for getting spec files ;)
# Make sure the user provides spec file.
if args.experiment_spec_file is None:
print("ERROR: The subtask `{}` requires the following argument: -e/--experiment_spec_file".format(args.subtask))
exit(1)
# Make sure the file exists!
if not os.path.exists(args.experiment_spec_file):
print("ERROR: The indicated experiment spec file `{}` doesn't exist!".format(args.experiment_spec_file))
exit(1)
# Split spec file_path into config path and config name.
path, name = os.path.split(args.experiment_spec_file)
if path != '':
script_args += " --config-path " + os.path.realpath(path)
script_args += " --config-name " + name
# And add other params AFTERWARDS!
if args.subtask in ["train"]:
if args.results_dir:
script_args += " results_dir=" + args.results_dir
# Add encryption key.
if args.subtask in ["train", "evaluate", "inference", "export", "dataset_convert"]:
if args.key is not None:
script_args += " encryption_key=" + args.key
# Find relevant module and pass args.
script = subtasks[args.subtask]["runner_path"]
# Pass unknown args to call
unknown_args_as_str = " ".join(unknown_args)
# Create a system call.
call = "python " + script + script_args + " " + unknown_args_as_str
process_passed = True
start = time()
try:
# Run the script.
subprocess.check_call(call, shell=True, stdout=sys.stdout, stderr=sys.stdout)
except (KeyboardInterrupt, SystemExit):
print("Command was interrupted.")
process_passed = True
except subprocess.CalledProcessError as e:
if e.output is not None:
print(e.output)
process_passed = False
end = time()
time_lapsed = int(end - start)
try:
gpu_data = list()
for device in get_device_details():
gpu_data.append(device.get_config())
send_telemetry_data(
network,
args.subtask,
gpu_data,
num_gpus=1,
time_lapsed=time_lapsed,
pass_status=process_passed
)
except Exception as e:
print("Telemetry data couldn't be sent, but the command ran successfully.")
print(f"[WARNING]: {e}")
pass
if not process_passed:
print("Execution status: FAIL")
exit(1) # returning non zero return code from the process.
print("Execution status: PASS")
def main():
"""
Main entrypoint function.
This function creates a command line argument parser, gathers all subtasks in the 'scripts' package using the
'get_subtasks' function, and then executes the chosen subtask using the 'launch' function.
"""
# Create parser for a given task.
parser = argparse.ArgumentParser(
"pose_classification", add_help=True, description="Transfer Learning Toolkit"
)
# Build list of subtasks by inspecting the package.
subtasks = get_subtasks(scripts)
# Parse the arguments and launch the subtask.
launch(parser, subtasks, network="pose_classification")
if __name__ == '__main__':
main()
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/pose_classification/entrypoint/pose_classification.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Main PTL model file for pose classification."""
from typing import Any, Dict
import pytorch_lightning as pl
import torch
import torch.nn.functional as F
from torch.optim.lr_scheduler import ReduceLROnPlateau, MultiStepLR
import torchmetrics
from nvidia_tao_pytorch.cv.pose_classification.dataloader.build_data_loader import build_dataloader
from nvidia_tao_pytorch.cv.pose_classification.model.build_nn_model import build_pc_model
from nvidia_tao_pytorch.cv.pose_classification.utils.common_utils import patch_decrypt_checkpoint
from nvidia_tao_pytorch.core.cookbooks.tlt_pytorch_cookbook import TLTPyTorchCookbook
import nvidia_tao_pytorch.core.loggers.api_logging as status_logging
# pylint:disable=too-many-ancestors
class PoseClassificationModel(pl.LightningModule):
"""PyTorch Lightning module for single stream pose classification."""
def __init__(self, experiment_spec, export=False):
"""
Initialize the training for pose classification model.
Args:
experiment_spec (dict): The experiment specifications.
export (bool, optional): If set to True, the model is prepared for export. Defaults to False.
"""
super().__init__()
self.experiment_spec = experiment_spec
self.model_config = experiment_spec["model"]
self.dataset_config = experiment_spec["dataset"]
# init the model
self._build_model(experiment_spec, export)
self.train_accuracy = torchmetrics.Accuracy()
self.val_accuracy = torchmetrics.Accuracy()
self.status_logging_dict = {"train_loss": 0.0,
"train_acc": 0.0,
"val_loss": 0.0,
"val_acc": 0.0}
def _build_model(self, experiment_spec, export):
"""
Internal function to build the model.
Args:
experiment_spec (dict): The experiment specifications.
export (bool): If set to True, the model is prepared for export.
"""
self.model = build_pc_model(experiment_config=experiment_spec,
export=export)
print(self.model)
def train_dataloader(self):
"""
Build the dataloader for training.
Returns:
DataLoader: The dataloader for training.
"""
train_loader = \
build_dataloader(data_path=self.dataset_config["train_dataset"]["data_path"],
label_path=self.dataset_config["train_dataset"]["label_path"],
label_map=self.dataset_config["label_map"],
random_choose=self.dataset_config["random_choose"],
random_move=self.dataset_config["random_move"],
window_size=self.dataset_config["window_size"],
mmap=True,
batch_size=self.dataset_config["batch_size"],
shuffle=False,
num_workers=self.dataset_config["num_workers"],
pin_mem=False)
return train_loader
def val_dataloader(self):
"""
Build the dataloader for validation.
Returns:
DataLoader: The dataloader for validation.
"""
val_loader = \
build_dataloader(data_path=self.dataset_config["val_dataset"]["data_path"],
label_path=self.dataset_config["val_dataset"]["label_path"],
label_map=self.dataset_config["label_map"],
batch_size=self.dataset_config["batch_size"],
num_workers=self.dataset_config["num_workers"])
return val_loader
def configure_optimizers(self):
"""
Configure optimizers for training.
Returns:
dict: A dictionary containing the optimizer, learning rate scheduler, and the metric to monitor.
Raises:
NotImplementedError: If an unsupported scheduler type is provided.
"""
self.train_config = self.experiment_spec["train"]
optim_dict = {}
optim = torch.optim.SGD(params=self.parameters(),
lr=self.train_config['optim']['lr'],
momentum=self.train_config['optim']['momentum'],
nesterov=self.train_config['optim']['nesterov'],
weight_decay=self.train_config['optim']['weight_decay'])
optim_dict["optimizer"] = optim
scheduler_type = self.train_config['optim']['lr_scheduler']
if scheduler_type == "AutoReduce":
lr_scheduler = ReduceLROnPlateau(optim, 'min',
patience=self.train_config['optim']['patience'],
min_lr=self.train_config['optim']['min_lr'],
factor=self.train_config['optim']["lr_decay"],
verbose=True)
elif scheduler_type == "MultiStep":
lr_scheduler = MultiStepLR(optimizer=optim,
milestones=self.train_config['optim']["lr_steps"],
gamma=self.train_config['optim']["lr_decay"],
verbose=True)
else:
raise NotImplementedError("Only [AutoReduce, MultiStep] schedulers are supported")
optim_dict["lr_scheduler"] = lr_scheduler
optim_dict['monitor'] = self.train_config['optim']['lr_monitor']
return optim_dict
def training_step(self, batch, batch_idx):
"""
Define a training step.
Args:
batch (Tensor): A batch of data.
batch_idx (int): The index of the batch.
Returns:
Tensor: The computed loss.
"""
data, label = batch
output = self.model(data)
loss = F.cross_entropy(output, label)
self.train_accuracy.update(output, label)
self.log("train_loss", loss, on_step=False, on_epoch=True, prog_bar=True, sync_dist=True)
self.log("train_acc@1", self.train_accuracy, on_step=False, on_epoch=True, prog_bar=True, sync_dist=True)
return loss
def training_epoch_end(self, training_step_outputs):
"""
Log Training metrics to status.json at the end of the epoch.
Args:
training_step_outputs (list): List of outputs from each training step.
"""
average_train_loss = 0.0
for out in training_step_outputs:
average_train_loss += out['loss'].item()
average_train_loss /= len(training_step_outputs)
self.status_logging_dict["train_loss"] = average_train_loss
self.status_logging_dict["train_acc"] = self.train_accuracy.compute().item()
status_logging.get_status_logger().kpi = self.status_logging_dict
status_logging.get_status_logger().write(
message="Train and Val metrics generated.",
status_level=status_logging.Status.RUNNING
)
def validation_step(self, batch, batch_idx):
"""
Define a validation step.
Args:
batch (Tensor): A batch of data.
batch_idx (int): The index of the batch.
Returns:
Tensor: The computed loss.
"""
data, label = batch
output = self.model(data)
loss = F.cross_entropy(output, label)
self.val_accuracy.update(output, label)
self.log("val_loss", loss, on_step=False, on_epoch=True, prog_bar=True, sync_dist=True)
self.log("val_acc@1", self.val_accuracy, on_step=False, on_epoch=True, prog_bar=True, sync_dist=True)
return loss
def validation_epoch_end(self, validation_step_outputs):
"""
Log Validation metrics to status.json at the end of the epoch.
Args:
validation_step_outputs (list): List of outputs from each validation step.
"""
average_val_loss = 0.0
for out in validation_step_outputs:
average_val_loss += out.item()
average_val_loss /= len(validation_step_outputs)
self.status_logging_dict["val_loss"] = average_val_loss
self.status_logging_dict["val_acc"] = self.val_accuracy.compute().item()
def forward(self, x):
"""
Define the forward pass of the pose classification model.
Args:
x (Tensor): The input tensor.
Returns:
Tensor: The output tensor.
"""
output = self.model(x)
return output
def on_save_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
"""
Encrypt the checkpoint. The encryption is done in TLTCheckpointConnector.
Args:
checkpoint (dict): The checkpoint to save.
"""
pass
def on_load_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
"""
Decrypt the checkpoint.
Args:
checkpoint (dict): The checkpoint to load.
Raises:
PermissionError: If the checkpoint is encrypted and the encryption key is not available.
"""
if checkpoint.get("state_dict_encrypted", False):
# Retrieve encryption key from TLTPyTorchCookbook.
key = TLTPyTorchCookbook.get_passphrase()
if key is None:
raise PermissionError("Cannot access model state dict without the encryption key")
checkpoint = patch_decrypt_checkpoint(checkpoint, key)
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/pose_classification/model/pl_pc_model.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pose classification dataloader module."""
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/pose_classification/model/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model builder interface and joint model."""
import torch
from nvidia_tao_pytorch.cv.pose_classification.model.st_gcn import st_gcn
from nvidia_tao_pytorch.cv.pose_classification.utils.common_utils import patch_decrypt_checkpoint
from nvidia_tao_pytorch.core.cookbooks.tlt_pytorch_cookbook import TLTPyTorchCookbook
def load_pretrained_weights(pretrained_model_path):
"""
Load the pre-trained weights from a checkpoint.
This function loads the weights from a specified checkpoint. If the weights are encrypted,
the function retrieves the encryption key from the TLTPyTorchCookbook and decrypts the weights.
Args:
pretrained_model_path (str): The path to the pre-trained model checkpoint.
Returns:
dict: A dictionary containing the state of the model.
Raises:
PermissionError: If the weights are encrypted and the encryption key is not available.
"""
temp = torch.load(pretrained_model_path,
map_location="cpu")
if temp.get("state_dict_encrypted", False):
# Retrieve encryption key from TLTPyTorchCookbook.
key = TLTPyTorchCookbook.get_passphrase()
if key is None:
raise PermissionError("Cannot access model state dict without the encryption key")
temp = patch_decrypt_checkpoint(temp, key)
state_dict = {}
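# Strip the leading "model." prefix that the Lightning module attribute adds to checkpoint keys
# (e.g. "model.data_bn.weight" -> "data_bn.weight") so the weights load into a bare ST-GCN.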
for key, value in list(temp["state_dict"].items()):
if "model" in key:
new_key = ".".join(key.split(".")[1:])
state_dict[new_key] = value
else:
state_dict[key] = value
return state_dict
def get_basemodel(pretrained_model_path,
input_channels,
num_classes,
graph_layout,
graph_strategy,
edge_importance_weighting=True,
data_bn=True,
**kwargs):
"""
Get the base model for ST-GCN.
This function creates an instance of the ST-GCN model, and if a pre-trained model is provided,
it loads the pre-trained weights into the model.
Args:
pretrained_model_path (str): The path to the pre-trained model checkpoint.
input_channels (int): The number of input channels.
num_classes (int): The number of classes in the dataset.
graph_layout (str): The graph layout to be used in the model.
graph_strategy (str): The graph strategy to be used in the model.
edge_importance_weighting (bool, optional): Whether to use edge importance weighting. Defaults to True.
data_bn (bool, optional): Whether to use batch normalization. Defaults to True.
**kwargs: Additional keyword arguments.
Returns:
torch.nn.Module: The created ST-GCN model.
"""
if pretrained_model_path:
print("loading trained weights from {}".format(
pretrained_model_path))
pretrained_weights = load_pretrained_weights(pretrained_model_path)
else:
pretrained_weights = None
model = st_gcn(pretrained_weights=pretrained_weights,
input_channels=input_channels,
num_classes=num_classes,
graph_layout=graph_layout,
graph_strategy=graph_strategy,
edge_importance_weighting=edge_importance_weighting,
data_bn=data_bn,
**kwargs)
return model
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/pose_classification/model/pc_model.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The top model builder interface."""
from nvidia_tao_pytorch.cv.pose_classification.model.pc_model import get_basemodel
def build_pc_model(experiment_config,
export=False):
"""
Build a pose classification model according to the provided configuration.
This function uses the configuration provided in experiment_config to create a pose classification model.
Currently, only the "ST-GCN" model type is supported. If a different model type is provided in the
configuration, a NotImplementedError will be raised.
Args:
experiment_config (dict): A dictionary containing the configuration for the experiment.
This should include specifications for the model, such as its type,
the path to a pre-trained model if one is being used, the number of input channels,
the number of classes in the dataset, dropout rate, and graph parameters
like layout, strategy, and edge importance weighting.
export (bool, optional): A flag that indicates whether the model is being built for export.
This is currently not used in the function. Defaults to False.
Returns:
torch.nn.Module: The created pose classification model.
Raises:
NotImplementedError: If a model type other than "ST-GCN" is specified in the configuration.
"""
model_config = experiment_config["model"]
model_type = model_config["model_type"]
pretrained_model_path = model_config["pretrained_model_path"]
input_channels = model_config["input_channels"]
num_classes = experiment_config["dataset"]["num_classes"]
dropout = model_config["dropout"]
graph_layout = model_config["graph_layout"]
graph_strategy = model_config["graph_strategy"]
edge_importance_weighting = model_config["edge_importance_weighting"]
if model_type == "ST-GCN":
model = get_basemodel(pretrained_model_path=pretrained_model_path,
model_type=model_type,
input_channels=input_channels,
num_classes=num_classes,
graph_layout=graph_layout,
graph_strategy=graph_strategy,
edge_importance_weighting=edge_importance_weighting,
dropout=dropout)
else:
raise NotImplementedError("Only the type \"ST-GCN\" is supported")
return model
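# Illustrative sketch of the configuration shape build_pc_model expects; every value below is a
# placeholder chosen for documentation and the helper is not called anywhere in this module.
def _example_build_pc_model():
    """Build an ST-GCN pose classifier from a minimal, hand-written experiment config."""
    experiment_config = {
        "model": {
            "model_type": "ST-GCN",
            "pretrained_model_path": None,   # or a path to a .tlt checkpoint
            "input_channels": 3,
            "dropout": 0.5,
            "graph_layout": "nvidia",
            "graph_strategy": "spatial",
            "edge_importance_weighting": True,
        },
        "dataset": {"num_classes": 6},
    }
    return build_pc_model(experiment_config)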
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/pose_classification/model/build_nn_model.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Original source taken from https://github.com/open-mmlab/mmskeleton
# Copyright 2019 OpenMMLAB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ST-GCN model architecture for pose classification."""
import torch
from torch import nn
from torch.nn import functional
import numpy as np
def zero(x):
"""
Function that returns zero regardless of the input.
Args:
x (Any): Input to the function.
Returns:
int: Returns 0.
"""
return 0
def iden(x):
"""
Identity function.
Args:
x (Any): Input to the function.
Returns:
Any: Returns the input without any changes.
"""
return x
def get_hop_distance(num_node, edge, max_hop=1):
"""
Compute the hop distance between nodes in a graph.
Args:
num_node (int): The number of nodes in the graph.
edge (list): A list of tuples representing the edges between nodes.
max_hop (int, optional): The maximum hop distance to consider. Defaults to 1.
Returns:
numpy.ndarray: An adjacency matrix representing the hop distances between nodes.
"""
A = np.zeros((num_node, num_node))
for i, j in edge:
A[j, i] = 1
A[i, j] = 1
# compute hop steps
hop_dis = np.zeros((num_node, num_node)) + np.inf
transfer_mat = [np.linalg.matrix_power(A, d) for d in range(max_hop + 1)]
arrive_mat = (np.stack(transfer_mat) > 0)
for d in range(max_hop, -1, -1):
hop_dis[arrive_mat[d]] = d
return hop_dis
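# Illustrative sketch (hypothetical 3-node chain, never called): hop distances for the graph
# 0 - 1 - 2 with max_hop=1.
def _example_get_hop_distance():
    """Hop distance matrix for a tiny chain graph 0-1-2."""
    hop = get_hop_distance(num_node=3, edge=[(0, 1), (1, 2)], max_hop=1)
    # hop[i][i] == 0, hop[0][1] == hop[1][2] == 1, and hop[0][2] == inf (more than max_hop away).
    return hop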
def normalize_digraph(A):
"""
Normalize a directed graph.
Args:
A (numpy.ndarray): The adjacency matrix of the graph.
Returns:
numpy.ndarray: The adjacency matrix of the normalized graph.
"""
Dl = np.sum(A, 0)
num_node = A.shape[0]
Dn = np.zeros((num_node, num_node))
for i in range(num_node):
if Dl[i] > 0:
Dn[i, i] = Dl[i]**(-1)
AD = np.dot(A, Dn)
return AD
def normalize_undigraph(A):
"""
Normalize an undirected graph.
Args:
A (numpy.ndarray): The adjacency matrix of the graph.
Returns:
numpy.ndarray: The adjacency matrix of the normalized graph.
"""
Dl = np.sum(A, 0)
num_node = A.shape[0]
Dn = np.zeros((num_node, num_node))
for i in range(num_node):
if Dl[i] > 0:
Dn[i, i] = Dl[i]**(-0.5)
DAD = np.dot(np.dot(Dn, A), Dn)
return DAD
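# normalize_digraph computes A @ D^-1 (column-normalized, random-walk style), while
# normalize_undigraph computes D^-0.5 @ A @ D^-0.5 (symmetric normalization); both leave
# zero-degree nodes untouched.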
class Graph():
"""
The Graph class models the skeletons extracted by openpose.
Attributes:
max_hop (int): The maximal distance between two connected nodes.
dilation (int): Controls the spacing between the kernel points.
A (numpy.ndarray): The adjacency matrix of the graph.
"""
def __init__(self,
layout="nvidia",
strategy="spatial",
max_hop=1,
dilation=1):
"""
Initialize a spatial-temporal graph.
Args:
layout (str, optional): The layout of the graph. Defaults to "nvidia".
- nvidia: Consists of 34 joints. For more information, please
refer to https://docs.nvidia.com/deeplearning/maxine/ar-sdk-programming-guide/index.html
- openpose: Consists of 18 joints. For more information, please
refer to https://github.com/CMU-Perceptual-Computing-Lab/openpose#output
- human3.6m: Consists of 17 joints. For more information, please
refer to http://vision.imar.ro/human3.6m/description.php
- ntu-rgb+d: Consists of 25 joints. For more information, please
refer to https://github.com/shahroudy/NTURGB-D
- ntu_edge: Consists of 24 joints. For more information, please
refer to https://github.com/shahroudy/NTURGB-D
- coco: Consists of 17 joints. For more information, please
refer to https://cocodataset.org/#home
strategy (str, optional): The strategy used to construct the graph. Defaults to "spatial".
- uniform: Uniform Labeling
- distance: Distance Partitioning
- spatial: Spatial Configuration
For more information, please refer to the section "Partition Strategies" in the paper (https://arxiv.org/abs/1801.07455).
max_hop (int, optional): The maximal distance between two connected nodes. Defaults to 1.
dilation (int, optional): Controls the spacing between the kernel points. Defaults to 1.
"""
self.max_hop = max_hop
self.dilation = dilation
self.get_edge(layout)
self.hop_dis = get_hop_distance(self.num_node,
self.edge,
max_hop=max_hop)
self.get_adjacency(strategy)
def __str__(self):
"""
String representation of the spatial-temporal graph.
Returns:
str: The adjacency matrix of the graph as a string.
"""
return self.A
def get_edge(self, layout):
"""
Get the edge of the graph.
Args:
layout (str): The layout of the graph.
"""
# edge is a list of [child, parent] pairs
if layout == "nvidia":
self.num_node = 34
self.num_person = 1
self.seq_length = 300
self_link = [(i, i) for i in range(self.num_node)]
neighbor_link = [(0, 1), (0, 2), (0, 3), (1, 4), (2, 5), (3, 6),
(4, 7), (5, 8), (7, 9), (8, 10), (9, 11), (10, 12),
(7, 13), (8, 14), (6, 15), (15, 16), (15, 17),
(16, 18), (17, 19), (1, 20), (2, 21), (6, 20),
(6, 21), (20, 22), (21, 23), (22, 24), (23, 25),
(24, 26), (25, 27), (24, 28), (25, 29), (24, 30),
(25, 31), (24, 32), (25, 33)]
self.edge = self_link + neighbor_link
self.center = 0
elif layout == "openpose":
self.num_node = 18
self.num_person = 1
self.seq_length = 300
self_link = [(i, i) for i in range(self.num_node)]
neighbor_link = [(4, 3), (3, 2), (7, 6), (6, 5),
(13, 12), (12, 11), (10, 9), (9, 8), (11, 5),
(8, 2), (5, 1), (2, 1), (0, 1), (15, 0), (14, 0),
(17, 15), (16, 14)]
self.edge = self_link + neighbor_link
self.center = 1
elif layout == "human3.6m":
self.num_node = 17
self.num_person = 1
self.seq_length = 3163
self_link = [(i, i) for i in range(self.num_node)]
neighbor_link = [(9, 10), (8, 9), (7, 8), (1, 7),
(4, 7), (0, 1), (0, 4), (9, 11),
(9, 14), (11, 12), (14, 15), (12, 13),
(15, 16), (1, 2), (2, 3), (4, 5), (5, 6)]
self.edge = self_link + neighbor_link
self.center = 8
elif layout == "ntu-rgb+d":
self.num_node = 25
self.num_person = 2
self.seq_length = 300
self_link = [(i, i) for i in range(self.num_node)]
neighbor_1base = [(1, 2), (2, 21), (3, 21),
(4, 3), (5, 21), (6, 5), (7, 6), (8, 7), (9, 21),
(10, 9), (11, 10), (12, 11), (13, 1), (14, 13),
(15, 14), (16, 15), (17, 1), (18, 17), (19, 18),
(20, 19), (22, 23), (23, 8), (24, 25), (25, 12)]
neighbor_link = [(i - 1, j - 1) for (i, j) in neighbor_1base]
self.edge = self_link + neighbor_link
self.center = 21 - 1
elif layout == "ntu_edge":
self.num_node = 24
self.num_person = 2
self.seq_length = 300
self_link = [(i, i) for i in range(self.num_node)]
neighbor_1base = [(1, 2), (3, 2), (4, 3), (5, 2), (6, 5), (7, 6),
(8, 7), (9, 2), (10, 9), (11, 10), (12, 11),
(13, 1), (14, 13), (15, 14), (16, 15), (17, 1),
(18, 17), (19, 18), (20, 19), (21, 22), (22, 8),
(23, 24), (24, 12)]
neighbor_link = [(i - 1, j - 1) for (i, j) in neighbor_1base]
self.edge = self_link + neighbor_link
self.center = 2
elif layout == "coco":
self.num_node = 17
self.num_person = 1
self.seq_length = 300
self_link = [(i, i) for i in range(self.num_node)]
neighbor_1base = [[16, 14], [14, 12], [17, 15], [15, 13], [12, 13],
[6, 12], [7, 13], [6, 7], [8, 6], [9, 7],
[10, 8], [11, 9], [2, 3], [2, 1], [3, 1], [4, 2],
[5, 3], [4, 6], [5, 7]]
neighbor_link = [(i - 1, j - 1) for (i, j) in neighbor_1base]
self.edge = self_link + neighbor_link
self.center = 0
# elif layout=="customer settings"
# pass
else:
raise NotImplementedError(f"Layout \"{layout}\" not supported. Please choose from "
f"[\"nvidia\", \"openpose\", \"human3.6m\", \"ntu-rgb+d\", "
f"\"ntu_edge\", \"coco\"].")
def get_adjacency(self, strategy):
"""
Get the adjacency of the graph.
Args:
strategy (str): The strategy used to construct the graph.
"""
valid_hop = range(0, self.max_hop + 1, self.dilation)
adjacency = np.zeros((self.num_node, self.num_node))
for hop in valid_hop:
adjacency[self.hop_dis == hop] = 1
normalize_adjacency = normalize_digraph(adjacency)
if strategy == "uniform":
A = np.zeros((1, self.num_node, self.num_node))
A[0] = normalize_adjacency
self.A = A
elif strategy == "distance":
A = np.zeros((len(valid_hop), self.num_node, self.num_node))
for i, hop in enumerate(valid_hop):
A[i][self.hop_dis == hop] = normalize_adjacency[self.hop_dis ==
hop]
self.A = A
elif strategy == "spatial":
A = []
for hop in valid_hop:
a_root = np.zeros((self.num_node, self.num_node))
a_close = np.zeros((self.num_node, self.num_node))
a_further = np.zeros((self.num_node, self.num_node))
for i in range(self.num_node):
for j in range(self.num_node):
if self.hop_dis[j, i] == hop:
if self.hop_dis[j, self.center] == self.hop_dis[
i, self.center]:
a_root[j, i] = normalize_adjacency[j, i]
elif self.hop_dis[j, self.center] > self.hop_dis[
i, self.center]:
a_close[j, i] = normalize_adjacency[j, i]
else:
a_further[j, i] = normalize_adjacency[j, i]
if hop == 0:
A.append(a_root)
else:
A.append(a_root + a_close)
A.append(a_further)
A = np.stack(A)
self.A = A
else:
raise NotImplementedError(f"Strategy \"{strategy}\" not supported. Please choose from "
f"[\"uniform\", \"distance\", \"spatial\"].")
def get_num_node(self):
"""
Get the number of nodes in the graph.
Returns:
int: The number of nodes in the graph.
"""
return self.num_node
def get_num_person(self):
"""
Get the number of persons in the graph.
Returns:
int: The number of persons in the graph.
"""
return self.num_person
def get_seq_length(self):
"""
Get the sequence length.
Returns:
int: The sequence length.
"""
return self.seq_length
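# Illustrative sketch (never called): the default TAO layout/strategy pair and the shape of the
# resulting adjacency tensor.
def _example_graph():
    """Build the "nvidia" skeleton graph with spatial partitioning."""
    graph = Graph(layout="nvidia", strategy="spatial", max_hop=1, dilation=1)
    # graph.A has shape (3, 34, 34): the three spatial partition subsets over 34 joints.
    return graph.A.shape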
class ConvTemporalGraphical(nn.Module):
"""
The basic module for applying a graph convolution.
Args:
input_channels (int): Number of channels in the input sequence data
output_channels (int): Number of channels produced by the convolution
kernel_size (int): Size of the graph convolving kernel
t_kernel_size (int): Size of the temporal convolving kernel
t_stride (int, optional): Stride of the temporal convolution. Default: 1
t_padding (int, optional): Temporal zero-padding added to both sides of
the input. Default: 0
t_dilation (int, optional): Spacing between temporal kernel elements.
Default: 1
bias (bool, optional): If ``True``, adds a learnable bias to the output.
Default: ``True``
Shape:
- Input[0]: Input graph sequence in :math:`(N, input_channels, T_{in}, V)` format
- Input[1]: Input graph adjacency matrix in :math:`(K, V, V)` format
- Output[0]: Output graph sequence in :math:`(N, output_channels, T_{out}, V)` format
- Output[1]: Graph adjacency matrix for output data in :math:`(K, V, V)` format
where
:math:`N` is a batch size,
:math:`K` is the spatial kernel size, as :math:`K == kernel_size[1]`,
:math:`T_{in}/T_{out}` is a length of input/output sequence,
:math:`V` is the number of graph nodes.
"""
def __init__(self,
input_channels,
output_channels,
kernel_size,
t_kernel_size=1,
t_stride=1,
t_padding=0,
t_dilation=1,
bias=True):
"""
Initializes a module for graph convolution.
This module is a basic unit for applying a convolution operation on graph-structured data. It involves
both spatial convolution (i.e., convolution on graph) and temporal convolution (i.e., convolution
across time dimension).
"""
super().__init__()
self.kernel_size = kernel_size
self.conv = nn.Conv2d(input_channels,
output_channels * kernel_size,
kernel_size=(t_kernel_size, 1),
padding=(t_padding, 0),
stride=(t_stride, 1),
dilation=(t_dilation, 1),
bias=bias)
def forward(self, x, A):
"""
Apply forward propagation.
Args:
x (torch.Tensor): The input graph sequence. It has a shape of :math:`(N, input_channels, T_{in}, V)`.
A (torch.Tensor): The adjacency matrix of the graph. It has a shape of :math:`(K, V, V)`.
Returns:
torch.Tensor: The output graph sequence. It has a shape of :math:`(N, output_channels, T_{out}, V)`.
torch.Tensor: The adjacency matrix of the graph for output data. It has a shape of :math:`(K, V, V)`.
"""
assert A.size(0) == self.kernel_size, f"A.size(0) {A.size(0)} does not match "\
f"self.kernel_size {self.kernel_size}."
x = self.conv(x)
n, kc, t, v = x.size()
x = x.view(n, self.kernel_size, kc // self.kernel_size, t, v)
x = torch.einsum("nkctv,kvw->nctw", (x, A))
return x.contiguous(), A
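# Illustrative shape walk-through (hypothetical sizes, never called): the graph convolution first
# expands channels by the spatial kernel size K, then contracts over K and V with an einsum.
def _example_conv_temporal_graphical():
    """Run a dummy tensor through ConvTemporalGraphical and return the output shape."""
    gcn = ConvTemporalGraphical(input_channels=3, output_channels=64, kernel_size=3)
    x = torch.randn(2, 3, 50, 34)   # (N, C_in, T, V)
    A = torch.rand(3, 34, 34)       # (K, V, V) with K == kernel_size
    out, _ = gcn(x, A)
    # out.shape == (2, 64, 50, 34): the conv produces (2, 64 * 3, 50, 34), which is reshaped to
    # (2, 3, 64, 50, 34) and contracted with A via "nkctv,kvw->nctw".
    return out.shape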
class ST_GCN_Block(nn.Module):
"""
Applies a spatial temporal graph convolution over an input graph sequence.
Args:
input_channels (int): Number of channels in the input sequence data
output_channels (int): Number of channels produced by the convolution
kernel_size (tuple): Size of the temporal convolving kernel and graph convolving kernel
stride (int, optional): Stride of the temporal convolution. Default: 1
dropout (int, optional): Dropout rate of the final output. Default: 0
residual (bool, optional): If ``True``, applies a residual mechanism. Default: ``True``
Shape:
- Input[0]: Input graph sequence in :math:`(N, input_channels, T_{in}, V)` format
- Input[1]: Input graph adjacency matrix in :math:`(K, V, V)` format
- Output[0]: Output graph sequence in :math:`(N, output_channels, T_{out}, V)` format
- Output[1]: Graph adjacency matrix for output data in :math:`(K, V, V)` format
where
:math:`N` is a batch size,
:math:`K` is the spatial kernel size, as :math:`K == kernel_size[1]`,
:math:`T_{in}/T_{out}` is a length of input/output sequence,
:math:`V` is the number of graph nodes.
"""
def __init__(self,
input_channels,
output_channels,
kernel_size,
stride=1,
dropout=0,
residual=True):
"""
Initializes a module for spatial temporal graph convolution.
This module is a basic unit for applying a convolution operation on graph-structured data with consideration
of both spatial and temporal information.
"""
super().__init__()
assert len(kernel_size) == 2, f"len(kernel_size) should be 2. Got {len(kernel_size)}."
assert kernel_size[0] % 2 == 1, "kernel_size[0] should be odd. Got even."
padding = ((kernel_size[0] - 1) // 2, 0)
self.gcn = ConvTemporalGraphical(input_channels, output_channels,
kernel_size[1])
self.tcn = nn.Sequential(
nn.BatchNorm2d(output_channels),
nn.ReLU(inplace=True),
nn.Conv2d(
output_channels,
output_channels,
(kernel_size[0], 1),
(stride, 1),
padding,
),
nn.BatchNorm2d(output_channels),
nn.Dropout(dropout, inplace=True),
)
if not residual:
self.residual = zero
elif (input_channels == output_channels) and (stride == 1):
self.residual = iden
else:
self.residual = nn.Sequential(
nn.Conv2d(input_channels,
output_channels,
kernel_size=1,
stride=(stride, 1)),
nn.BatchNorm2d(output_channels),
)
self.relu = nn.ReLU(inplace=True)
def forward(self, x, A):
"""
Apply forward propagation.
Args:
x (torch.Tensor): The input graph sequence. It has a shape of :math:`(N, input_channels, T_{in}, V)`.
A (torch.Tensor): The adjacency matrix of the graph. It has a shape of :math:`(K, V, V)`.
Returns:
torch.Tensor: The output graph sequence. It has a shape of :math:`(N, output_channels, T_{out}, V)`.
torch.Tensor: The adjacency matrix of the graph for output data. It has a shape of :math:`(K, V, V)`.
"""
res = self.residual(x)
x, A = self.gcn(x, A)
x = self.tcn(x) + res
return self.relu(x), A
class ST_GCN(nn.Module):
"""
Spatial temporal graph convolutional networks.
Args:
input_channels (int): Number of channels in the input data
num_classes (int): Number of classes for the classification task
graph_layout (str): The layout of the graph
graph_strategy (str): The strategy of the graph
edge_importance_weighting (bool): If ``True``, adds a learnable
importance weighting to the edges of the graph
**kwargs (optional): Other parameters for graph convolution units
Shape:
- Input: :math:`(N, input_channels, T_{in}, V_{in}, M_{in})`
- Output: :math:`(N, num_classes)` where
:math:`N` is a batch size,
:math:`T_{in}` is a length of input sequence,
:math:`V_{in}` is the number of graph nodes,
:math:`M_{in}` is the number of instance in a frame.
"""
def __init__(self,
input_channels,
num_classes,
graph_layout,
graph_strategy,
edge_importance_weighting=True,
data_bn=True,
**kwargs):
"""
Initializes the spatial-temporal graph convolution network.
"""
super().__init__()
# load graph
self.graph = Graph(layout=graph_layout, strategy=graph_strategy)
A = torch.tensor(self.graph.A,
dtype=torch.float32,
requires_grad=False)
self.register_buffer("A", A)
# build networks
spatial_kernel_size = A.size(0)
temporal_kernel_size = 9
kernel_size = (temporal_kernel_size, spatial_kernel_size)
self.data_bn = nn.BatchNorm1d(input_channels *
A.size(1)) if data_bn else iden
kwargs = {k: v for k, v in kwargs.items() if k != "model_type"}
kwargs0 = {k: v for k, v in kwargs.items() if k != "dropout"}
self.st_gcn_networks = nn.ModuleList((
ST_GCN_Block(input_channels, 64, kernel_size, 1, residual=False, **kwargs0),
ST_GCN_Block(64, 64, kernel_size, 1, **kwargs),
ST_GCN_Block(64, 64, kernel_size, 1, **kwargs),
ST_GCN_Block(64, 64, kernel_size, 1, **kwargs),
ST_GCN_Block(64, 128, kernel_size, 2, **kwargs),
ST_GCN_Block(128, 128, kernel_size, 1, **kwargs),
ST_GCN_Block(128, 128, kernel_size, 1, **kwargs),
ST_GCN_Block(128, 256, kernel_size, 2, **kwargs),
ST_GCN_Block(256, 256, kernel_size, 1, **kwargs),
ST_GCN_Block(256, 256, kernel_size, 1, **kwargs),
))
# initialize parameters for edge importance weighting
if edge_importance_weighting:
self.edge_importance = nn.ParameterList([
nn.Parameter(torch.ones(self.A.size()))
for i in self.st_gcn_networks
])
else:
self.edge_importance = [1] * len(self.st_gcn_networks)
# fcn for prediction
self.fcn = nn.Conv2d(256, num_classes, kernel_size=1)
def forward(self, x):
"""
Apply forward propagation.
Args:
x (torch.Tensor): The input graph sequence. It has a shape of :math:`(N, input_channels, T_{in}, V_{in}, M_{in})`.
Returns:
torch.Tensor: The output sequence. It has a shape of :math:`(N, num_classes)`.
"""
# data normalization
N, C, T, V, M = x.size()
x = x.permute(0, 4, 3, 1, 2).contiguous()
x = x.view(N * M, V * C, T)
x = self.data_bn(x)
x = x.view(N, M, V, C, T)
x = x.permute(0, 1, 3, 4, 2).contiguous()
x = x.view(N * M, C, T, V)
# forward
for gcn, importance in zip(self.st_gcn_networks, self.edge_importance):
x, _ = gcn(x, self.A * importance)
# global pooling
x_size = [int(s) for s in x.size()[2:]]
x = functional.avg_pool2d(x, x_size)
x = x.view(N, M, -1, 1, 1).mean(dim=1)
# prediction
x = self.fcn(x)
x = x.view(x.size(0), -1)
return x
def extract_feature(self, x):
"""
Extract features from the input.
Args:
x (torch.Tensor): The input graph sequence. It has a shape of :math:`(N, input_channels, T_{in}, V_{in}, M_{in})`.
Returns:
torch.Tensor: The output sequence. It has a shape of :math:`(N, num_classes, T_{out}, V_{out}, M_{out})`.
torch.Tensor: The extracted feature from the input. It has a shape of :math:`(N, C_{out}, T_{out}, V_{out}, M_{out})`.
"""
# data normalization
N, C, T, V, M = x.size()
x = x.permute(0, 4, 3, 1, 2).contiguous()
x = x.view(N * M, V * C, T)
x = self.data_bn(x)
x = x.view(N, M, V, C, T)
x = x.permute(0, 1, 3, 4, 2).contiguous()
x = x.view(N * M, C, T, V)
# forward
for gcn, importance in zip(self.st_gcn_networks, self.edge_importance):
x, _ = gcn(x, self.A * importance)
_, c, t, v = x.size()
feature = x.view(N, M, c, t, v).permute(0, 2, 3, 4, 1)
# prediction
x = self.fcn(x)
output = x.view(N, M, -1, t, v).permute(0, 2, 3, 4, 1)
return output, feature
def st_gcn(pretrained_weights,
input_channels,
num_classes,
graph_layout,
graph_strategy,
edge_importance_weighting=True,
data_bn=True,
**kwargs):
"""
Constructs an ST-GCN (Spatial Temporal Graph Convolutional Networks) model.
Args:
pretrained_weights (torch.nn.Module): A PyTorch module with pretrained weights.
If provided, these weights are loaded into the model.
input_channels (int): Number of channels in the input data.
num_classes (int): Number of classes for the classification task.
graph_layout (str): The layout of the graph.
graph_strategy (str): The strategy of the graph.
edge_importance_weighting (bool, optional): If ``True``, adds a learnable
importance weighting to the edges of the graph. Default: ``True``.
data_bn (bool, optional): If ``True``, applies Batch Normalization on the input data. Default: ``True``.
**kwargs (optional): Other parameters for graph convolution units.
Returns:
model (ST_GCN): An ST-GCN model configured with the given parameters and weights.
"""
model = ST_GCN(input_channels=input_channels, num_classes=num_classes,
graph_layout=graph_layout, graph_strategy=graph_strategy,
edge_importance_weighting=edge_importance_weighting,
data_bn=data_bn, **kwargs)
if pretrained_weights:
model.load_state_dict(pretrained_weights)
return model
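# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# A minimal example of building the model with this factory. The layout name,
# joint count (V) and input sizes below are assumptions and depend on the Graph
# definition used by this package:
#
#   import torch
#
#   model = st_gcn(pretrained_weights=None,
#                  input_channels=3,            # e.g. (x, y, score) per joint
#                  num_classes=6,
#                  graph_layout="openpose",     # assumed layout name
#                  graph_strategy="spatial",
#                  edge_importance_weighting=True)
#   dummy = torch.randn(2, 3, 300, 18, 1)       # (N, C, T, V, M)
#   logits = model(dummy)                       # -> shape (2, 6)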
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/pose_classification/model/st_gcn.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Inferencer."""
import torch
from nvidia_tao_pytorch.cv.pose_classification.utils.common_utils import data_to_device
class Inferencer():
"""
Pytorch model inferencer.
This class takes a PyTorch model, moves it to GPU for execution, and provides a method for making
inferences with the model on given data. The class optionally returns probabilities if 'ret_prob' is set to True.
Attributes:
model (torch.nn.Module): The PyTorch model to use for inference.
ret_prob (bool): If True, the 'inference' method will return probabilities instead of class IDs.
"""
def __init__(self, model, ret_prob=False):
"""
Initialize Inferencer with a PyTorch model.
Args:
model (torch.nn.Module): The PyTorch model to use for inference.
ret_prob (bool, optional): If True, the 'inference' method will return probabilities instead of
class IDs. Defaults to False.
"""
self.model = model
self.model.eval()
self.model.cuda()
self.ret_prob = ret_prob
def inference(self, data):
"""
Perform inference on the given data.
The data is moved to GPU and passed through the model for inference. If 'ret_prob' is True, softmax
is applied to the model output to get probabilities, and these probabilities are returned. Otherwise,
the IDs of the classes with the highest scores are returned.
Args:
data (torch.Tensor): A tensor containing the data to perform inference on.
Returns:
numpy.ndarray: If 'ret_prob' is True, an array of probabilities. Otherwise, an array of class IDs.
"""
cuda_data = data_to_device(data)
cls_scores = self.model(cuda_data)
if self.ret_prob:
prob = torch.softmax(cls_scores, dim=1)
prob = prob.detach().cpu().numpy()
return prob
pred_id = torch.argmax(cls_scores, dim=1)
pred_id = pred_id.cpu().numpy()
return pred_id
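# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# Assuming a trained pose-classification model and a dataloader yielding
# (skeleton_tensor, label) batches:
#
#   inferencer = Inferencer(model, ret_prob=False)
#   for data, _ in dataloader:
#       pred_ids = inferencer.inference(data)    # numpy array of class indices
#
#   # With ret_prob=True, inference() instead returns per-class softmax scores.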
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/pose_classification/inference/inferencer.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pose classification inference module."""
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/pose_classification/inference/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pose classification dataloader module."""
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/pose_classification/dataloader/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Original source taken from https://github.com/open-mmlab/mmskeleton
# Copyright 2019 OpenMMLAB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data feeder for loading skeleton sequences."""
import numpy as np
import pickle
import random
from torch.utils.data import Dataset
def auto_pad(data_numpy, size, random_pad=False):
"""
Apply padding to the numpy data.
This function checks if the temporal dimension of the input data (second dimension) is smaller than the target size.
If it is, the function pads the data up to the target size. The padding could be applied from the beginning (left padding)
or randomly within the range, depending on the 'random_pad' flag.
Args:
data_numpy (np.ndarray): The input data of shape (C, T, V, M), where C is the number of channels, T is
the temporal dimension, V is the number of vertices, and M is the number of instances (e.g., persons) in a frame.
size (int): The target size for the temporal dimension.
random_pad (bool, optional): If True, padding is applied at a random position within the range.
If False, padding is applied from the beginning. Defaults to False.
Returns:
np.ndarray: The padded data. If the temporal dimension of the input data is equal to or larger than the target size,
the input data is returned as is.
"""
C, T, V, M = data_numpy.shape
if T < size:
begin = random.randint(0, size - T) if random_pad else 0
data_numpy_padded = np.zeros((C, size, V, M), dtype=data_numpy.dtype)
data_numpy_padded[:, begin:begin + T, :, :] = data_numpy
return data_numpy_padded
return data_numpy
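# --- Hedged example (added for illustration; not part of the original file) ---
# auto_pad only affects the temporal axis: a 100-frame clip padded to size=300
# keeps its values and is zero-filled for the remaining frames. Shapes here are
# illustrative:
#
#   seq = np.random.rand(3, 100, 18, 1)                 # (C, T, V, M)
#   padded = auto_pad(seq, size=300)                    # -> (3, 300, 18, 1), left-aligned
#   padded_rand = auto_pad(seq, 300, random_pad=True)   # same shape, random offset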
def random_choose(data_numpy, size, enable_auto_pad=True):
"""
Randomly select a clip from the input data.
This function checks the temporal dimension of the input data. If it's equal to the target size, the function
returns the original data. If it's smaller than the target size, the function either pads the data to the
target size or returns the original data based on the 'enable_auto_pad' flag. If the temporal dimension of
the input data is greater than the target size, the function will randomly select a portion of the data
of size 'size' and return it.
Args:
data_numpy (np.ndarray): The input data of shape (C, T, V, M), where C is the number of channels, T is
the temporal dimension, V is the number of vertices, and M is the number of instances (e.g., persons) in a frame.
size (int): The target size for the temporal dimension.
enable_auto_pad (bool, optional): If True, and if T < size, padding is applied using the 'auto_pad' function.
If False, the original data is returned as is. Defaults to True.
Returns:
np.ndarray: The data clip with the temporal dimension of size 'size'. If the temporal dimension of the
input data is smaller than 'size' and 'enable_auto_pad' is False, the original data is returned.
"""
# input: C,T,V,M
data_shape = data_numpy.shape
T = data_shape[1]
if T == size:
return data_numpy
if T < size:
if enable_auto_pad:
return auto_pad(data_numpy, size, random_pad=True)
return data_numpy
begin = random.randint(0, T - size)
return data_numpy[:, begin:begin + size, :, :]
def random_move(data_numpy,
angle_candidate=[-10., -5., 0., 5., 10.],
scale_candidate=[0.9, 1.0, 1.1],
transform_candidate=[-0.2, -0.1, 0.0, 0.1, 0.2],
move_time_candidate=[1]):
"""
Randomly manipulate the coordinates of the input data.
This function randomly manipulates the coordinates of the input data by applying rotations, scaling, and translations.
The angle of rotation, scale factor, and the magnitude of the transformation are randomly chosen from the
corresponding candidate lists. The manipulation is applied to the spatial dimension of the data.
Args:
data_numpy (np.ndarray): The input data of shape (C, T, V, M), where C is the number of channels, T is
the temporal dimension, V is the number of vertices, and M is the number of instances (e.g., persons) in a frame.
angle_candidate (list, optional): List of possible rotation angles in degrees. Defaults to [-10., -5., 0., 5., 10.].
scale_candidate (list, optional): List of possible scaling factors. Defaults to [0.9, 1.0, 1.1].
transform_candidate (list, optional): List of possible translation magnitudes. Defaults to [-0.2, -0.1, 0.0, 0.1, 0.2].
move_time_candidate (list, optional): List of possible 'move times' determining the granularity of the transformation over time. Defaults to [1].
Returns:
np.ndarray: The manipulated data with the same shape as the input data.
"""
# input: C,T,V,M
data_shape = data_numpy.shape
T = data_shape[1]
V = data_shape[2]
M = data_shape[3]
move_time = random.choice(move_time_candidate)
node = np.arange(0, T, T * 1.0 / move_time).round().astype(int)
node = np.append(node, T)
num_node = len(node)
A = np.random.choice(angle_candidate, num_node)
S = np.random.choice(scale_candidate, num_node)
T_x = np.random.choice(transform_candidate, num_node)
T_y = np.random.choice(transform_candidate, num_node)
a = np.zeros(T)
s = np.zeros(T)
t_x = np.zeros(T)
t_y = np.zeros(T)
# linspace
for i in range(num_node - 1):
a[node[i]:node[i + 1]] = np.linspace(
A[i], A[i + 1], node[i + 1] - node[i]) * np.pi / 180
s[node[i]:node[i + 1]] = np.linspace(S[i], S[i + 1],
node[i + 1] - node[i])
t_x[node[i]:node[i + 1]] = np.linspace(T_x[i], T_x[i + 1],
node[i + 1] - node[i])
t_y[node[i]:node[i + 1]] = np.linspace(T_y[i], T_y[i + 1],
node[i + 1] - node[i])
theta = np.array([[np.cos(a) * s, -np.sin(a) * s],
[np.sin(a) * s, np.cos(a) * s]])
# perform transformation
for i_frame in range(T):
xy = data_numpy[0:2, i_frame, :, :]
new_xy = np.dot(theta[:, :, i_frame], xy.reshape(2, -1))
new_xy[0] += t_x[i_frame]
new_xy[1] += t_y[i_frame]
data_numpy[0:2, i_frame, :, :] = new_xy.reshape(2, V, M)
return data_numpy
class SkeletonFeeder(Dataset):
"""
Feeder for skeleton-based action recognition.
This feeder loads skeleton sequences and their corresponding labels from given paths and applies specified
data processing methods such as random choosing, moving, and padding. It inherits from the PyTorch Dataset class.
Args:
data_path (str): The path to the '.npy' data. The data should have the shape (N, C, T, V, M).
label_path (str): The path to the label data.
label_map (dict): A dictionary mapping labels to their corresponding indices.
random_choose (bool, optional): If True, a portion of the input sequence is randomly chosen for each sample. Defaults to False.
random_move (bool, optional): If True, the input sequence is randomly moved for each sample. Defaults to False.
window_size (int, optional): The length of the output sequence. If it is negative, the whole sequence is used. Defaults to -1.
debug (bool, optional): If True, only the first 100 samples are used. Defaults to False.
mmap (bool, optional): If True, memory-map the loaded data. Useful when the data is too large to fit into memory. Defaults to True.
Attributes:
data (np.ndarray): The loaded skeleton sequences of shape (N, C, T, V, M).
label (list): The labels corresponding to the skeleton sequences.
sample_name (list): The names of the skeleton sequence samples.
N, C, T, V, M (int): The dimensions of the skeleton sequence data.
"""
def __init__(self,
data_path,
label_path,
label_map,
random_choose=False,
random_move=False,
window_size=-1,
debug=False,
mmap=True):
"""
Initialize a skeleton feeder.
"""
self.label = None
self.debug = debug
self.data_path = data_path
self.label_path = label_path
self.label_map = label_map
self.random_choose = random_choose
self.random_move = random_move
self.window_size = window_size
self.load_data(mmap)
def load_data(self, mmap):
"""
Load skeleton sequences and their corresponding labels.
The data is loaded from the paths specified in the constructor. The sequences are loaded either normally or
as memory-mapped based on the 'mmap' argument. If 'debug' is True, only the first 100 samples are loaded.
Args:
mmap (bool): If True, memory-map the loaded data.
"""
# data: N C T V M
# load label
if self.label_path:
with open(self.label_path, 'rb') as f:
self.sample_name, self.label = pickle.load(f)
# load data
if mmap:
self.data = np.load(self.data_path, mmap_mode='r')
else:
self.data = np.load(self.data_path)
if self.debug:
self.label = self.label[0:100]
self.data = self.data[0:100]
self.sample_name = self.sample_name[0:100]
self.N, self.C, self.T, self.V, self.M = self.data.shape
def __len__(self):
"""
Return the number of sequences.
Returns:
int: The number of skeleton sequences.
"""
return self.N
def __getitem__(self, index):
"""
Get data and label at an index.
This method retrieves the skeleton sequence and its corresponding label at the specified index. It applies
the data processing methods specified in the constructor (random choosing, moving, and padding).
Args:
index (int): The index of the sequence and label to retrieve.
Returns:
tuple: A tuple containing the skeleton sequence and its corresponding label.
"""
# get data
data_numpy = np.array(self.data[index])
label = -1
if self.label:
label = self.label[index]
# processing
if self.random_choose:
data_numpy = random_choose(data_numpy, self.window_size)
elif self.window_size > 0:
data_numpy = auto_pad(data_numpy, self.window_size)
if self.random_move:
data_numpy = random_move(data_numpy)
return data_numpy, label
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/pose_classification/dataloader/skeleton_feeder.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Build torch data loader."""
from torch.utils.data import DataLoader
from nvidia_tao_pytorch.cv.pose_classification.dataloader.skeleton_feeder import SkeletonFeeder
def build_dataloader(data_path, label_map, label_path=None,
random_choose=False, random_move=False,
window_size=-1, debug=False, mmap=True,
batch_size=1, shuffle=False,
num_workers=4, pin_mem=False):
"""
Build a torch DataLoader from a given data path and label map.
This function first constructs a SkeletonFeeder dataset from the provided parameters. It then uses this dataset
to build a DataLoader object, which is a generator that allows for iteration over batches of the dataset.
Args:
data_path (str): Path to the data in a NumPy array.
label_map (dict): Dictionary mapping labels to their corresponding indices.
label_path (str, optional): Path to the labels in a pickle file. Defaults to None.
random_choose (bool, optional): Specifies whether to randomly choose a portion of the input sequence. Defaults to False.
random_move (bool, optional): Specifies whether to randomly move the input sequence. Defaults to False.
window_size (int, optional): The length of the output sequence. -1 means the same as original length.
debug (bool, optional): If True, the function will run in debug mode. Defaults to False.
mmap (bool, optional): If True, memory-mapping mode is used for loading data. Defaults to True.
batch_size (int, optional): The number of samples per batch. Defaults to 1.
shuffle (bool, optional): If True, data will be reshuffled at every epoch. Defaults to False.
num_workers (int, optional): The number of subprocesses to use for data loading. Defaults to 4.
pin_mem (bool, optional): If True, data loader will copy Tensors into CUDA pinned memory before returning them. Defaults to False.
Returns:
torch.utils.data.DataLoader: A DataLoader instance with specified dataset and parameters.
"""
dataset = SkeletonFeeder(data_path=data_path,
label_path=label_path,
label_map=label_map,
random_choose=random_choose,
random_move=random_move,
window_size=window_size,
debug=debug,
mmap=mmap)
dataloader = DataLoader(dataset=dataset,
batch_size=batch_size,
shuffle=shuffle,
num_workers=num_workers,
pin_memory=pin_mem)
return dataloader
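# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# Assuming skeleton sequences stored as a .npy array of shape (N, C, T, V, M)
# and labels pickled as a (sample_names, labels) tuple; paths and label names
# are placeholders:
#
#   label_map = {"sitting_down": 0, "getting_up": 1, "walking": 2}
#   train_loader = build_dataloader(data_path="train_data.npy",
#                                   label_map=label_map,
#                                   label_path="train_label.pkl",
#                                   random_choose=True,
#                                   random_move=True,
#                                   window_size=300,
#                                   batch_size=16,
#                                   shuffle=True)
#   for data, label in train_loader:
#       ...   # data: (B, C, T, V, M), label: (B,)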
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/pose_classification/dataloader/build_data_loader.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""OCRNet root module"""
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/ocrnet/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""OCRNet config module."""
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/ocrnet/config/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Default config file."""
from typing import Optional, List
from dataclasses import dataclass, field
from omegaconf import MISSING
from nvidia_tao_pytorch.pruning.torch_pruning.prune_config import PruneConfig
@dataclass
class OCRNetModelConfig:
"""OCRNet model config."""
TPS: bool = False # Thin-Plate-Spline interpolation
num_fiducial: int = 20 # number of keypoints for TPS
backbone: str = "ResNet" # [ResNet]
feature_channel: int = 512
sequence: str = "BiLSTM" # [BiLSTM]
hidden_size: int = 256
prediction: str = "CTC" # [Attn, CTC]
quantize: bool = False
input_width: int = 100
input_height: int = 32
input_channel: int = 1
@dataclass
class OptimConfig:
"""Optimizer config."""
name: str = "adadelta" # [adam, adadelta]
lr: float = 1.0 # default value = 1.0 for adadelta
momentum: float = 0.9
weight_decay: float = 5e-4
lr_scheduler: str = "MultiStep" # {AutoReduce, MultiStep}
lr_monitor: str = "val_loss" # {val_loss, train_loss}
patience: int = 1
min_lr: float = 1e-4
lr_steps: List[int] = field(default_factory=lambda: [15, 25])
lr_decay: float = 0.1
# TODO(tylerz): no augmentation from original implementation
@dataclass
class OCRNetAugmentationConfig:
"""Augmentation config."""
keep_aspect_ratio: bool = False
@dataclass
class OCRNetDatasetConfig:
"""Dataset config."""
train_dataset_dir: Optional[List[str]] = None
train_gt_file: Optional[str] = None
val_dataset_dir: Optional[str] = None
val_gt_file: Optional[str] = None
character_list_file: Optional[str] = None
max_label_length: int = 25 # Shall we check it with output feature length ?
batch_size: int = 32
workers: int = 8
augmentation: OCRNetAugmentationConfig = OCRNetAugmentationConfig()
@dataclass
class CalibrationConfig:
"""Calibration config."""
cal_image_dir: List[str] = MISSING
cal_cache_file: str = MISSING
cal_batch_size: int = 1
cal_batches: int = 1
@dataclass
class TrtConfig:
"""Trt config."""
data_type: str = "fp16"
workspace_size: int = 1024
min_batch_size: int = 1
opt_batch_size: int = 1
max_batch_size: int = 1
calibration: CalibrationConfig = CalibrationConfig()
@dataclass
class OCRNetGenTrtEngineExpConfig:
"""Gen TRT Engine experiment config."""
results_dir: Optional[str] = None
gpu_id: int = 0
onnx_file: str = MISSING
trt_engine: Optional[str] = None
input_channel: int = 3
input_width: int = 100
input_height: int = 32
opset_version: int = 12
batch_size: int = -1
verbose: bool = False
tensorrt: TrtConfig = TrtConfig()
@dataclass
class OCRNetTrainExpConfig:
"""Train experiment config."""
results_dir: Optional[str] = None
seed: int = 1111
# TODO(tylerz): Update to use torch.distributed.launch for multi gpu training.
gpu_ids: List[int] = field(default_factory=lambda: [0])
resume_training_checkpoint_path: Optional[str] = None
pretrained_model_path: Optional[str] = None
quantize_model_path: Optional[str] = None
optim: OptimConfig = OptimConfig()
num_epochs: int = 10
clip_grad_norm: float = 5.0 # default = 5.0 for adadelta
checkpoint_interval: int = 2
validation_interval: int = 1
distributed_strategy: str = "ddp"
@dataclass
class OCRNetInferenceExpConfig:
"""Inference experiment config."""
checkpoint: str = MISSING
trt_engine: Optional[str] = None
results_dir: Optional[str] = None
gpu_id: int = 0
inference_dataset_dir: str = MISSING
batch_size: int = 1
input_width: int = 100
input_height: int = 32
@dataclass
class OCRNetEvalExpConfig:
"""Evaluation experiment config."""
checkpoint: str = MISSING
trt_engine: Optional[str] = None
gpu_id: int = 0
test_dataset_dir: str = MISSING
test_dataset_gt_file: Optional[str] = None
results_dir: Optional[str] = None
batch_size: int = 1
input_width: int = 100
input_height: int = 32
@dataclass
class OCRNetExportExpConfig:
"""Export experiment config."""
checkpoint: str = MISSING
results_dir: Optional[str] = None
onnx_file: Optional[str] = None
gpu_id: int = 0
@dataclass
class OCRNetPruneExpConfig:
"""Prune experiment config."""
checkpoint: str = MISSING
results_dir: Optional[str] = None
pruned_file: Optional[str] = None
gpu_id: int = 0
prune_setting: PruneConfig = PruneConfig()
@dataclass
class OCRNetConvertDatasetExpConfig:
"""Convert_dataset experiment config."""
input_img_dir: str = MISSING
gt_file: str = MISSING
results_dir: Optional[str] = None
@dataclass
class ExperimentConfig:
"""Experiment config."""
model: OCRNetModelConfig = OCRNetModelConfig()
dataset: OCRNetDatasetConfig = OCRNetDatasetConfig()
train: OCRNetTrainExpConfig = OCRNetTrainExpConfig()
evaluate: OCRNetEvalExpConfig = OCRNetEvalExpConfig()
export: OCRNetExportExpConfig = OCRNetExportExpConfig()
inference: OCRNetInferenceExpConfig = OCRNetInferenceExpConfig()
prune: OCRNetPruneExpConfig = OCRNetPruneExpConfig()
dataset_convert: OCRNetConvertDatasetExpConfig = OCRNetConvertDatasetExpConfig()
gen_trt_engine: OCRNetGenTrtEngineExpConfig = OCRNetGenTrtEngineExpConfig()
encryption_key: Optional[str] = None
results_dir: str = MISSING
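# --- Hedged example (added for illustration; not part of the original file) ---
# A minimal YAML spec that Hydra could map onto ExperimentConfig; all paths and
# the encryption key below are placeholders:
#
#   results_dir: /results/ocrnet
#   encryption_key: nvidia_tao
#   model:
#     backbone: ResNet
#     prediction: CTC
#     input_width: 100
#     input_height: 32
#     input_channel: 1
#   dataset:
#     train_dataset_dir: [/data/ocr/train_lmdb]
#     val_dataset_dir: /data/ocr/val_lmdb
#     character_list_file: /data/ocr/character_list.txt
#     max_label_length: 25
#     batch_size: 32
#   train:
#     num_epochs: 10
#     gpu_ids: [0]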
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/ocrnet/config/default_config.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""OCRNet utils module."""
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/ocrnet/utils/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""OCRNet utils module."""
import os
import pickle
import struct
import time
import tempfile
import logging
from eff.codec import encrypt_stream, decrypt_stream
import torch
import torch.nn.functional as F
from nltk.metrics.distance import edit_distance
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
class CTCLabelConverter(object):
"""Convert between text-label and text-index for CTC."""
def __init__(self, character):
"""Initialize CTCLabelConverter.
Args:
character (str): A string containing the set of possible characters.
"""
# character (str): set of the possible characters.
dict_character = list(character)
self.dict = {}
for i, char in enumerate(dict_character):
# NOTE: 0 is reserved for 'CTCblank' token required by CTCLoss
self.dict[char] = i + 1
self.character = ['[CTCblank]'] + dict_character # dummy '[CTCblank]' token for CTCLoss (index 0)
def encode(self, text, batch_max_length=25):
"""Convert text-label into text-index.
Args:
text (list): text labels of each image. [batch_size]
batch_max_length (int): max length of text label in the batch. 25 by default
Return:
text (Torch.tensor): text index for CTCLoss. [batch_size, batch_max_length]
length (Torch.tensor): length of each text. [batch_size]
"""
length = [len(s) for s in text]
# The index used for padding (=0) would not affect the CTC loss calculation.
batch_text = torch.LongTensor(len(text), batch_max_length).fill_(0)
for i, t in enumerate(text):
text = list(t)
text = [self.dict[char] for char in text]
batch_text[i][:len(text)] = torch.LongTensor(text)
return (batch_text.to(device), torch.IntTensor(length).to(device))
def decode(self, text_index, length):
"""Convert text-index into text-label.
Args:
text_index (numpy.ndarray): the batch of predicted text_index.
length (list): the length of the predicted text.
Return:
list : the list of decoded text.
"""
texts = []
for index, l in enumerate(length):
t = text_index[index, :]
char_list = []
for i in range(l):
if t[i] != 0 and (not (i > 0 and t[i - 1] == t[i])): # removing repeated characters and blank.
char_list.append(self.character[t[i]])
text = ''.join(char_list)
texts.append(text)
return texts
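# --- Hedged example (added for illustration; not part of the original file) ---
# Round-tripping labels through the CTC converter, assuming a lowercase
# alphanumeric character set:
#
#   converter = CTCLabelConverter("0123456789abcdefghijklmnopqrstuvwxyz")
#   text, length = converter.encode(["hello", "tao"], batch_max_length=25)
#   # text:   LongTensor of shape (2, 25); index 0 doubles as padding / CTC blank
#   # length: IntTensor([5, 3])
#   # decode() takes per-sample index sequences plus their lengths (typically the
#   # greedy argmax of the network output) and strips repeats and blanks.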
class CTCLabelConverterForBaiduWarpctc(object):
"""Convert between text-label and text-index for baidu warpctc."""
def __init__(self, character):
"""Initialize CTCLabelConverterForBaiduWarpctc.
Args:
character (str): A string containing the set of possible characters.
"""
# character (str): set of the possible characters.
dict_character = list(character)
self.dict = {}
for i, char in enumerate(dict_character):
# NOTE: 0 is reserved for 'CTCblank' token required by CTCLoss
self.dict[char] = i + 1
self.character = ['[CTCblank]'] + dict_character # dummy '[CTCblank]' token for CTCLoss (index 0)
def encode(self, text, batch_max_length=25):
"""Convert text-label into text-index.
Args:
text (list): text labels of each image. [batch_size]
Return:
text (torch.Tensor): concatenated text index for CTCLoss.
[sum(text_lengths)] = [text_index_0 + text_index_1 + ... + text_index_(n - 1)]
length (torch.Tensor): length of each text. [batch_size]
"""
length = [len(s) for s in text]
text = ''.join(text)
text = [self.dict[char] for char in text]
return (torch.IntTensor(text), torch.IntTensor(length))
def decode(self, text_index, length):
"""Convert text-index into text-label.
Args:
text_index (numpy.ndarray): the batch of predicted text_index.
length (list): the length of the predicted text.
Return:
list : the list of decoded text.
"""
texts = []
index = 0
for ll in length:
t = text_index[index:index + ll]
char_list = []
for i in range(ll):
if t[i] != 0 and (not (i > 0 and t[i - 1] == t[i])): # removing repeated characters and blank.
char_list.append(self.character[t[i]])
text = ''.join(char_list)
texts.append(text)
index += ll
return texts
class AttnLabelConverter(object):
"""Convert between text-label and text-index for Attention"""
def __init__(self, character):
"""Initialize AttnLabelConverter.
Args:
character (str): A string containing the set of possible characters.
"""
# character (str): set of the possible characters.
# [GO] for the start token of the attention decoder. [s] for end-of-sentence token.
list_token = ['[GO]', '[s]'] # ['[s]','[UNK]','[PAD]','[GO]']
list_character = list(character)
self.character = list_token + list_character
self.dict = {}
for i, char in enumerate(self.character):
# print(i, char)
self.dict[char] = i
def encode(self, text, batch_max_length=25):
"""Convert text-label into text-index.
Args:
text (list): text labels of each image. [batch_size]
batch_max_length (int): max length of text label in the batch. 25 by default
Return:
text (torch.Tensor): the input of attention decoder. [batch_size x (max_length+2)] +1 for [GO] token and +1 for [s] token.
text[:, 0] is [GO] token and text is padded with [GO] token after [s] token.
length (torch.Tensor): the length of the attention decoder output, which also counts the [s] token, e.g. [3, 7, ...]. [batch_size]
"""
length = [len(s) + 1 for s in text] # +1 for [s] at end of sentence.
# batch_max_length = max(length) # this is not allowed for multi-gpu setting
batch_max_length += 1
# additional +1 for [GO] at first step. batch_text is padded with [GO] token after [s] token.
batch_text = torch.LongTensor(len(text), batch_max_length + 1).fill_(0)
for i, t in enumerate(text):
text = list(t)
text.append('[s]')
text = [self.dict[char] for char in text]
batch_text[i][1:1 + len(text)] = torch.LongTensor(text) # batch_text[:, 0] = [GO] token
return (batch_text.to(device), torch.IntTensor(length).to(device))
def decode(self, text_index, length):
"""Convert text-index into text-label.
Args:
text_index (numpy.ndarray): the batch of predicted text_index.
length (list): the length of the predicted text.
Return:
list : the list of decoded text.
"""
texts = []
for index, _ in enumerate(length):
text = ''.join([self.character[i] for i in text_index[index, :]])
texts.append(text)
return texts
class Averager(object):
"""Compute average for torch.Tensor, used for loss average."""
def __init__(self):
"""init."""
self.reset()
def add(self, v):
"""add."""
count = v.data.numel()
v = v.data.sum()
self.n_count += count
self.sum += v
def reset(self):
"""reset."""
self.n_count = 0
self.sum = 0
def val(self):
"""val."""
res = 0
if self.n_count != 0:
res = self.sum / float(self.n_count)
return res
def validation(model, criterion, evaluation_loader, converter, opt):
"""Performs validation or evaluation of a model.
Args:
model (torch.nn.Module): The model to be evaluated.
criterion (torch.nn.Module): The loss function to be used.
evaluation_loader (torch.utils.data.DataLoader): The data loader for the evaluation dataset.
converter (CTCLabelConverter): The converter for converting between text-label and text-index.
opt (argparse.Namespace): The command-line arguments.
Returns:
float: The average loss over the evaluation dataset.
float: The accuracy over the evaluation dataset.
float: The normalized edit distance over the evaluation dataset.
list: A list of predicted transcriptions for each sample in the evaluation dataset.
list: A list of confidence scores for each sample in the evaluation dataset.
list: A list of ground truth transcriptions for each sample in the evaluation dataset.
float: The total inference time for the evaluation dataset.
int: The total number of samples in the evaluation dataset.
"""
n_correct = 0
norm_ED = 0
length_of_data = 0
infer_time = 0
valid_loss_avg = Averager()
for _, (image_tensors, labels) in enumerate(evaluation_loader):
batch_size = image_tensors.size(0)
length_of_data = length_of_data + batch_size
image = image_tensors.to(device)
# For max length prediction
length_for_pred = torch.IntTensor([opt.batch_max_length] * batch_size).to(device)
text_for_pred = torch.LongTensor(batch_size, opt.batch_max_length + 1).fill_(0).to(device)
text_for_loss, length_for_loss = converter.encode(labels, batch_max_length=opt.batch_max_length)
start_time = time.time()
if 'CTC' in opt.Prediction:
preds = model(image, text_for_pred)
forward_time = time.time() - start_time
# Calculate evaluation loss for CTC decoder.
preds_size = torch.IntTensor([preds.size(1)] * batch_size)
# permute 'preds' to use CTCloss format
if opt.baiduCTC:
cost = criterion(preds.permute(1, 0, 2), text_for_loss, preds_size, length_for_loss) / batch_size
else:
cost = criterion(preds.log_softmax(2).permute(1, 0, 2), text_for_loss, preds_size, length_for_loss)
# Select max probability (greedy decoding) then decode index to character
if opt.baiduCTC:
_, preds_index = preds.max(2)
preds_index = preds_index.view(-1)
else:
_, preds_index = preds.max(2)
preds_str = converter.decode(preds_index.data, preds_size.data)
else:
preds = model(image, text_for_pred, is_train=False)
forward_time = time.time() - start_time
preds = preds[:, :text_for_loss.shape[1] - 1, :]
target = text_for_loss[:, 1:] # without [GO] Symbol
cost = criterion(preds.contiguous().view(-1, preds.shape[-1]), target.contiguous().view(-1))
# select max probability (greedy decoding) then decode index to character
_, preds_index = preds.max(2)
preds_str = converter.decode(preds_index, length_for_pred)
labels = converter.decode(text_for_loss[:, 1:], length_for_loss)
infer_time += forward_time
valid_loss_avg.add(cost)
# calculate accuracy & confidence score
preds_prob = F.softmax(preds, dim=2)
preds_max_prob, _ = preds_prob.max(dim=2)
confidence_score_list = []
for gt, pred, pred_max_prob in zip(labels, preds_str, preds_max_prob):
if 'Attn' in opt.Prediction:
gt = gt[:gt.find('[s]')]
pred_EOS = pred.find('[s]')
pred = pred[:pred_EOS] # prune after "end of sentence" token ([s])
pred_max_prob = pred_max_prob[:pred_EOS]
# To evaluate a 'case-sensitive model' with an alphanumeric and case-insensitive setting.
# if opt.sensitive and opt.data_filtering_off:
# pred = pred.lower()
# gt = gt.lower()
# alphanumeric_case_insensitve = '0123456789abcdefghijklmnopqrstuvwxyz'
# out_of_alphanumeric_case_insensitve = f'[^{alphanumeric_case_insensitve}]'
# pred = re.sub(out_of_alphanumeric_case_insensitve, '', pred)
# gt = re.sub(out_of_alphanumeric_case_insensitve, '', gt)
if pred == gt:
n_correct += 1
'''
(old version) ICDAR2017 DOST Normalized Edit Distance https://rrc.cvc.uab.es/?ch=7&com=tasks
"For each word we calculate the normalized edit distance to the length of the ground truth transcription."
if len(gt) == 0:
norm_ED += 1
else:
norm_ED += edit_distance(pred, gt) / len(gt)
'''
# ICDAR2019 Normalized Edit Distance
if len(gt) == 0 or len(pred) == 0:
norm_ED += 0
elif len(gt) > len(pred):
norm_ED += 1 - edit_distance(pred, gt) / len(gt)
else:
norm_ED += 1 - edit_distance(pred, gt) / len(pred)
# calculate confidence score (= product of pred_max_prob)
try:
confidence_score = pred_max_prob.cumprod(dim=0)[-1]
except Exception:
confidence_score = 0 # for empty pred case, when prune after "end of sentence" token ([s])
confidence_score_list.append(confidence_score)
# print(pred, gt, pred==gt, confidence_score)
accuracy = n_correct / float(length_of_data) * 100
norm_ED = norm_ED / float(length_of_data) # ICDAR2019 Normalized Edit Distance
return valid_loss_avg.val(), accuracy, norm_ED, preds_str, confidence_score_list, labels, infer_time, length_of_data # pylint: disable=undefined-loop-variable
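# --- Hedged example (added for illustration; not part of the original file) ---
# The ICDAR2019 normalized edit distance used above scores each non-empty sample
# as 1 - edit_distance(pred, gt) / max(len(pred), len(gt)), and norm_ED is the
# mean of these scores over the dataset. For instance:
#
#   gt, pred = "nvidia", "nvida"                                     # one missing char
#   score = 1 - edit_distance(pred, gt) / max(len(pred), len(gt))    # = 1 - 1/6 = 5/6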
def encrypt_onnx(tmp_file_name, output_file_name, key):
"""Encrypts an ONNX model.
Args:
tmp_file_name (str): The name of the temporary file containing the ONNX model.
output_file_name (str): The name of the output file to write the encrypted ONNX model to.
key (str): The passphrase to use for encryption.
"""
with open(tmp_file_name, "rb") as open_temp_file, open(output_file_name,
"wb") as open_encoded_file:
# set the input name magic number
open_encoded_file.write(struct.pack("<i", 0))
encrypt_stream(
input_stream=open_temp_file, output_stream=open_encoded_file,
passphrase=key, encryption=True
)
def encrypt_pytorch(tmp_file_name, output_file_name, key):
"""Encrypts a PyTorch model.
Args:
tmp_file_name (str): The name of the temporary file containing the PyTorch model.
output_file_name (str): The name of the output file to write the encrypted PyTorch model to.
key (str): The passphrase to use for encryption.
"""
with open(tmp_file_name, "rb") as open_temp_file, open(output_file_name,
"wb") as open_encoded_file:
encrypt_stream(
input_stream=open_temp_file, output_stream=open_encoded_file,
passphrase=key, encryption=True
)
def save_checkpoint(state, filename, key):
"""Saves a PyTorch checkpoint.
Args:
state (dict): The state dictionary to save.
filename (str): The name of the output file to write the encrypted checkpoint to.
key (str): The passphrase to use for encryption.
"""
handle, temp_name = tempfile.mkstemp(".tlt")
os.close(handle)
torch.save(state, temp_name)
encrypt_pytorch(temp_name, filename, key)
os.remove(temp_name)
def decrypt_pytorch(input_file_name, output_file_name, key):
"""Decrypts a TAO model to a PyTorch model.
Args:
input_file_name (str): The name of the input file containing the encrypted TAO model.
output_file_name (str): The name of the output file to write the decrypted PyTorch model to.
key (str): The passphrase to use for decryption.
"""
with open(input_file_name, "rb") as open_temp_file, open(output_file_name,
"wb") as open_encoded_file:
decrypt_stream(
input_stream=open_temp_file, output_stream=open_encoded_file,
passphrase=key, encryption=True
)
def load_checkpoint(model_path, key, to_cpu=False):
"""Loads a saved PyTorch checkpoint.
Args:
model_path (str): The path to the saved checkpoint file.
key (str): The passphrase to use for decryption.
to_cpu (bool, optional): Whether to load the model onto the CPU. Defaults to False.
Returns:
dict: The loaded state dictionary.
"""
loc_type = torch.device('cpu') if to_cpu else None
if model_path.endswith(".tlt"):
handle, temp_name = tempfile.mkstemp(".pth")
os.close(handle)
decrypt_pytorch(model_path, temp_name, key)
loaded_state = torch.load(temp_name, map_location=loc_type)
os.remove(temp_name)
else:
loaded_state = torch.load(model_path, map_location=loc_type)
if isinstance(loaded_state, dict):
if "whole_model" in loaded_state:
loaded_state = pickle.loads(loaded_state["whole_model"])
return loaded_state
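# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# Loading either an encrypted .tlt checkpoint or a plain .pth file; the path and
# key are placeholders:
#
#   state = load_checkpoint("/results/train/ocrnet_epoch_009.pth",
#                           key="nvidia_tao",      # only used for .tlt files
#                           to_cpu=True)
#   # 'state' is either a state_dict-style dict or a pickled whole model,
#   # depending on how the checkpoint was saved (see the "whole_model" branch).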
def create_logger(log_file=None, rank=0, log_level=logging.INFO):
"""Creates a logger object.
Args:
log_file (str, optional): The name of the log file to write to. Defaults to None.
rank (int, optional): The rank of the process. Defaults to 0.
log_level (int, optional): The logging level. Defaults to logging.INFO.
Returns:
logging.Logger: The logger object.
"""
logger = logging.getLogger(__name__)
logger.setLevel(log_level if rank == 0 else 'ERROR')
formatter = logging.Formatter('%(asctime)s %(levelname)5s %(message)s')
if log_file is not None:
file_handler = logging.FileHandler(filename=log_file)
file_handler.setLevel(log_level if rank == 0 else 'ERROR')
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
return logger
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/ocrnet/utils/utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""OCRNet script module"""
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/ocrnet/scripts/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Export OCRNet script.
"""
import os
import argparse
import tempfile
import onnx_graphsurgeon as gs
import onnx
import nvidia_tao_pytorch.core.loggers.api_logging as status_logging
from nvidia_tao_pytorch.core.hydra.hydra_runner import hydra_runner
from nvidia_tao_pytorch.cv.ocrnet.config.default_config import ExperimentConfig
MODEL_HEIGHT_STRIDE = 16
@gs.Graph.register()
def replace_with_avgpool2d(self, inputs, outputs, kernel_shape,
pads=[0, 0, 0, 0], strides=[1, 1]):
"""helper function to replace adaptive pool to avgpool2d.
Args:
inputs (list): The input ONNX tensors of the node being replaced.
outputs (list): The output ONNX tensors of the node being replaced.
kernel_shape (tuple): A tuple containing the height and width of the kernel.
pads (list, optional): A list containing the padding values for the top, bottom, left, and right sides of the input. Defaults to [0, 0, 0, 0].
strides (list, optional): A list containing the stride values for the height and width of the kernel. Defaults to [1, 1].
"""
# Disconnect output nodes of all input tensors
for inp in inputs:
inp.outputs.clear()
# Disconnect input nodes of all output tensors
for out in outputs:
out.inputs.clear()
attrs = {"ceil_mode": 0, "kernel_shape": kernel_shape, "pads": pads, "strides": strides}
# Insert the new node.
return self.layer(op="AveragePool", attrs=attrs,
inputs=inputs, outputs=outputs)
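# --- Hedged note (added for illustration; not part of the original file) ---
# The @gs.Graph.register() decorator above exposes this helper as a method on
# onnx-graphsurgeon Graph objects, so it can be invoked as:
#
#   graph = gs.import_onnx(onnx.load("model.onnx"))          # path is a placeholder
#   graph.replace_with_avgpool2d(inputs=[in_tensor], outputs=[out_tensor],
#                                kernel_shape=[1, 1])
#
# export() below uses exactly this call to rewrite adaptive_avg_pool2d nodes.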
def export(opt):
"""Export the model according to option."""
# @TODO(tylerz): Lazy import for correctly setting CUDA_VISIBLE_DEVICES
import torch
import torch.utils.data
from nvidia_tao_pytorch.cv.ocrnet.utils.utils import (CTCLabelConverter,
AttnLabelConverter,
load_checkpoint)
from nvidia_tao_pytorch.cv.ocrnet.model.model import Model, ExportModel
from pytorch_quantization import nn as quant_nn
quant_nn.TensorQuantizer.use_fb_fake_quant = True
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
""" model configuration """
if 'CTC' in opt.Prediction:
converter = CTCLabelConverter(opt.character)
else:
converter = AttnLabelConverter(opt.character)
opt.num_class = len(converter.character)
if opt.rgb:
opt.input_channel = 3
# load model
print('loading pretrained model from %s' % opt.saved_model)
ckpt = load_checkpoint(opt.saved_model, key=opt.encryption_key, to_cpu=True)
if not isinstance(ckpt, Model):
model = Model(opt)
state_dict = ckpt
model.load_state_dict(state_dict)
model = ExportModel(ocr_model=model, prediction_type=opt.Prediction)
else:
for name, m in ckpt.named_modules():
if "quantizer" in name:
m.use_fb_fake_quant = True
model = ExportModel(ocr_model=ckpt, prediction_type=opt.Prediction)
model = model.to(device)
input_names = ["input"]
output_names = ["output_id", "output_prob"]
dummy_input = (torch.randn(1, opt.input_channel, opt.imgH, opt.imgW).to(device),
torch.LongTensor(1, opt.batch_max_length + 1).fill_(0).to(device))
dynamic_axes = {"input": {0: "batch"}, "output_id": {0: "batch"}, "output_prob": {0: "batch"}}
os_handle, tmp_file_name = tempfile.mkstemp()
os.close(os_handle)
output_file = tmp_file_name
torch.onnx.export(model,
dummy_input,
output_file,
input_names=input_names,
output_names=output_names,
dynamic_axes=dynamic_axes,
operator_export_type=torch.onnx.OperatorExportTypes.ONNX_FALLTHROUGH,
verbose=False,
opset_version=16,
do_constant_folding=True
)
graph = gs.import_onnx(onnx.load(output_file))
for node in graph.nodes:
# if node.op == 'grid_sampler':
# #cleanup 3 unused inputs
# for i in [4, 3, 2]:
# node.i(i, 0).outputs.clear()
# del node.inputs[i]
if node.op == "adaptive_avg_pool2d":
feature_height = int(opt.imgH / MODEL_HEIGHT_STRIDE) - 1
inp_tensor = [node.inputs[0]]
node.i(1, 0).outputs.clear()
del node.inputs[1]
oup_tensor = [node.outputs[0]]
graph.replace_with_avgpool2d(inp_tensor, oup_tensor, kernel_shape=[1, feature_height])
del node
graph.cleanup()
onnx.save(gs.export_onnx(graph), opt.output_file)
os.remove(tmp_file_name)
def init_configs(experiment_spec: ExperimentConfig):
"""Pass the yaml config to argparse.Namespace"""
parser = argparse.ArgumentParser()
opt, _ = parser.parse_known_args()
opt.encryption_key = experiment_spec.encryption_key
opt.output_file = experiment_spec.export.onnx_file
# 1. Init dataset params
dataset_config = experiment_spec.dataset
model_config = experiment_spec.model
opt.batch_max_length = dataset_config.max_label_length
opt.imgH = model_config.input_height
opt.imgW = model_config.input_width
opt.input_channel = model_config.input_channel
if model_config.input_channel == 3:
opt.rgb = True
else:
opt.rgb = False
# load character list:
# Don't convert the characters to lower case
with open(dataset_config.character_list_file, "r") as f:
characters = "".join([ch.strip() for ch in f.readlines()])
opt.character = characters
# 2. Init Model params
opt.saved_model = experiment_spec.export.checkpoint
if model_config.TPS:
opt.Transformation = "TPS"
else:
opt.Transformation = "None"
opt.FeatureExtraction = model_config.backbone
opt.SequenceModeling = model_config.sequence
opt.Prediction = model_config.prediction
opt.num_fiducial = model_config.num_fiducial
opt.output_channel = model_config.feature_channel
opt.hidden_size = model_config.hidden_size
opt.baiduCTC = False
# 4. Init for Device setting
os.environ["CUDA_VISIBLE_DEVICES"] = str(experiment_spec.export.gpu_id)
import torch
opt.num_gpu = torch.cuda.device_count()
return opt
def run_experiment(experiment_spec):
"""run experiment."""
opt = init_configs(experiment_spec)
# Set default output filename if the filename
# isn't provided over the command line.
if opt.output_file is None:
split_name = os.path.splitext(opt.saved_model)[0]
opt.output_file = "{}.etlt".format(split_name)
# Raise an error if an exported file already exists.
if os.path.exists(opt.output_file):
raise FileExistsError(f"Output file already exists at {opt.output_file}")
# Set status logging
if experiment_spec.export.results_dir is not None:
results_dir = experiment_spec.export.results_dir
else:
results_dir = os.path.join(experiment_spec.results_dir, "export")
experiment_spec.export.results_dir = results_dir
os.makedirs(results_dir, exist_ok=True)
status_file = os.path.join(results_dir, "status.json")
status_logging.set_status_logger(status_logging.StatusLogger(filename=status_file,
append=True))
status_logging.get_status_logger().write(
status_level=status_logging.Status.STARTED,
message="Starting OCRNet export"
)
export(opt)
spec_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Load experiment specification, additionally using schema for validation/retrieving the default values.
# --config_path and --config_name will be provided by the entrypoint script.
@hydra_runner(
config_path=os.path.join(spec_root, "experiment_specs"), config_name="experiment", schema=ExperimentConfig
)
def main(cfg: ExperimentConfig) -> None:
"""Run the training process."""
try:
run_experiment(experiment_spec=cfg)
status_logging.get_status_logger().write(
status_level=status_logging.Status.SUCCESS,
message="Export finished successfully."
)
except (KeyboardInterrupt, SystemExit):
status_logging.get_status_logger().write(
message="Export was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
except Exception as e:
status_logging.get_status_logger().write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
raise e
if __name__ == '__main__':
main()
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/ocrnet/scripts/export.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" a modified version of CRNN torch repository https://github.com/bgshih/crnn/blob/master/tool/create_dataset.py """
import os
import lmdb
import cv2
import numpy as np
import nvidia_tao_pytorch.core.loggers.api_logging as status_logging
from nvidia_tao_pytorch.core.hydra.hydra_runner import hydra_runner
from nvidia_tao_pytorch.core.path_utils import expand_path
from nvidia_tao_pytorch.cv.ocrnet.config.default_config import ExperimentConfig
def checkImageIsValid(imageBin):
"""Check if the image is valid.
Args:
imageBin : the encoded image data.
Returns:
bool : True if the image is valid else False.
"""
if imageBin is None:
return False
imageBuf = np.frombuffer(imageBin, dtype=np.uint8)
img = cv2.imdecode(imageBuf, cv2.IMREAD_GRAYSCALE)
imgH, imgW = img.shape[0], img.shape[1]
if imgH * imgW == 0:
return False
return True
def writeCache(env, cache):
"""Write the cache to LMDB
Args:
env (lmdb.Environment): the LMDB environment to save the content.
cache (dict): the content to be written to LMDB.
"""
with env.begin(write=True) as txn:
for k, v in cache.items():
txn.put(k, v)
def createDataset(inputPath, gtFile, outputPath, checkValid=True):
"""Create LMDB dataset for training and evaluation.
Args:
inputPath (string): input folder path that the image paths in gtFile are relative to
gtFile (string): path to a text file listing tab-separated image path and label pairs
outputPath (string): LMDB output path
checkValid (bool): if True, check the validity of every image
"""
env = lmdb.open(outputPath, map_size=1099511627776)
cache = {}
cnt = 1
gtFile = expand_path(gtFile)
with open(gtFile, 'r', encoding='utf-8') as data:
datalist = data.readlines()
nSamples = len(datalist)
for i in range(nSamples):
imagePath, label = datalist[i].strip('\n').split('\t')
imagePath = expand_path(f"{inputPath}/{imagePath}")
# # only use alphanumeric data
# if re.search('[^a-zA-Z0-9]', label):
# continue
if not os.path.exists(imagePath):
print(f'{imagePath} does not exist')
continue
with open(imagePath, 'rb') as f:
imageBin = f.read()
if checkValid:
try:
if not checkImageIsValid(imageBin):
print(f'{imagePath} is not a valid image')
continue
except Exception:
print('error occurred', i)
with open(outputPath + '/error_image_log.txt', 'a') as log:
log.write('%s-th image data caused an error\n' % str(i))
continue
imageKey = 'image-%09d'.encode() % cnt
labelKey = 'label-%09d'.encode() % cnt
cache[imageKey] = imageBin
cache[labelKey] = label.encode()
if cnt % 1000 == 0:
writeCache(env, cache)
cache = {}
print('Written %d / %d' % (cnt, nSamples))
cnt += 1
nSamples = cnt - 1
cache['num-samples'.encode()] = str(nSamples).encode()
writeCache(env, cache)
print('Created dataset with %d samples' % nSamples)
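# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# The ground-truth file is expected to hold one tab-separated
# "<image path relative to inputPath>\t<label>" pair per line, e.g.:
#
#   images/word_001.jpg    hello
#   images/word_002.jpg    world
#
# The LMDB can then be created with (paths are placeholders):
#
#   createDataset(inputPath="/data/ocr/train",
#                 gtFile="/data/ocr/train/gt.txt",
#                 outputPath="/results/train_lmdb")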
spec_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Load experiment specification, additionally using schema for validation/retrieving the default values.
# --config_path and --config_name will be provided by the entrypoint script.
@hydra_runner(
config_path=os.path.join(spec_root, "experiment_specs"), config_name="experiment",
schema=ExperimentConfig)
def main(cfg: ExperimentConfig) -> None:
"""Run the training process."""
try:
if cfg.dataset_convert.results_dir is not None:
results_dir = cfg.dataset_convert.results_dir
else:
results_dir = os.path.join(cfg.results_dir, "dataset_convert", "lmdb")
cfg.dataset_convert.results_dir = results_dir
os.makedirs(results_dir, exist_ok=True)
# Set status logging
status_file = os.path.join(results_dir, "status.json")
status_logging.set_status_logger(status_logging.StatusLogger(filename=status_file,
append=True))
status_logging.get_status_logger().write(
status_level=status_logging.Status.STARTED,
message="Starting OCRNet dataset_convert"
)
inputPath = expand_path(cfg.dataset_convert.input_img_dir)
createDataset(inputPath=inputPath,
gtFile=cfg.dataset_convert.gt_file,
outputPath=results_dir)
status_logging.get_status_logger().write(
status_level=status_logging.Status.SUCCESS,
message="Dataset convert finished successfully."
)
except (KeyboardInterrupt, SystemExit):
status_logging.get_status_logger().write(
message="Dataset convert was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
except Exception as e:
status_logging.get_status_logger().write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
raise e
if __name__ == '__main__':
main()
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/ocrnet/scripts/dataset_convert.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Train OCRNet script.
"""
import os
import re
import random
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import ModelCheckpoint
from nvidia_tao_pytorch.core.callbacks.loggers import TAOStatusLogger
import nvidia_tao_pytorch.core.loggers.api_logging as status_logging
from nvidia_tao_pytorch.core.hydra.hydra_runner import hydra_runner
from nvidia_tao_pytorch.cv.ocrnet.config.default_config import ExperimentConfig
from nvidia_tao_pytorch.cv.ocrnet.model.pl_ocrnet import OCRNetModel
def run_experiment(experiment_spec: ExperimentConfig):
"""run experiment."""
if experiment_spec.train.results_dir is not None:
results_dir = experiment_spec.train.results_dir
else:
results_dir = os.path.join(experiment_spec.results_dir, "train")
experiment_spec.train.results_dir = results_dir
total_epochs = experiment_spec.train.num_epochs
os.makedirs(f'{results_dir}', exist_ok=True)
manual_seed = experiment_spec.train.seed
import torch
import torch.backends.cudnn as cudnn
import numpy as np
random.seed(manual_seed)
np.random.seed(manual_seed)
torch.manual_seed(manual_seed)
torch.cuda.manual_seed(manual_seed)
cudnn.benchmark = True
cudnn.deterministic = True
status_logger_callback = TAOStatusLogger(results_dir,
append=True,
num_epochs=total_epochs)
status_logging.set_status_logger(status_logger_callback.logger)
ocrnet_model = OCRNetModel(experiment_spec)
clip_grad = experiment_spec.train.clip_grad_norm
gpus_ids = experiment_spec.train.gpu_ids
distributed_strategy = None
if len(gpus_ids) > 1:
distributed_strategy = experiment_spec.train.distributed_strategy
val_inter = experiment_spec.train.validation_interval
trainer = Trainer(gpus=gpus_ids,
max_epochs=total_epochs,
check_val_every_n_epoch=val_inter,
default_root_dir=results_dir,
enable_checkpointing=False,
strategy=distributed_strategy,
accelerator='gpu',
num_sanity_val_steps=0,
gradient_clip_val=clip_grad)
ckpt_inter = experiment_spec.train.checkpoint_interval
ModelCheckpoint.FILE_EXTENSION = ".pth"
checkpoint_callback = ModelCheckpoint(every_n_epochs=ckpt_inter,
dirpath=results_dir,
save_on_train_epoch_end=True,
monitor=None,
save_top_k=-1,
filename='ocrnet_{epoch:03d}')
resume_ckpt = experiment_spec['train']['resume_training_checkpoint_path']
if resume_ckpt:
status_logging.get_status_logger().write(
message=f"Resuming training from checkpoint: {resume_ckpt}",
status_level=status_logging.Status.STARTED
)
resumed_epoch = re.search('epoch=(\\d+)', resume_ckpt)
if resumed_epoch:
resumed_epoch = int(resumed_epoch.group(1))
else:
resumed_epoch = 0
status_logger_callback.epoch_counter = resumed_epoch + 1 # make sure callback epoch matches resumed epoch
trainer.callbacks.append(status_logger_callback)
trainer.callbacks.append(checkpoint_callback)
trainer.fit(ocrnet_model)
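# For reference, a small sketch of how the resume epoch is recovered from a checkpoint
# name produced by the ModelCheckpoint template above (the path below is hypothetical):
#   import re
#   ckpt = "/results/train/ocrnet_epoch=014.pth"
#   match = re.search(r"epoch=(\d+)", ckpt)
#   resumed_epoch = int(match.group(1)) if match else 0  # -> 14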
spec_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Load experiment specification, additionally using the schema for validation/retrieving the default values.
# --config_path and --config_name will be provided by the entrypoint script.
@hydra_runner(
config_path=os.path.join(spec_root, "experiment_specs"), config_name="experiment", schema=ExperimentConfig
)
def main(cfg: ExperimentConfig) -> None:
"""Run the training process."""
try:
run_experiment(experiment_spec=cfg)
status_logging.get_status_logger().write(
status_level=status_logging.Status.SUCCESS,
message="Training finished successfully."
)
except (KeyboardInterrupt, SystemExit):
status_logging.get_status_logger().write(
message="Training was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
except Exception as e:
status_logging.get_status_logger().write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
raise e
if __name__ == '__main__':
main()
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/ocrnet/scripts/train.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Prune OCRNet script.
"""
import os
import argparse
import nvidia_tao_pytorch.core.loggers.api_logging as status_logging
from nvidia_tao_pytorch.core.hydra.hydra_runner import hydra_runner
from nvidia_tao_pytorch.cv.ocrnet.config.default_config import ExperimentConfig
import nvidia_tao_pytorch.pruning.torch_pruning as tp
def prune(opt):
"""Prune the the OCRNet according to option"""
# @TODO(tylerz): Lazy import for correctly setting CUDA_VISIBLE_DEVICES
import torch
import torch.utils.data
from nvidia_tao_pytorch.cv.ocrnet.utils.utils import (CTCLabelConverter,
AttnLabelConverter,
load_checkpoint)
from nvidia_tao_pytorch.cv.ocrnet.model.model import Model
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
""" model configuration """
if 'CTC' in opt.Prediction:
converter = CTCLabelConverter(opt.character)
else:
converter = AttnLabelConverter(opt.character)
opt.num_class = len(converter.character)
if opt.rgb:
opt.input_channel = 3
# load model
print('loading pretrained model from %s' % opt.saved_model)
ckpt = load_checkpoint(opt.saved_model, key=opt.encryption_key, to_cpu=True)
if not isinstance(ckpt, Model):
model = Model(opt)
state_dict = ckpt
model.load_state_dict(state_dict)
else:
model = ckpt
# print(model)
model = model.to(device)
model.eval()
num_params_before_pruning = tp.utils.count_params(model)
# 1. build dependency graph
dep_graph = tp.DependencyGraph()
dep_graph.build_dependency(model, example_inputs=(torch.randn([1, opt.input_channel, opt.imgH, opt.imgW]).to(device),
torch.LongTensor(1, opt.batch_max_length + 1).fill_(0).to(device)))
    # 1.1 excluded layers @TODO(tylerz): only supports CTC for now.
excluded_layers = list(model.modules())[-11:]
pruned_module = []
prunable_list = [torch.nn.Conv2d]
# 2. loop through the graph to execute the pruning:
if opt.prune_mode in ["amount", "threshold"]:
strategy = tp.strategy.LNStrategy(p=opt.p, mode=opt.prune_mode)
if opt.prune_mode == "amount":
th = opt.amount
else:
th = opt.threshold
for _, m in model.named_modules():
if isinstance(m, tuple(prunable_list)) and m not in excluded_layers and m not in pruned_module:
pruned_idxs = strategy(m.weight, amount=th, round_to=opt.granularity)
prune_func = tp.prune.prune_conv
pruning_plan = dep_graph.get_pruning_plan(m, prune_func, idxs=pruned_idxs)
if pruning_plan is not None:
pruning_plan.exec()
else:
continue
else: # experimental hybrid path
strategy = tp.strategy.CustomScoreStrategy()
global_thresh, module2scores = tp.utils.get_global_thresh(model, prune_ratio=opt.amount)
merged_sets = {}
# 2.1 find the merged set:
for _, m in model.named_modules():
if isinstance(m, torch.nn.Conv2d):
prune_func = tp.prune.prune_conv
merged_set = tp.dependency.find_merged_set(dep_graph.module_to_node[m], prune_func)
merged_sets[m] = merged_set
tp.utils.execute_custom_score_prune(model,
global_thresh=global_thresh,
module2scores=module2scores,
dep_graph=dep_graph,
granularity=opt.granularity,
excluded_layers=excluded_layers,
merged_sets=merged_sets)
num_params_after_pruning = tp.utils.count_params(model)
print(" Params: %s => %s" % (num_params_before_pruning, num_params_after_pruning))
encoded_output_file = opt.output_file
print(f"Pruned model is saved to {encoded_output_file}")
torch.save(model, encoded_output_file)
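# For intuition, a commented sketch of what an L2 "amount" criterion boils down to for a
# single Conv2d weight, independent of the torch_pruning helpers used above (the shapes
# and the 20% ratio are purely illustrative):
#   import torch
#   w = torch.randn(64, 32, 3, 3)                # out_channels x in_channels x kH x kW
#   scores = w.pow(2).sum(dim=(1, 2, 3)).sqrt()  # one L2 score per output filter
#   k = int(0.2 * w.size(0))                     # prune the 20% lowest-scoring filters
#   pruned_idxs = torch.argsort(scores)[:k].tolist()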
def init_configs(experiment_spec: ExperimentConfig):
"""Pass the yaml config to argparse.Namespace"""
parser = argparse.ArgumentParser()
opt, _ = parser.parse_known_args()
opt.encryption_key = experiment_spec.encryption_key
opt.output_file = experiment_spec.prune.pruned_file
# 1. Init dataset params
dataset_config = experiment_spec.dataset
model_config = experiment_spec.model
opt.batch_max_length = dataset_config.max_label_length
opt.imgH = model_config.input_height
opt.imgW = model_config.input_width
opt.input_channel = model_config.input_channel
if model_config.input_channel == 3:
opt.rgb = True
else:
opt.rgb = False
# load character list:
# Don't convert the characters to lower case
with open(dataset_config.character_list_file, "r") as f:
characters = "".join([ch.strip() for ch in f.readlines()])
opt.character = characters
# 2. Init Model params
opt.saved_model = experiment_spec.prune.checkpoint
if model_config.TPS:
opt.Transformation = "TPS"
else:
opt.Transformation = "None"
opt.FeatureExtraction = model_config.backbone
opt.SequenceModeling = model_config.sequence
opt.Prediction = model_config.prediction
opt.num_fiducial = model_config.num_fiducial
opt.output_channel = model_config.feature_channel
opt.hidden_size = model_config.hidden_size
opt.baiduCTC = False
# 3. Init pruning params:
prune_config = experiment_spec.prune.prune_setting
opt.prune_mode = prune_config.mode
if opt.prune_mode in ["amount", "experimental_hybrid"]:
opt.amount = prune_config.amount
elif opt.prune_mode in ["threshold"]:
opt.threshold = prune_config.threshold
else:
raise ValueError("Only supports prune mode in [amount, threshold, \
experimental_hybrid]")
opt.granularity = prune_config.granularity
if prune_config.raw_prune_score == "L2":
opt.p = 2
else:
opt.p = 1
# 4. Init for Device setting
os.environ["CUDA_VISIBLE_DEVICES"] = str(experiment_spec.prune.gpu_id)
import torch
opt.num_gpu = torch.cuda.device_count()
return opt
def run_experiment(experiment_spec):
"""run experiment."""
opt = init_configs(experiment_spec)
if experiment_spec.prune.results_dir is not None:
results_dir = experiment_spec.prune.results_dir
else:
results_dir = os.path.join(experiment_spec.results_dir, "prune")
experiment_spec.prune.results_dir = results_dir
os.makedirs(results_dir, exist_ok=True)
# Set status logging
status_file = os.path.join(results_dir, "status.json")
status_logging.set_status_logger(status_logging.StatusLogger(filename=status_file,
append=True))
status_logging.get_status_logger().write(status_level=status_logging.Status.STARTED,
message="Starting OCRNet Prune")
# Set default output filename if the filename
# isn't provided over the command line.
if opt.output_file is None:
split_name = os.path.splitext(opt.saved_model)[0]
opt.output_file = "pruned_{}.etlt".format(split_name)
    # Error out if the output file already exists.
assert not os.path.exists(opt.output_file), "Default output file {} already "\
"exists".format(opt.output_file)
prune(opt)
spec_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Load experiment specification, additionally using the schema for validation/retrieving the default values.
# --config_path and --config_name will be provided by the entrypoint script.
@hydra_runner(
config_path=os.path.join(spec_root, "experiment_specs"), config_name="experiment", schema=ExperimentConfig
)
def main(cfg: ExperimentConfig) -> None:
"""Run the training process."""
try:
run_experiment(experiment_spec=cfg)
status_logging.get_status_logger().write(
status_level=status_logging.Status.SUCCESS,
message="Prune finished successfully."
)
except (KeyboardInterrupt, SystemExit):
status_logging.get_status_logger().write(
message="Prune was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
except Exception as e:
status_logging.get_status_logger().write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
raise e
if __name__ == '__main__':
main()
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/ocrnet/scripts/prune.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Inference OCRNet script.
"""
import argparse
import os
from tabulate import tabulate
import nvidia_tao_pytorch.core.loggers.api_logging as status_logging
from nvidia_tao_pytorch.core.hydra.hydra_runner import hydra_runner
from nvidia_tao_pytorch.cv.ocrnet.config.default_config import ExperimentConfig
def inference(opt):
"""Inference on the OCRNet according to option"""
# @TODO(tylerz): Lazy import for correctly setting CUDA_VISIBLE_DEVICES
import torch
import torch.utils.data
import torch.nn.functional as F
from nvidia_tao_pytorch.cv.ocrnet.utils.utils import CTCLabelConverter, AttnLabelConverter, load_checkpoint
from nvidia_tao_pytorch.cv.ocrnet.dataloader.ocr_dataset import RawDataset, AlignCollate
from nvidia_tao_pytorch.cv.ocrnet.model.model import Model
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
""" model configuration """
if 'CTC' in opt.Prediction:
converter = CTCLabelConverter(opt.character)
else:
converter = AttnLabelConverter(opt.character)
opt.num_class = len(converter.character)
if opt.rgb:
opt.input_channel = 3
# load model
print('loading pretrained model from %s' % opt.saved_model)
ckpt = load_checkpoint(opt.saved_model, key=opt.encryption_key, to_cpu=True)
if not isinstance(ckpt, Model):
state_dict = ckpt
model = Model(opt)
model = torch.nn.DataParallel(model).to(device)
model.load_state_dict(state_dict)
else:
model = torch.nn.DataParallel(ckpt).to(device)
    # prepare data: load raw images from the inference image folder
AlignCollate_demo = AlignCollate(imgH=opt.imgH, imgW=opt.imgW, keep_ratio_with_pad=opt.PAD)
demo_data = RawDataset(root=opt.image_folder, opt=opt) # use RawDataset
demo_loader = torch.utils.data.DataLoader(
demo_data, batch_size=opt.batch_size,
shuffle=False,
num_workers=int(opt.workers),
collate_fn=AlignCollate_demo, pin_memory=True)
# predict
model.eval()
table_header = ["image_path", "predicted_labels", "confidence score"]
table_data = []
with torch.no_grad():
for image_tensors, image_path_list in demo_loader:
batch_size = image_tensors.size(0)
image = image_tensors.to(device)
# For max length prediction
length_for_pred = torch.IntTensor([opt.batch_max_length] * batch_size).to(device)
text_for_pred = torch.LongTensor(batch_size, opt.batch_max_length + 1).fill_(0).to(device)
if 'CTC' in opt.Prediction:
preds = model(image, text_for_pred)
                # Select max probability (greedy decoding) then decode index to character
preds_size = torch.IntTensor([preds.size(1)] * batch_size)
_, preds_index = preds.max(2)
# preds_index = preds_index.view(-1)
preds_str = converter.decode(preds_index, preds_size)
else:
preds = model(image, text_for_pred, is_train=False)
                # select max probability (greedy decoding) then decode index to character
_, preds_index = preds.max(2)
preds_str = converter.decode(preds_index, length_for_pred)
preds_prob = F.softmax(preds, dim=2)
preds_max_prob, _ = preds_prob.max(dim=2)
for img_name, pred, pred_max_prob in zip(image_path_list, preds_str, preds_max_prob):
if 'Attn' in opt.Prediction:
pred_EOS = pred.find('[s]')
pred = pred[:pred_EOS] # prune after "end of sentence" token ([s])
pred_max_prob = pred_max_prob[:pred_EOS]
# calculate confidence score (= multiply of pred_max_prob)
confidence_score = pred_max_prob.cumprod(dim=0)[-1]
table_data.append((img_name, pred, f"{confidence_score:0.4f}"))
print(tabulate(table_data, headers=table_header, tablefmt="psql"))
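# For intuition, a standalone sketch of the greedy decoding / confidence math above,
# using random logits (the real converter additionally collapses repeats and removes
# the CTC blank symbol):
#   import torch
#   import torch.nn.functional as F
#   preds = torch.randn(1, 26, 37)                 # batch x time steps x num_class
#   probs = F.softmax(preds, dim=2)
#   preds_max_prob, preds_index = probs.max(dim=2)
#   confidence_score = preds_max_prob.cumprod(dim=1)[:, -1]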
def init_configs(experiment_spec: ExperimentConfig):
"""Pass the yaml config to argparse.Namespace"""
parser = argparse.ArgumentParser()
opt, _ = parser.parse_known_args()
opt.encryption_key = experiment_spec.encryption_key
opt.image_folder = experiment_spec.inference.inference_dataset_dir
# 1. Init dataset params
dataset_config = experiment_spec.dataset
model_config = experiment_spec.model
opt.batch_max_length = dataset_config.max_label_length
opt.imgH = model_config.input_height
opt.imgW = model_config.input_width
opt.input_channel = model_config.input_channel
if model_config.input_channel == 3:
opt.rgb = True
else:
opt.rgb = False
if dataset_config.augmentation.keep_aspect_ratio:
opt.PAD = True
else:
opt.PAD = False
# load character list:
# Don't convert the characters to lower case
with open(dataset_config.character_list_file, "r") as f:
characters = "".join([ch.strip() for ch in f.readlines()])
opt.character = characters
opt.workers = dataset_config.workers
opt.batch_size = experiment_spec.inference.batch_size
# 2. Init Model params
opt.saved_model = experiment_spec.inference.checkpoint
if model_config.TPS:
opt.Transformation = "TPS"
else:
opt.Transformation = "None"
opt.FeatureExtraction = model_config.backbone
opt.SequenceModeling = model_config.sequence
opt.Prediction = model_config.prediction
opt.num_fiducial = model_config.num_fiducial
opt.output_channel = model_config.feature_channel
opt.hidden_size = model_config.hidden_size
opt.baiduCTC = False
    # 3. Init for Device setting
os.environ["CUDA_VISIBLE_DEVICES"] = str(experiment_spec.inference.gpu_id)
import torch
opt.num_gpu = torch.cuda.device_count()
return opt
def run_experiment(experiment_spec):
"""run experiment."""
opt = init_configs(experiment_spec)
# Set status logging
if experiment_spec.inference.results_dir is not None:
results_dir = experiment_spec.inference.results_dir
else:
results_dir = os.path.join(experiment_spec.results_dir, "inference")
experiment_spec.inference.results_dir = results_dir
os.makedirs(results_dir, exist_ok=True)
status_file = os.path.join(results_dir, "status.json")
status_logging.set_status_logger(status_logging.StatusLogger(filename=status_file,
append=True))
status_logging.get_status_logger().write(
status_level=status_logging.Status.STARTED,
message="Starting OCRNet inference"
)
inference(opt)
spec_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Load experiment specification, additionally using the schema for validation/retrieving the default values.
# --config_path and --config_name will be provided by the entrypoint script.
@hydra_runner(
config_path=os.path.join(spec_root, "experiment_specs"), config_name="experiment", schema=ExperimentConfig
)
def main(cfg: ExperimentConfig) -> None:
"""Run the training process."""
try:
run_experiment(experiment_spec=cfg)
status_logging.get_status_logger().write(
status_level=status_logging.Status.SUCCESS,
message="Inference finished successfully."
)
except (KeyboardInterrupt, SystemExit):
status_logging.get_status_logger().write(
message="Inference was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
except Exception as e:
status_logging.get_status_logger().write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
raise e
if __name__ == '__main__':
main()
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/ocrnet/scripts/inference.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Evaluate OCRNet script.
"""
import os
import argparse
import nvidia_tao_pytorch.core.loggers.api_logging as status_logging
from nvidia_tao_pytorch.core.hydra.hydra_runner import hydra_runner
from nvidia_tao_pytorch.cv.ocrnet.config.default_config import ExperimentConfig
def test(opt):
"""Test the OCRNet according to option."""
# @TODO(tylerz): Lazy import for correctly setting CUDA_VISIBLE_DEVICES
import torch
import torch.utils.data
from nvidia_tao_pytorch.cv.ocrnet.utils.utils import (CTCLabelConverter,
AttnLabelConverter,
validation, load_checkpoint)
from nvidia_tao_pytorch.cv.ocrnet.dataloader.ocr_dataset import LmdbDataset, RawGTDataset, AlignCollate
from nvidia_tao_pytorch.cv.ocrnet.model.model import Model
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
""" model configuration """
if 'CTC' in opt.Prediction:
converter = CTCLabelConverter(opt.character)
else:
converter = AttnLabelConverter(opt.character)
opt.num_class = len(converter.character)
if opt.rgb:
opt.input_channel = 3
# load model
print('loading pretrained model from %s' % opt.saved_model)
ckpt = load_checkpoint(opt.saved_model, key=opt.encryption_key, to_cpu=True)
if not isinstance(ckpt, Model):
model = Model(opt)
model = torch.nn.DataParallel(model).to(device)
state_dict = ckpt
model.load_state_dict(state_dict, strict=True)
else:
model = torch.nn.DataParallel(ckpt).to(device)
print(model)
""" setup loss """
if 'CTC' in opt.Prediction:
criterion = torch.nn.CTCLoss(zero_infinity=True).to(device)
else:
criterion = torch.nn.CrossEntropyLoss(ignore_index=0).to(device) # ignore [GO] token = ignore index 0
""" evaluation """
model.eval()
with torch.no_grad():
log = open(f'{opt.exp_name}/log_evaluation.txt', 'a')
AlignCollate_evaluation = AlignCollate(imgH=opt.imgH, imgW=opt.imgW, keep_ratio_with_pad=opt.PAD)
if opt.eval_gt_file is None:
eval_data = LmdbDataset(opt.eval_data, opt)
else:
eval_data = RawGTDataset(gt_file=opt.eval_gt_file, img_dir=opt.eval_data, opt=opt)
eval_data_log = f"data directory:\t{opt.eval_data}\t num samples: {len(eval_data)}"
print(eval_data_log)
evaluation_loader = torch.utils.data.DataLoader(
eval_data, batch_size=opt.batch_size,
shuffle=False,
num_workers=int(opt.workers),
collate_fn=AlignCollate_evaluation, pin_memory=True)
_, accuracy_by_best_model, _, _, _, _, _, _ = validation(
model, criterion, evaluation_loader, converter, opt)
log.write(eval_data_log)
print(f'Accuracy: {accuracy_by_best_model:0.3f}')
log.write(f'{accuracy_by_best_model:0.3f}\n')
log.close()
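# At its core, the reported number is word-level accuracy; a rough illustration with
# made-up predictions, assuming validation() scores exact string matches:
#   preds = ["hello", "world", "ocr"]
#   labels = ["hello", "world", "0cr"]
#   accuracy = 100.0 * sum(p == gt for p, gt in zip(preds, labels)) / len(labels)  # 66.67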
def init_configs(experiment_spec: ExperimentConfig):
"""Pass the yaml config to argparse.Namespace"""
parser = argparse.ArgumentParser()
opt, _ = parser.parse_known_args()
if experiment_spec.evaluate.results_dir is not None:
results_dir = experiment_spec.evaluate.results_dir
else:
results_dir = os.path.join(experiment_spec.results_dir, "evaluate")
experiment_spec.evaluate.results_dir = results_dir
opt.exp_name = results_dir
opt.encryption_key = experiment_spec.encryption_key
opt.eval_data = experiment_spec.evaluate.test_dataset_dir
opt.eval_gt_file = experiment_spec.evaluate.test_dataset_gt_file
# 1. Init dataset params
dataset_config = experiment_spec.dataset
model_config = experiment_spec.model
opt.batch_max_length = dataset_config.max_label_length
opt.imgH = model_config.input_height
opt.imgW = model_config.input_width
opt.input_channel = model_config.input_channel
if model_config.input_channel == 3:
opt.rgb = True
else:
opt.rgb = False
if dataset_config.augmentation.keep_aspect_ratio:
opt.PAD = True
else:
opt.PAD = False
# load character list:
# Don't convert the characters to lower case
with open(dataset_config.character_list_file, "r") as f:
characters = "".join([ch.strip() for ch in f.readlines()])
opt.character = characters
    # TODO(tylerz): hardcode the data_filtering_off to be True.
    # As a result, a KeyError may be raised when encoding labels that contain unsupported characters.
opt.data_filtering_off = True
opt.workers = dataset_config.workers
opt.batch_size = experiment_spec.evaluate.batch_size
# 2. Init Model params
opt.saved_model = experiment_spec.evaluate.checkpoint
if model_config.TPS:
opt.Transformation = "TPS"
else:
opt.Transformation = "None"
opt.FeatureExtraction = model_config.backbone
opt.SequenceModeling = model_config.sequence
opt.Prediction = model_config.prediction
opt.num_fiducial = model_config.num_fiducial
opt.output_channel = model_config.feature_channel
opt.hidden_size = model_config.hidden_size
if model_config.quantize:
opt.quantize = True
else:
opt.quantize = False
opt.baiduCTC = False
    # 3. Init for Device setting
os.environ["CUDA_VISIBLE_DEVICES"] = str(experiment_spec.evaluate.gpu_id)
import torch
opt.num_gpu = torch.cuda.device_count()
return opt
def run_experiment(experiment_spec):
"""run experiment."""
opt = init_configs(experiment_spec)
os.makedirs(f'{opt.exp_name}', exist_ok=True)
# Set status logging
status_file = os.path.join(opt.exp_name, "status.json")
status_logging.set_status_logger(status_logging.StatusLogger(filename=status_file,
append=True))
status_logging.get_status_logger().write(
status_level=status_logging.Status.STARTED,
message="Starting OCRNet evaluation"
)
test(opt)
spec_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Load experiment specification, additionally using the schema for validation/retrieving the default values.
# --config_path and --config_name will be provided by the entrypoint script.
@hydra_runner(
config_path=os.path.join(spec_root, "experiment_specs"), config_name="experiment", schema=ExperimentConfig
)
def main(cfg: ExperimentConfig) -> None:
"""Run the training process."""
try:
run_experiment(experiment_spec=cfg)
status_logging.get_status_logger().write(
status_level=status_logging.Status.SUCCESS,
message="Evaluation finished successfully."
)
except (KeyboardInterrupt, SystemExit):
status_logging.get_status_logger().write(
message="Evaluation was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
except Exception as e:
status_logging.get_status_logger().write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
raise e
if __name__ == '__main__':
main()
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/ocrnet/scripts/evaluate.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Entrypoint script for the OCRNet task."""
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/ocrnet/entrypoint/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""'Entry point' script running subtasks related to ocrnet.
"""
import os
import argparse
import subprocess
import sys
import nvidia_tao_pytorch.cv.ocrnet.scripts as scripts
from nvidia_tao_pytorch.core.entrypoint import get_subtasks
def launch(parser, subtasks):
"""CLI function that executes subtasks.
Args:
parser: Created parser object for a given task.
subtasks: list of subtasks for a given task.
"""
# Subtasks for a given model.
parser.add_argument(
'subtask', default='train', choices=subtasks.keys(), help="Subtask for a given task/model.",
)
# Add standard TLT arguments.
parser.add_argument(
"-r",
"--results_dir",
help="Path to a folder where the experiment outputs should be written.",
default=None,
required=False,
)
parser.add_argument("-k", "--key", help="User specific encoding key to save or load a .tlt model.")
parser.add_argument("-e", "--experiment_spec_file", help="Path to the experiment spec file.", default=None)
# Parse the arguments.
args, unknown_args = parser.parse_known_args()
script_args = ""
    # Make sure the user provides a spec file.
if args.experiment_spec_file is None:
print("ERROR: The subtask `{}` requires the following argument: -e/--experiment_spec_file".format(args.subtask))
exit(1)
# Make sure the file exists!
if not os.path.exists(args.experiment_spec_file):
print("ERROR: The indicated experiment spec file `{}` doesn't exist!".format(args.experiment_spec_file))
exit(1)
# Split spec file_path into config path and config name.
path, name = os.path.split(args.experiment_spec_file)
if path != '':
script_args += " --config-path " + os.path.realpath(path)
script_args += " --config-name " + name
# And add other params AFTERWARDS!
if args.subtask in ["train"]:
if args.results_dir:
script_args += " output_dir=" + args.results_dir
# Add encryption key.
if args.subtask in ["train", "evaluate", "inference", "export"]:
if args.key is not None:
script_args += " encryption_key=" + args.key
# Find relevant module and pass args.
script = subtasks[args.subtask]["runner_path"]
# Pass unknown args to call
unknown_args_as_str = " ".join(unknown_args)
# Create a system call.
call = "python " + script + script_args + " " + unknown_args_as_str
try:
# Run the script.
subprocess.check_call(call, shell=True, stdout=sys.stdout, stderr=sys.stdout)
except subprocess.CalledProcessError as e:
if e.output is not None:
print(e.output)
exit(1)
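# For illustration, with a spec at /specs/experiment.yaml, subtask "train", "-r /results"
# and "-k nvidia_tao" (all hypothetical values), the assembled command string looks
# roughly like:
#   call = ("python /path/to/scripts/train.py"
#           " --config-path /specs --config-name experiment.yaml"
#           " output_dir=/results encryption_key=nvidia_tao")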
def main():
"""Main entrypoint wrapper."""
# Create parser for a given task.
parser = argparse.ArgumentParser(
"ocrnet", add_help=True, description="TAO Toolkit OCR"
)
# Build list of subtasks by inspecting the package.
subtasks = get_subtasks(scripts)
# Parse the arguments and launch the subtask.
launch(parser, subtasks)
if __name__ == '__main__':
main()
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/ocrnet/entrypoint/ocrnet.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""OCRNet transformation module."""
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
class TPS_SpatialTransformerNetwork(nn.Module):
""" Rectification Network of RARE, namely TPS based STN """
def __init__(self, F, I_size, I_r_size, I_channel_num=1):
""" Based on RARE TPS
Args:
batch_I: Batch Input Image [batch_size x I_channel_num x I_height x I_width]
I_size : (height, width) of the input image I
I_r_size : (height, width) of the rectified image I_r
I_channel_num : the number of channels of the input image I
Returns:
batch_I_r: rectified image [batch_size x I_channel_num x I_r_height x I_r_width]
"""
super(TPS_SpatialTransformerNetwork, self).__init__()
self.F = F
self.I_size = I_size
self.I_r_size = I_r_size # = (I_r_height, I_r_width)
self.I_channel_num = I_channel_num
self.LocalizationNetwork = LocalizationNetwork(self.F, self.I_channel_num)
self.GridGenerator = GridGenerator(self.F, self.I_r_size)
def forward(self, batch_I):
"""Forward with original input."""
batch_C_prime = self.LocalizationNetwork(batch_I) # batch_size x K x 2
build_P_prime = self.GridGenerator.build_P_prime(batch_C_prime) # batch_size x n (= I_r_width x I_r_height) x 2
build_P_prime_reshape = build_P_prime.reshape([build_P_prime.size(0), self.I_r_size[0], self.I_r_size[1], 2])
if torch.__version__ > "1.2.0":
batch_I_r = F.grid_sample(batch_I, build_P_prime_reshape, padding_mode='border', align_corners=True)
else:
batch_I_r = F.grid_sample(batch_I, build_P_prime_reshape, padding_mode='border')
return batch_I_r
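# Shape sketch of the rectification step above (arbitrary values, gray 32x100 input;
# the grid here is random, whereas the real grid comes from the TPS transform):
#   import torch
#   import torch.nn.functional as F
#   batch_I = torch.randn(2, 1, 32, 100)       # batch x C x H x W
#   grid = torch.rand(2, 32, 100, 2) * 2 - 1   # normalized sampling grid in [-1, 1]
#   batch_I_r = F.grid_sample(batch_I, grid, padding_mode='border', align_corners=True)
#   # batch_I_r.shape -> torch.Size([2, 1, 32, 100])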
class LocalizationNetwork(nn.Module):
""" Localization Network of RARE, which predicts C' (K x 2) from I (I_width x I_height) """
def __init__(self, F, I_channel_num):
"""Init.
Args:
F (int): The number of fiducial points.
I_channel_num (int): The number of input channels.
"""
super(LocalizationNetwork, self).__init__()
self.F = F
self.I_channel_num = I_channel_num
self.conv = nn.Sequential(
nn.Conv2d(in_channels=self.I_channel_num, out_channels=64, kernel_size=3, stride=1, padding=1,
bias=False), nn.BatchNorm2d(64), nn.ReLU(True),
nn.MaxPool2d(2, 2), # batch_size x 64 x I_height/2 x I_width/2
nn.Conv2d(64, 128, 3, 1, 1, bias=False), nn.BatchNorm2d(128), nn.ReLU(True),
nn.MaxPool2d(2, 2), # batch_size x 128 x I_height/4 x I_width/4
nn.Conv2d(128, 256, 3, 1, 1, bias=False), nn.BatchNorm2d(256), nn.ReLU(True),
nn.MaxPool2d(2, 2), # batch_size x 256 x I_height/8 x I_width/8
nn.Conv2d(256, 512, 3, 1, 1, bias=False), nn.BatchNorm2d(512), nn.ReLU(True),
nn.AdaptiveAvgPool2d(1) # batch_size x 512
)
self.localization_fc1 = nn.Sequential(nn.Linear(512, 256), nn.ReLU(True))
self.localization_fc2 = nn.Linear(256, self.F * 2)
# Init fc2 in LocalizationNetwork
self.localization_fc2.weight.data.fill_(0)
""" see RARE paper Fig. 6 (a) """
ctrl_pts_x = np.linspace(-1.0, 1.0, int(F / 2))
ctrl_pts_y_top = np.linspace(0.0, -1.0, num=int(F / 2))
ctrl_pts_y_bottom = np.linspace(1.0, 0.0, num=int(F / 2))
ctrl_pts_top = np.stack([ctrl_pts_x, ctrl_pts_y_top], axis=1)
ctrl_pts_bottom = np.stack([ctrl_pts_x, ctrl_pts_y_bottom], axis=1)
initial_bias = np.concatenate([ctrl_pts_top, ctrl_pts_bottom], axis=0)
self.localization_fc2.bias.data = torch.from_numpy(initial_bias).float().view(-1)
def forward(self, batch_I):
"""
input: batch_I : Batch Input Image [batch_size x I_channel_num x I_height x I_width]
output: batch_C_prime : Predicted coordinates of fiducial points for input batch [batch_size x F x 2]
"""
batch_size = batch_I.size(0)
features = self.conv(batch_I).view(batch_size, -1)
batch_C_prime = self.localization_fc2(self.localization_fc1(features)).view(batch_size, self.F, 2)
return batch_C_prime
class GridGenerator(nn.Module):
""" Grid Generator of RARE, which produces P_prime by multipling T with P """
def __init__(self, F, I_r_size):
"""Generate P_hat and inv_delta_C for later.
Args:
F (int): The number of fiducial points.
I_r_size (tuple): A tuple containing the height and width of the rectified image.
"""
super(GridGenerator, self).__init__()
self.eps = 1e-6
self.I_r_height, self.I_r_width = I_r_size
self.F = F
self.C = self._build_C(self.F) # F x 2
self.P = self._build_P(self.I_r_width, self.I_r_height)
# # for multi-gpu, you need register buffer
self.register_buffer("inv_delta_C", torch.tensor(self._build_inv_delta_C(self.F, self.C)).float()) # F+3 x F+3
self.register_buffer("P_hat", torch.tensor(self._build_P_hat(self.F, self.C, self.P)).float()) # n x F+3
# # for fine-tuning with different image width, you may use below instead of self.register_buffer
# self.inv_delta_C = torch.tensor(self._build_inv_delta_C(self.F, self.C)).float().cuda() # F+3 x F+3
# self.P_hat = torch.tensor(self._build_P_hat(self.F, self.C, self.P)).float().cuda() # n x F+3
def _build_C(self, F):
""" Return coordinates of fiducial points in I_r; C
Args:
F (int): The number of fiducial points.
"""
ctrl_pts_x = np.linspace(-1.0, 1.0, int(F / 2))
ctrl_pts_y_top = -1 * np.ones(int(F / 2))
ctrl_pts_y_bottom = np.ones(int(F / 2))
ctrl_pts_top = np.stack([ctrl_pts_x, ctrl_pts_y_top], axis=1)
ctrl_pts_bottom = np.stack([ctrl_pts_x, ctrl_pts_y_bottom], axis=1)
C = np.concatenate([ctrl_pts_top, ctrl_pts_bottom], axis=0)
return C # F x 2
def _build_inv_delta_C(self, F, C):
""" Return inv_delta_C which is needed to calculate T.
Args:
F (int): The number of fiducial points.
C (ndarray): The coordinates matrix of fiducial points.
"""
hat_C = np.zeros((F, F), dtype=float) # F x F
for i in range(0, F):
for j in range(i, F):
r = np.linalg.norm(C[i] - C[j])
hat_C[i, j] = r
hat_C[j, i] = r
np.fill_diagonal(hat_C, 1)
hat_C = (hat_C ** 2) * np.log(hat_C)
# print(C.shape, hat_C.shape)
delta_C = np.concatenate( # F+3 x F+3
[
np.concatenate([np.ones((F, 1)), C, hat_C], axis=1), # F x F+3
np.concatenate([np.zeros((2, 3)), np.transpose(C)], axis=1), # 2 x F+3
np.concatenate([np.zeros((1, 3)), np.ones((1, F))], axis=1) # 1 x F+3
],
axis=0
)
inv_delta_C = np.linalg.inv(delta_C)
return inv_delta_C # F+3 x F+3
def _build_P(self, I_r_width, I_r_height):
"""Build p.
Args:
I_r_width (int): The rectified image width.
I_r_height (int): The rectified image height.
"""
I_r_grid_x = (np.arange(-I_r_width, I_r_width, 2) + 1.0) / I_r_width # self.I_r_width
I_r_grid_y = (np.arange(-I_r_height, I_r_height, 2) + 1.0) / I_r_height # self.I_r_height
P = np.stack( # self.I_r_width x self.I_r_height x 2
np.meshgrid(I_r_grid_x, I_r_grid_y),
axis=2
)
return P.reshape([-1, 2]) # n (= self.I_r_width x self.I_r_height) x 2
def _build_P_hat(self, F, C, P):
"""Build P_hat.
Args:
F (int): The number of fiducial points.
C (np.ndarray): An array containing the coordinates of fiducial points in I_r.
P (np.ndarray): An array containing the coordinates of points in the output grid.
"""
n = P.shape[0] # n (= self.I_r_width x self.I_r_height)
P_tile = np.tile(np.expand_dims(P, axis=1), (1, F, 1)) # n x 2 -> n x 1 x 2 -> n x F x 2
C_tile = np.expand_dims(C, axis=0) # 1 x F x 2
P_diff = P_tile - C_tile # n x F x 2
rbf_norm = np.linalg.norm(P_diff, ord=2, axis=2, keepdims=False) # n x F
rbf = np.multiply(np.square(rbf_norm), np.log(rbf_norm + self.eps)) # n x F
P_hat = np.concatenate([np.ones((n, 1)), P, rbf], axis=1)
return P_hat # n x F+3
def build_P_prime(self, batch_C_prime):
"""Generate Grid from batch_C_prime [batch_size x F x 2].
Args:
batch_C_prime (np.ndarray): The batch of predicted fiducial points.
"""
batch_size = batch_C_prime.size(0)
batch_inv_delta_C = self.inv_delta_C.repeat(batch_size, 1, 1)
batch_P_hat = self.P_hat.repeat(batch_size, 1, 1)
batch_C_prime_with_zeros = torch.cat((batch_C_prime, torch.zeros(
batch_size, 3, 2).float().to(device)), dim=1) # batch_size x F+3 x 2
batch_T = torch.bmm(batch_inv_delta_C, batch_C_prime_with_zeros) # batch_size x F+3 x 2
batch_P_prime = torch.bmm(batch_P_hat, batch_T) # batch_size x n x 2
return batch_P_prime # batch_size x n x 2
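# Shape check for build_P_prime with F = 20 fiducial points and a 32x100 rectified grid
# (CPU tensors, purely illustrative; the real buffers are computed in __init__):
#   import torch
#   B, F_pts, n = 2, 20, 32 * 100
#   inv_delta_C = torch.randn(F_pts + 3, F_pts + 3)
#   P_hat = torch.randn(n, F_pts + 3)
#   batch_C_prime = torch.randn(B, F_pts, 2)
#   C_prime_padded = torch.cat([batch_C_prime, torch.zeros(B, 3, 2)], dim=1)  # B x (F+3) x 2
#   batch_T = torch.bmm(inv_delta_C.repeat(B, 1, 1), C_prime_padded)          # B x (F+3) x 2
#   batch_P_prime = torch.bmm(P_hat.repeat(B, 1, 1), batch_T)                 # B x n x 2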
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/ocrnet/model/transformation.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""OCRNet model module."""
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/ocrnet/model/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""OCRNet feature extraction module."""
import torch.nn as nn
import torch.nn.functional as F
from pytorch_quantization import nn as quant_nn
class VGG_FeatureExtractor(nn.Module):
""" FeatureExtractor of CRNN (https://arxiv.org/pdf/1507.05717.pdf) """
def __init__(self, input_channel, output_channel=512):
"""Init.
Args:
input_channel (int): The number of input channels.
output_channel (int, optional): The number of output channels. Default is 512.
"""
super(VGG_FeatureExtractor, self).__init__()
self.output_channel = [int(output_channel / 8), int(output_channel / 4),
int(output_channel / 2), output_channel] # [64, 128, 256, 512]
self.ConvNet = nn.Sequential(
nn.Conv2d(input_channel, self.output_channel[0], 3, 1, 1), nn.ReLU(True),
nn.MaxPool2d(2, 2), # 64x16x50
nn.Conv2d(self.output_channel[0], self.output_channel[1], 3, 1, 1), nn.ReLU(True),
nn.MaxPool2d(2, 2), # 128x8x25
nn.Conv2d(self.output_channel[1], self.output_channel[2], 3, 1, 1), nn.ReLU(True), # 256x8x25
nn.Conv2d(self.output_channel[2], self.output_channel[2], 3, 1, 1), nn.ReLU(True),
nn.MaxPool2d((2, 1), (2, 1)), # 256x4x25
nn.Conv2d(self.output_channel[2], self.output_channel[3], 3, 1, 1, bias=False),
nn.BatchNorm2d(self.output_channel[3]), nn.ReLU(True), # 512x4x25
nn.Conv2d(self.output_channel[3], self.output_channel[3], 3, 1, 1, bias=False),
nn.BatchNorm2d(self.output_channel[3]), nn.ReLU(True),
nn.MaxPool2d((2, 1), (2, 1)), # 512x2x25
nn.Conv2d(self.output_channel[3], self.output_channel[3], 2, 1, 0), nn.ReLU(True)) # 512x1x24
def forward(self, input): # pylint: disable=redefined-builtin
"""Forward."""
return self.ConvNet(input)
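# Output-shape sketch for a 32x100 grayscale input (matches the per-layer comments above):
#   import torch
#   extractor = VGG_FeatureExtractor(input_channel=1, output_channel=512)
#   feat = extractor(torch.randn(1, 1, 32, 100))
#   # feat.shape -> torch.Size([1, 512, 1, 24])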
class RCNN_FeatureExtractor(nn.Module):
""" FeatureExtractor of GRCNN (https://papers.nips.cc/paper/6637-gated-recurrent-convolution-neural-network-for-ocr.pdf) """
def __init__(self, input_channel, output_channel=512):
"""Init.
Args:
input_channel (int): The number of input channels.
output_channel (int, optional): The number of output channels. Default is 512.
"""
super(RCNN_FeatureExtractor, self).__init__()
self.output_channel = [int(output_channel / 8), int(output_channel / 4),
int(output_channel / 2), output_channel] # [64, 128, 256, 512]
self.ConvNet = nn.Sequential(
nn.Conv2d(input_channel, self.output_channel[0], 3, 1, 1), nn.ReLU(True),
nn.MaxPool2d(2, 2), # 64 x 16 x 50
GRCL(self.output_channel[0], self.output_channel[0], num_iteration=5, kernel_size=3, pad=1),
nn.MaxPool2d(2, 2), # 64 x 8 x 25
GRCL(self.output_channel[0], self.output_channel[1], num_iteration=5, kernel_size=3, pad=1),
nn.MaxPool2d(2, (2, 1), (0, 1)), # 128 x 4 x 26
GRCL(self.output_channel[1], self.output_channel[2], num_iteration=5, kernel_size=3, pad=1),
nn.MaxPool2d(2, (2, 1), (0, 1)), # 256 x 2 x 27
nn.Conv2d(self.output_channel[2], self.output_channel[3], 2, 1, 0, bias=False),
nn.BatchNorm2d(self.output_channel[3]), nn.ReLU(True)) # 512 x 1 x 26
def forward(self, input): # pylint: disable=redefined-builtin
"""Forward."""
return self.ConvNet(input)
class ResNet_FeatureExtractor(nn.Module):
""" FeatureExtractor of FAN (http://openaccess.thecvf.com/content_ICCV_2017/papers/Cheng_Focusing_Attention_Towards_ICCV_2017_paper.pdf) """
def __init__(self, input_channel, output_channel=512, quantize=False):
"""Init.
Args:
input_channel (int): The number of input channels.
output_channel (int, optional): The number of output channels. Default is 512.
quantize (bool, optional): Whether to use quantization. Default is False.
"""
super(ResNet_FeatureExtractor, self).__init__()
self.ConvNet = ResNet(input_channel, output_channel, BasicBlock, [1, 2, 5, 3], quantize=quantize)
def forward(self, input): # pylint: disable=redefined-builtin
"""Forward."""
return self.ConvNet(input)
# For Gated RCNN
class GRCL(nn.Module):
"""Gated RCNN."""
def __init__(self, input_channel, output_channel, num_iteration, kernel_size, pad):
"""Init.
Args:
input_channel (int): The number of input channels.
output_channel (int): The number of output channels.
num_iteration (int): The number of iterations of recursion.
kernel_size (int): The size of the kernel.
pad (int): The amount of padding.
"""
super(GRCL, self).__init__()
self.wgf_u = nn.Conv2d(input_channel, output_channel, 1, 1, 0, bias=False)
self.wgr_x = nn.Conv2d(output_channel, output_channel, 1, 1, 0, bias=False)
self.wf_u = nn.Conv2d(input_channel, output_channel, kernel_size, 1, pad, bias=False)
self.wr_x = nn.Conv2d(output_channel, output_channel, kernel_size, 1, pad, bias=False)
self.BN_x_init = nn.BatchNorm2d(output_channel)
self.num_iteration = num_iteration
self.GRCL = [GRCL_unit(output_channel) for _ in range(num_iteration)]
self.GRCL = nn.Sequential(*self.GRCL)
def forward(self, input): # pylint: disable=redefined-builtin
""" The input of GRCL is consistant over time t, which is denoted by u(0)
thus wgf_u / wf_u is also consistant over time t.
"""
wgf_u = self.wgf_u(input)
wf_u = self.wf_u(input)
x = F.relu(self.BN_x_init(wf_u))
for i in range(self.num_iteration):
x = self.GRCL[i](wgf_u, self.wgr_x(x), wf_u, self.wr_x(x))
return x
class GRCL_unit(nn.Module):
"""Gated RCNN unit."""
def __init__(self, output_channel):
"""Init.
Args:
output_channel (int): The number of output channels.
"""
super(GRCL_unit, self).__init__()
self.BN_gfu = nn.BatchNorm2d(output_channel)
self.BN_grx = nn.BatchNorm2d(output_channel)
self.BN_fu = nn.BatchNorm2d(output_channel)
self.BN_rx = nn.BatchNorm2d(output_channel)
self.BN_Gx = nn.BatchNorm2d(output_channel)
def forward(self, wgf_u, wgr_x, wf_u, wr_x):
"""Performs a forward pass through the GRCL_unit network."""
G_first_term = self.BN_gfu(wgf_u)
G_second_term = self.BN_grx(wgr_x)
G = F.sigmoid(G_first_term + G_second_term)
x_first_term = self.BN_fu(wf_u)
x_second_term = self.BN_Gx(self.BN_rx(wr_x) * G)
x = F.relu(x_first_term + x_second_term)
return x
class BasicBlock(nn.Module):
"""Basic Block for ResNet."""
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None, quantize=False):
"""Init.
Args:
inplanes (int): The number of input channels.
planes (int): The number of output channels.
stride (int, optional): The stride of the convolutional layer. Default is 1.
downsample (nn.Module, optional): The downsampling layer. Default is None.
quantize (bool, optional): Whether to use quantization. Default is False.
"""
super(BasicBlock, self).__init__()
self.quantize = quantize
self.conv1 = self._conv3x3(inplanes, planes, quantize=self.quantize)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = self._conv3x3(planes, planes, quantize=self.quantize)
self.bn2 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
if self.quantize:
self.residual_quantizer = \
quant_nn.TensorQuantizer(quant_nn.QuantConv2d.default_quant_desc_input)
def _conv3x3(self, in_planes, out_planes, stride=1, quantize=False):
"""3x3 convolution with padding"""
if quantize:
return quant_nn.QuantConv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
def forward(self, x):
"""forward."""
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
if self.quantize:
out += self.residual_quantizer(residual)
else:
out += residual
out = self.relu(out)
return out
def get_conv2d(quantize=False):
"""Helper function for quantize model."""
if quantize:
return quant_nn.QuantConv2d
return nn.Conv2d
class ResNet(nn.Module):
"""ResNet module."""
def __init__(self, input_channel, output_channel, block, layers, quantize=False):
"""Init.
Args:
input_channel (int): The number of input channels.
output_channel (int): The number of output channels.
block (nn.Module): The block to use for the ResNet network.
layers (list): A list of integers specifying the number of blocks in each layer.
quantize (bool, optional): Whether to use quantization. Default is False.
"""
super(ResNet, self).__init__()
self.quantize = quantize
self.output_channel_block = [int(output_channel / 4), int(output_channel / 2), output_channel, output_channel]
self.inplanes = int(output_channel / 8)
self.conv0_1 = get_conv2d(self.quantize)(input_channel, int(output_channel / 16),
kernel_size=3, stride=1, padding=1, bias=False)
self.bn0_1 = nn.BatchNorm2d(int(output_channel / 16))
self.conv0_2 = get_conv2d(self.quantize)(int(output_channel / 16), self.inplanes,
kernel_size=3, stride=1, padding=1, bias=False)
self.bn0_2 = nn.BatchNorm2d(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool1 = nn.MaxPool2d(kernel_size=2, stride=2, padding=0)
self.layer1 = self._make_layer(block, self.output_channel_block[0], layers[0])
self.conv1 = get_conv2d(self.quantize)(self.output_channel_block[0], self.output_channel_block[
0], kernel_size=3, stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(self.output_channel_block[0])
self.maxpool2 = nn.MaxPool2d(kernel_size=2, stride=2, padding=0)
self.layer2 = self._make_layer(block, self.output_channel_block[1], layers[1], stride=1)
self.conv2 = get_conv2d(self.quantize)(self.output_channel_block[1], self.output_channel_block[
1], kernel_size=3, stride=1, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(self.output_channel_block[1])
self.maxpool3 = nn.MaxPool2d(kernel_size=2, stride=(2, 1), padding=(0, 1))
self.layer3 = self._make_layer(block, self.output_channel_block[2], layers[2], stride=1)
self.conv3 = get_conv2d(self.quantize)(self.output_channel_block[2], self.output_channel_block[
2], kernel_size=3, stride=1, padding=1, bias=False)
self.bn3 = nn.BatchNorm2d(self.output_channel_block[2])
self.layer4 = self._make_layer(block, self.output_channel_block[3], layers[3], stride=1)
self.conv4_1 = get_conv2d(self.quantize)(self.output_channel_block[3], self.output_channel_block[
3], kernel_size=2, stride=(2, 1), padding=(0, 1), bias=False)
self.bn4_1 = nn.BatchNorm2d(self.output_channel_block[3])
self.conv4_2 = get_conv2d(self.quantize)(self.output_channel_block[3], self.output_channel_block[
3], kernel_size=2, stride=1, padding=0, bias=False)
self.bn4_2 = nn.BatchNorm2d(self.output_channel_block[3])
def _conv1x1(self, in_planes, out_planes, stride=1, quantize=False):
"""conv1x1 helper."""
if quantize:
return quant_nn.QuantConv2d(in_planes, out_planes, kernel_size=1, stride=stride,
bias=False)
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride,
bias=False)
def _make_layer(self, block, planes, blocks, stride=1):
"""make resnet block."""
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
# downsample = nn.Sequential(
# nn.Conv2d(self.inplanes, planes * block.expansion,
# kernel_size=1, stride=stride, bias=False),
# nn.BatchNorm2d(planes * block.expansion),
# )
downsample = nn.Sequential(
self._conv1x1(self.inplanes, planes * block.expansion,
stride=stride, quantize=self.quantize),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
"""forward."""
x = self.conv0_1(x)
x = self.bn0_1(x)
x = self.relu(x)
x = self.conv0_2(x)
x = self.bn0_2(x)
x = self.relu(x)
x = self.maxpool1(x)
x = self.layer1(x)
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool2(x)
x = self.layer2(x)
x = self.conv2(x)
x = self.bn2(x)
x = self.relu(x)
x = self.maxpool3(x)
x = self.layer3(x)
x = self.conv3(x)
x = self.bn3(x)
x = self.relu(x)
x = self.layer4(x)
x = self.conv4_1(x)
x = self.bn4_1(x)
x = self.relu(x)
x = self.conv4_2(x)
x = self.bn4_2(x)
x = self.relu(x)
return x
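# Output-shape sketch for a 32x100 grayscale input without quantization (the expected
# sequence length of 26 follows from the asymmetric pooling/stride settings above):
#   import torch
#   extractor = ResNet_FeatureExtractor(input_channel=1, output_channel=512)
#   feat = extractor(torch.randn(1, 1, 32, 100))
#   # feat.shape -> torch.Size([1, 512, 1, 26])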
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/ocrnet/model/feature_extraction.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""OCRNet prediction module."""
import torch
import torch.nn as nn
import torch.nn.functional as F
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
class Attention(nn.Module):
"""Attention module."""
def __init__(self, input_size, hidden_size, num_classes):
"""Init.
Args:
input_size (int): The size of the input.
hidden_size (int): The size of the hidden layer.
num_classes (int): The number of classes.
"""
super(Attention, self).__init__()
self.attention_cell = AttentionCell(input_size, hidden_size, num_classes)
self.hidden_size = hidden_size
self.num_classes = num_classes
self.generator = nn.Linear(hidden_size, num_classes)
def _char_to_onehot(self, input_char, onehot_dim=38):
"""Convert char label to onehot."""
input_char = input_char.unsqueeze(1)
batch_size = input_char.size(0)
one_hot = torch.FloatTensor(batch_size, onehot_dim).zero_().to(device)
one_hot = one_hot.scatter_(1, input_char, 1)
return one_hot
def forward(self, batch_H, text, is_train=True, batch_max_length=25):
""" Perform attention forward.
Args:
batch_H (torch.Tensor): contextual_feature H = hidden state of encoder. [batch_size x num_steps x contextual_feature_channels]
text (torch.Tensor): the text-index of each image. [batch_size x (max_length+1)]. +1 for [GO] token. text[:, 0] = [GO].
is_train (bool, optional): Set it to be true if in train phase.
batch_max_length (bool, optional): the maximum length of text in this batch.
Returns:
prob: probability distribution at each step [batch_size x num_steps x num_classes]
"""
batch_size = batch_H.size(0)
num_steps = batch_max_length + 1 # +1 for [s] at end of sentence.
output_hiddens = torch.FloatTensor(batch_size, num_steps, self.hidden_size).fill_(0).to(device)
hidden = (torch.FloatTensor(batch_size, self.hidden_size).fill_(0).to(device),
torch.FloatTensor(batch_size, self.hidden_size).fill_(0).to(device))
if is_train:
for i in range(num_steps):
# one-hot vectors for a i-th char. in a batch
char_onehots = self._char_to_onehot(text[:, i], onehot_dim=self.num_classes)
# hidden : decoder's hidden s_{t-1}, batch_H : encoder's hidden H, char_onehots : one-hot(y_{t-1})
hidden, _ = self.attention_cell(hidden, batch_H, char_onehots)
output_hiddens[:, i, :] = hidden[0] # LSTM hidden index (0: hidden, 1: Cell)
probs = self.generator(output_hiddens)
else:
targets = torch.LongTensor(batch_size).fill_(0).to(device) # [GO] token
probs = torch.FloatTensor(batch_size, num_steps, self.num_classes).fill_(0).to(device)
for i in range(num_steps):
char_onehots = self._char_to_onehot(targets, onehot_dim=self.num_classes)
hidden, _ = self.attention_cell(hidden, batch_H, char_onehots)
probs_step = self.generator(hidden[0])
probs[:, i, :] = probs_step
_, next_input = probs_step.max(1)
targets = next_input
return probs # batch_size x num_steps x num_classes
class AttentionCell(nn.Module):
"""Attention Cell."""
def __init__(self, input_size, hidden_size, num_embeddings):
"""Init.
Args:
input_size (int): The size of the input.
hidden_size (int): The size of the hidden layer.
num_embeddings (int): The number of embeddings.
"""
super(AttentionCell, self).__init__()
self.i2h = nn.Linear(input_size, hidden_size, bias=False)
self.h2h = nn.Linear(hidden_size, hidden_size) # either i2i or h2h should have bias
self.score = nn.Linear(hidden_size, 1, bias=False)
self.rnn = nn.LSTMCell(input_size + num_embeddings, hidden_size)
self.hidden_size = hidden_size
def forward(self, prev_hidden, batch_H, char_onehots):
"""Performs a forward pass through the AttentionCell network.
Args:
prev_hidden (tuple): The previous hidden state.
batch_H (torch.Tensor): The input tensor.
char_onehots (torch.Tensor): The one-hot encoded character tensor.
Returns:
tuple: The current hidden state and the attention weights.
"""
# [batch_size x num_encoder_step x num_channel] -> [batch_size x num_encoder_step x hidden_size]
batch_H_proj = self.i2h(batch_H)
prev_hidden_proj = self.h2h(prev_hidden[0]).unsqueeze(1)
e = self.score(torch.tanh(batch_H_proj + prev_hidden_proj)) # batch_size x num_encoder_step * 1
alpha = F.softmax(e, dim=1)
context = torch.bmm(alpha.permute(0, 2, 1), batch_H).squeeze(1) # batch_size x num_channel
concat_context = torch.cat([context, char_onehots], 1) # batch_size x (num_channel + num_embedding)
cur_hidden = self.rnn(concat_context, prev_hidden)
return cur_hidden, alpha
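# Shape note (illustrative, values assumed): with batch_max_length=25 the decoder runs
# num_steps = 26 steps and returns probs of shape [batch_size x 26 x num_classes];
# during inference (is_train=False) the argmax of each step is fed back as the next
# character input instead of the ground-truth text.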
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/ocrnet/model/prediction.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
# Copyright (c) 2019-present NAVER Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Model OCRNet script.
"""
import torch
import torch.nn as nn
from .transformation import TPS_SpatialTransformerNetwork
from .feature_extraction import VGG_FeatureExtractor, RCNN_FeatureExtractor, ResNet_FeatureExtractor
from .sequence_modeling import BidirectionalLSTM
from .prediction import Attention
class Model(nn.Module):
"""Model wrapper wrapping transformation, backbone, sequence."""
def __init__(self, opt):
"""Init."""
super(Model, self).__init__()
self.opt = opt
self.stages = {'Trans': opt.Transformation, 'Feat': opt.FeatureExtraction,
'Seq': opt.SequenceModeling, 'Pred': opt.Prediction}
# self.export = export
""" Transformation """
if opt.Transformation == 'TPS':
self.Transformation = TPS_SpatialTransformerNetwork(
F=opt.num_fiducial, I_size=(opt.imgH, opt.imgW), I_r_size=(opt.imgH, opt.imgW), I_channel_num=opt.input_channel)
else:
print('No Transformation module specified')
""" FeatureExtraction """
if opt.FeatureExtraction == 'VGG':
self.FeatureExtraction = VGG_FeatureExtractor(opt.input_channel, opt.output_channel)
elif opt.FeatureExtraction == 'RCNN':
self.FeatureExtraction = RCNN_FeatureExtractor(opt.input_channel, opt.output_channel)
elif opt.FeatureExtraction == 'ResNet':
self.FeatureExtraction = ResNet_FeatureExtractor(opt.input_channel, opt.output_channel, opt.quantize)
else:
raise Exception('No FeatureExtraction module specified')
self.FeatureExtraction_output = opt.output_channel # int(imgH/16-1) * 512
self.AdaptiveAvgPool = nn.AdaptiveAvgPool2d((None, 1)) # Transform final (imgH/16-1) -> 1
""" Sequence modeling"""
if opt.SequenceModeling == 'BiLSTM':
self.SequenceModeling = nn.Sequential(
BidirectionalLSTM(self.FeatureExtraction_output, opt.hidden_size, opt.hidden_size),
BidirectionalLSTM(opt.hidden_size, opt.hidden_size, opt.hidden_size))
self.SequenceModeling_output = opt.hidden_size
else:
print('No SequenceModeling module specified')
self.SequenceModeling_output = self.FeatureExtraction_output
""" Prediction """
if opt.Prediction == 'CTC':
self.Prediction = nn.Linear(self.SequenceModeling_output, opt.num_class)
elif opt.Prediction == 'Attn':
self.Prediction = Attention(self.SequenceModeling_output, opt.hidden_size, opt.num_class)
else:
            raise Exception('Prediction is neither CTC nor Attn')
def forward(self, input, text, is_train=True): # pylint: disable=redefined-builtin
""" Transformation stage """
if not self.stages['Trans'] == "None":
input = self.Transformation(input)
""" Feature extraction stage """
visual_feature = self.FeatureExtraction(input)
visual_feature = self.AdaptiveAvgPool(visual_feature.permute(0, 3, 1, 2)) # [b, c, h, w] -> [b, w, c, h]
visual_feature = visual_feature.squeeze(3)
""" Sequence modeling stage """
if self.stages['Seq'] == 'BiLSTM':
contextual_feature = self.SequenceModeling(visual_feature)
else:
contextual_feature = visual_feature # for convenience. this is NOT contextually modeled by BiLSTM
""" Prediction stage """
if self.stages['Pred'] == 'CTC':
# if self.export:
# prediction = self.Prediction(contextual_feature.contiguous())
# prediction = nn.functional.softmax(prediction, dim=2)
# sequence_id = torch.argmax(prediction, dim=2)
# sequence_prob = torch.max(prediction, dim=2)
# return sequence_id, sequence_prob
# else:
prediction = self.Prediction(contextual_feature.contiguous())
else:
prediction = self.Prediction(contextual_feature.contiguous(), text, is_train, batch_max_length=self.opt.batch_max_length)
return prediction
class CTCPostProcessor(nn.Module):
"""CTC postprocessor to convert raw ctc output to sequence_id and seqence_probablity"""
def forward(self, prediction):
"""Forward."""
prediction = nn.functional.softmax(prediction, dim=2)
sequence_id = torch.argmax(prediction, dim=2)
sequence_prob = torch.max(prediction, dim=2)
return sequence_id, sequence_prob
class ExportModel(nn.Module):
"""A wrapper class to wrap ocr model and the corresponding post process."""
def __init__(self, ocr_model, prediction_type="CTC"):
"""Init."""
super(ExportModel, self).__init__()
self.ocr_model = ocr_model
if prediction_type == "CTC":
self.post_processor = CTCPostProcessor()
else:
self.post_processor = None
def forward(self, input, text): # pylint: disable=redefined-builtin
"""Forward with post-process."""
prediction = self.ocr_model(input, text, is_train=False)
if self.post_processor is not None:
sequence_id, sequence_prob = self.post_processor(prediction)
return sequence_id, sequence_prob
return prediction
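# Minimal usage sketch (assumed names and shapes, not part of the original module):
#   model = Model(opt)                                    # opt.Prediction == 'CTC'
#   export_model = ExportModel(model, prediction_type="CTC")
#   dummy_image = torch.randn(1, opt.input_channel, opt.imgH, opt.imgW)
#   dummy_text = torch.zeros(1, opt.batch_max_length + 1, dtype=torch.long)
#   sequence_id, sequence_prob = export_model(dummy_image, dummy_text)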
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/ocrnet/model/model.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Main PTL model file for OCRNet """
import os
import random
from typing import Any, Dict, List, Optional
import pickle
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_only
from tabulate import tabulate
import torch
import torch.nn.functional as F
import torchmetrics
# from torch.optim.lr_scheduler import ReduceLROnPlateau, MultiStepLR
import nvidia_tao_pytorch.core.loggers.api_logging as status_logging
from nvidia_tao_pytorch.cv.ocrnet.model.build_nn_model import build_ocrnet_model
from nvidia_tao_pytorch.cv.ocrnet.dataloader.build_dataloader import build_dataloader
from nvidia_tao_pytorch.cv.ocrnet.config.default_config import ExperimentConfig
from nvidia_tao_pytorch.cv.ocrnet.utils.utils import (CTCLabelConverter,
AttnLabelConverter, create_logger)
TABLE_HEADER = ['Ground Truth', 'Prediction', 'Confidence && T/F']
# pylint:disable=too-many-ancestors
class OCRNetModel(pl.LightningModule):
""" PTL module for OCRNet."""
def __init__(self, experiment_spec: ExperimentConfig):
"""Init training for OCRNet."""
super().__init__()
self.experiment_spec = experiment_spec
with open(self.experiment_spec.dataset.character_list_file, "r") as f:
self.characters = "".join([ch.strip() for ch in f.readlines()])
# init the label converter and criterion
self.max_label_length = self.experiment_spec.dataset.max_label_length
if 'CTC' in self.experiment_spec.model.prediction:
self.ctc = True
self.converter = CTCLabelConverter(self.characters)
self.criterion = torch.nn.CTCLoss(zero_infinity=True)
else:
self.ctc = False
self.converter = AttnLabelConverter(self.characters)
self.criterion = torch.nn.CrossEntropyLoss(ignore_index=0) # ignore [GO] token = ignore index 0
self.num_class = len(self.converter.character)
# init the model
self._build_model(experiment_spec)
self.val_accuracy = torchmetrics.Accuracy()
self.best_acc = -1
val_log_file = os.path.join(experiment_spec.train.results_dir, "log_val.txt")
self.console_logger = create_logger(val_log_file)
self.check_val_batch_idx = 0
self.val_batch_num = 0
self.gpu_num = len(self.experiment_spec.train.gpu_ids)
self.status_logging_dict = {"train_loss": 0.0,
"val_loss": 0.0,
"val_acc": 0.0}
def _build_model(self, experiment_spec):
"""Internal function to build the model."""
self.model = build_ocrnet_model(experiment_spec=experiment_spec,
num_class=self.num_class)
print(self.model)
def setup(self, stage: Optional[str] = None):
""" Set up the dataset for train and val"""
self.train_data_path = self.experiment_spec.dataset.train_dataset_dir[0]
self.train_gt_file = self.experiment_spec.dataset.train_gt_file
self.val_data_path = self.experiment_spec.dataset.val_dataset_dir
self.val_gt_file = self.experiment_spec.dataset.val_gt_file
def train_dataloader(self):
"""Build the dataloader for training."""
train_loader = \
build_dataloader(experiment_spec=self.experiment_spec,
data_path=self.train_data_path,
gt_file=self.train_gt_file)
self.console_logger.info(f"Train dataset samples: {len(train_loader.dataset)}")
self.console_logger.info(f"Train batch num: {len(train_loader)}")
return train_loader
def val_dataloader(self):
"""Build the dataloader for validation."""
val_loader = build_dataloader(experiment_spec=self.experiment_spec,
data_path=self.val_data_path,
shuffle=False,
gt_file=self.val_gt_file)
self.console_logger.info(f"Val dataset samples: {len(val_loader.dataset)}")
self.console_logger.info(f"Val batch num: {len(val_loader)}")
self.val_batch_num = int(len(val_loader) / self.gpu_num)
return val_loader
def configure_optimizers(self):
"""Configure optimizers for training"""
self.train_config = self.experiment_spec["train"]
optim_dict = {}
# filter that only require gradient decent
filtered_parameters = []
for p in filter(lambda p: p.requires_grad, self.model.parameters()):
filtered_parameters.append(p)
if self.train_config['optim']['name'] == 'adam':
optim = torch.optim.Adam(filtered_parameters,
lr=self.train_config['optim']['lr'],
betas=(0.9, 0.999))
elif self.train_config['optim']['name'] == 'adadelta':
optim = torch.optim.Adadelta(filtered_parameters,
lr=self.train_config['optim']['lr'],
rho=0.95,
eps=1e-8)
optim_dict["optimizer"] = optim
# # Uncomment the following codes to enable learning rate scheduler
# scheduler_type = self.train_config['optim']['lr_scheduler']
# if scheduler_type == "AutoReduce":
# lr_scheduler = ReduceLROnPlateau(optim, 'min',
# patience=self.train_config['optim']['patience'],
# min_lr=self.train_config['optim']['min_lr'],
# factor=self.train_config['optim']["lr_decay"],
# verbose=True)
# elif scheduler_type == "MultiStep":
# lr_scheduler = MultiStepLR(optimizer=optim,
# milestones=self.train_config['optim']["lr_steps"],
# gamma=self.train_config['optim']["lr_decay"],
# verbose=True)
# else:
# raise ValueError("Only [AutoReduce, MultiStep] scheduler is supported")
# optim_dict["lr_scheduler"] = lr_scheduler
# optim_dict['monitor'] = self.train_config['optim']['lr_monitor']
return optim_dict
def training_step(self, batch, batch_idx):
"""Training step."""
image, labels = batch
text, length = self.converter.encode(labels, batch_max_length=self.max_label_length)
batch_size = image.size(0)
if self.ctc:
preds = self.model(image, text)
preds_size = torch.IntTensor([preds.size(1)] * batch_size)
preds = preds.log_softmax(2).permute(1, 0, 2)
cost = self.criterion(preds, text, preds_size, length)
else:
preds = self.model(image, text[:, :-1]) # align with Attention.forward
target = text[:, 1:] # without [GO] Symbol
cost = self.criterion(preds.view(-1, preds.shape[-1]), target.contiguous().view(-1))
return cost
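    # Note: torch.nn.CTCLoss expects log-probabilities of shape [T, N, C]; the
    # permute(1, 0, 2) above converts the model output from [batch, time, classes]
    # to that layout, while preds_size carries the (fixed) input length per sample
    # and length the encoded target lengths.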
def validation_step(self, batch, batch_idx):
"""Validation step."""
image, labels = batch
batch_size = image.size(0)
# For max length prediction
length_for_pred = torch.IntTensor([self.max_label_length] * batch_size)
text_for_pred = torch.LongTensor(batch_size, self.max_label_length + 1).fill_(0)
text_for_loss, length_for_loss = self.converter.encode(labels, batch_max_length=self.max_label_length)
if self.ctc:
preds = self.model(image, text_for_pred)
            # Calculate evaluation loss for CTC decoder.
preds_size = torch.IntTensor([preds.size(1)] * batch_size)
# permute 'preds' to use CTCloss format
cost = self.criterion(preds.log_softmax(2).permute(1, 0, 2), text_for_loss, preds_size, length_for_loss)
            # Select max probability (greedy decoding), then decode index to character
_, preds_index = preds.max(2)
preds_str = self.converter.decode(preds_index.data, preds_size.data)
else:
preds = self.model(image, text_for_pred, is_train=False)
preds = preds[:, :text_for_loss.shape[1] - 1, :]
target = text_for_loss[:, 1:] # without [GO] Symbol
cost = self.criterion(preds.contiguous().view(-1, preds.shape[-1]), target.contiguous().view(-1))
            # select max probability (greedy decoding), then decode index to character
_, preds_index = preds.max(2)
preds_str = self.converter.decode(preds_index, length_for_pred)
labels = self.converter.decode(text_for_loss[:, 1:], length_for_loss)
# calculate accuracy & confidence score
preds_prob = F.softmax(preds, dim=2)
preds_max_prob, _ = preds_prob.max(dim=2)
confidence_score_list = []
fake_output = torch.IntTensor([1] * batch_size)
fake_target = []
for gt, pred, pred_max_prob in zip(labels, preds_str, preds_max_prob):
if not self.ctc:
gt = gt[:gt.find('[s]')]
pred_EOS = pred.find('[s]')
pred = pred[:pred_EOS] # prune after "end of sentence" token ([s])
pred_max_prob = pred_max_prob[:pred_EOS]
if pred == gt:
fake_target.append(1)
else:
fake_target.append(0)
# calculate confidence score (= multiply of pred_max_prob)
try:
confidence_score = pred_max_prob.cumprod(dim=0)[-1]
except Exception:
confidence_score = 0 # for empty pred case, when prune after "end of sentence" token ([s])
confidence_score_list.append(confidence_score)
fake_target = torch.IntTensor(fake_target)
show = min(5, batch_size)
if batch_idx == self.check_val_batch_idx:
table_data = []
for gt, pred, confidence in zip(labels, preds_str[:show], confidence_score_list[:show]):
if not self.ctc:
gt = gt[:gt.find('[s]')]
pred = pred[:pred.find('[s]')]
table_data.append((gt, pred, f"{confidence:0.4f} {str(pred == gt)}"))
table = tabulate(table_data, headers=TABLE_HEADER, tablefmt='psql')
self.infer_table = table
self.check_val_batch_idx = random.randint(0, max(self.val_batch_num - 1, 0))
self.val_accuracy.update(fake_output, fake_target)
self.log("val_loss", cost, on_step=False, on_epoch=True, prog_bar=True, sync_dist=True, batch_size=batch_size)
self.log("val_acc_1", self.val_accuracy, on_step=False, on_epoch=True, prog_bar=True, sync_dist=True, batch_size=batch_size)
return cost
def forward(self, x):
"""Forward of the OCRNet model. No decode in the forward."""
image = x
batch_size = image.size(0)
# For max length prediction
text_for_pred = torch.LongTensor(batch_size, self.max_label_length + 1).fill_(0)
if 'CTC' in self.experiment_spec.model.prediction:
preds = self.model(image, text_for_pred)
else:
preds = self.model(image, text_for_pred, is_train=False)
return preds
def training_epoch_end(self, training_step_outputs):
"""Log Training metrics to status.json"""
average_train_loss = 0.0
for out in training_step_outputs:
average_train_loss += out['loss'].item()
average_train_loss /= len(training_step_outputs)
self.status_logging_dict["train_loss"] = average_train_loss
status_logging.get_status_logger().kpi = self.status_logging_dict
status_logging.get_status_logger().write(
message="Train and Val metrics generated.",
status_level=status_logging.Status.RUNNING
)
def validation_epoch_end(self, outputs: List[Any]) -> None:
"""Save the best accuracy model after validation"""
@rank_zero_only
def val_epoch_end():
current_acc = self.val_accuracy.compute()
if current_acc > self.best_acc:
torch.save(self.model, f'{self.experiment_spec.train.results_dir}/best_accuracy.pth')
self.best_acc = current_acc
current_model_log = f'{"Current_accuracy":17s}: {current_acc:0.3f}'
best_model_log = f'{"Best_accuracy":17s}: {self.best_acc:0.3f}'
self.console_logger.info(f'{current_model_log}')
self.console_logger.info(f'{best_model_log}')
infer_table_list = self.infer_table.split("\n")
for table in infer_table_list:
self.console_logger.info(table)
val_epoch_end()
# status logging
average_val_loss = 0.0
for out in outputs:
average_val_loss += out.item()
average_val_loss /= len(outputs)
self.status_logging_dict["val_loss"] = average_val_loss
self.status_logging_dict["val_acc"] = self.val_accuracy.compute().item()
def on_save_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
"""Save the model architecture in the checkpoint"""
checkpoint["whole_model"] = pickle.dumps(self.model)
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/ocrnet/model/pl_ocrnet.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model utils of OCRNet"""
import argparse
import torch
import torch.nn.init as init
from nvidia_tao_pytorch.cv.ocrnet.model.model import Model
from nvidia_tao_pytorch.cv.ocrnet.utils.utils import load_checkpoint
def translate_model_config(experiment_spec):
"""Translate the model config to match with CLOVA"""
parser = argparse.ArgumentParser()
opt, _ = parser.parse_known_args()
dataset_config = experiment_spec.dataset
model_config = experiment_spec.model
opt.batch_max_length = dataset_config.max_label_length
opt.imgH = model_config.input_height
opt.imgW = model_config.input_width
opt.input_channel = model_config.input_channel
if model_config.TPS:
opt.Transformation = "TPS"
else:
opt.Transformation = "None"
opt.FeatureExtraction = model_config.backbone
opt.SequenceModeling = model_config.sequence
opt.Prediction = model_config.prediction
opt.num_fiducial = model_config.num_fiducial
opt.output_channel = model_config.feature_channel
opt.hidden_size = model_config.hidden_size
if model_config.quantize:
opt.quantize = True
else:
opt.quantize = False
return opt
def create_quantize_model(model):
"""Add quantized module to the model.
Args:
model (torch.nn.Module): The model to which the quantized module is to be added.
"""
from nvidia_tao_pytorch.cv.ocrnet.model.feature_extraction import BasicBlock
from pytorch_quantization import nn as quant_nn
# construct module dict
modules_dict = {}
for name, m in model.named_modules():
modules_dict[name] = m
for name, m in model.named_modules():
if isinstance(m, BasicBlock):
m.quantize = True
m.residual_quantizer = quant_nn.TensorQuantizer(quant_nn.QuantConv2d.default_quant_desc_input)
elif isinstance(m, torch.nn.modules.conv.Conv2d):
bias = m.bias
weight = m.weight
# if bias:
# use_bias = True
# else:
# use_bias = False
use_bias = bool(bias is not None)
quant_m = quant_nn.QuantConv2d(m.in_channels,
m.out_channels,
m.kernel_size,
stride=m.stride,
padding=m.padding,
dilation=m.dilation,
groups=m.groups,
bias=use_bias,
padding_mode=m.padding_mode)
quant_m.weight = weight
if use_bias:
quant_m.bias = bias
# Get the parent module name and attribute name
parent_name = ".".join(name.split(".")[:-1])
parent_module = modules_dict[parent_name]
attribute_name = name.split(".")[-1]
if attribute_name.isdigit():
# process sequential module
parent_module[int(attribute_name)] = quant_m
else:
setattr(parent_module, attribute_name, quant_m)
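# In short: create_quantize_model rewrites the network in place - each BasicBlock is
# flagged to quantize its residual add and every nn.Conv2d is swapped for a
# pytorch-quantization QuantConv2d that reuses the original weights and bias.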
def load_for_finetune(model, state_dict):
"""Load the state_dict for finetune.
Args:
model (torch.nn.Module): The model to which the state_dict is to be loaded.
state_dict (dict): A dictionary containing the state_dict to be loaded.
"""
try:
model.load_state_dict(state_dict, strict=False)
except RuntimeError as e:
# ignore the prediction layer weights when finetune
if "size mismatch" in str(e):
state_dict.pop("Prediction.weight")
state_dict.pop("Prediction.bias")
model.load_state_dict(state_dict, strict=False)
else:
raise e
def build_ocrnet_model(experiment_spec, num_class):
"""Build OCRNet model of nn.module.
Args:
experiment_spec (dict): A dictionary of experiment specifications.
num_class (int): The number of classes.
Returns:
nn.Module: The OCRNet model.
"""
opt = translate_model_config(experiment_spec)
opt.num_class = num_class
# Init the stuff for QDQ
if opt.quantize:
from pytorch_quantization import nn as quant_nn
from pytorch_quantization.tensor_quant import QuantDescriptor
quant_desc_input = QuantDescriptor(calib_method='histogram')
quant_nn.QuantConv2d.set_default_quant_desc_input(quant_desc_input)
quant_nn.QuantLinear.set_default_quant_desc_input(quant_desc_input)
from pytorch_quantization import quant_modules
quant_modules.initialize()
model = Model(opt)
# weight initialization
for name, param in model.named_parameters():
if 'localization_fc2' in name:
print(f'Skip {name} as it is already initialized')
continue
try:
if 'bias' in name:
init.constant_(param, 0.0)
elif 'weight' in name:
init.kaiming_normal_(param)
except Exception: # for batchnorm.
if 'weight' in name:
param.data.fill_(1)
continue
# Load pretrained weights or resume model
if experiment_spec.train.resume_training_checkpoint_path is not None:
model_path = experiment_spec.train.resume_training_checkpoint_path
load_graph = True
finetune = False
elif experiment_spec.train.pretrained_model_path is not None:
model_path = experiment_spec.train.pretrained_model_path
load_graph = False
finetune = True
elif experiment_spec.train.quantize_model_path is not None:
model_path = experiment_spec.train.quantize_model_path
load_graph = True
finetune = False
else:
model_path = None
load_graph = False
finetune = False
if model_path is not None:
print(f'loading pretrained model from {model_path}')
ckpt = load_checkpoint(model_path,
key=experiment_spec.encryption_key,
to_cpu=True)
if not isinstance(ckpt, Model):
# The public state_dict are with DP module
ckpt = {key.replace("module.", ""): value for key, value in ckpt.items()}
state_dict = ckpt
if finetune:
load_for_finetune(model, state_dict)
else:
model.load_state_dict(state_dict, strict=True)
else:
# The TAO OCRNet are without DP module
if load_graph:
model = ckpt
if opt.quantize:
create_quantize_model(model)
else:
state_dict = ckpt.state_dict()
load_for_finetune(model, state_dict)
return model
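# Illustrative call sketch (assumed setup, mirroring how OCRNetModel builds the network):
#   converter = CTCLabelConverter(characters)   # or AttnLabelConverter for 'Attn'
#   model = build_ocrnet_model(experiment_spec, num_class=len(converter.character))
# len(converter.character) already includes the special tokens the converter adds
# ([blank] for CTC, [GO]/[s] for attention), so it is larger than the raw character count.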
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/ocrnet/model/build_nn_model.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""OCRNet sequence module."""
import torch.nn as nn
class BidirectionalLSTM(nn.Module):
"""Bi-SLTM."""
def __init__(self, input_size, hidden_size, output_size):
"""Init.
Args:
input_size (int): The size of the input.
hidden_size (int): The size of the hidden layer.
output_size (int): The size of the output.
"""
super(BidirectionalLSTM, self).__init__()
self.rnn = nn.LSTM(input_size, hidden_size, bidirectional=True, batch_first=True)
self.linear = nn.Linear(hidden_size * 2, output_size)
def forward(self, input): # pylint: disable=redefined-builtin
"""
input : visual feature [batch_size x T x input_size]
output : contextual feature [batch_size x T x output_size]
"""
self.rnn.flatten_parameters()
recurrent, _ = self.rnn(input) # batch_size x T x input_size -> batch_size x T x (2*hidden_size)
output = self.linear(recurrent) # batch_size x T x output_size
return output
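# Shape sketch (illustrative values, not from the original file):
#   bilstm = BidirectionalLSTM(input_size=512, hidden_size=256, output_size=256)
#   ctx = bilstm(torch.randn(8, 26, 512))   # -> [8, 26, 256]
# The bidirectional LSTM yields 2 * hidden_size features per step, which the final
# linear layer projects back down to output_size.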
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/ocrnet/model/sequence_modeling.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Build Dataloader for OCRNet """
import argparse
import torch
from nvidia_tao_pytorch.cv.ocrnet.dataloader.ocr_dataset import (LmdbDataset,
RawGTDataset,
AlignCollate)
def translate_dataset_config(experiment_spec):
"""Translate experiment spec to match with CLOVA"""
parser = argparse.ArgumentParser()
opt, _ = parser.parse_known_args()
opt.exp_name = experiment_spec.results_dir
# 1. Init dataset params
dataset_config = experiment_spec.dataset
model_config = experiment_spec.model
# Support single dataset source now
# Shall we check it with output feature length to avoid Nan in CTC Loss?
# (image_width // stride) >= 2 * max_label_length - 1
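    # (Illustration: if the feature sequence produced by the backbone had length 25,
    #  CTC could only be guaranteed to fit labels with 25 >= 2 * max_label_length - 1,
    #  i.e. max_label_length <= 13.)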
opt.batch_max_length = dataset_config.max_label_length
opt.imgH = model_config.input_height
opt.imgW = model_config.input_width
opt.input_channel = model_config.input_channel
if dataset_config.augmentation.keep_aspect_ratio:
opt.PAD = True
else:
opt.PAD = False
if model_config.input_channel == 3:
opt.rgb = True
else:
opt.rgb = False
# load character list:
# Don't convert the characters to lower case
with open(dataset_config.character_list_file, "r") as f:
characters = "".join([ch.strip() for ch in f.readlines()])
opt.character = characters
# hardcode the data_filtering_off to be True.
# And there will be KeyError when encoding the labels if
# the labels and character list cannot match
opt.data_filtering_off = True
opt.workers = dataset_config.workers
opt.batch_size = dataset_config.batch_size
return opt
def build_dataloader(experiment_spec, data_path, shuffle=True, gt_file=None):
"""Build dataloader for training and validation.
Args:
experiment_spec (dict): A dictionary of experiment specifications.
data_path (str): The path to the dataset.
shuffle (bool, optional): Whether to shuffle the data. Default is True.
gt_file (str, optional): The path to the ground truth file. Default is None.
Returns:
torch.utils.data.DataLoader: A dataloader for the dataset.
"""
opt = translate_dataset_config(experiment_spec)
AlignCollate_func = AlignCollate(imgH=opt.imgH, imgW=opt.imgW, keep_ratio_with_pad=opt.PAD)
if gt_file is not None:
dataset = RawGTDataset(gt_file, data_path, opt)
else:
dataset = LmdbDataset(data_path, opt)
data_loader = torch.utils.data.DataLoader(
dataset, batch_size=opt.batch_size,
shuffle=shuffle,
num_workers=int(opt.workers),
collate_fn=AlignCollate_func, pin_memory=True)
return data_loader
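# Usage sketch (assumed placeholder paths):
#   train_loader = build_dataloader(experiment_spec, data_path="/data/train_lmdb")
#   val_loader = build_dataloader(experiment_spec, data_path="/data/val_images",
#                                 shuffle=False, gt_file="/data/val_gt.txt")
# Passing gt_file selects RawGTDataset (raw images plus a text ground-truth file);
# omitting it selects LmdbDataset and expects an LMDB database at data_path.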
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/ocrnet/dataloader/build_dataloader.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""OCRNet dataset."""
import os
import sys
import re
import six
import math
import lmdb
# from natsort import natsorted
from nvidia_tao_pytorch.core.path_utils import expand_path
from PIL import Image
import torch
from torch.utils.data import ConcatDataset, Dataset, Subset
from torch._utils import _accumulate
import torchvision.transforms as transforms
class Batch_Balanced_Dataset(object):
"""Generate batch data from multiple data sources."""
def __init__(self, opt):
"""
Modulate the data ratio in the batch.
For example, when select_data is "MJ-ST" and batch_ratio is "0.5-0.5",
        50% of the batch is filled with MJ and the other 50% of the batch is filled with ST.
"""
log = open(f'{opt.exp_name}/log_dataset.txt', 'a')
dashed_line = '-' * 80
print(dashed_line)
log.write(dashed_line + '\n')
print(f'dataset_root: {opt.train_data}\nopt.select_data: {opt.select_data}\nopt.batch_ratio: {opt.batch_ratio}')
log.write(f'dataset_root: {opt.train_data}\nopt.select_data: {opt.select_data}\nopt.batch_ratio: {opt.batch_ratio}\n')
assert len(opt.select_data) == len(opt.batch_ratio)
_AlignCollate = AlignCollate(imgH=opt.imgH, imgW=opt.imgW, keep_ratio_with_pad=opt.PAD)
self.data_loader_list = []
self.dataloader_iter_list = []
batch_size_list = []
Total_batch_size = 0
for selected_d, batch_ratio_d in zip(opt.select_data, opt.batch_ratio):
_batch_size = max(round(opt.batch_size * float(batch_ratio_d)), 1)
print(dashed_line)
log.write(dashed_line + '\n')
_dataset, _dataset_log = hierarchical_dataset(root=opt.train_data, opt=opt, select_data=[selected_d])
total_number_dataset = len(_dataset)
log.write(_dataset_log)
"""
The total number of data can be modified with opt.total_data_usage_ratio.
ex) opt.total_data_usage_ratio = 1 indicates 100% usage, and 0.2 indicates 20% usage.
See 4.2 section in our paper.
"""
number_dataset = int(total_number_dataset * float(opt.total_data_usage_ratio))
dataset_split = [number_dataset, total_number_dataset - number_dataset]
indices = range(total_number_dataset)
_dataset, _ = [Subset(_dataset, indices[offset - length:offset])
for offset, length in zip(_accumulate(dataset_split), dataset_split)]
selected_d_log = f'num total samples of {selected_d}: {total_number_dataset} x {opt.total_data_usage_ratio} (total_data_usage_ratio) = {len(_dataset)}\n'
selected_d_log += f'num samples of {selected_d} per batch: {opt.batch_size} x {float(batch_ratio_d)} (batch_ratio) = {_batch_size}'
print(selected_d_log)
log.write(selected_d_log + '\n')
batch_size_list.append(str(_batch_size))
Total_batch_size += _batch_size
_data_loader = torch.utils.data.DataLoader(
_dataset, batch_size=_batch_size,
shuffle=True,
num_workers=int(opt.workers),
collate_fn=_AlignCollate, pin_memory=True)
self.data_loader_list.append(_data_loader)
self.dataloader_iter_list.append(iter(_data_loader))
Total_batch_size_log = f'{dashed_line}\n'
batch_size_sum = '+'.join(batch_size_list)
Total_batch_size_log += f'Total_batch_size: {batch_size_sum} = {Total_batch_size}\n'
Total_batch_size_log += f'{dashed_line}'
opt.batch_size = Total_batch_size
print(Total_batch_size_log)
log.write(Total_batch_size_log + '\n')
log.close()
def get_batch(self):
"""Generate batch data."""
balanced_batch_images = []
balanced_batch_texts = []
for i, data_loader_iter in enumerate(self.dataloader_iter_list):
try:
image, text = next(data_loader_iter)
balanced_batch_images.append(image)
balanced_batch_texts += text
except StopIteration:
self.dataloader_iter_list[i] = iter(self.data_loader_list[i])
image, text = next(self.dataloader_iter_list[i])
balanced_batch_images.append(image)
balanced_batch_texts += text
except ValueError:
pass
balanced_batch_images = torch.cat(balanced_batch_images, 0)
return balanced_batch_images, balanced_batch_texts
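# Illustration (assumed values): with opt.select_data=['MJ', 'ST'],
# opt.batch_ratio=['0.5', '0.5'] and opt.batch_size=192, each get_batch() call
# concatenates 96 MJ and 96 ST samples; an exhausted iterator is simply restarted,
# so the per-source ratio is preserved across epochs.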
def hierarchical_dataset(root, opt, select_data='/'):
""" select_data='/' contains all sub-directory of root directory """
dataset_list = []
dataset_log = f'dataset_root: {root}\t dataset: {select_data[0]}'
print(dataset_log)
dataset_log += '\n'
for dirpath, dirnames, _ in os.walk(root + '/'):
if not dirnames:
select_flag = False
for selected_d in select_data:
if selected_d in dirpath:
select_flag = True
break
if select_flag:
dataset = LmdbDataset(dirpath, opt)
sub_dataset_log = f'sub-directory:\t/{os.path.relpath(dirpath, root)}\t num samples: {len(dataset)}'
print(sub_dataset_log)
dataset_log += f'{sub_dataset_log}\n'
dataset_list.append(dataset)
concatenated_dataset = ConcatDataset(dataset_list)
return concatenated_dataset, dataset_log
class LmdbDataset(Dataset):
"""LMDB Dataset wrapper."""
def __init__(self, root, opt):
"""Init the LMDB dataset.
Args:
root (str): The root directory of the dataset.
opt (dict): A dictionary of options for the dataset.
"""
self.root = root
self.opt = opt
self.env = lmdb.open(root, max_readers=32, readonly=True, lock=False, readahead=False, meminit=False)
if not self.env:
print('cannot create lmdb from %s' % (root))
sys.exit(0)
with self.env.begin(write=False) as txn:
nSamples = int(txn.get('num-samples'.encode()))
self.nSamples = nSamples
if self.opt.data_filtering_off:
# for fast check or benchmark evaluation with no filtering
self.filtered_index_list = [index + 1 for index in range(self.nSamples)]
else:
""" Filtering part
If you want to evaluate IC15-2077 & CUTE datasets which have special character labels,
use --data_filtering_off and only evaluate on alphabets and digits.
see https://github.com/clovaai/deep-text-recognition-benchmark/blob/6593928855fb7abb999a99f428b3e4477d4ae356/dataset.py#L190-L192
And if you want to evaluate them with the model trained with --sensitive option,
use --sensitive and --data_filtering_off,
see https://github.com/clovaai/deep-text-recognition-benchmark/blob/dff844874dbe9e0ec8c5a52a7bd08c7f20afe704/test.py#L137-L144
"""
self.filtered_index_list = []
for index in range(self.nSamples):
index += 1 # lmdb starts with 1
label_key = 'label-%09d'.encode() % index
label = txn.get(label_key).decode('utf-8')
if len(label) > self.opt.batch_max_length:
# print(f'The length of the label is longer than max_length: length
# {len(label)}, {label} in dataset {self.root}')
continue
# By default, images containing characters which are not in opt.character are filtered.
# You can add [UNK] token to `opt.character` in utils.py instead of this filtering.
out_of_char = f'[^{self.opt.character}]'
if re.search(out_of_char, label.lower()):
continue
self.filtered_index_list.append(index)
self.nSamples = len(self.filtered_index_list)
def __len__(self):
"""Number of samples."""
return self.nSamples
def __getitem__(self, index):
"""Generate single sample."""
assert index <= len(self), 'index range error'
index = self.filtered_index_list[index]
with self.env.begin(write=False) as txn:
label_key = 'label-%09d'.encode() % index
label = txn.get(label_key).decode('utf-8')
img_key = 'image-%09d'.encode() % index
imgbuf = txn.get(img_key)
buf = six.BytesIO()
buf.write(imgbuf)
buf.seek(0)
try:
if self.opt.rgb:
img = Image.open(buf).convert('RGB') # for color image
else:
img = Image.open(buf).convert('L')
except IOError:
print(f'Corrupted image for {index}')
# make dummy image and dummy label for corrupted image.
if self.opt.rgb:
img = Image.new('RGB', (self.opt.imgW, self.opt.imgH))
else:
img = Image.new('L', (self.opt.imgW, self.opt.imgH))
label = '[dummy_label]'
# if not self.opt.sensitive:
# label = label.lower()
# We only train and evaluate on alphanumerics (or pre-defined character set in train.py)
# out_of_char = f'[^{self.opt.character}]'
# label = re.sub(out_of_char, '', label)
return (img, label)
class RawDataset(Dataset):
"""Raw dataset wrapper."""
def __init__(self, root, opt):
"""Init raw dataset."""
self.opt = opt
self.image_path_list = []
for dirpath, _, filenames in os.walk(root):
for name in filenames:
_, ext = os.path.splitext(name)
ext = ext.lower()
if ext in ['.jpg', '.jpeg', '.png']:
self.image_path_list.append(os.path.join(dirpath, name))
# self.image_path_list = natsorted(self.image_path_list)
self.nSamples = len(self.image_path_list)
def __len__(self):
"""Number of samples."""
return self.nSamples
def __getitem__(self, index):
"""Generate single sample."""
try:
if self.opt.rgb:
img = Image.open(self.image_path_list[index]).convert('RGB') # for color image
else:
img = Image.open(self.image_path_list[index]).convert('L')
except IOError:
print(f'Corrupted image for {index}')
# make dummy image and dummy label for corrupted image.
if self.opt.rgb:
img = Image.new('RGB', (self.opt.imgW, self.opt.imgH))
else:
img = Image.new('L', (self.opt.imgW, self.opt.imgH))
return (img, self.image_path_list[index])
class RawGTDataset(Dataset):
"""Raw image and label dataset wrapper."""
def __init__(self, gt_file, img_dir, opt):
"""Init raw-gt dataset.
Args:
            gt_file (str): Path to the ground truth file; each line is "<image_name> <label>".
            img_dir (str): Directory containing the images listed in the ground truth file.
            opt (dict): A dictionary of options for the dataset.
"""
self.opt = opt
self.label_list = []
self.img_path_list = []
gt_file = expand_path(gt_file)
with open(gt_file, "r", encoding="utf-8") as f:
for line in f.readlines():
img_name, label = line.strip("\n").split()
self.label_list.append(label)
img_path = expand_path(f"{img_dir}/{img_name}")
self.img_path_list.append(img_path)
self.nSamples = len(self.img_path_list)
def __len__(self):
"""Number of samples"""
return self.nSamples
def __getitem__(self, index):
"""Generate single sample"""
label = self.label_list[index]
try:
if self.opt.rgb:
img = Image.open(self.img_path_list[index]).convert('RGB') # for color image
else:
img = Image.open(self.img_path_list[index]).convert('L')
except IOError:
print(f'Corrupted image for {index} {self.img_path_list[index]}')
# make dummy image and dummy label for corrupted image.
if self.opt.rgb:
img = Image.new('RGB', (self.opt.imgW, self.opt.imgH))
else:
img = Image.new('L', (self.opt.imgW, self.opt.imgH))
return (img, label)
class ResizeNormalize(object):
"""Resize and normalize wrapper."""
def __init__(self, size, interpolation=Image.BICUBIC):
"""Init the transform op.
Args:
size (int or tuple): Desired output size of the image. If int, the output size will be (size, size). If tuple, output size will be matched to this.
interpolation (int, optional): Desired interpolation. Default is `PIL.Image.BICUBIC`.
"""
self.size = size
self.interpolation = interpolation
self.toTensor = transforms.ToTensor()
def __call__(self, img):
"""Call."""
img = img.resize(self.size, self.interpolation)
img = self.toTensor(img)
img.sub_(0.5).div_(0.5)
return img
class NormalizePAD(object):
"""Normalize and pad wrapper."""
def __init__(self, max_size, PAD_type='right'):
"""Initializes the NormalizePAD class.
Args:
            max_size (tuple): The (channels, height, max_width) shape to pad the input to.
PAD_type (str, optional): The type of padding to use. Default is 'right'.
"""
self.toTensor = transforms.ToTensor()
self.max_size = max_size
self.max_width_half = math.floor(max_size[2] / 2)
self.PAD_type = PAD_type
def __call__(self, img):
"""Call."""
img = self.toTensor(img)
img.sub_(0.5).div_(0.5)
c, h, w = img.size()
Pad_img = torch.FloatTensor(*self.max_size).fill_(0)
Pad_img[:, :, :w] = img # right pad
if self.max_size[2] != w: # add border Pad
Pad_img[:, :, w:] = img[:, :, w - 1].unsqueeze(2).expand(c, h, self.max_size[2] - w)
return Pad_img
class AlignCollate(object):
"""Align the batch data."""
def __init__(self, imgH=32, imgW=100, keep_ratio_with_pad=False):
"""Init.
Args:
imgH (int, optional): The height of the input image. Default is 32.
imgW (int, optional): The width of the input image. Default is 100.
keep_ratio_with_pad (bool, optional): Whether to keep the aspect ratio of the input image while padding. Default is False.
"""
self.imgH = imgH
self.imgW = imgW
self.keep_ratio_with_pad = keep_ratio_with_pad
def __call__(self, batch):
"""Call."""
batch = filter(lambda x: x is not None, batch)
images, labels = zip(*batch)
if self.keep_ratio_with_pad: # same concept with 'Rosetta' paper
resized_max_w = self.imgW
input_channel = 3 if images[0].mode == 'RGB' else 1
transform = NormalizePAD((input_channel, self.imgH, resized_max_w))
resized_images = []
for image in images:
w, h = image.size
ratio = w / float(h)
if math.ceil(self.imgH * ratio) > self.imgW:
resized_w = self.imgW
else:
resized_w = math.ceil(self.imgH * ratio)
resized_image = image.resize((resized_w, self.imgH), Image.BICUBIC)
resized_images.append(transform(resized_image))
# resized_image.save('./image_test/%d_test.jpg' % w)
image_tensors = torch.cat([t.unsqueeze(0) for t in resized_images], 0)
else:
transform = ResizeNormalize((self.imgW, self.imgH))
image_tensors = [transform(image) for image in images]
image_tensors = torch.cat([t.unsqueeze(0) for t in image_tensors], 0)
return image_tensors, labels
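# Worked example for the keep_ratio_with_pad branch (values assumed): with imgH=32,
# imgW=100 and a 50x20 crop, ratio = 2.5 and the image is resized to 80x32
# (ceil(32 * 2.5) = 80 <= 100); NormalizePAD then right-pads the remaining 20 columns
# by repeating the last image column. A crop whose ceil(imgH * ratio) exceeds imgW is
# instead resized to the full imgW.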
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/ocrnet/dataloader/ocr_dataset.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""OCRNet dataloader module."""
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/ocrnet/dataloader/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# This work is made available under the Nvidia Source Code License-NC.
# To view a copy of this license, visit
# https://github.com/NVlabs/FAN/blob/main/LICENSE
""" ConvNeXt
Paper: `A ConvNet for the 2020s` - https://arxiv.org/pdf/2201.03545.pdf
Original code and weights from https://github.com/facebookresearch/ConvNeXt, original copyright below
Modifications and additions for timm hacked together by / Copyright 2022, Ross Wightman
"""
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the MIT license
# Modified by: Daquan Zhou
from collections import OrderedDict
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.models.fx_features import register_notrace_module
from timm.models.helpers import named_apply, build_model_with_cfg
from timm.models.layers import trunc_normal_, ClassifierHead, SelectAdaptivePool2d, DropPath, Mlp
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
'crop_pct': 0.875, 'interpolation': 'bicubic',
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'stem.0', 'classifier': 'head.fc',
**kwargs
}
default_cfgs = dict(
convnext_tiny=_cfg(url="https://dl.fbaipublicfiles.com/convnext/convnext_tiny_1k_224_ema.pth"),
convnext_small=_cfg(url="https://dl.fbaipublicfiles.com/convnext/convnext_small_1k_224_ema.pth"),
convnext_base=_cfg(url="https://dl.fbaipublicfiles.com/convnext/convnext_base_1k_224_ema.pth"),
convnext_large=_cfg(url="https://dl.fbaipublicfiles.com/convnext/convnext_large_1k_224_ema.pth"),
convnext_tiny_hnf=_cfg(url=''),
convnext_base_in22k=_cfg(
# url="https://dl.fbaipublicfiles.com/convnext/convnext_base_22k_224.pth", num_classes=80),
url="pretrained/convnext_base_22k_224.pth", num_classes=80),
convnext_large_in22k=_cfg(
url="https://dl.fbaipublicfiles.com/convnext/convnext_large_22k_224.pth", num_classes=80),
convnext_xlarge_in22k=_cfg(
url="https://dl.fbaipublicfiles.com/convnext/convnext_xlarge_22k_224.pth", num_classes=80),
)
def _is_contiguous(tensor: torch.Tensor) -> bool:
"""Check if the tensor is continguous for torch jit script purpose"""
# jit is oh so lovely :/
# if torch.jit.is_tracing():
# return True
if torch.jit.is_scripting():
return tensor.is_contiguous()
return tensor.is_contiguous(memory_format=torch.contiguous_format)
class ConvMlp(nn.Module):
""" MLP using 1x1 convs that keeps spatial dims"""
def __init__(
self, in_features, hidden_features=None, out_features=None, act_layer=nn.ReLU, norm_layer=None, drop=0.):
"""Initialize the ConvMlp Class.
Args:
in_features: number of input features
            hidden_features: number of hidden features
out_features: number of output features
act_layer: activation layer class to be used
norm_layer: normalization layer class to be used
drop: dropout probability
"""
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Conv2d(in_features, hidden_features, kernel_size=1, bias=True)
self.norm = norm_layer(hidden_features) if norm_layer else nn.Identity()
self.act = act_layer()
self.fc2 = nn.Conv2d(hidden_features, out_features, kernel_size=1, bias=True)
self.drop = nn.Dropout(drop)
def forward(self, x):
"""Forward function"""
x = self.fc1(x)
x = self.norm(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
return x
@register_notrace_module
class LayerNorm2d(nn.LayerNorm):
""" LayerNorm for channels_first tensors with 2d spatial dimensions (ie N, C, H, W)."""
def __init__(self, normalized_shape, eps=1e-6):
"""Initialize the Layernorm2d class.
Args:
normalized_shape: shape to be normalized to
eps: epsilon value for numerically stability
"""
super().__init__(normalized_shape, eps=eps)
def forward(self, x) -> torch.Tensor:
"""Forward function."""
if _is_contiguous(x):
return F.layer_norm(
x.permute(0, 2, 3, 1), self.normalized_shape, self.weight, self.bias, self.eps).permute(0, 3, 1, 2)
s, u = torch.var_mean(x, dim=1, keepdim=True)
x = (x - u) * torch.rsqrt(s + self.eps)
x = x * self.weight[:, None, None] + self.bias[:, None, None]
return x
class ConvNeXtBlock(nn.Module):
""" ConvNeXt Block
There are two equivalent implementations:
(1) DwConv -> LayerNorm (channels_first) -> 1x1 Conv -> GELU -> 1x1 Conv; all in (N, C, H, W)
(2) DwConv -> Permute to (N, H, W, C); LayerNorm (channels_last) -> Linear -> GELU -> Linear; Permute back
Unlike the official impl, this one allows choice of 1 or 2, 1x1 conv can be faster with appropriate
choice of LayerNorm impl, however as model size increases the tradeoffs appear to change and nn.Linear
is a better choice. This was observed with PyTorch 1.10 on 3090 GPU, it could change over time & w/ different HW.
"""
def __init__(self, dim, drop_path=0., ls_init_value=1e-6, conv_mlp=True, mlp_ratio=4, norm_layer=None):
"""Initialize ConvNext Block.
Args:
dim (int): Number of input channels.
drop_path (float): Stochastic depth rate. Default: 0.0
ls_init_value (float): Init value for Layer Scale. Default: 1e-6.
"""
super().__init__()
if not norm_layer:
norm_layer = partial(LayerNorm2d, eps=1e-6) if conv_mlp else partial(nn.LayerNorm, eps=1e-6)
mlp_layer = ConvMlp if conv_mlp else Mlp
self.use_conv_mlp = conv_mlp
self.conv_dw = nn.Conv2d(dim, dim, kernel_size=7, padding=3, groups=dim) # depthwise conv
self.norm = norm_layer(dim)
self.mlp = mlp_layer(dim, int(mlp_ratio * dim), act_layer=nn.GELU)
self.gamma = nn.Parameter(ls_init_value * torch.ones(dim)) if ls_init_value > 0 else None
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
def forward(self, x):
"""Forward function."""
shortcut = x
x = self.conv_dw(x)
if self.use_conv_mlp:
x = self.norm(x)
x = self.mlp(x)
else:
x = x.permute(0, 2, 3, 1)
x = self.norm(x)
x = self.mlp(x)
x = x.permute(0, 3, 1, 2)
if self.gamma is not None:
x = x.mul(self.gamma.reshape(1, -1, 1, 1))
x = self.drop_path(x) + shortcut
return x
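# In short, the block above computes: 7x7 depthwise conv -> LayerNorm -> pointwise MLP
# (expansion factor mlp_ratio) -> per-channel layer scale (gamma) -> stochastic depth,
# added back onto the residual shortcut; conv_mlp=True keeps NCHW throughout, while
# conv_mlp=False permutes to NHWC for the LayerNorm/Linear variant.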
class ConvNeXtStage(nn.Module):
"""ConvNeXt Stage."""
def __init__(
self, in_chs, out_chs, stride=2, depth=2, dp_rates=None, ls_init_value=1.0, conv_mlp=True,
norm_layer=None, cl_norm_layer=None, no_downsample=False):
"""Initialize ConvNext Stage.
Args:
in_chs (int): Number of input channels.
out_chs (int): Number of output channels.
"""
super().__init__()
if in_chs != out_chs or stride > 1:
self.downsample = nn.Sequential(
norm_layer(in_chs),
nn.Conv2d(in_chs, out_chs, kernel_size=stride, stride=stride if not no_downsample else 1),
)
else:
self.downsample = nn.Identity()
dp_rates = dp_rates or [0.] * depth
self.blocks = nn.Sequential(*[ConvNeXtBlock(
dim=out_chs, drop_path=dp_rates[j], ls_init_value=ls_init_value, conv_mlp=conv_mlp,
norm_layer=norm_layer if conv_mlp else cl_norm_layer)
for j in range(depth)]
)
def forward(self, x):
"""Forward function."""
x = self.downsample(x)
x = self.blocks(x)
return x
class ConvNeXt(nn.Module):
"""A PyTorch impl of : `A ConvNet for the 2020s` - https://arxiv.org/pdf/2201.03545.pdf"""
def __init__(
self, in_chans=3, num_classes=1000, global_pool='avg', output_stride=32, patch_size=4,
depths=(3, 3, 9, 3), dims=(96, 192, 384, 768), ls_init_value=1e-6, conv_mlp=True, use_head=True,
head_init_scale=1., head_norm_first=False, norm_layer=None, drop_rate=0., drop_path_rate=0.,
remove_last_downsample=False
):
""" Initialize the ConvNext Class
Args:
in_chans (int): Number of input image channels. Default: 3
num_classes (int): Number of classes for classification head. Default: 1000
depths (tuple(int)): Number of blocks at each stage. Default: [3, 3, 9, 3]
dims (tuple(int)): Feature dimension at each stage. Default: [96, 192, 384, 768]
drop_rate (float): Head dropout rate
drop_path_rate (float): Stochastic depth rate. Default: 0.
ls_init_value (float): Init value for Layer Scale. Default: 1e-6.
head_init_scale (float): Init scaling value for classifier weights and biases. Default: 1.
"""
super().__init__()
assert output_stride == 32
if norm_layer is None:
norm_layer = partial(LayerNorm2d, eps=1e-6)
cl_norm_layer = norm_layer if conv_mlp else partial(nn.LayerNorm, eps=1e-6)
else:
assert conv_mlp,\
                'If a norm_layer is specified, conv MLP must be used so all norms expect rank-4, channels-first input'
cl_norm_layer = norm_layer
self.num_classes = num_classes
self.drop_rate = drop_rate
self.feature_info = []
# NOTE: this stem is a minimal form of ViT PatchEmbed, as used in SwinTransformer w/ patch_size = 4
self.stem = nn.Sequential(
nn.Conv2d(in_chans, dims[0], kernel_size=patch_size, stride=patch_size),
norm_layer(dims[0])
)
self.stages = nn.Sequential()
dp_rates = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)]
curr_stride = patch_size
prev_chs = dims[0]
stages = []
# 4 feature resolution stages, each consisting of multiple residual blocks
for i in range(len(depths)):
stride = 2 if i > 0 else 1
# FIXME support dilation / output_stride
curr_stride *= stride
out_chs = dims[i]
no_downsample = remove_last_downsample and (i == len(depths) - 1)
stages.append(ConvNeXtStage(
prev_chs, out_chs, stride=stride,
depth=depths[i], dp_rates=dp_rates[i], ls_init_value=ls_init_value, conv_mlp=conv_mlp,
norm_layer=norm_layer, cl_norm_layer=cl_norm_layer, no_downsample=no_downsample)
)
prev_chs = out_chs
# NOTE feature_info use currently assumes stage 0 == stride 1, rest are stride 2
self.feature_info += [dict(num_chs=prev_chs, reduction=curr_stride, module=f'stages.{i}')]
self.stages = nn.Sequential(*stages)
self.num_features = prev_chs
if head_norm_first:
# norm -> global pool -> fc ordering, like most other nets (not compat with FB weights)
self.norm_pre = norm_layer(self.num_features) # final norm layer, before pooling
if use_head:
self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=drop_rate)
else:
# pool -> norm -> fc, the default ConvNeXt ordering (pretrained FB weights)
self.norm_pre = nn.Identity()
if use_head:
self.head = nn.Sequential(OrderedDict([
('global_pool', SelectAdaptivePool2d(pool_type=global_pool)),
('norm', norm_layer(self.num_features)),
('flatten', nn.Flatten(1) if global_pool else nn.Identity()),
('drop', nn.Dropout(self.drop_rate)),
('fc', nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity())
]))
named_apply(partial(_init_weights, head_init_scale=head_init_scale), self)
def get_classifier(self):
"""Returns classifier of ConvNeXt"""
return self.head.fc
def reset_classifier(self, num_classes=0, global_pool='avg'):
"""Redefine the classification head"""
if isinstance(self.head, ClassifierHead):
# norm -> global pool -> fc
self.head = ClassifierHead(
self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate)
else:
# pool -> norm -> fc
self.head = nn.Sequential(OrderedDict([
('global_pool', SelectAdaptivePool2d(pool_type=global_pool)),
('norm', self.head.norm),
('flatten', nn.Flatten(1) if global_pool else nn.Identity()),
('drop', nn.Dropout(self.drop_rate)),
('fc', nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity())
]))
def forward_features(self, x, return_feat=False):
"""Extract features"""
x = self.stem(x)
out_list = []
for i in range(len(self.stages)):
x = self.stages[i](x)
out_list.append(x)
x = self.norm_pre(x)
        return (x, out_list) if return_feat else x
def forward(self, x):
"""Forward function"""
x = self.forward_features(x)
x = self.head(x)
return x
def _init_weights(module, name=None, head_init_scale=1.0):
"""Initialize weights"""
if isinstance(module, nn.Conv2d):
trunc_normal_(module.weight, std=.02)
nn.init.constant_(module.bias, 0)
elif isinstance(module, nn.Linear):
trunc_normal_(module.weight, std=.02)
nn.init.constant_(module.bias, 0)
if name and 'head.' in name:
module.weight.data.mul_(head_init_scale)
module.bias.data.mul_(head_init_scale)
def checkpoint_filter_fn(state_dict, model):
""" Remap FB checkpoints -> timm """
if 'model' in state_dict:
state_dict = state_dict['model']
out_dict = {}
import re
for k, v in state_dict.items():
k = k.replace('downsample_layers.0.', 'stem.')
k = re.sub(r'stages.([0-9]+).([0-9]+)', r'stages.\1.blocks.\2', k)
k = re.sub(r'downsample_layers.([0-9]+).([0-9]+)', r'stages.\1.downsample.\2', k)
k = k.replace('dwconv', 'conv_dw')
k = k.replace('pwconv', 'mlp.fc')
k = k.replace('head.', 'head.fc.')
if k in model.state_dict().keys():
if k.startswith('norm.'):
k = k.replace('norm', 'head.norm')
if v.ndim == 2 and 'head' not in k:
model_shape = model.state_dict()[k].shape
v = v.reshape(model_shape)
out_dict[k] = v
return out_dict
def _create_hybrid_backbone(variant='convnext_base_in22k', pretrained=False, **kwargs):
"""Create ConvNeXt hybrid backbone for FAN"""
model = build_model_with_cfg(
ConvNeXt, variant, pretrained,
pretrained_filter_fn=checkpoint_filter_fn,
feature_cfg=dict(out_indices=(0, 1, 2, 3), flatten_sequential=True),
**kwargs)
return model
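# Hedged usage sketch (the depth/dim values are the standard ConvNeXt-Base settings,
# assumed here rather than taken from this file):
#   model_kwargs = dict(depths=(3, 3, 27, 3), dims=(128, 256, 512, 1024), use_head=False)
#   backbone = _create_hybrid_backbone('convnext_base_in22k', pretrained=False, **model_kwargs)
#   x, feat_list = backbone.forward_features(torch.randn(1, 3, 224, 224), return_feat=True)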
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/backbone/convnext_utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# This work is made available under the Nvidia Source Code License-NC.
# To view a copy of this license, visit
# https://github.com/NVlabs/FAN/blob/main/LICENSE
""" Swin Transformer
A PyTorch impl of : `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows`
- https://arxiv.org/pdf/2103.14030
Code/weights from https://github.com/microsoft/Swin-Transformer, original copyright/license info below
Modifications and additions for timm hacked together by / Copyright 2021, Ross Wightman
"""
# --------------------------------------------------------
# Swin Transformer
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ze Liu
#
# Modified by: Daquan Zhou
# --------------------------------------------------------
import math
from copy import deepcopy
from typing import Optional
import torch
import torch.nn as nn
import torch.utils.checkpoint as checkpoint
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.models.fx_features import register_notrace_function
from timm.models.helpers import build_model_with_cfg
from timm.models.layers import PatchEmbed, Mlp
from timm.models.layers import _assert
from timm.models.vision_transformer import checkpoint_filter_fn, init_weights_vit_timm
from timm.models.layers import DropPath, trunc_normal_, to_2tuple
def overlay_external_default_cfg(default_cfg, kwargs):
""" Overlay 'external_default_cfg' in kwargs on top of default_cfg arg."""
external_default_cfg = kwargs.pop('external_default_cfg', None)
if external_default_cfg:
default_cfg.pop('url', None) # url should come from external cfg
default_cfg.pop('hf_hub', None) # hf hub id should come from external cfg
default_cfg.update(external_default_cfg)
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None,
'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True,
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'patch_embed.proj', 'classifier': 'head',
**kwargs
}
default_cfgs = {
# patch models (my experiments)
'swin_base_patch4_window12_384': _cfg(
url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window12_384_22kto1k.pth',
input_size=(3, 384, 384), crop_pct=1.0),
'swin_base_patch4_window7_224': _cfg(
url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window7_224_22kto1k.pth',
),
'swin_large_patch4_window12_384': _cfg(
url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window12_384_22kto1k.pth',
input_size=(3, 384, 384), crop_pct=1.0),
'swin_large_patch4_window7_224': _cfg(
url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window7_224_22kto1k.pth',
),
'swin_small_patch4_window7_224': _cfg(
url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_small_patch4_window7_224.pth',
),
'swin_tiny_patch4_window7_224': _cfg(
url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth',
),
'swin_base_patch4_window12_384_in22k': _cfg(
url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window12_384_22k.pth',
input_size=(3, 384, 384), crop_pct=1.0, num_classes=21841),
'swin_base_patch4_window7_224_in22k': _cfg(
url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window7_224_22k.pth',
num_classes=21841),
'swin_large_patch4_window12_384_in22k': _cfg(
url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window12_384_22k.pth',
input_size=(3, 384, 384), crop_pct=1.0, num_classes=21841),
'swin_large_patch4_window7_224_in22k': _cfg(
url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window7_224_22k.pth',
num_classes=21841),
}
def window_partition(x, window_size: int):
"""
Args:
x: (B, H, W, C)
window_size (int): window size
Returns:
windows: (num_windows*B, window_size, window_size, C)
"""
B, H, W, C = x.shape
x = x.view(B, H // window_size, window_size, W // window_size, window_size, C)
windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)
return windows
@register_notrace_function # reason: int argument is a Proxy
def window_reverse(windows, window_size: int, H: int, W: int):
"""
Args:
windows: (num_windows*B, window_size, window_size, C)
window_size (int): Window size
H (int): Height of image
W (int): Width of image
Returns:
x: (B, H, W, C)
"""
B = int(windows.shape[0] / (H * W / window_size / window_size))
x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1)
x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)
return x
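# Illustrative shape check for the two helpers above (assumed example sizes; not part of
# the upstream Swin sources):
#   x = torch.randn(2, 56, 56, 96)                        # (B, H, W, C)
#   win = window_partition(x, 7)                          # -> (2 * 8 * 8, 7, 7, 96)
#   assert torch.equal(window_reverse(win, 7, 56, 56), x)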
class WindowAttention(nn.Module):
r""" Window based multi-head self attention (W-MSA) module with relative position bias.
    It supports both shifted and non-shifted windows.
Args:
dim (int): Number of input channels.
window_size (tuple[int]): The height and width of the window.
num_heads (int): Number of attention heads.
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0
proj_drop (float, optional): Dropout ratio of output. Default: 0.0
"""
def __init__(self, dim, window_size, num_heads, qkv_bias=True, attn_drop=0., proj_drop=0.):
"""Initialize WindowAttention class"""
super().__init__()
self.dim = dim
self.window_size = window_size # Wh, Ww
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = head_dim ** -0.5
# define a parameter table of relative position bias
self.relative_position_bias_table = nn.Parameter(
torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads)) # 2*Wh-1 * 2*Ww-1, nH
# get pair-wise relative position index for each token inside the window
coords_h = torch.arange(self.window_size[0])
coords_w = torch.arange(self.window_size[1])
coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww
coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww
relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww
relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2
relative_coords[:, :, 0] += self.window_size[0] - 1 # shift to start from 0
relative_coords[:, :, 1] += self.window_size[1] - 1
relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww
self.register_buffer("relative_position_index", relative_position_index)
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
trunc_normal_(self.relative_position_bias_table, std=.02)
self.softmax = nn.Softmax(dim=-1)
def forward(self, x, mask: Optional[torch.Tensor] = None):
"""
Args:
x: input features with shape of (num_windows*B, N, C)
mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None
"""
B_, N, C = x.shape
qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
q, k, v = qkv.unbind(0) # make torchscript happy (cannot use tensor as tuple)
q = q * self.scale
attn = (q @ k.transpose(-2, -1))
relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view(
self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1) # Wh*Ww,Wh*Ww,nH
relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww
attn = attn + relative_position_bias.unsqueeze(0)
if mask is not None:
nW = mask.shape[0]
attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0)
attn = attn.view(-1, self.num_heads, N, N)
attn = self.softmax(attn)
else:
attn = self.softmax(attn)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B_, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
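# Shape walk-through for WindowAttention.forward (assumed example sizes, for reference only):
# with dim=96, num_heads=3, window_size=(7, 7) and x of shape (128, 49, 96):
#   qkv  -> (3, 128, 3, 49, 32)
#   attn -> (128, 3, 49, 49), plus the (3, 49, 49) relative position bias broadcast over windows
#   out  -> (128, 49, 96) after the output projection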
class PositionalEncodingFourier(nn.Module):
"""
    Positional encoding relying on a Fourier kernel matching the one used in the "Attention Is All You Need" paper.
Based on the official XCiT code
- https://github.com/facebookresearch/xcit/blob/master/xcit.py
"""
def __init__(self, hidden_dim=32, dim=768, temperature=10000):
"""Initialize PositionalEncodingFourier class"""
super().__init__()
self.token_projection = nn.Conv2d(hidden_dim * 2, dim, kernel_size=1)
self.scale = 2 * math.pi
self.temperature = temperature
self.hidden_dim = hidden_dim
self.dim = dim
self.eps = 1e-6
def forward(self, B: int, H: int, W: int):
"""Forward function"""
device = self.token_projection.weight.device
y_embed = torch.arange(1, H + 1, dtype=torch.float32, device=device).unsqueeze(1).repeat(1, 1, W)
x_embed = torch.arange(1, W + 1, dtype=torch.float32, device=device).repeat(1, H, 1)
y_embed = y_embed / (y_embed[:, -1:, :] + self.eps) * self.scale
x_embed = x_embed / (x_embed[:, :, -1:] + self.eps) * self.scale
dim_t = torch.arange(self.hidden_dim, dtype=torch.float32, device=device)
dim_t = self.temperature ** (2 * torch.div(dim_t, 2, rounding_mode='floor') / self.hidden_dim)
pos_x = x_embed[:, :, :, None] / dim_t
pos_y = y_embed[:, :, :, None] / dim_t
pos_x = torch.stack([pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()], dim=4).flatten(3)
pos_y = torch.stack([pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()], dim=4).flatten(3)
pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)
pos = self.token_projection(pos)
return pos.repeat(B, 1, 1, 1) # (B, C, H, W)
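# Usage sketch (hypothetical sizes): PositionalEncodingFourier(hidden_dim=32, dim=96)(B=2, H=14, W=14)
# returns a (2, 96, 14, 14) positional map that callers reshape and add to the patch tokens.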
class FANMlp(nn.Module):
"""FANMlp"""
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0., linear=False):
"""Initialize FANMlp"""
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.dwconv = DWConv(hidden_features)
self.gamma = nn.Parameter(torch.ones(hidden_features), requires_grad=True)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
self.linear = linear
if self.linear:
self.relu = nn.ReLU(inplace=True)
self.apply(self._init_weights)
def _init_weights(self, m):
"""Initialize weights"""
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
elif isinstance(m, nn.Conv2d):
fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
fan_out //= m.groups
m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
if m.bias is not None:
m.bias.data.zero_()
def forward(self, x, H, W):
"""Forward function"""
x = self.fc1(x)
if self.linear:
x = self.relu(x)
x = self.drop(self.gamma * self.dwconv(x, H, W)) # + x
x = self.fc2(x)
x = self.drop(x)
return x
class DWConv(nn.Module):
"""Depth-wise convolution"""
def __init__(self, in_features, out_features=None, act_layer=nn.GELU, kernel_size=3):
"""Initialize DWConv class"""
super().__init__()
out_features = out_features or in_features
padding = kernel_size // 2
self.conv1 = torch.nn.Conv2d(
in_features, in_features, kernel_size=kernel_size, padding=padding, groups=in_features)
self.act = act_layer()
self.bn = nn.BatchNorm2d(in_features)
self.conv2 = torch.nn.Conv2d(
in_features, out_features, kernel_size=kernel_size, padding=padding, groups=out_features)
def forward(self, x, H: int, W: int):
"""Forward function"""
B, N, C = x.shape
x = x.permute(0, 2, 1).reshape(B, C, H, W)
x = self.conv1(x)
x = self.act(x)
x = self.bn(x)
x = self.conv2(x)
x = x.reshape(B, C, N).permute(0, 2, 1)
return x
class ChannelProcessing(nn.Module):
"""Channel Processing"""
def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.,
sr_ratio=1, linear=False, drop_path=0.,
mlp_hidden_dim=None, act_layer=nn.GELU, drop=0., norm_layer=nn.LayerNorm, cha_sr_ratio=1, c_head_num=None):
"""Initialize ChannelProcessing class"""
super().__init__()
assert dim % num_heads == 0, f"dim {dim} should be divided by num_heads {num_heads}."
self.dim = dim
num_heads = c_head_num or num_heads
self.num_heads = num_heads
self.temperature = nn.Parameter(torch.ones(num_heads, 1, 1))
self.cha_sr_ratio = cha_sr_ratio if num_heads > 1 else 1
# config of mlp for v processing
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.mlp_v = FANMlp(in_features=dim // self.cha_sr_ratio, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop, linear=linear)
self.norm_v = norm_layer(dim // self.cha_sr_ratio)
self.q = nn.Linear(dim, dim, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.linear = linear
self.sr_ratio = sr_ratio
self.apply(self._init_weights)
def _init_weights(self, m):
"""Initialize weights"""
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
elif isinstance(m, nn.Conv2d):
fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
fan_out //= m.groups
m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
if m.bias is not None:
m.bias.data.zero_()
def _gen_attn(self, q, k):
"""Returns attention"""
q = q.softmax(-2).transpose(-1, -2)
_, _, N, _ = k.shape
k = torch.nn.functional.adaptive_avg_pool2d(k.softmax(-2), (N, 1))
attn = torch.nn.functional.sigmoid(q @ k)
return attn * self.temperature
def forward(self, x, H, W, atten=None):
"""Forward functions """
B, N, C = x.shape
v = x.reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
q = self.q(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
k = x.reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
attn = self._gen_attn(q, k)
attn = self.attn_drop(attn)
Bv, Hd, Nv, Cv = v.shape
v = self.norm_v(self.mlp_v(v.transpose(1, 2).reshape(Bv, Nv, Hd * Cv), H, W)).reshape(Bv, Nv, Hd, Cv).transpose(1, 2)
repeat_time = N // attn.shape[-1]
attn = attn.repeat_interleave(repeat_time, dim=-1) if attn.shape[-1] > 1 else attn
x = (attn * v.transpose(-1, -2)).permute(0, 3, 1, 2).reshape(B, N, C)
return x
@torch.jit.ignore
def no_weight_decay(self):
"""layers to ignore for weight decay"""
return {'temperature'}
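# ChannelProcessing applies attention over channels rather than tokens; in effect:
#   attn = sigmoid( softmax_N(q)^T @ avg_pool_N(softmax_N(k)) ) * temperature   # (B, heads, C/heads, 1)
# and this gate re-weights the MLP-processed values v before they are folded back to (B, N, C).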
class SwinTransformerBlock(nn.Module):
r""" Swin Transformer Block."""
def __init__(self, dim, input_resolution, num_heads, window_size=7, shift_size=0,
mlp_ratio=4., qkv_bias=True, drop=0., attn_drop=0., drop_path=0., mlp_type=None,
act_layer=nn.GELU, norm_layer=nn.LayerNorm):
"""Initialize SwinTransformerBlock class
Args:
dim (int): Number of input channels.
            input_resolution (tuple[int]): Input resolution.
num_heads (int): Number of attention heads.
window_size (int): Window size.
shift_size (int): Shift size for SW-MSA.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
drop (float, optional): Dropout rate. Default: 0.0
attn_drop (float, optional): Attention dropout rate. Default: 0.0
drop_path (float, optional): Stochastic depth rate. Default: 0.0
act_layer (nn.Module, optional): Activation layer. Default: nn.GELU
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
"""
super().__init__()
self.dim = dim
self.input_resolution = input_resolution
self.num_heads = num_heads
self.window_size = window_size
self.shift_size = shift_size
self.mlp_ratio = mlp_ratio
if min(self.input_resolution) <= self.window_size:
# if window size is larger than input resolution, we don't partition windows
self.shift_size = 0
self.window_size = min(self.input_resolution)
        assert 0 <= self.shift_size < self.window_size, "shift_size must be in [0, window_size)"
self.norm1 = norm_layer(dim)
self.attn = WindowAttention(
dim, window_size=to_2tuple(self.window_size), num_heads=num_heads, qkv_bias=qkv_bias,
attn_drop=attn_drop, proj_drop=drop)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp_type = mlp_type
if mlp_type == 'Mlp':
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
else:
self.mlp = ChannelProcessing(dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop,
drop_path=drop_path, mlp_hidden_dim=mlp_hidden_dim)
if self.shift_size > 0:
# calculate attention mask for SW-MSA
H, W = self.input_resolution
img_mask = torch.zeros((1, H, W, 1)) # 1 H W 1
h_slices = (slice(0, -self.window_size),
slice(-self.window_size, -self.shift_size),
slice(-self.shift_size, None))
w_slices = (slice(0, -self.window_size),
slice(-self.window_size, -self.shift_size),
slice(-self.shift_size, None))
cnt = 0
for h in h_slices:
for w in w_slices:
img_mask[:, h, w, :] = cnt
cnt += 1
mask_windows = window_partition(img_mask, self.window_size) # nW, window_size, window_size, 1
mask_windows = mask_windows.view(-1, self.window_size * self.window_size)
attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))
else:
attn_mask = None
self.register_buffer("attn_mask", attn_mask)
def forward(self, x):
"""Forward function"""
H, W = self.input_resolution
B, L, C = x.shape
_assert(L == H * W, "input feature has wrong size")
shortcut = x
x = self.norm1(x)
x = x.view(B, H, W, C)
# cyclic shift
if self.shift_size > 0:
shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
else:
shifted_x = x
# partition windows
x_windows = window_partition(shifted_x, self.window_size) # nW*B, window_size, window_size, C
x_windows = x_windows.view(-1, self.window_size * self.window_size, C) # nW*B, window_size*window_size, C
# W-MSA/SW-MSA
attn_windows = self.attn(x_windows, mask=self.attn_mask) # nW*B, window_size*window_size, C
# merge windows
attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C)
shifted_x = window_reverse(attn_windows, self.window_size, H, W) # B H' W' C
# reverse cyclic shift
if self.shift_size > 0:
x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2))
else:
x = shifted_x
x = x.view(B, H * W, C)
# FFN
x = shortcut + self.drop_path(x)
if self.mlp_type == 'Mlp':
x = x + self.drop_path(self.mlp(self.norm2(x)))
else:
x = x + self.drop_path(self.mlp(self.norm2(x), H, W))
return x
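# Block-level shape sketch for the first Swin-T stage (assumed sizes, for reference only):
#   input_resolution=(56, 56), dim=96, window_size=7 -> x: (B, 3136, 96)
#   window_partition -> (64*B, 49, 96) -> W-MSA/SW-MSA -> window_reverse -> (B, 3136, 96)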
class PatchMerging(nn.Module):
r""" Patch Merging Layer."""
def __init__(self, input_resolution, dim, norm_layer=nn.LayerNorm):
"""Initialize PatchMerging class
Args:
input_resolution (tuple[int]): Resolution of input feature.
dim (int): Number of input channels.
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
"""
super().__init__()
self.input_resolution = input_resolution
self.dim = dim
self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False)
self.norm = norm_layer(4 * dim)
def forward(self, x):
"""Forward function
Args:
x: B, H*W, C
"""
H, W = self.input_resolution
B, L, C = x.shape
_assert(L == H * W, "input feature has wrong size")
        _assert(H % 2 == 0 and W % 2 == 0, f"x size ({H}*{W}) is not even.")
x = x.view(B, H, W, C)
x0 = x[:, 0::2, 0::2, :] # B H/2 W/2 C
x1 = x[:, 1::2, 0::2, :] # B H/2 W/2 C
x2 = x[:, 0::2, 1::2, :] # B H/2 W/2 C
x3 = x[:, 1::2, 1::2, :] # B H/2 W/2 C
x = torch.cat([x0, x1, x2, x3], -1) # B H/2 W/2 4*C
x = x.view(B, -1, 4 * C) # B H/2*W/2 4*C
x = self.norm(x)
x = self.reduction(x)
return x
def extra_repr(self) -> str:
"""exptra representation"""
return f"input_resolution={self.input_resolution}, dim={self.dim}"
def flops(self):
"""returns FLOPs"""
H, W = self.input_resolution
flops = H * W * self.dim
flops += (H // 2) * (W // 2) * 4 * self.dim * 2 * self.dim
return flops
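# PatchMerging halves the spatial resolution and doubles the channels, e.g.
#   (B, 56*56, 96) -> concat of 2x2 neighbours -> (B, 28*28, 384) -> Linear -> (B, 28*28, 192).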
class BasicLayer(nn.Module):
""" A basic Swin Transformer layer for one stage."""
def __init__(self, dim, input_resolution, depth, num_heads, window_size,
mlp_ratio=4., qkv_bias=True, drop=0., attn_drop=0., mlp_type=None,
drop_path=0., norm_layer=nn.LayerNorm, downsample=None, use_checkpoint=False):
"""Initialize BasicLayer
Args:
dim (int): Number of input channels.
input_resolution (tuple[int]): Input resolution.
depth (int): Number of blocks.
num_heads (int): Number of attention heads.
window_size (int): Local window size.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
drop (float, optional): Dropout rate. Default: 0.0
attn_drop (float, optional): Attention dropout rate. Default: 0.0
drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None
use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
"""
super().__init__()
self.dim = dim
self.input_resolution = input_resolution
self.depth = depth
self.use_checkpoint = use_checkpoint
# build blocks
self.blocks = nn.ModuleList([
SwinTransformerBlock(
dim=dim, input_resolution=input_resolution, num_heads=num_heads, window_size=window_size,
shift_size=0 if (i % 2 == 0) else window_size // 2, mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias, drop=drop, attn_drop=attn_drop, mlp_type=mlp_type,
drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path, norm_layer=norm_layer)
for i in range(depth)])
# patch merging layer
if downsample is not None:
self.downsample = downsample(input_resolution, dim=dim, norm_layer=norm_layer)
else:
self.downsample = None
def forward(self, x):
"""Forward function"""
for blk in self.blocks:
if not torch.jit.is_scripting() and self.use_checkpoint:
x = checkpoint.checkpoint(blk, x)
else:
x = blk(x)
if self.downsample is not None:
x = self.downsample(x)
return x
def extra_repr(self) -> str:
"""exptra representation"""
return f"dim={self.dim}, input_resolution={self.input_resolution}, depth={self.depth}"
class SwinTransformer(nn.Module):
r""" Swin Transformer
A PyTorch impl of : `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows` -
https://arxiv.org/pdf/2103.14030
"""
def __init__(self, img_size=224, patch_size=4, in_chans=3, num_classes=1000,
embed_dim=96, depths=(2, 2, 6, 2), num_heads=(3, 6, 12, 24),
window_size=7, mlp_ratio=4., qkv_bias=True,
drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1,
norm_layer=nn.LayerNorm, ape=False, patch_norm=True,
use_checkpoint=False, weight_init='', mlp_type='Mlp', **kwargs):
"""Initialize SwinTransformer class
Args:
img_size (int | tuple(int)): Input image size. Default 224
patch_size (int | tuple(int)): Patch size. Default: 4
in_chans (int): Number of input image channels. Default: 3
num_classes (int): Number of classes for classification head. Default: 1000
embed_dim (int): Patch embedding dimension. Default: 96
depths (tuple(int)): Depth of each Swin Transformer layer.
num_heads (tuple(int)): Number of attention heads in different layers.
window_size (int): Window size. Default: 7
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4
qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True
drop_rate (float): Dropout rate. Default: 0
attn_drop_rate (float): Attention dropout rate. Default: 0
drop_path_rate (float): Stochastic depth rate. Default: 0.1
norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.
ape (bool): If True, add absolute position embedding to the patch embedding. Default: False
patch_norm (bool): If True, add normalization after patch embedding. Default: True
use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False
"""
super().__init__()
self.num_classes = num_classes
self.num_layers = len(depths)
self.embed_dim = embed_dim
self.ape = ape
self.patch_norm = patch_norm
self.num_features = int(embed_dim * 2 ** (self.num_layers - 1))
self.mlp_ratio = mlp_ratio
# split image into non-overlapping patches
self.patch_embed = PatchEmbed(
img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim,
norm_layer=norm_layer if self.patch_norm else None)
self.patch_grid = self.patch_embed.grid_size
# absolute position embedding
if self.ape:
# self.absolute_pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim))
# trunc_normal_(self.absolute_pos_embed, std=.02)
self.absolute_pos_embed = PositionalEncodingFourier(dim=embed_dim)
else:
self.absolute_pos_embed = None
self.pos_drop = nn.Dropout(p=drop_rate)
# stochastic depth
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule
# build layers
layers = []
for i_layer in range(self.num_layers):
layers += [BasicLayer(
dim=int(embed_dim * 2 ** i_layer),
input_resolution=(self.patch_grid[0] // (2 ** i_layer), self.patch_grid[1] // (2 ** i_layer)),
depth=depths[i_layer],
mlp_type=mlp_type[i_layer],
num_heads=num_heads[i_layer],
window_size=window_size,
mlp_ratio=self.mlp_ratio,
qkv_bias=qkv_bias,
drop=drop_rate,
attn_drop=attn_drop_rate,
drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])],
norm_layer=norm_layer,
downsample=PatchMerging if (i_layer < self.num_layers - 1) else None,
use_checkpoint=use_checkpoint)
]
self.layers = nn.Sequential(*layers)
self.norm = norm_layer(self.num_features)
self.avgpool = nn.AdaptiveAvgPool1d(1)
self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
assert weight_init in ('jax', 'jax_nlhb', 'nlhb', '')
head_bias = -math.log(self.num_classes) if 'nlhb' in weight_init else 0.
if weight_init.startswith('jax'):
for n, m in self.named_modules():
init_weights_vit_timm(m, n, head_bias=head_bias, jax_impl=True) # pylint: disable=E1123
else:
self.apply(init_weights_vit_timm)
@torch.jit.ignore
def no_weight_decay(self):
"""layers to ignore for weight decay"""
return {'absolute_pos_embed'}
@torch.jit.ignore
def no_weight_decay_keywords(self):
"""keywords to ignore for weight decay"""
return {'relative_position_bias_table'}
def get_classifier(self):
"""Returns classifier"""
return self.head
def reset_classifier(self, num_classes, global_pool=''):
"""Redefine classifier of FAN"""
self.num_classes = num_classes
self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
def forward_features(self, x):
"""Extract features"""
x = self.patch_embed(x)
B, N, _ = x.shape
        H = W = int(math.sqrt(N))
if self.absolute_pos_embed is not None:
# import pdb; pdb.set_trace()
x = x + self.absolute_pos_embed(B, H, W).reshape(B, -1, x.shape[1]).permute(0, 2, 1)
x = self.pos_drop(x)
x = self.layers(x)
x = self.norm(x) # B L C
x = self.avgpool(x.transpose(1, 2)) # B C 1
x = torch.flatten(x, 1)
return x
def forward(self, x):
"""Forward functions"""
x = self.forward_features(x)
x = self.head(x)
return x
def _create_fan_swin_transformer(variant, pretrained=False, default_cfg=None, **kwargs):
"""Create FAN Swin Transformer backbone"""
if default_cfg is None:
default_cfg = deepcopy(default_cfgs[variant])
overlay_external_default_cfg(default_cfg, kwargs)
default_num_classes = default_cfg['num_classes']
default_img_size = default_cfg['input_size'][-2:]
num_classes = kwargs.pop('num_classes', default_num_classes)
img_size = kwargs.pop('img_size', default_img_size)
if kwargs.get('features_only', None):
raise RuntimeError('features_only not implemented for Vision Transformer models.')
model = build_model_with_cfg(
SwinTransformer, variant, pretrained,
default_cfg=default_cfg,
img_size=img_size,
num_classes=num_classes,
pretrained_filter_fn=checkpoint_filter_fn,
**kwargs)
return model
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/backbone/swin_utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# This work is made available under the Nvidia Source Code License-NC.
# To view a copy of this license, visit
# https://github.com/NVlabs/FAN/blob/main/LICENSE
"""FAN backbone"""
import math
import warnings
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as checkpoint
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.models.helpers import build_model_with_cfg
from timm.models.vision_transformer import Mlp as MlpOri
from timm.models.registry import register_model
from timm.models.layers import DropPath, trunc_normal_, to_2tuple
# from timm.models.cait import ClassAttn
from nvidia_tao_pytorch.cv.backbone.convnext_utils import _create_hybrid_backbone
from nvidia_tao_pytorch.cv.backbone.swin_utils import _create_fan_swin_transformer
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None,
'crop_pct': 1.0, 'interpolation': 'bicubic', 'fixed_input_size': True,
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'patch_embed.proj.0.0', 'classifier': 'head',
**kwargs
}
default_cfgs = {
# Patch size 16
'fan_tiny_8_p16_224': _cfg(),
'fan_tiny_12_p16_224': _cfg(),
'fan_small_12_p16_224': _cfg(),
'fan_base_18_p16_224': _cfg(),
'fan_large_24_p16_224': _cfg(),
'fan_xlarge_24_p16_224': _cfg(),
}
class ClassAttn(nn.Module):
"""Class Attention Layer as in CaiT https://arxiv.org/abs/2103.17239"""
def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.):
"""Initialize ClassAttn class"""
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = head_dim ** -0.5
self.fast_attn = hasattr(torch._C._nn, '_scaled_dot_product_attention') # pylint:disable=I1101
self.q = nn.Linear(dim, dim, bias=qkv_bias)
self.k = nn.Linear(dim, dim, bias=qkv_bias)
self.v = nn.Linear(dim, dim, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x):
"""Taken from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
with slight modifications to do CA
"""
B, N, C = x.shape
q = self.q(x[:, 0]).unsqueeze(1).reshape(B, 1, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
k = self.k(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
v = self.v(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
if torch.onnx.is_in_onnx_export() or not self.fast_attn:
q = q * self.scale
attn = (q @ k.transpose(-2, -1))
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x_cls = (attn @ v).transpose(1, 2).reshape(B, 1, C)
else:
# Since Torch 1.14, scaled_dot_product_attention has been optimized for performance
x, _ = F._scaled_dot_product_attention(
q, k, v,
dropout_p=self.attn_drop.p,
)
x_cls = x.transpose(1, 2).reshape(B, 1, C)
x_cls = self.proj(x_cls)
x_cls = self.proj_drop(x_cls)
return x_cls
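# ClassAttn attends only from the class token: q is built from x[:, 0] alone, so the attention
# map has shape (B, num_heads, 1, N) and the returned x_cls has shape (B, 1, C).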
class PositionalEncodingFourier(nn.Module):
"""Positional encoding relying on a fourier kernel matching the one used in the "Attention is all of Need" paper."""
def __init__(self, hidden_dim=32, dim=768, temperature=10000):
"""Initialize PositionalEncodingFourier class"""
super().__init__()
self.token_projection = nn.Conv2d(hidden_dim * 2, dim, kernel_size=1)
self.scale = 2 * math.pi
self.temperature = temperature
self.hidden_dim = hidden_dim
self.dim = dim
self.eps = 1e-6
def forward(self, B: int, H: int, W: int, fp32=True):
"""Forward function"""
device = self.token_projection.weight.device
y_embed = torch.arange(1, H + 1, dtype=torch.float32 if fp32 else torch.float16, device=device).unsqueeze(1).repeat(1, 1, W)
x_embed = torch.arange(1, W + 1, dtype=torch.float32 if fp32 else torch.float16, device=device).repeat(1, H, 1)
y_embed = y_embed / (y_embed[:, -1:, :] + self.eps) * self.scale
x_embed = x_embed / (x_embed[:, :, -1:] + self.eps) * self.scale
dim_t = torch.arange(self.hidden_dim, dtype=torch.float32 if fp32 else torch.float16, device=device)
dim_t = self.temperature ** (2 * torch.div(dim_t, 2, rounding_mode='floor') / self.hidden_dim)
pos_x = x_embed[:, :, :, None] / dim_t
pos_y = y_embed[:, :, :, None] / dim_t
pos_x = torch.stack([pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()], dim=4).flatten(3)
pos_y = torch.stack([pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()], dim=4).flatten(3)
pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)
pos = self.token_projection(pos)
return pos.repeat(B, 1, 1, 1) # (B, C, H, W)
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution + batch norm"""
return torch.nn.Sequential(
nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False),
nn.BatchNorm2d(out_planes)
)
def sigmoid(x, inplace=False):
"""Sigmoid function"""
return x.sigmoid_() if inplace else x.sigmoid()
def make_divisible(v, divisor=8, min_value=None):
"""Make tensor divisible"""
min_value = min_value or divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v
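# Worked example: make_divisible(67) = max(8, int(67 + 4) // 8 * 8) = 64, and since
# 64 >= 0.9 * 67 no correction is applied, so 67 rounds down to 64.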
class SqueezeExcite(nn.Module):
"""SqueezeExcite"""
def __init__(self, in_chs, se_ratio=0.25, reduced_base_chs=None,
act_layer=nn.ReLU, gate_fn=sigmoid, divisor=1, **_):
"""Initialize SqueezeExcite class"""
super(SqueezeExcite, self).__init__()
self.gate_fn = gate_fn
reduced_chs = make_divisible((reduced_base_chs or in_chs) * se_ratio, divisor)
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.conv_reduce = nn.Conv2d(in_chs, reduced_chs, 1, bias=True)
self.act1 = act_layer(inplace=True)
self.conv_expand = nn.Conv2d(reduced_chs, in_chs, 1, bias=True)
def forward(self, x):
"""Forward function"""
x_se = self.avg_pool(x)
x_se = self.conv_reduce(x_se)
x_se = self.act1(x_se)
x_se = self.conv_expand(x_se)
x = x * self.gate_fn(x_se)
return x
class SEMlp(nn.Module):
"""SEMlp"""
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0., linear=False, use_se=True):
"""Initialize SEMlP Class"""
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.dwconv = DWConv(hidden_features)
self.gamma = nn.Parameter(torch.ones(hidden_features), requires_grad=True)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
self.linear = linear
if self.linear:
self.relu = nn.ReLU(inplace=True)
self.se = SqueezeExcite(out_features, se_ratio=0.25) if use_se else nn.Identity()
self.apply(self._init_weights)
def _init_weights(self, m):
"""Initialize weights"""
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
elif isinstance(m, nn.Conv2d):
fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
fan_out //= m.groups
m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
if m.bias is not None:
m.bias.data.zero_()
def forward(self, x, H, W):
"""Forward function"""
B, N, C = x.shape
x = self.fc1(x)
if self.linear:
x = self.relu(x)
x = self.drop(self.gamma * self.dwconv(x, H, W)) + x
x = self.fc2(x)
x = self.drop(x)
x = self.se(x.permute(0, 2, 1).reshape(B, C, H, W)).reshape(B, C, N).permute(0, 2, 1)
return x, H, W
class Mlp(nn.Module):
"""Mlp class used for FAN"""
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
"""Initialize Mlp class"""
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.dwconv = DWConv(hidden_features)
self.gamma = nn.Parameter(torch.ones(hidden_features), requires_grad=True)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
self.apply(self._init_weights)
def _init_weights(self, m):
"""Initialize weights"""
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
elif isinstance(m, nn.Conv2d):
fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
fan_out //= m.groups
m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
if m.bias is not None:
m.bias.data.zero_()
def forward(self, x, H, W):
"""Forward function"""
x = self.fc1(x)
x = self.drop(self.gamma * self.dwconv(x, H, W)) + x
x = self.fc2(x)
x = self.drop(x)
return x
class ConvPatchEmbed(nn.Module):
"""Image to Patch Embedding using multiple convolutional layers"""
def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768, act_layer=nn.GELU):
"""Initialize ConvPatchEmbed class"""
super().__init__()
img_size = to_2tuple(img_size)
num_patches = (img_size[1] // patch_size) * (img_size[0] // patch_size)
self.img_size = img_size
self.patch_size = patch_size
self.num_patches = num_patches
if patch_size == 16:
self.proj = torch.nn.Sequential(
conv3x3(in_chans, embed_dim // 8, 2),
act_layer(),
conv3x3(embed_dim // 8, embed_dim // 4, 2),
act_layer(),
conv3x3(embed_dim // 4, embed_dim // 2, 2),
act_layer(),
conv3x3(embed_dim // 2, embed_dim, 2),
)
elif patch_size == 8:
self.proj = torch.nn.Sequential(
conv3x3(in_chans, embed_dim // 4, 2),
act_layer(),
conv3x3(embed_dim // 4, embed_dim // 2, 2),
act_layer(),
conv3x3(embed_dim // 2, embed_dim, 2),
)
elif patch_size == 4:
self.proj = torch.nn.Sequential(
conv3x3(in_chans, embed_dim // 4, 2),
act_layer(),
conv3x3(embed_dim // 4, embed_dim // 1, 2),
)
else:
            raise ValueError('For convolutional projection, patch size has to be one of [4, 8, 16]')
def forward(self, x):
"""Forward function"""
x = self.proj(x)
Hp, Wp = x.shape[2], x.shape[3]
x = x.flatten(2).transpose(1, 2) # (B, N, C)
return x, (Hp, Wp)
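# With patch_size=16 the projection stacks four stride-2 conv3x3 blocks (total stride 16), so a
# (B, 3, 224, 224) image becomes (B, 14*14, embed_dim) tokens with (Hp, Wp) = (14, 14).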
class DWConv(nn.Module):
"""Depth-wise convolution"""
def __init__(self, in_features, out_features=None, act_layer=nn.GELU, kernel_size=3):
"""Initialize DWConv class"""
super().__init__()
out_features = out_features or in_features
padding = kernel_size // 2
self.conv1 = torch.nn.Conv2d(
in_features, in_features, kernel_size=kernel_size, padding=padding, groups=in_features)
self.act = act_layer()
self.bn = nn.BatchNorm2d(in_features)
self.conv2 = torch.nn.Conv2d(
in_features, out_features, kernel_size=kernel_size, padding=padding, groups=out_features)
def forward(self, x, H: int, W: int):
"""Forward function"""
B, N, C = x.shape
x = x.permute(0, 2, 1).reshape(B, C, H, W)
x = self.conv1(x)
x = self.act(x)
x = self.bn(x)
x = self.conv2(x)
x = x.reshape(B, C, N).permute(0, 2, 1)
return x
class ClassAttentionBlock(nn.Module):
"""Class Attention Layer as in CaiT https://arxiv.org/abs/2103.17239"""
def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, drop=0., attn_drop=0., drop_path=0.,
act_layer=nn.GELU, norm_layer=nn.LayerNorm, eta=1., tokens_norm=False):
"""Initialize ClassAttentionBlock class"""
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = ClassAttn(
dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
self.mlp = MlpOri(in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, drop=drop)
if eta is not None: # LayerScale Initialization (no layerscale when None)
self.gamma1 = nn.Parameter(eta * torch.ones(dim), requires_grad=True)
self.gamma2 = nn.Parameter(eta * torch.ones(dim), requires_grad=True)
else:
self.gamma1, self.gamma2 = 1.0, 1.0
# See https://github.com/rwightman/pytorch-image-models/pull/747#issuecomment-877795721
self.tokens_norm = tokens_norm
def forward(self, x, return_attention=False):
"""Forward function"""
x_norm1 = self.norm1(x)
if return_attention:
x1, attn = self.attn(x_norm1, use_attn=return_attention)
else:
x1 = self.attn(x_norm1)
x_attn = torch.cat([x1, x_norm1[:, 1:]], dim=1)
x = x + self.drop_path(self.gamma1 * x_attn)
if self.tokens_norm:
x = self.norm2(x)
else:
x = torch.cat([self.norm2(x[:, 0:1]), x[:, 1:]], dim=1)
x_res = x
cls_token = x[:, 0:1]
cls_token = self.gamma2 * self.mlp(cls_token)
x = torch.cat([cls_token, x[:, 1:]], dim=1)
x = x_res + self.drop_path(x)
if return_attention:
return attn
return x
class TokenMixing(nn.Module):
"""Token Mixing"""
def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.,
sr_ratio=1, linear=False, share_atten=False, drop_path=0., emlp=False, sharpen_attn=False,
mlp_hidden_dim=None, act_layer=nn.GELU, drop=None, norm_layer=nn.LayerNorm):
"""Initialize TokenMixing class"""
super().__init__()
assert dim % num_heads == 0, f"dim {dim} should be divided by num_heads {num_heads}."
self.dim = dim
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = qk_scale or head_dim ** -0.5
self.share_atten = share_atten
self.emlp = emlp
cha_sr = 1
self.q = nn.Linear(dim, dim // cha_sr, bias=qkv_bias)
self.kv = nn.Linear(dim, dim * 2 // cha_sr, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
self.linear = linear
self.sr_ratio = sr_ratio
self.apply(self._init_weights)
def _init_weights(self, m):
"""Initialize weights"""
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
elif isinstance(m, nn.Conv2d):
fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
fan_out //= m.groups
m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
if m.bias is not None:
m.bias.data.zero_()
def forward(self, x, H, W, atten=None, return_attention=False):
"""Forward function"""
B, N, C = x.shape
q = self.q(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
kv = self.kv(x).reshape(B, -1, 2, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
k, v = kv[0], kv[1]
attn = (q * self.scale @ k.transpose(-2, -1)) # * self.scale
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x, attn @ v
class HybridEmbed(nn.Module):
""" CNN Feature Map Embedding
Extract feature map from CNN, flatten, project to embedding dim.
"""
def __init__(self, backbone, img_size=224, patch_size=2, feature_size=None, in_chans=3, embed_dim=384):
"""Initialize HybridEmbedding class"""
super().__init__()
assert isinstance(backbone, nn.Module)
img_size = to_2tuple(img_size)
patch_size = to_2tuple(patch_size)
self.img_size = img_size
self.patch_size = patch_size
self.backbone = backbone
if feature_size is None:
with torch.no_grad():
# NOTE Most reliable way of determining output dims is to run forward pass
training = backbone.training
if training:
backbone.eval()
o = self.backbone.forward_features(torch.zeros(1, in_chans, img_size[0], img_size[1]))
if isinstance(o, (list, tuple)):
o = o[-1] # last feature if backbone outputs list/tuple of features
feature_size = o.shape[-2:]
feature_dim = o.shape[1]
backbone.train(training)
else:
feature_size = to_2tuple(feature_size)
if hasattr(self.backbone, 'feature_info'):
feature_dim = self.backbone.feature_info.channels()[-1]
else:
feature_dim = self.backbone.num_features
assert feature_size[0] % patch_size[0] == 0 and feature_size[1] % patch_size[1] == 0
self.grid_size = (feature_size[0] // patch_size[0], feature_size[1] // patch_size[1])
self.num_patches = self.grid_size[0] * self.grid_size[1]
self.proj = nn.Conv2d(feature_dim, embed_dim, kernel_size=patch_size, stride=patch_size)
def forward(self, x):
"""Forward function"""
x = self.backbone.forward_features(x)
if isinstance(x, (list, tuple)):
x = x[-1] # last feature if backbone outputs list/tuple of features
_, _, H, W = x.shape
x = self.proj(x).flatten(2).transpose(1, 2)
return x, (H // self.patch_size[0], W // self.patch_size[1])
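# HybridEmbed infers the CNN feature size with a dummy forward pass when `feature_size` is not
# given, then projects the (B, feature_dim, H, W) map to tokens; e.g. with patch_size=2 a
# 28x28 feature map yields (B, 14*14, embed_dim) tokens and a (14, 14) grid.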
class ChannelProcessing(nn.Module):
"""Channel Processing"""
def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0., drop_path=0.,
mlp_hidden_dim=None, act_layer=nn.GELU, drop=None, norm_layer=nn.LayerNorm, cha_sr_ratio=1, c_head_num=None):
"""Initialize ChannelProcessing class"""
super().__init__()
assert dim % num_heads == 0, f"dim {dim} should be divided by num_heads {num_heads}."
self.dim = dim
num_heads = c_head_num or num_heads
self.num_heads = num_heads
self.temperature = nn.Parameter(torch.ones(num_heads, 1, 1))
self.cha_sr_ratio = cha_sr_ratio if num_heads > 1 else 1
# config of mlp for v processing
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.mlp_v = Mlp(in_features=dim // self.cha_sr_ratio, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
self.norm_v = norm_layer(dim // self.cha_sr_ratio)
self.q = nn.Linear(dim, dim, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.apply(self._init_weights)
def _init_weights(self, m):
"""Initialize weights"""
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
elif isinstance(m, nn.Conv2d):
fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
fan_out //= m.groups
m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
if m.bias is not None:
m.bias.data.zero_()
def _gen_attn(self, q, k):
"""Returns attention"""
_, _, N, _ = k.shape
if torch.onnx.is_in_onnx_export():
            # If the softmax dim is not the last dimension, PyTorch decomposes the softmax op into
# smaller ops like ReduceMax, ReduceSum, Sub, and Div.
# As a result, ONNX export fails for opset_version >= 12.
# Here, we rearrange the transpose so that softmax is done over the last dimension.
q = q.transpose(-1, -2).softmax(-1)
k = k.transpose(-1, -2).softmax(-1)
warnings.warn("Replacing default adatpive_avg_pool2d to custom implementation for ONNX export")
# adaptive_avg_pool2d is not supported for torch to onnx export
k = adaptive_avg_pool(k.transpose(-1, -2), (N, 1))
else:
q = q.softmax(-2).transpose(-1, -2)
k = torch.nn.functional.adaptive_avg_pool2d(k.softmax(-2), (N, 1))
attn = torch.nn.functional.sigmoid(q @ k)
return attn * self.temperature
def forward(self, x, H, W, atten=None):
"""Forward functions """
B, N, C = x.shape
v = x.reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
q = self.q(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
k = x.reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
attn = self._gen_attn(q, k)
attn = self.attn_drop(attn)
Bv, Hd, Nv, Cv = v.shape
v = self.norm_v(self.mlp_v(v.transpose(1, 2).reshape(Bv, Nv, Hd * Cv), H, W)).reshape(Bv, Nv, Hd, Cv).transpose(1, 2)
repeat_time = N // attn.shape[-1]
attn = attn.repeat_interleave(repeat_time, dim=-1) if attn.shape[-1] > 1 else attn
x = (attn * v.transpose(-1, -2)).permute(0, 3, 1, 2).reshape(B, N, C)
return x, (attn * v.transpose(-1, -2)).transpose(-1, -2) # attn
@torch.jit.ignore
def no_weight_decay(self):
"""Ignore during weight decay"""
return {'temperature'}
def adaptive_avg_pool(x, size):
"""Function to convert to ONNX exportable F.adaptive_avg_pool2d
Args:
x: Input tensor
size: Output Dimension. Can be either an integer or a tuple
"""
inp_size = x.size()
kernel_width, kernel_height = inp_size[2], inp_size[3]
if size is not None:
if isinstance(size, int):
kernel_width = math.ceil(inp_size[2] / size)
kernel_height = math.ceil(inp_size[3] / size)
elif isinstance(size, (list, tuple)):
assert len(size) == 2
kernel_width = math.ceil(inp_size[2] / size[0])
kernel_height = math.ceil(inp_size[3] / size[1])
if torch.is_tensor(kernel_width):
kernel_width = kernel_width.item()
kernel_height = kernel_height.item()
return F.avg_pool2d(
input=x, ceil_mode=False, kernel_size=(kernel_width, kernel_height)
)
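# Worked example (assumed sizes): in ChannelProcessing._gen_attn the ONNX path calls
# adaptive_avg_pool on a (B, heads, N, C/heads) tensor with size=(N, 1); the kernel becomes
# (ceil(N/N), ceil((C/heads)/1)) = (1, C/heads), so avg_pool2d yields (B, heads, N, 1),
# matching F.adaptive_avg_pool2d(..., (N, 1)).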
class FANBlock_SE(nn.Module):
"""FAN SE block from https://arxiv.org/abs/2204.12451"""
def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, drop=0., attn_drop=0., sharpen_attn=False, use_se=False,
drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, eta=1., sr_ratio=1., qk_scale=None, linear=False, downsample=None, c_head_num=None):
"""Initialize FANBlock_SE class"""
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = TokenMixing(dim, num_heads=num_heads, qkv_bias=qkv_bias,
attn_drop=attn_drop, proj_drop=drop, drop=drop, drop_path=drop_path, sr_ratio=sr_ratio)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
self.mlp = SEMlp(in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, drop=drop)
self.gamma1 = nn.Parameter(eta * torch.ones(dim), requires_grad=True)
self.gamma2 = nn.Parameter(eta * torch.ones(dim), requires_grad=True)
def forward(self, x, H: int, W: int, attn=None):
"""Forward function"""
x_new = self.attn(self.norm1(x), H, W)
x = x + self.drop_path(self.gamma1 * x_new)
x_new, H, W = self.mlp(self.norm2(x), H, W)
x = x + self.drop_path(self.gamma2 * x_new)
return x, H, W
class FANBlock(nn.Module):
"""FAN block from https://arxiv.org/abs/2204.12451"""
def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, drop=0., attn_drop=0., sharpen_attn=False,
drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, eta=1., sr_ratio=1., downsample=None, c_head_num=None):
"""Initialize FANBlock class"""
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = TokenMixing(dim, num_heads=num_heads, qkv_bias=qkv_bias, mlp_hidden_dim=int(dim * mlp_ratio), sharpen_attn=sharpen_attn,
attn_drop=attn_drop, proj_drop=drop, drop=drop, drop_path=drop_path, sr_ratio=sr_ratio)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
self.mlp = ChannelProcessing(dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop,
drop_path=drop_path, drop=drop, mlp_hidden_dim=int(dim * mlp_ratio), c_head_num=c_head_num)
self.gamma1 = nn.Parameter(eta * torch.ones(dim), requires_grad=True)
self.gamma2 = nn.Parameter(eta * torch.ones(dim), requires_grad=True)
self.downsample = downsample
self.H = None
self.W = None
def forward(self, x, attn=None, return_attention=False):
"""Forward function"""
H, W = self.H, self.W
x_new, attn_s = self.attn(self.norm1(x), H, W)
x = x + self.drop_path(self.gamma1 * x_new)
x_new, _ = self.mlp(self.norm2(x), H, W, atten=attn)
x = x + self.drop_path(self.gamma2 * x_new)
if return_attention:
return x, attn_s
if self.downsample is not None:
x, H, W = self.downsample(x, H, W)
self.H, self.W = H, W
return x
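# FANBlock update rule (gamma1/gamma2 are learnable LayerScale vectors):
#   x = x + DropPath(gamma1 * TokenMixing(LN(x)))
#   x = x + DropPath(gamma2 * ChannelProcessing(LN(x)))
# with an optional OverlapPatchEmbed downsample applied between stages.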
class OverlapPatchEmbed(nn.Module):
""" Image to Patch Embedding"""
def __init__(self, img_size=224, patch_size=7, stride=4, in_chans=3, embed_dim=768):
"""Overlap PatchEmbed"""
super().__init__()
img_size = to_2tuple(img_size)
patch_size = to_2tuple(patch_size)
self.img_size = img_size
self.patch_size = patch_size
self.H, self.W = img_size[0] // patch_size[0], img_size[1] // patch_size[1]
self.num_patches = self.H * self.W
self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=stride,
padding=(patch_size[0] // 2, patch_size[1] // 2))
self.norm = nn.LayerNorm(embed_dim)
self.apply(self._init_weights)
def _init_weights(self, m):
"""Initialize weights"""
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
elif isinstance(m, nn.Conv2d):
fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
fan_out //= m.groups
m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
if m.bias is not None:
m.bias.data.zero_()
def forward(self, x, H, W):
"""Forward functions """
B, _, C = x.shape
x = x.transpose(-1, -2).reshape(B, C, H, W)
x = self.proj(x)
_, _, H, W = x.shape
x = x.flatten(2).transpose(1, 2)
x = self.norm(x)
return x, H, W
class FAN(nn.Module):
"""
Based on timm code bases
https://github.com/rwightman/pytorch-image-models/tree/master/timm
"""
def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12, sharpen_attn=False, channel_dims=None,
num_heads=12, mlp_ratio=4., qkv_bias=True, drop_rate=0., attn_drop_rate=0., drop_path_rate=0., sr_ratio=None, backbone=None, use_checkpoint=False,
act_layer=None, norm_layer=None, se_mlp=False, cls_attn_layers=2, use_pos_embed=True, eta=1., tokens_norm=False, c_head_num=None, hybrid_patch_size=2, head_init_scale=1.0):
"""Initialize FAN class"""
super().__init__()
img_size = to_2tuple(img_size)
self.use_checkpoint = use_checkpoint
        assert (img_size[0] % patch_size == 0) and (img_size[1] % patch_size == 0), \
'`patch_size` should divide image dimensions evenly'
self.num_classes = num_classes
num_heads = [num_heads] * depth if not isinstance(num_heads, list) else num_heads
channel_dims = [embed_dim] * depth if channel_dims is None else channel_dims
norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6)
act_layer = act_layer or nn.GELU
if backbone is None:
self.patch_embed = ConvPatchEmbed(
img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim, act_layer=act_layer)
else:
self.patch_embed = HybridEmbed(backbone=backbone, patch_size=hybrid_patch_size, embed_dim=embed_dim)
self.use_pos_embed = use_pos_embed
if use_pos_embed:
self.pos_embed = PositionalEncodingFourier(dim=embed_dim)
self.pos_drop = nn.Dropout(p=drop_rate)
if se_mlp:
build_block = FANBlock_SE
else:
build_block = FANBlock
self.blocks = nn.ModuleList([])
for i in range(depth):
if i < depth - 1 and channel_dims[i] != channel_dims[i + 1]:
downsample = OverlapPatchEmbed(img_size=img_size,
patch_size=3,
stride=2,
in_chans=channel_dims[i],
embed_dim=channel_dims[i + 1])
else:
downsample = None
self.blocks.append(
build_block(
dim=channel_dims[i], num_heads=num_heads[i], mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, drop=drop_rate, sr_ratio=sr_ratio[i],
attn_drop=attn_drop_rate, drop_path=drop_path_rate, act_layer=act_layer, norm_layer=norm_layer, eta=eta,
downsample=downsample, c_head_num=c_head_num[i] if c_head_num is not None else None))
self.num_features = self.embed_dim = channel_dims[i]
self.cls_token = nn.Parameter(torch.zeros(1, 1, channel_dims[i]))
self.cls_attn_blocks = nn.ModuleList([
ClassAttentionBlock(
dim=channel_dims[-1], num_heads=num_heads[-1], mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, drop=drop_rate,
attn_drop=attn_drop_rate, act_layer=act_layer, norm_layer=norm_layer, eta=eta, tokens_norm=tokens_norm)
for _ in range(cls_attn_layers)])
# Classifier head
self.norm = norm_layer(channel_dims[i])
self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
# Init weights
trunc_normal_(self.cls_token, std=.02)
self.apply(self._init_weights)
self.head.weight.data.mul_(head_init_scale)
self.head.bias.data.mul_(head_init_scale)
def _init_weights(self, m):
"""Initialize weights"""
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
@torch.jit.ignore
def no_weight_decay(self):
"""layers to ignore for weight decay"""
return {'pos_embed', 'cls_token'}
def get_classifier(self):
"""Returns classifier"""
return self.head
def reset_classifier(self, num_classes, global_pool=''):
"""Redefine classifier of FAN"""
self.num_classes = num_classes
self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
def forward_features(self, x):
"""Extract features"""
B = x.shape[0]
x, (Hp, Wp) = self.patch_embed(x)
if self.use_pos_embed:
pos_encoding = self.pos_embed(B, Hp, Wp).reshape(B, -1, x.shape[1]).permute(0, 2, 1)
x = x + pos_encoding
x = self.pos_drop(x)
H, W = Hp, Wp
for blk in self.blocks:
blk.H, blk.W = H, W
if self.use_checkpoint:
x = checkpoint.checkpoint(blk, x)
else:
x = blk(x)
H, W = blk.H, blk.W
cls_tokens = self.cls_token.expand(B, -1, -1)
x = torch.cat((cls_tokens, x), dim=1)
for blk in self.cls_attn_blocks:
x = blk(x)
x = self.norm(x)[:, 0]
return x
def base_forward(self, x):
"""Base forward function"""
B = x.shape[0]
x, (Hp, Wp) = self.patch_embed(x)
if self.use_pos_embed:
pos_encoding = self.pos_embed(B, Hp, Wp).reshape(B, -1, x.shape[1]).permute(0, 2, 1)
x = x + pos_encoding
x = self.pos_drop(x)
H, W = Hp, Wp
for blk in self.blocks:
blk.H, blk.W = H, W
if self.use_checkpoint:
x = checkpoint.checkpoint(blk, x)
else:
x = blk(x)
H, W = blk.H, blk.W
cls_tokens = self.cls_token.expand(B, -1, -1)
x = torch.cat((cls_tokens, x), dim=1)
for blk in self.cls_attn_blocks:
x = blk(x)
x = self.norm(x)
return x
def forward(self, x):
"""Forward functions"""
x = self.forward_features(x)
x = self.head(x)
return x
def get_spatial_feat(self, x):
"""Turn token feature into spatial feature.
Args:
x (torch.Tensor): token feature in [B, 1024+1, 768]
Return:
x (torch.Tensor): feature map in (B, 768, H, W)
"""
b, n, c = x.shape
h, w = int((n - 1 + 1e-6) ** 0.5), int((n - 1 + 1e-6) ** 0.5)
x = x[:, 1:].transpose(2, 1).reshape(b, c, h, w)
return x
def get_last_selfattention(self, x, use_cls_attn=False, layer_idx=11):
"""Returns last self-attention"""
B = x.shape[0]
x, (Hp, Wp) = self.patch_embed(x)
if self.use_pos_embed:
pos_encoding = self.pos_embed(B, Hp, Wp).reshape(B, -1, x.shape[1]).permute(0, 2, 1)
x = x + pos_encoding
x = self.pos_drop(x)
return_idx = layer_idx or len(self.blocks) - 1
for i, blk in enumerate(self.blocks):
if i == return_idx:
x, attn = blk(x, Hp, Wp, return_attention=True)
else:
x, Hp, Wp = blk(x, Hp, Wp)
attn = None
if use_cls_attn:
cls_tokens = self.cls_token.expand(B, -1, -1)
x = torch.cat((cls_tokens, x), dim=1)
for i, blk in enumerate(self.cls_attn_blocks):
if i < len(self.cls_attn_blocks) - 1:
x = blk(x)
else:
attn = blk(x, return_attention=True)
return attn
return attn
def checkpoint_filter_fn(state_dict, model):
"""Filter loaded checkpoints"""
if 'model' in state_dict:
state_dict = state_dict['model']
# For consistency with timm's transformer models while being compatible with official weights source we rename
# pos_embeder to pos_embed. Also account for use_pos_embed == False
use_pos_embed = getattr(model, 'pos_embed', None) is not None
pos_embed_keys = [k for k in state_dict if k.startswith('pos_embed')]
for k in pos_embed_keys:
if use_pos_embed:
state_dict[k.replace('pos_embeder.', 'pos_embed.')] = state_dict.pop(k)
else:
del state_dict[k]
if 'cls_attn_blocks.0.attn.qkv.weight' in state_dict and 'cls_attn_blocks.0.attn.q.weight' in model.state_dict():
num_ca_blocks = len(model.cls_attn_blocks)
for i in range(num_ca_blocks):
qkv_weight = state_dict.pop(f'cls_attn_blocks.{i}.attn.qkv.weight')
qkv_weight = qkv_weight.reshape(3, -1, qkv_weight.shape[-1])
for j, subscript in enumerate('qkv'):
state_dict[f'cls_attn_blocks.{i}.attn.{subscript}.weight'] = qkv_weight[j]
qkv_bias = state_dict.pop(f'cls_attn_blocks.{i}.attn.qkv.bias', None)
if qkv_bias is not None:
qkv_bias = qkv_bias.reshape(3, -1)
for j, subscript in enumerate('qkv'):
state_dict[f'cls_attn_blocks.{i}.attn.{subscript}.bias'] = qkv_bias[j]
return state_dict
def _create_fan(variant, pretrained=False, default_cfg=None, **kwargs):
"""Create FAN backbone"""
default_cfg = default_cfg or default_cfgs[variant]
model = build_model_with_cfg(
FAN, variant, pretrained, pretrained_cfg=default_cfg, pretrained_filter_fn=checkpoint_filter_fn, **kwargs)
return model
# FAN-ViT Models
@register_model
def fan_tiny_12_p16_224(pretrained=False, bn_tf=False, **kwargs):
"""FAN-ViT Tiny"""
depth = 12
sr_ratio = [1] * (depth // 2) + [1] * (depth // 2)
model_kwargs = dict(
patch_size=16, embed_dim=192, depth=depth, num_heads=4, eta=1.0, tokens_norm=True, sharpen_attn=False, **kwargs)
model = _create_fan('fan_tiny_12_p16_224', pretrained=pretrained, sr_ratio=sr_ratio, **model_kwargs)
return model
@register_model
def fan_small_12_p16_224_se_attn(pretrained=False, **kwargs):
"""FAN-ViT SE Small"""
depth = 12
sr_ratio = [1] * (depth // 2) + [1] * (depth // 2)
model_kwargs = dict(
patch_size=16, embed_dim=384, depth=depth, num_heads=8, eta=1.0, tokens_norm=True, sharpen_attn=False, se_mlp=True, **kwargs)
model = _create_fan('fan_small_12_p16_224', pretrained=pretrained, sr_ratio=sr_ratio, **model_kwargs)
return model
@register_model
def fan_small_12_p16_224(pretrained=False, **kwargs):
"""FAN-ViT Small"""
depth = 12
sr_ratio = [1] * depth
model_kwargs = dict(
patch_size=16, embed_dim=384, depth=depth, num_heads=8, eta=1.0, tokens_norm=True, **kwargs)
model = _create_fan('fan_small_12_p16_224', pretrained=pretrained, sr_ratio=sr_ratio, **model_kwargs)
return model
@register_model
def fan_base_18_p16_224(pretrained=False, **kwargs):
"""FAN-ViT Base"""
depth = 18
sr_ratio = [1] * (depth // 2) + [1] * (depth // 2)
model_kwargs = dict(
patch_size=16, embed_dim=448, depth=depth, num_heads=8, eta=1.0, tokens_norm=True, sharpen_attn=False, **kwargs)
model = _create_fan('fan_base_18_p16_224', pretrained=pretrained, sr_ratio=sr_ratio, **model_kwargs)
return model
@register_model
def fan_large_24_p16_224(pretrained=False, **kwargs):
"""FAN-ViT Large"""
depth = 24
sr_ratio = [1] * (depth // 2) + [1] * (depth // 2)
model_kwargs = dict(
patch_size=16, embed_dim=480, depth=depth, num_heads=10, eta=1.0, tokens_norm=True, sharpen_attn=False, **kwargs)
model = _create_fan('fan_large_24_p16_224', pretrained=pretrained, sr_ratio=sr_ratio, **model_kwargs)
return model
# FAN-Hybrid Models
# CNN backbones are based on ConvNeXt architecture with only first two stages for downsampling purpose
# This has been verified to be beneficial for downstream tasks
@register_model
def fan_tiny_8_p4_hybrid(pretrained=False, **kwargs):
"""FAN Hybrid Tiny"""
depth = 8
sr_ratio = [1] * (depth // 2) + [1] * (depth // 2 + 1)
model_args = dict(depths=[3, 3], dims=[128, 256, 512, 1024], use_head=False)
backbone = _create_hybrid_backbone(pretrained=False, pretrained_strict=False, **model_args)
model_kwargs = dict(
patch_size=16, embed_dim=192, depth=depth, num_heads=8, eta=1.0, tokens_norm=True, sharpen_attn=False, **kwargs)
model = _create_fan('fan_tiny_8_p16_224', pretrained=pretrained, sr_ratio=sr_ratio, backbone=backbone, **model_kwargs)
return model
@register_model
def fan_small_12_p4_hybrid(pretrained=False, **kwargs):
"""FAN Hybrid Small"""
depth = 10
channel_dims = [384] * 10 + [384] * (depth - 10)
sr_ratio = [1] * (depth // 2) + [1] * (depth // 2)
model_args = dict(depths=[3, 3], dims=[128, 256, 512, 1024], use_head=False)
backbone = _create_hybrid_backbone(pretrained=False, pretrained_strict=False, **model_args)
model_kwargs = dict(
patch_size=16, embed_dim=384, depth=depth, num_heads=8, eta=1.0, tokens_norm=True, sharpen_attn=False, **kwargs)
model = _create_fan('fan_small_12_p16_224', pretrained=pretrained, sr_ratio=sr_ratio, backbone=backbone,
channel_dims=channel_dims, **model_kwargs)
return model
@register_model
def fan_base_16_p4_hybrid(pretrained=False, **kwargs):
"""FAN Hybrid Base"""
depth = 16
sr_ratio = [1] * (depth // 2) + [1] * (depth // 2)
model_args = dict(depths=[3, 3], dims=[128, 256, 512, 1024], use_head=False)
backbone = _create_hybrid_backbone(pretrained=False, pretrained_strict=False, **model_args)
model_kwargs = dict(
patch_size=16, embed_dim=448, depth=depth, num_heads=8, eta=1.0, tokens_norm=True, sharpen_attn=False, **kwargs)
model = _create_fan('fan_base_18_p16_224', pretrained=pretrained, sr_ratio=sr_ratio, backbone=backbone, **model_kwargs)
return model
@register_model
def fan_large_16_p4_hybrid(pretrained=False, **kwargs):
"""FAN Hybrid Large"""
depth = 22
sr_ratio = [1] * (depth // 2) + [1] * (depth // 2)
model_args = dict(depths=[3, 5], dims=[128, 256, 512, 1024], use_head=False)
backbone = _create_hybrid_backbone(pretrained=False, pretrained_strict=False, **model_args)
model_kwargs = dict(
patch_size=16, embed_dim=480, depth=depth, num_heads=10, eta=1.0, tokens_norm=True, sharpen_attn=False, head_init_scale=0.001, **kwargs)
model = _create_fan('fan_large_24_p16_224', pretrained=pretrained, sr_ratio=sr_ratio, backbone=backbone, **model_kwargs)
return model
@register_model
def fan_Xlarge_16_p4_hybrid(pretrained=False, **kwargs):
"""For those who have enough GPUs, could try this...."""
depth = 23
stage_depth = 20
channel_dims = [528] * stage_depth + [768] * (depth - stage_depth)
num_heads = [11] * stage_depth + [16] * (depth - stage_depth)
sr_ratio = [1] * (depth // 2) + [1] * (depth // 2 + 1)
model_args = dict(depths=[3, 7], dims=[128, 256, 512, 1024], use_head=False)
backbone = _create_hybrid_backbone(pretrained=False, pretrained_strict=False, **model_args)
model_kwargs = dict(
patch_size=16, embed_dim=channel_dims[0], depth=depth, num_heads=num_heads, eta=1.0, tokens_norm=True, sharpen_attn=False, **kwargs)
model = _create_fan('fan_xlarge_24_p16_224', pretrained=pretrained, sr_ratio=sr_ratio, backbone=backbone,
channel_dims=channel_dims, **model_kwargs)
return model
@register_model
def fan_swin_tiny_patch4_window7_224(pretrained=False, **kwargs):
""" Swin-T @ 224x224, trained ImageNet-1k"""
mlp_type = ['FAN', 'FAN', 'FAN', 'Mlp']
model_kwargs = dict(
patch_size=4, window_size=7, embed_dim=96, depths=(2, 2, 6, 2), num_heads=(3, 6, 12, 24), **kwargs)
return _create_fan_swin_transformer('swin_tiny_patch4_window7_224', pretrained=pretrained, mlp_type=mlp_type, **model_kwargs)
# FAN-Swin Models
@register_model
def fan_swin_small_patch4_window7_224(pretrained=False, **kwargs):
""" Swin-S @ 224x224, trained ImageNet-1k"""
mlp_type = ['FAN', 'FAN', 'FAN', 'Mlp']
model_kwargs = dict(
patch_size=4, window_size=7, embed_dim=96, depths=(2, 2, 18, 2), num_heads=(3, 6, 12, 24), **kwargs)
return _create_fan_swin_transformer('swin_small_patch4_window7_224', pretrained=pretrained, mlp_type=mlp_type, **model_kwargs)
@register_model
def fan_swin_base_patch4_window7_224(pretrained=False, **kwargs):
""" Swin-S @ 224x224, trained ImageNet-1k"""
mlp_type = ['FAN', 'FAN', 'FAN', 'Mlp']
model_kwargs = dict(
patch_size=4, window_size=7, embed_dim=128, depths=(2, 2, 18, 2), num_heads=(4, 8, 16, 32), **kwargs)
return _create_fan_swin_transformer('swin_base_patch4_window7_224', pretrained=pretrained, mlp_type=mlp_type, **model_kwargs)
@register_model
def fan_swin_large_patch4_window7_224(pretrained=False, **kwargs):
""" Swin-S @ 224x224, trained ImageNet-1k"""
mlp_type = ['FAN', 'FAN', 'FAN', 'Mlp']
model_kwargs = dict(
patch_size=4, window_size=7, embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48), **kwargs)
return _create_fan_swin_transformer('swin_large_patch4_window7_224', pretrained=pretrained, mlp_type=mlp_type, **model_kwargs)
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/backbone/fan.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# This work is made available under the Nvidia Source Code License-NC.
# To view a copy of this license, visit
#
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Modified by: Shiyi Lan
# Mostly copy-paste from timm library.
# https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
"""Vision Transformer Backbone"""
import warnings
import math
import torch
import torch.nn as nn
import pytorch_lightning as pl
def _no_grad_trunc_normal_(tensor, mean, std, a, b):
"""Cut & paste from PyTorch official master until it's in a few official releases - RW
Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
"""
def norm_cdf(x):
"""Computes standard normal cumulative distribution function"""
return (1. + math.erf(x / math.sqrt(2.))) / 2.
if (mean < a - 2 * std) or (mean > b + 2 * std):
warnings.warn("mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
"The distribution of values may be incorrect.",
stacklevel=2)
with torch.no_grad():
# Values are generated by using a truncated uniform distribution and
# then using the inverse CDF for the normal distribution.
# Get upper and lower cdf values
l = norm_cdf((a - mean) / std) # noqa: E741
u = norm_cdf((b - mean) / std)
# Uniformly fill tensor with values from [l, u], then translate to
# [2l-1, 2u-1].
tensor.uniform_(2 * l - 1, 2 * u - 1)
# Use inverse cdf transform for normal distribution to get truncated
# standard normal
tensor.erfinv_()
# Transform to proper mean, std
tensor.mul_(std * math.sqrt(2.))
tensor.add_(mean)
# Clamp to ensure it's in the proper range
tensor.clamp_(min=a, max=b)
return tensor
def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.):
"""type: (Tensor, float, float, float, float) -> Tensor"""
return _no_grad_trunc_normal_(tensor, mean, std, a, b)
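# Illustrative sketch (not part of the original module): trunc_normal_ draws values via the
# inverse normal CDF and clamps them to [a, b], so every entry lands inside the truncation
# bounds. The tensor shape and bounds below are arbitrary assumptions chosen for the demo.
def _example_trunc_normal():
    """Initialize a weight tensor with trunc_normal_ and verify the bounds (illustrative only)."""
    w = torch.empty(4, 4)
    trunc_normal_(w, mean=0., std=0.02, a=-0.04, b=0.04)
    assert float(w.min()) >= -0.04 and float(w.max()) <= 0.04
    return w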
def drop_path(x, drop_prob: float = 0., training: bool = False):
"""drop path forward"""
if drop_prob == 0. or not training:
return x
keep_prob = 1 - drop_prob
shape = (x.shape[0],) + (1,) * (x.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device)
random_tensor.floor_() # binarize
output = x.div(keep_prob) * random_tensor
return output
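# Illustrative sketch (not part of the original module): drop_path zeroes whole samples with
# probability drop_prob and rescales the survivors by 1 / keep_prob, which keeps the expected
# activation unchanged. The tensor size below is an arbitrary assumption for the demo.
def _example_drop_path():
    """Apply drop_path to a batch of ones and inspect the per-sample scaling (illustrative only)."""
    x = torch.ones(8, 3, 4, 4)
    out = drop_path(x, drop_prob=0.5, training=True)
    # every sample is either fully zeroed or rescaled to 1 / keep_prob = 2.0
    assert set(out[:, 0, 0, 0].tolist()) <= {0.0, 2.0}
    return out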
class DropPath(nn.Module):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""
def __init__(self, drop_prob=None):
"""Initialize DropPath class"""
super(DropPath, self).__init__()
self.drop_prob = drop_prob
def forward(self, x):
"""Forward function"""
return drop_path(x, self.drop_prob, self.training)
class Mlp(nn.Module):
"""Mlp"""
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
"""Initialize Mlp class"""
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
"""Forward function"""
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class ConvStem(nn.Module):
"""ConvStem, from Early Convolutions Help Transformers See Better, Tete et
al.
https://arxiv.org/abs/2106.14881
"""
def __init__(
self,
img_size=224,
patch_size=16,
in_chans=3,
embed_dim=768,
depth=4,
norm_layer=None,
):
"""Initialize ConvStem class"""
super().__init__()
assert embed_dim % 8 == 0, "Embed dimension must be divisible by 8 for ConvStem"
self.img_size = img_size
self.patch_size = patch_size
self.grid_size = (img_size // patch_size, img_size // patch_size)
self.num_patches = self.grid_size[0] * self.grid_size[1]
self.depth = depth
# build stem, similar to the design in https://arxiv.org/abs/2106.14881
stem = []
input_dim, output_dim = in_chans, embed_dim // (2 ** (depth - 1))
for idx in range(depth):
stage_list = [
nn.Conv2d(
input_dim,
output_dim,
kernel_size=3,
stride=2,
padding=1,
bias=False,
),
nn.GroupNorm(1, output_dim, eps=1e-6),
nn.GELU(),
]
if idx == depth - 1:
stage_list.append(nn.Conv2d(output_dim, embed_dim, kernel_size=1))
stage = nn.Sequential(*stage_list)
input_dim = output_dim
output_dim *= 2
stem.append(stage)
self.proj = nn.ModuleList(stem)
self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity()
def forward(self, x):
"""Forward function"""
for _, stage in enumerate(self.proj):
x = stage(x)
B, C, H, W = x.shape # B, 768, 32, 32
x = x.reshape(B, C, H * W).transpose(2, 1)
return x
class Attention(nn.Module):
"""Attention"""
def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.):
"""Initialize Attention class"""
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = qk_scale or head_dim ** -0.5
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x):
"""Forward function"""
B, N, C = x.shape
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2]
attn = (q @ k.transpose(-2, -1)) * self.scale
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x, attn
class Block(nn.Module):
"""ViT Block"""
def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm):
"""Initialize Block class"""
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = Attention(
dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
def forward(self, x, return_attention=False):
"""Forward function"""
y, attn = self.attn(self.norm1(x))
x = x + self.drop_path(y)
x = x + self.drop_path(self.mlp(self.norm2(x)))
if return_attention:
return x, attn
return x
class PatchEmbed(nn.Module):
"""Image to Patch Embedding."""
def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768):
"""Initialize PatchEmbed class"""
super().__init__()
num_patches = (img_size // patch_size) * (img_size // patch_size)
self.img_size = img_size
self.patch_size = patch_size
self.num_patches = num_patches
self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
def forward(self, x):
"""Forward function"""
x = self.proj(x).flatten(2).transpose(1, 2)
return x
class VisionTransformer(pl.LightningModule):
"""Vision Transformer."""
def __init__(self, img_size=[224], patch_size=16, in_chans=3, num_classes=0, embed_dim=768, depth=12,
num_heads=12, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop_rate=0., attn_drop_rate=0.,
drop_path_rate=0., norm_layer=nn.LayerNorm, frozen_stages=-1, cfg=None, **kargs):
"""Initialize VisionTransformer class"""
super().__init__()
self.num_features = self.embed_dim = embed_dim
self.patch_size = patch_size
self.patch_embed = ConvStem(img_size=img_size[0], patch_size=16,
in_chans=in_chans, embed_dim=embed_dim)
num_patches = (img_size[0] // patch_size) ** 2
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim))
self.pos_drop = nn.Dropout(p=drop_rate)
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule
self.blocks = nn.ModuleList([
Block(
dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer)
for i in range(depth)])
self.norm = norm_layer(embed_dim)
# Classifier head
self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity()
trunc_normal_(self.pos_embed, std=.02)
trunc_normal_(self.cls_token, std=.02)
self.apply(self._init_weights)
self.frozen_stages = frozen_stages
if frozen_stages[0] == 0 and frozen_stages[1] > 0:
self.patch_embed.eval()
for param in self.patch_embed.parameters():
param.requires_grad = False
for i in range(max(self.frozen_stages[0], 1), self.frozen_stages[1] + 1):
self.blocks[i - 1].eval()
for param in self.blocks[i - 1].parameters():
param.requires_grad = False
def _init_weights(self, m):
"""Initialize weights"""
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
def interpolate_pos_encoding(self, x, w, h):
"""Interpolate Positional Encoding based on given resolution"""
npatch = x.shape[1] - 1
N = self.pos_embed.shape[1] - 1
if npatch == N and w == h:
return self.pos_embed
class_pos_embed = self.pos_embed[:, 0]
patch_pos_embed = self.pos_embed[:, 1:]
dim = x.shape[-1]
w0 = w // self.patch_embed.patch_size
h0 = h // self.patch_embed.patch_size
# we add a small number to avoid floating point error in the interpolation
# see discussion at https://github.com/facebookresearch/dino/issues/8
w0, h0 = w0 + 0.1, h0 + 0.1
patch_pos_embed = nn.functional.interpolate(
patch_pos_embed.reshape(1, int(math.sqrt(N)), int(math.sqrt(N)), dim).permute(0, 3, 1, 2),
scale_factor=(w0 / math.sqrt(N), h0 / math.sqrt(N)),
mode='bicubic',
)
assert int(w0) == patch_pos_embed.shape[-2] and int(h0) == patch_pos_embed.shape[-1]
patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
return torch.cat((class_pos_embed.unsqueeze(0), patch_pos_embed), dim=1)
def prepare_tokens(self, x):
"""Prepare tokens"""
B, _, w, h = x.shape
# [B, 1024, embed_dim]
x = self.patch_embed(x) # patch linear embedding
# cls_token: [1, 1, embed_dim]
# add the [CLS] token to the embed patch tokens
cls_tokens = self.cls_token.expand(B, -1, -1)
x = torch.cat((cls_tokens, x), dim=1)
# add positional encoding to each token
x = x + self.interpolate_pos_encoding(x, w, h)
return self.pos_drop(x)
def base_forward(self, x):
"""Base forward pass.
Output size: [B, 1024+1, embed_dim], where 1024=input_h/4 * input_w/4
"""
x = self.prepare_tokens(x)
for blk in self.blocks:
x = blk(x)
x = self.norm(x)
return x
def decoder_forward(self, x):
"""Decoder forward"""
x = self.decoder_embed(x)
for blk in self.decoder_blocks:
x = blk(x)
x = self.decoder_pred(x)
return x[:, 1:]
def decode_mask(self, x):
"""decode mask"""
b, n, c = x.shape
h1, w1 = int((n + 1e-6) ** 0.5), int((n + 1e-6) ** 0.5)
h0, w0 = int((c + 1e-6) ** 0.5), int((c + 1e-6) ** 0.5)
x = torch.einsum('nchwpq->nhpwqc', x.reshape(b, h1, w1, h0, w0, 1)).reshape(b, 1, h1 * h0, w1 * w0)
return x
def get_spatial_feat(self, x):
"""Turn token feature into spatial feature.
Args:
x (torch.Tensor): token feature in [B, 1024+1, 768]
Return:
x (torch.Tensor): feature map in (B, 768, H, W)
"""
b, n, c = x.shape
h, w = int((n - 1 + 1e-6) ** 0.5), int((n - 1 + 1e-6) ** 0.5)
x = x[:, 1:].transpose(2, 1).reshape(b, c, h, w)
return x
def forward(self, x):
"""Forward function"""
return self.base_forward(x)
def summary_feat(self, x):
"""summary feat"""
return x[:, 0]
def get_selfattention(self, x, idx):
"""get self attention"""
attentions = []
if idx < 0:
idx = len(self.blocks) + idx
x = self.prepare_tokens(x)
for i, blk in enumerate(self.blocks):
if i < idx:
x = blk(x)
else:
# return attention of the last block
x, attention = blk(x, return_attention=True)
attentions.append(attention)
attentions = torch.cat(attentions, 0)
return attentions
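# Illustrative sketch (not part of the original module): get_spatial_feat drops the [CLS]
# token and folds the remaining N-1 patch tokens back into a square feature map. The reshape
# below mirrors that method without instantiating the model; the token count and embedding
# width are assumed demo values.
def _example_tokens_to_spatial():
    """Reshape [B, 1 + H*W, C] token features into a [B, C, H, W] map (illustrative only)."""
    tokens = torch.randn(2, 1 + 16 * 16, 768)  # CLS token + 16x16 patch tokens
    b, n, c = tokens.shape
    h = w = int((n - 1 + 1e-6) ** 0.5)
    fmap = tokens[:, 1:].transpose(2, 1).reshape(b, c, h, w)
    assert fmap.shape == (2, 768, 16, 16)
    return fmap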
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/backbone/vision_transformer.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""CV backbone module."""
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/backbone/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" GCViT Backbone Module """
import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.models.layers import trunc_normal_, DropPath
def _to_channel_last(x):
"""
Args:
x: (B, C, H, W)
Returns:
x: (B, H, W, C)
"""
return x.permute(0, 2, 3, 1)
def _to_channel_first(x):
"""
Args:
x: (B, H, W, C)
Returns:
x: (B, C, H, W)
"""
return x.permute(0, 3, 1, 2)
def window_partition(x, window_size, h_w, w_w):
"""
Args:
x: (B, H, W, C)
window_size: window size
Returns:
local window features (num_windows*B, window_size, window_size, C)
"""
B, _, _, C = x.shape
x = x.view(B, h_w, window_size, w_w, window_size, C)
windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)
return windows
def window_reverse(windows, window_size, H, W, h_w, w_w):
"""
Args:
windows: local window features (num_windows*B, window_size, window_size, C)
window_size: Window size
H: Height of image
W: Width of image
Returns:
x: (B, H, W, C)
"""
# Casting to int leads to error
B = windows.shape[0] // (H * W // window_size // window_size)
x = windows.view(B, h_w, w_w, window_size, window_size, -1)
x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)
return x
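# Illustrative sketch (not part of the original module): window_partition and window_reverse
# are exact inverses, so partitioning a feature map into non-overlapping windows and stitching
# the windows back recovers the original tensor. Sizes below are assumed demo values
# (H = W = 8, window_size = 4).
def _example_window_roundtrip():
    """Partition a (B, H, W, C) map into windows and reverse it (illustrative only)."""
    x = torch.randn(2, 8, 8, 16)
    window_size, h_w, w_w = 4, 2, 2  # h_w = H // window_size, w_w = W // window_size
    windows = window_partition(x, window_size, h_w, w_w)  # (B * h_w * w_w, 4, 4, 16)
    x_back = window_reverse(windows, window_size, 8, 8, h_w, w_w)
    assert torch.equal(x, x_back)
    return windows.shape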
class Mlp(nn.Module):
"""
Multi-Layer Perceptron (MLP) block
"""
def __init__(self,
in_features,
hidden_features=None,
out_features=None,
act_layer=nn.GELU,
drop=0.):
"""
Args:
in_features: input features dimension.
hidden_features: hidden features dimension.
out_features: output features dimension.
act_layer: activation function.
drop: dropout rate.
"""
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
"""Forward function."""
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class SqueezeExcitation(nn.Module):
"""
Squeeze and excitation block
"""
def __init__(self,
inp,
oup,
expansion=0.25):
"""
Args:
inp: input features dimension.
oup: output features dimension.
expansion: expansion ratio.
"""
super().__init__()
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.fc = nn.Sequential(
nn.Linear(oup, int(inp * expansion), bias=False),
nn.GELU(),
nn.Linear(int(inp * expansion), oup, bias=False),
nn.Sigmoid()
)
def forward(self, x):
"""Forward function."""
b, c, _, _ = x.size()
y = self.avg_pool(x).view(b, c)
y = self.fc(y).view(b, c, 1, 1)
return x * y
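# Illustrative sketch (not part of the original module): the squeeze-and-excitation block
# global-average-pools each channel, passes the pooled vector through a small bottleneck MLP
# with a sigmoid, and re-weights the input channels with the result, so the spatial shape is
# preserved. The channel count below is an assumed demo value.
def _example_squeeze_excitation():
    """Run a SqueezeExcitation block on a dummy feature map (illustrative only)."""
    se = SqueezeExcitation(inp=32, oup=32)
    x = torch.randn(2, 32, 8, 8)
    y = se(x)
    assert y.shape == x.shape  # channel re-weighting keeps (B, C, H, W)
    return y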
class ReduceSize(nn.Module):
"""
Down-sampling block based on: "Hatamizadeh et al.,
Global Context Vision Transformers <https://arxiv.org/abs/2206.09959>"
"""
def __init__(self,
dim,
norm_layer=nn.LayerNorm,
keep_dim=False):
"""
Args:
dim: feature size dimension.
norm_layer: normalization layer.
keep_dim: bool argument for maintaining the resolution.
"""
super().__init__()
self.conv = nn.Sequential(
nn.Conv2d(dim, dim, 3, 1, 1,
groups=dim, bias=False),
nn.GELU(),
SqueezeExcitation(dim, dim),
nn.Conv2d(dim, dim, 1, 1, 0, bias=False),
)
if keep_dim:
dim_out = dim
else:
dim_out = 2 * dim
self.reduction = nn.Conv2d(dim, dim_out, 3, 2, 1, bias=False)
self.norm2 = norm_layer(dim_out)
self.norm1 = norm_layer(dim)
def forward(self, x):
"""Forward function."""
x = x.contiguous()
x = self.norm1(x)
x = _to_channel_first(x)
x = x + self.conv(x)
x = self.reduction(x)
x = _to_channel_last(x)
x = self.norm2(x)
return x
class PatchEmbed(nn.Module):
"""
Patch embedding block based on: "Hatamizadeh et al.,
Global Context Vision Transformers <https://arxiv.org/abs/2206.09959>"
"""
def __init__(self, in_chans=3, dim=96):
"""
Args:
in_chans: number of input channels.
dim: feature size dimension.
"""
super().__init__()
self.proj = nn.Conv2d(in_chans, dim, 3, 2, 1)
self.conv_down = ReduceSize(dim=dim, keep_dim=True)
def forward(self, x):
"""Forward function."""
x = self.proj(x)
x = _to_channel_last(x)
x = self.conv_down(x)
return x
class FeatExtract(nn.Module):
"""
Feature extraction block based on: "Hatamizadeh et al.,
Global Context Vision Transformers <https://arxiv.org/abs/2206.09959>"
"""
def __init__(self, dim, keep_dim=False):
"""
Args:
dim: feature size dimension.
keep_dim: bool argument for maintaining the resolution.
"""
super().__init__()
self.conv = nn.Sequential(
nn.Conv2d(dim, dim, 3, 1, 1,
groups=dim, bias=False),
nn.GELU(),
SqueezeExcitation(dim, dim),
nn.Conv2d(dim, dim, 1, 1, 0, bias=False),
)
if not keep_dim:
self.pool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.keep_dim = keep_dim
def forward(self, x):
"""Forward function."""
x = x.contiguous()
x = x + self.conv(x)
if not self.keep_dim:
x = self.pool(x)
return x
class WindowAttention(nn.Module):
"""Local window attention based on: "Liu et al.,
Swin Transformer: Hierarchical Vision Transformer using Shifted Windows
<https://arxiv.org/abs/2103.14030>"
"""
def __init__(self,
dim,
num_heads,
window_size,
qkv_bias=True,
qk_scale=None,
attn_drop=0.,
proj_drop=0.,
use_rel_pos_bias=True
):
"""
Args:
dim: feature size dimension.
num_heads: number of attention head.
window_size: window size.
qkv_bias: bool argument for query, key, value learnable bias.
qk_scale: bool argument to scaling query, key.
attn_drop: attention dropout rate.
proj_drop: output dropout rate.
use_rel_pos_bias: set bias for relative positional embedding
"""
super().__init__()
window_size = (window_size, window_size)
self.window_size = window_size
self.num_heads = num_heads
head_dim = torch.div(dim, num_heads, rounding_mode='floor')
self.fast_attn = hasattr(torch._C._nn, '_scaled_dot_product_attention') # pylint:disable=I1101
self.scale = qk_scale or head_dim ** -0.5
self.use_rel_pos_bias = use_rel_pos_bias
self.relative_position_bias_table = nn.Parameter(
torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads)) # 2*Wh-1 * 2*Ww-1, nH
# get pair-wise relative position index for each token inside the window
coords_h = torch.arange(self.window_size[0])
coords_w = torch.arange(self.window_size[1])
coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww
coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww
relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww
relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2
relative_coords[:, :, 0] += self.window_size[0] - 1 # shift to start from 0
relative_coords[:, :, 1] += self.window_size[1] - 1
relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww
self.register_buffer("relative_position_index", relative_position_index)
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
self.softmax = nn.Softmax(dim=-1)
def forward(self, x, q_global):
"""Forward function."""
B_, N, C = x.shape
head_dim = torch.div(C, self.num_heads, rounding_mode='floor')
qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, head_dim).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2]
if torch.onnx.is_in_onnx_export() or not self.fast_attn or self.use_rel_pos_bias:
q = q * self.scale
attn = (q @ k.transpose(-2, -1))
if self.use_rel_pos_bias:
relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view(
self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1],
-1) # Wh*Ww,Wh*Ww,nH
relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww
attn = attn + relative_position_bias.unsqueeze(0)
attn = self.softmax(attn)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B_, N, C)
else:
# Since Torch 1.14, scaled_dot_product_attention has been optimized for performance
x, _ = F._scaled_dot_product_attention(
q, k, v,
dropout_p=self.attn_drop.p,
)
x = x.transpose(1, 2).reshape(B_, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
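# Illustrative sketch (not part of the original module): the relative position bias lookup
# built in WindowAttention.__init__ maps every (query, key) pair inside a window to one of
# (2*Wh - 1) * (2*Ww - 1) learned bias entries. The tiny window below reproduces that index
# computation standalone; the window size and helper name are assumptions for the demo.
def _example_relative_position_index(window_size=2):
    """Recompute the (Wh*Ww, Wh*Ww) relative position index for a tiny window (illustrative only)."""
    coords_h = torch.arange(window_size)
    coords_w = torch.arange(window_size)
    coords = torch.stack(torch.meshgrid([coords_h, coords_w]))  # 2, Wh, Ww
    coords_flatten = torch.flatten(coords, 1)  # 2, Wh*Ww
    relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :]  # 2, Wh*Ww, Wh*Ww
    relative_coords = relative_coords.permute(1, 2, 0).contiguous()  # Wh*Ww, Wh*Ww, 2
    relative_coords[:, :, 0] += window_size - 1  # shift to start from 0
    relative_coords[:, :, 1] += window_size - 1
    relative_coords[:, :, 0] *= 2 * window_size - 1
    index = relative_coords.sum(-1)  # (Wh*Ww, Wh*Ww), values in [0, (2*Wh - 1) * (2*Ww - 1) - 1]
    assert index.shape == (window_size * window_size, window_size * window_size)
    assert int(index.max()) <= (2 * window_size - 1) ** 2 - 1
    return index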
class WindowAttentionGlobal(nn.Module):
"""Global window attention based on: "Hatamizadeh et al.,
Global Context Vision Transformers <https://arxiv.org/abs/2206.09959>"
"""
def __init__(self,
dim,
num_heads,
window_size,
qkv_bias=True,
qk_scale=None,
attn_drop=0.,
proj_drop=0.,
use_rel_pos_bias=True
):
"""
Args:
dim: feature size dimension.
num_heads: number of attention head.
window_size: window size.
qkv_bias: bool argument for query, key, value learnable bias.
qk_scale: bool argument to scaling query, key.
attn_drop: attention dropout rate.
proj_drop: output dropout rate.
use_rel_pos_bias: set bias for relative positional embedding
"""
super().__init__()
window_size = (window_size, window_size)
self.window_size = window_size
self.num_heads = num_heads
head_dim = torch.div(dim, num_heads, rounding_mode='floor')
self.scale = qk_scale or head_dim ** -0.5
self.fast_attn = hasattr(torch._C._nn, '_scaled_dot_product_attention') # pylint:disable=I1101
self.use_rel_pos_bias = use_rel_pos_bias
self.relative_position_bias_table = nn.Parameter(
torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads)) # 2*Wh-1 * 2*Ww-1, nH
# get pair-wise relative position index for each token inside the window
coords_h = torch.arange(self.window_size[0])
coords_w = torch.arange(self.window_size[1])
coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww
coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww
relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww
relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2
relative_coords[:, :, 0] += self.window_size[0] - 1 # shift to start from 0
relative_coords[:, :, 1] += self.window_size[1] - 1
relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww
self.register_buffer("relative_position_index", relative_position_index)
self.qkv = nn.Linear(dim, dim * 2, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
self.softmax = nn.Softmax(dim=-1)
def forward(self, x, q_global):
"""Forward function."""
B_, N, C = x.shape
B = q_global.shape[0]
head_dim = torch.div(C, self.num_heads, rounding_mode='floor')
B_dim = torch.div(B_, B, rounding_mode='floor')
kv = self.qkv(x).reshape(B_, N, 2, self.num_heads, head_dim).permute(2, 0, 3, 1, 4)
k, v = kv[0], kv[1]
q_global = q_global.repeat(1, B_dim, 1, 1, 1)
q = q_global.reshape(B_, self.num_heads, N, head_dim)
if torch.onnx.is_in_onnx_export() or not self.fast_attn or self.use_rel_pos_bias:
q = q * self.scale
attn = (q @ k.transpose(-2, -1))
if self.use_rel_pos_bias:
relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view(
self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1],
-1) # Wh*Ww,Wh*Ww,nH
relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww
attn = attn + relative_position_bias.unsqueeze(0)
attn = self.softmax(attn)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B_, N, C)
else:
# Since Torch 1.14, scaled_dot_product_attention has been optimized for performance
x, _ = F._scaled_dot_product_attention(
q, k, v,
dropout_p=self.attn_drop.p,
)
x = x.transpose(1, 2).reshape(B_, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
class GCViTBlock(nn.Module):
"""
GCViT block based on: "Hatamizadeh et al.,
Global Context Vision Transformers <https://arxiv.org/abs/2206.09959>"
"""
def __init__(self,
dim,
input_resolution,
num_heads,
window_size=7,
mlp_ratio=4.,
qkv_bias=True,
qk_scale=None,
drop=0.,
attn_drop=0.,
drop_path=0.,
act_layer=nn.GELU,
attention=WindowAttentionGlobal,
norm_layer=nn.LayerNorm,
layer_scale=None,
use_rel_pos_bias=True
):
"""
Args:
dim: feature size dimension.
input_resolution: input image resolution.
num_heads: number of attention head.
window_size: window size.
mlp_ratio: MLP ratio.
qkv_bias: bool argument for query, key, value learnable bias.
qk_scale: bool argument to scaling query, key.
drop: dropout rate.
attn_drop: attention dropout rate.
drop_path: drop path rate.
act_layer: activation function.
attention: attention block type.
norm_layer: normalization layer.
layer_scale: layer scaling coefficient.
use_rel_pos_bias: set bias for relative positional embedding
"""
super().__init__()
self.window_size = window_size
self.norm1 = norm_layer(dim)
self.attn = attention(dim,
num_heads=num_heads,
window_size=window_size,
qkv_bias=qkv_bias,
qk_scale=qk_scale,
attn_drop=attn_drop,
proj_drop=drop,
use_rel_pos_bias=use_rel_pos_bias
)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
self.mlp = Mlp(in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, drop=drop)
self.layer_scale = False
if layer_scale is not None and type(layer_scale) in [int, float]:
self.layer_scale = True
self.gamma1 = nn.Parameter(layer_scale * torch.ones(dim), requires_grad=True)
self.gamma2 = nn.Parameter(layer_scale * torch.ones(dim), requires_grad=True)
else:
self.gamma1 = 1.0
self.gamma2 = 1.0
inp_w = torch.div(input_resolution, window_size, rounding_mode='floor')
self.num_windows = int(inp_w * inp_w)
def forward(self, x, q_global):
"""Forward function."""
_, H, W, C = x.shape
shortcut = x
x = self.norm1(x)
h_w = torch.div(H, self.window_size, rounding_mode='floor')
w_w = torch.div(W, self.window_size, rounding_mode='floor')
x_windows = window_partition(x, self.window_size, h_w, w_w)
x_windows = x_windows.view(-1, self.window_size * self.window_size, C)
attn_windows = self.attn(x_windows, q_global)
x = window_reverse(attn_windows, self.window_size, H, W, h_w, w_w)
x = shortcut + self.drop_path(self.gamma1 * x)
x = x + self.drop_path(self.gamma2 * self.mlp(self.norm2(x)))
return x
class GlobalQueryGen(nn.Module):
"""Global query generator based on: "Hatamizadeh et al.,
Global Context Vision Transformers <https://arxiv.org/abs/2206.09959>"
"""
def __init__(self,
dim,
input_resolution,
image_resolution,
window_size,
num_heads):
"""
        For instance, with an input window dimension of 56 and an output window dimension of 7 at a
        down-sampling ratio of 2, the feature-extraction block is repeated log2(56/7) = 3 times.
        Please check Fig. 5 of the GC ViT paper for details.
        Args:
            dim: feature size dimension.
            input_resolution: input feature resolution.
            image_resolution: input image resolution.
            window_size: window size.
            num_heads: number of heads.
"""
super().__init__()
if input_resolution == image_resolution // 4:
self.to_q_global = nn.Sequential(
FeatExtract(dim, keep_dim=False),
FeatExtract(dim, keep_dim=False),
FeatExtract(dim, keep_dim=False),
)
elif input_resolution == image_resolution // 8:
self.to_q_global = nn.Sequential(
FeatExtract(dim, keep_dim=False),
FeatExtract(dim, keep_dim=False),
)
elif input_resolution == image_resolution // 16:
if window_size == input_resolution:
self.to_q_global = nn.Sequential(
FeatExtract(dim, keep_dim=True)
)
else:
self.to_q_global = nn.Sequential(
FeatExtract(dim, keep_dim=False)
)
elif input_resolution == image_resolution // 32:
self.to_q_global = nn.Sequential(
FeatExtract(dim, keep_dim=True)
)
self.num_heads = num_heads
self.N = window_size * window_size
self.dim_head = torch.div(dim, self.num_heads, rounding_mode='floor')
self.window_size = window_size
def forward(self, x):
"""Foward function."""
x = self.to_q_global(x)
B, _, H, W = x.shape
if self.window_size != H or self.window_size != W:
x = F.interpolate(x, size=(self.window_size, self.window_size), mode='bicubic')
x = _to_channel_last(x)
x = x.reshape(B, 1, self.N, self.num_heads, self.dim_head).permute(0, 1, 3, 2, 4)
return x
class GCViTLayer(nn.Module):
"""
GCViT layer based on: "Hatamizadeh et al.,
Global Context Vision Transformers <https://arxiv.org/abs/2206.09959>"
"""
def __init__(self,
dim,
depth,
input_resolution,
image_resolution,
num_heads,
window_size,
downsample=True,
mlp_ratio=4.,
qkv_bias=True,
qk_scale=None,
drop=0.,
attn_drop=0.,
drop_path=0.,
norm_layer=nn.LayerNorm,
layer_scale=None,
use_rel_pos_bias=True):
"""
Args:
dim: feature size dimension.
depth: number of layers in each stage.
input_resolution: input image resolution.
window_size: window size in each stage.
downsample: bool argument for down-sampling.
mlp_ratio: MLP ratio.
num_heads: number of heads in each stage.
qkv_bias: bool argument for query, key, value learnable bias.
qk_scale: bool argument to scaling query, key.
drop: dropout rate.
attn_drop: attention dropout rate.
drop_path: drop path rate.
norm_layer: normalization layer.
layer_scale: layer scaling coefficient.
use_rel_pos_bias: set bias for relative positional embedding
"""
super().__init__()
self.blocks = nn.ModuleList([
GCViTBlock(dim=dim,
num_heads=num_heads,
window_size=window_size,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
qk_scale=qk_scale,
attention=WindowAttention if (i % 2 == 0) else WindowAttentionGlobal,
drop=drop,
attn_drop=attn_drop,
drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,
norm_layer=norm_layer,
layer_scale=layer_scale,
input_resolution=input_resolution,
use_rel_pos_bias=use_rel_pos_bias)
for i in range(depth)])
self.downsample = None if not downsample else ReduceSize(dim=dim, norm_layer=norm_layer)
self.q_global_gen = GlobalQueryGen(dim, input_resolution, image_resolution, window_size, num_heads)
def forward(self, x):
"""Forward function."""
q_global = self.q_global_gen(_to_channel_first(x))
for blk in self.blocks:
x = blk(x, q_global)
if self.downsample is None:
return x
return self.downsample(x)
class GCViT(nn.Module):
"""
GCViT based on: "Hatamizadeh et al.,
Global Context Vision Transformers <https://arxiv.org/abs/2206.09959>"
"""
def __init__(self,
dim,
depths,
window_size,
mlp_ratio,
num_heads,
resolution=224,
drop_path_rate=0.2,
in_chans=3,
num_classes=1000,
qkv_bias=True,
qk_scale=None,
drop_rate=0.,
attn_drop_rate=0.,
norm_layer=nn.LayerNorm,
layer_scale=None,
use_rel_pos_bias=True,
**kwargs):
"""
Args:
dim: feature size dimension.
depths: number of layers in each stage.
window_size: window size in each stage.
mlp_ratio: MLP ratio.
num_heads: number of heads in each stage.
resolution: input image resolution.
drop_path_rate: drop path rate.
in_chans: number of input channels.
num_classes: number of classes.
qkv_bias: bool argument for query, key, value learnable bias.
qk_scale: bool argument to scaling query, key.
drop_rate: dropout rate.
attn_drop_rate: attention dropout rate.
norm_layer: normalization layer.
layer_scale: layer scaling coefficient.
use_rel_pos_bias: set bias for relative positional embedding
"""
super().__init__()
num_features = int(dim * 2 ** (len(depths) - 1))
self.num_classes = num_classes
self.patch_embed = PatchEmbed(in_chans=in_chans, dim=dim)
self.pos_drop = nn.Dropout(p=drop_rate)
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]
self.levels = nn.ModuleList()
for i in range(len(depths)):
level = GCViTLayer(dim=int(dim * 2 ** i),
depth=depths[i],
num_heads=num_heads[i],
window_size=window_size[i],
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate,
drop_path=dpr[sum(depths[:i]):sum(depths[:i + 1])],
norm_layer=norm_layer,
downsample=(i < len(depths) - 1),
layer_scale=layer_scale,
input_resolution=int(2 ** (-2 - i) * resolution),
image_resolution=resolution,
use_rel_pos_bias=use_rel_pos_bias)
self.levels.append(level)
self.norm = norm_layer(num_features)
self.avgpool = nn.AdaptiveAvgPool2d(1)
self.head = nn.Linear(num_features, num_classes) if num_classes > 0 else nn.Identity()
self.apply(self._init_weights)
def _init_weights(self, m):
"""Initialize weights"""
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
@torch.jit.ignore
def no_weight_decay_keywords(self):
"""Returns eywords to ignore during weight decay"""
return {'rpb'}
def forward_features(self, x):
"""Extract features"""
x = self.patch_embed(x)
x = self.pos_drop(x)
for level in self.levels:
x = level(x)
x = self.norm(x)
x = _to_channel_first(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
return x
def forward(self, x):
"""Forward function."""
x = self.forward_features(x)
x = self.head(x)
return x
def gc_vit_xxtiny(pretrained=False, **kwargs):
"""GCViT-XXTiny model."""
model = GCViT(depths=[2, 2, 6, 2],
num_heads=[2, 4, 8, 16],
window_size=[7, 7, 14, 7],
dim=64,
mlp_ratio=3,
drop_path_rate=0.2,
**kwargs)
if pretrained:
model.load_state_dict(torch.load(pretrained))
return model
def gc_vit_xtiny(pretrained=False, **kwargs):
"""GCViT-XTiny model."""
model = GCViT(depths=[3, 4, 6, 5],
num_heads=[2, 4, 8, 16],
window_size=[7, 7, 14, 7],
dim=64,
mlp_ratio=3,
drop_path_rate=0.2,
**kwargs)
if pretrained:
model.load_state_dict(torch.load(pretrained))
return model
def gc_vit_tiny(pretrained=False, **kwargs):
"""GCViT-Tiny model."""
model = GCViT(depths=[3, 4, 19, 5],
num_heads=[2, 4, 8, 16],
window_size=[7, 7, 14, 7],
dim=64,
mlp_ratio=3,
drop_path_rate=0.2,
**kwargs)
if pretrained:
model.load_state_dict(torch.load(pretrained))
return model
def gc_vit_small(pretrained=False, **kwargs):
"""GCViT-Small model."""
model = GCViT(depths=[3, 4, 19, 5],
num_heads=[3, 6, 12, 24],
window_size=[7, 7, 14, 7],
dim=96,
mlp_ratio=2,
drop_path_rate=0.3,
layer_scale=1e-5,
**kwargs)
if pretrained:
model.load_state_dict(torch.load(pretrained))
return model
def gc_vit_base(pretrained=False, **kwargs):
"""GCViT-Base model."""
model = GCViT(depths=[3, 4, 19, 5],
num_heads=[4, 8, 16, 32],
window_size=[7, 7, 14, 7],
dim=128,
mlp_ratio=2,
drop_path_rate=0.5,
layer_scale=1e-5,
**kwargs)
if pretrained:
model.load_state_dict(torch.load(pretrained))
return model
def gc_vit_large(pretrained=False, **kwargs):
"""GCViT-Large model."""
model = GCViT(depths=[3, 4, 19, 5],
num_heads=[6, 12, 24, 48],
window_size=[7, 7, 14, 7],
dim=192,
mlp_ratio=2,
drop_path_rate=0.5,
layer_scale=1e-5,
**kwargs)
if pretrained:
model.load_state_dict(torch.load(pretrained))
return model
def gc_vit_base_384(pretrained=False, **kwargs):
"""GCViT-Base model with image resolution of 384."""
model = GCViT(depths=[3, 4, 19, 5],
num_heads=[4, 8, 16, 32],
window_size=[12, 12, 24, 12],
dim=128,
mlp_ratio=2,
drop_path_rate=0.5,
layer_scale=1e-5,
resolution=384,
**kwargs)
if pretrained:
model.load_state_dict(torch.load(pretrained))
return model
def gc_vit_large_384(pretrained=False, **kwargs):
"""GCViT-Large model with image resolution of 384."""
model = GCViT(depths=[3, 4, 19, 5],
num_heads=[6, 12, 24, 48],
window_size=[12, 12, 24, 12],
dim=192,
mlp_ratio=2,
drop_path_rate=0.5,
layer_scale=1e-5,
resolution=384,
**kwargs)
if pretrained:
model.load_state_dict(torch.load(pretrained))
return model
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/backbone/gc_vit.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Point Cloud root module."""
| tao_pytorch_backend-main | nvidia_tao_pytorch/pointcloud/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PointPillars root module."""
| tao_pytorch_backend-main | nvidia_tao_pytorch/pointcloud/pointpillars/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tools for PointPillars."""
| tao_pytorch_backend-main | nvidia_tao_pytorch/pointcloud/pointpillars/tools/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Evaluation utils."""
| tao_pytorch_backend-main | nvidia_tao_pytorch/pointcloud/pointpillars/tools/eval_utils/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Evaluation utils for PointPillars."""
import pickle
import time
import numpy as np
import torch
import tqdm
import matplotlib.pyplot as plt
from nvidia_tao_pytorch.pointcloud.pointpillars.pcdet.models import load_data_to_gpu
from nvidia_tao_pytorch.pointcloud.pointpillars.pcdet.utils import common_utils
def statistics_info(cfg, ret_dict, metric, disp_dict):
"""Statistics infomation."""
for cur_thresh in cfg.model.post_processing.recall_thresh_list:
metric['recall_roi_%s' % str(cur_thresh)] += ret_dict.get('roi_%s' % str(cur_thresh), 0)
metric['recall_rcnn_%s' % str(cur_thresh)] += ret_dict.get('rcnn_%s' % str(cur_thresh), 0)
metric['gt_num'] += ret_dict.get('gt', 0)
min_thresh = cfg.model.post_processing.recall_thresh_list[0]
disp_dict['recall_%s' % str(min_thresh)] = \
'(%d, %d) / %d' % (metric['recall_roi_%s' % str(min_thresh)], metric['recall_rcnn_%s' % str(min_thresh)], metric['gt_num'])
def eval_one_epoch(cfg, model, dataloader, epoch_id, logger, dist_test=False, save_to_file=False, result_dir=None):
"""Evaluate on one epoch."""
result_dir.mkdir(parents=True, exist_ok=True)
final_output_dir = result_dir / "detected_labels"
if save_to_file:
final_output_dir.mkdir(parents=True, exist_ok=True)
metric = {
'gt_num': 0,
}
for cur_thresh in cfg.model.post_processing.recall_thresh_list:
metric['recall_roi_%s' % str(cur_thresh)] = 0
metric['recall_rcnn_%s' % str(cur_thresh)] = 0
dataset = dataloader.dataset
class_names = dataset.class_names
det_annos = []
logger.info('*************** EPOCH %s EVALUATION *****************' % epoch_id)
if dist_test:
num_gpus = torch.cuda.device_count()
local_rank = cfg.LOCAL_RANK % num_gpus
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[local_rank], broadcast_buffers=False)
model.eval()
if cfg.LOCAL_RANK == 0:
progress_bar = tqdm.tqdm(total=len(dataloader), leave=True, desc='eval', dynamic_ncols=True)
start_time = time.time()
for _i, batch_dict in enumerate(dataloader):
load_data_to_gpu(batch_dict)
with torch.no_grad():
pred_dicts, ret_dict = model(batch_dict)
disp_dict = {}
statistics_info(cfg, ret_dict, metric, disp_dict)
annos = dataset.generate_prediction_dicts(
batch_dict, pred_dicts, class_names,
output_path=final_output_dir if save_to_file else None
)
det_annos += annos
if cfg.LOCAL_RANK == 0:
progress_bar.set_postfix(disp_dict)
progress_bar.update()
if cfg.LOCAL_RANK == 0:
progress_bar.close()
if dist_test:
world_size = common_utils.get_dist_info()[1]
det_annos = common_utils.merge_results_dist(det_annos, len(dataset))
metric = common_utils.merge_results_dist([metric], world_size)
logger.info('*************** Performance of EPOCH %s *****************' % epoch_id)
sec_per_example = (time.time() - start_time) / len(dataloader.dataset)
    logger.info('Label generation finished (sec_per_example: %.4f second).' % sec_per_example)
if cfg.LOCAL_RANK != 0:
return {}
ret_dict = {}
if dist_test:
for key, _val in metric[0].items():
for k in range(1, world_size):
metric[0][key] += metric[k][key]
metric = metric[0]
gt_num_cnt = metric['gt_num']
for cur_thresh in cfg.model.post_processing.recall_thresh_list:
cur_roi_recall = metric['recall_roi_%s' % str(cur_thresh)] / max(gt_num_cnt, 1)
cur_rcnn_recall = metric['recall_rcnn_%s' % str(cur_thresh)] / max(gt_num_cnt, 1)
logger.info('recall_roi_%s: %f' % (cur_thresh, cur_roi_recall))
logger.info('recall_rcnn_%s: %f' % (cur_thresh, cur_rcnn_recall))
ret_dict['recall/roi_%s' % str(cur_thresh)] = cur_roi_recall
ret_dict['recall/rcnn_%s' % str(cur_thresh)] = cur_rcnn_recall
total_pred_objects = 0
for anno in det_annos:
        total_pred_objects += len(anno['name'])
logger.info('Average predicted number of objects(%d samples): %.3f'
% (len(det_annos), total_pred_objects / max(1, len(det_annos))))
with open(result_dir / 'result.pkl', 'wb') as f:
pickle.dump(det_annos, f)
result_str, result_dict = dataset.evaluation(
det_annos, class_names,
eval_metric=cfg.model.post_processing.eval_metric,
output_path=final_output_dir
)
logger.info(result_str)
ret_dict.update(result_dict)
    logger.info('Result is saved to %s' % result_dir)
logger.info('****************Evaluation done.*****************')
return ret_dict
def view_points(points: np.ndarray, view: np.ndarray, normalize: bool) -> np.ndarray:
"""
This is a helper class that maps 3d points to a 2d plane. It can be used to implement both perspective and
orthographic projections. It first applies the dot product between the points and the view. By convention,
the view should be such that the data is projected onto the first 2 axis. It then optionally applies a
normalization along the third dimension.
For a perspective projection the view should be a 3x3 camera matrix, and normalize=True
For an orthographic projection with translation the view is a 3x4 matrix and normalize=False
For an orthographic projection without translation the view is a 3x3 matrix (optionally 3x4 with last columns
all zeros) and normalize=False
:param points: <np.float32: 3, n> Matrix of points, where each point (x, y, z) is along each column.
:param view: <np.float32: n, n>. Defines an arbitrary projection (n <= 4).
The projection should be such that the corners are projected onto the first 2 axis.
:param normalize: Whether to normalize the remaining coordinate (along the third axis).
:return: <np.float32: 3, n>. Mapped point. If normalize=False, the third coordinate is the height.
"""
assert view.shape[0] <= 4
assert view.shape[1] <= 4
assert points.shape[0] == 3
viewpad = np.eye(4)
viewpad[:view.shape[0], :view.shape[1]] = view
nbr_points = points.shape[1]
# Do operation in homogenous coordinates.
points = np.concatenate((points, np.ones((1, nbr_points))))
points = np.dot(viewpad, points)
points = points[:3, :]
if normalize:
points = points / points[2:3, :].repeat(3, 0).reshape(3, nbr_points)
return points
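# Illustrative sketch (not part of the original module): with a 3x3 camera intrinsic matrix and
# normalize=True, view_points performs a standard pinhole projection. The intrinsics and 3D
# points below are made-up demo values, not calibration data from any real sensor.
def _example_view_points():
    """Project two 3D points with a toy pinhole camera matrix (illustrative only)."""
    points = np.array([[0.0, 2.0],
                       [0.0, 2.0],
                       [4.0, 4.0]])  # (3, n): two points at depth 4
    intrinsics = np.array([[100.0, 0.0, 50.0],
                           [0.0, 100.0, 50.0],
                           [0.0, 0.0, 1.0]])
    uv = view_points(points, intrinsics, normalize=True)
    # the point on the optical axis lands on the principal point (50, 50),
    # the offset point at (2, 2, 4) lands at (100, 100)
    assert np.allclose(uv[:2, 0], [50.0, 50.0]) and np.allclose(uv[:2, 1], [100.0, 100.0])
    return uv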
def check_numpy_to_torch(x):
"""Check and convert numpy array to torch tensor."""
if isinstance(x, np.ndarray):
return torch.from_numpy(x).float(), True
return x, False
def rotate_points_along_z(points, angle):
"""
Args:
points: (B, N, 3 + C)
angle: (B), angle along z-axis, angle increases x ==> y
    Returns:
        points_rot: (B, N, 3 + C), points rotated around the z-axis.
    """
points, is_numpy = check_numpy_to_torch(points)
angle, _ = check_numpy_to_torch(angle)
cosa = torch.cos(angle)
sina = torch.sin(angle)
zeros = angle.new_zeros(points.shape[0])
ones = angle.new_ones(points.shape[0])
rot_matrix = torch.stack((
cosa, sina, zeros,
-sina, cosa, zeros,
zeros, zeros, ones
), dim=1).view(-1, 3, 3).float()
points_rot = torch.matmul(points[:, :, 0:3], rot_matrix)
points_rot = torch.cat((points_rot, points[:, :, 3:]), dim=-1)
return points_rot.numpy() if is_numpy else points_rot
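# Illustrative sketch (not part of the original module): rotate_points_along_z applies a
# right-handed rotation about the z-axis, so a positive angle moves the +x axis towards +y.
# The single test point below is an assumed demo value.
def _example_rotate_points_along_z():
    """Rotate the x-axis unit vector by 90 degrees about z (illustrative only)."""
    points = np.array([[[1.0, 0.0, 0.0]]])  # (B=1, N=1, 3)
    rotated = rotate_points_along_z(points, np.array([np.pi / 2]))
    assert np.allclose(rotated[0, 0], [0.0, 1.0, 0.0], atol=1e-6)
    return rotated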
def boxes_to_corners_3d(boxes3d):
"""
7 -------- 4
/| /|
6 -------- 5 .
| | | |
. 3 -------- 0
|/ |/
2 -------- 1
Args:
boxes3d: (N, 7) [x, y, z, dx, dy, dz, heading], (x, y, z) is the box center
    Returns:
        corners3d: (N, 8, 3), corner coordinates of each box.
    """
boxes3d, is_numpy = check_numpy_to_torch(boxes3d)
template = boxes3d.new_tensor((
[1, 1, -1], [1, -1, -1], [-1, -1, -1], [-1, 1, -1],
[1, 1, 1], [1, -1, 1], [-1, -1, 1], [-1, 1, 1],
)) / 2
corners3d = boxes3d[:, None, 3:6].repeat(1, 8, 1) * template[None, :, :]
corners3d = rotate_points_along_z(corners3d.view(-1, 8, 3), boxes3d[:, 6]).view(-1, 8, 3)
corners3d += boxes3d[:, None, 0:3]
return corners3d.numpy() if is_numpy else corners3d
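# Illustrative sketch (not part of the original module): boxes_to_corners_3d scales the
# unit-cube corner template by the box dimensions, rotates it by the heading, and translates
# it to the box centre. The axis-aligned box below is an assumed demo value.
def _example_boxes_to_corners_3d():
    """Expand one (x, y, z, dx, dy, dz, heading) box into its 8 corners (illustrative only)."""
    boxes = np.array([[0.0, 0.0, 0.0, 2.0, 4.0, 1.0, 0.0]])
    corners = boxes_to_corners_3d(boxes)  # (1, 8, 3)
    # with zero heading the first corner is simply (dx/2, dy/2, -dz/2)
    assert np.allclose(corners[0, 0], [1.0, 2.0, -0.5])
    return corners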
def draw_box(box, axis, view, colors, linewidth):
"""Draw box."""
# box: (3, 4), append first point to form a loop
x = np.concatenate((box[0, :], box[0, :1]), axis=-1)
y = np.concatenate((box[1, :], box[1, :1]), axis=-1)
axis.plot(
x, y,
color=colors[0],
linewidth=linewidth
)
def visual(points, gt_anno, det, det_scores, frame_id, eval_range=35, conf_th=0.1):
"""Visualization."""
_, ax = plt.subplots(1, 1, figsize=(9, 9), dpi=200)
# points
points = view_points(points[:3, :], np.eye(4), normalize=False)
dists = np.sqrt(np.sum(points[:2, :] ** 2, axis=0))
colors = np.minimum(1, dists / eval_range)
ax.scatter(points[0, :], points[1, :], c=colors, s=0.2)
# (B, 8, 3)
boxes_gt = boxes_to_corners_3d(gt_anno)
# Show GT boxes.
for box in boxes_gt:
# (8, 3)
bev = box[4:, :]
bev = view_points(bev.transpose(), np.eye(4), normalize=False)
draw_box(bev, ax, view=np.eye(4), colors=('r', 'r', 'r'), linewidth=2)
# Show EST boxes.
if len(det) == 0:
plt.axis('off')
plt.savefig(frame_id + ".png")
plt.close()
return
boxes_est = boxes_to_corners_3d(det)
for idx, box in enumerate(boxes_est):
if det_scores[idx] < conf_th:
continue
bev = box[4:, :]
bev = view_points(bev.transpose(), np.eye(4), normalize=False)
draw_box(bev, ax, view=np.eye(4), colors=('g', 'g', 'g'), linewidth=1)
axes_limit = eval_range + 3 # Slightly bigger to include boxes that extend beyond the range.
ax.set_xlim(-axes_limit, axes_limit)
ax.set_ylim(-axes_limit, axes_limit)
plt.axis('off')
plt.savefig(frame_id + ".png")
plt.close()
def infer_one_epoch(
cfg, model, dataloader,
logger, save_to_file=False,
result_dir=None
):
"""Do inference on one epoch."""
result_dir.mkdir(parents=True, exist_ok=True)
final_output_dir = result_dir / "detected_labels"
if save_to_file:
final_output_dir.mkdir(parents=True, exist_ok=True)
image_output_dir = result_dir / "detected_boxes"
image_output_dir.mkdir(parents=True, exist_ok=True)
dataset = dataloader.dataset
class_names = dataset.class_names
det_annos = []
model.eval()
if cfg.LOCAL_RANK == 0:
progress_bar = tqdm.tqdm(total=len(dataloader), leave=True, desc='infer', dynamic_ncols=True)
for _i, batch_dict in enumerate(dataloader):
load_data_to_gpu(batch_dict)
with torch.no_grad():
pred_dicts, ret_dict = model(batch_dict)
disp_dict = {}
annos = dataset.generate_prediction_dicts(
batch_dict, pred_dicts, class_names,
output_path=final_output_dir if save_to_file else None
)
det_annos += annos
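        # Visualize the frame in BEV: drop the batch-index column and pass the points as (C, N).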
visual(
batch_dict['points'].cpu().numpy()[:, 1:].transpose(),
batch_dict["gt_boxes"][0].cpu().numpy()[:, :7],
pred_dicts[0]['pred_boxes'].cpu().numpy(),
pred_dicts[0]['pred_scores'].cpu().numpy(),
str(image_output_dir / batch_dict['frame_id'][0]),
eval_range=100,
conf_th=cfg.inference.viz_conf_thresh
)
if cfg.LOCAL_RANK == 0:
progress_bar.set_postfix(disp_dict)
progress_bar.update()
if cfg.LOCAL_RANK == 0:
progress_bar.close()
ret_dict = {}
with open(result_dir / 'result.pkl', 'wb') as f:
pickle.dump(det_annos, f)
    logger.info('Result is saved to %s' % result_dir)
logger.info('****************Inference done.*****************')
return ret_dict
def infer_one_epoch_trt(
cfg, model, dataloader, logger,
save_to_file=False, result_dir=None
):
"""Do inference on one epoch with TensorRT engine."""
result_dir.mkdir(parents=True, exist_ok=True)
final_output_dir = result_dir / "detected_labels"
if save_to_file:
final_output_dir.mkdir(parents=True, exist_ok=True)
image_output_dir = result_dir / "detected_boxes"
image_output_dir.mkdir(parents=True, exist_ok=True)
dataset = dataloader.dataset
class_names = dataset.class_names
det_annos = []
if cfg.LOCAL_RANK == 0:
progress_bar = tqdm.tqdm(total=len(dataloader), leave=True, desc='infer', dynamic_ncols=True)
for _i, batch_dict in enumerate(dataloader):
load_data_to_gpu(batch_dict)
with torch.no_grad():
points = batch_dict['points']
batch_size = batch_dict['batch_size']
points_np, num_points_np = sparse_to_dense(points, batch_size)
# Do infer
outputs_final = model(
{
"points": points_np,
"num_points": num_points_np,
}
)
pred_dicts = []
for output_final in outputs_final:
pred_dict = {'pred_boxes': [], 'pred_scores': [], 'pred_labels': []}
for box in output_final:
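                # Each TensorRT output row appears to hold [x, y, z, dx, dy, dz, heading, score, label];
                # rows whose trailing (label) value is negative are padding and are skipped.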
if box[-1] > -0.5:
pred_dict['pred_boxes'].append(torch.Tensor(box[:7]))
pred_dict['pred_scores'].append(torch.Tensor(np.array([box[7]]))[0])
pred_dict['pred_labels'].append(torch.Tensor(np.array([box[8]]))[0])
if len(pred_dict['pred_boxes']) > 0:
pred_dict['pred_boxes'] = torch.stack(pred_dict['pred_boxes'])
pred_dict['pred_scores'] = torch.stack(pred_dict['pred_scores'])
pred_dict['pred_labels'] = (torch.stack(pred_dict['pred_labels']) + 0.01).int()
else:
pred_dict['pred_boxes'] = torch.zeros((0, 7)).float().cuda()
pred_dict['pred_scores'] = torch.zeros((0, )).float().cuda()
pred_dict['pred_labels'] = torch.zeros((0,)).int().cuda()
pred_dicts.append(pred_dict)
annos = dataset.generate_prediction_dicts(
batch_dict, pred_dicts, class_names,
output_path=final_output_dir if save_to_file else None
)
det_annos += annos
for pdi, _ in enumerate(pred_dicts):
visual(
points_np[pdi].transpose(),
batch_dict["gt_boxes"][pdi].cpu().numpy()[:, :7],
pred_dicts[pdi]['pred_boxes'].cpu().numpy(),
pred_dicts[pdi]['pred_scores'].cpu().numpy(),
str(image_output_dir / batch_dict['frame_id'][pdi]),
eval_range=60,
conf_th=cfg.inference.viz_conf_thresh
)
if cfg.LOCAL_RANK == 0:
disp_dict = {}
progress_bar.set_postfix(disp_dict)
progress_bar.update()
if cfg.LOCAL_RANK == 0:
progress_bar.close()
ret_dict = {}
with open(result_dir / 'result.pkl', 'wb') as f:
pickle.dump(det_annos, f)
    logger.info('Result is saved to %s' % result_dir)
logger.info('****************Inference done.*****************')
return ret_dict
def sparse_to_dense(points, batch_size):
"""Convert sparse points to dense format."""
points = points.cpu().numpy()
points_dense = []
num_points_dense = []
for b in range(batch_size):
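        # The first column of `points` holds the batch index; keep only this frame's point features.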
points_per_frame = np.copy(points[points[:, 0] == b][:, 1:])
num_points_ = points_per_frame.shape[0]
points_dense.append(points_per_frame)
num_points_dense.append(num_points_)
return points_dense, num_points_dense
def eval_one_epoch_trt(cfg, model, dataloader, logger, dist_test=False, save_to_file=False, result_dir=None):
"""Do evaluation on one epoch with TensorRT engine."""
result_dict = dict()
result_dir.mkdir(parents=True, exist_ok=True)
final_output_dir = result_dir / "detected_labels"
if save_to_file:
final_output_dir.mkdir(parents=True, exist_ok=True)
metric = {
'gt_num': 0,
}
for cur_thresh in cfg.model.post_processing.recall_thresh_list:
metric['recall_roi_%s' % str(cur_thresh)] = 0
metric['recall_rcnn_%s' % str(cur_thresh)] = 0
dataset = dataloader.dataset
class_names = dataset.class_names
det_annos = []
logger.info('*************** EVALUATION *****************')
total_time = 0
if cfg.LOCAL_RANK == 0:
progress_bar = tqdm.tqdm(total=len(dataloader), leave=True, desc='eval', dynamic_ncols=True)
start_time = time.time()
for _, batch_dict in enumerate(dataloader):
load_data_to_gpu(batch_dict)
with torch.no_grad():
points = batch_dict['points']
batch_size = batch_dict['batch_size']
torch.cuda.synchronize()
start = time.time()
points_np, num_points_np = sparse_to_dense(points, batch_size)
# Do infer
outputs_final = model(
{
"points": points_np,
"num_points": num_points_np,
}
)
torch.cuda.synchronize()
end = time.time()
total_time += end - start
pred_dicts = []
for output_final in outputs_final:
pred_dict = {'pred_boxes': [], 'pred_scores': [], 'pred_labels': []}
for box in output_final:
if box[-1] > -0.5:
pred_dict['pred_boxes'].append(torch.Tensor(box[:7]))
pred_dict['pred_scores'].append(torch.Tensor(np.array([box[7]]))[0])
pred_dict['pred_labels'].append(torch.Tensor(np.array([box[8]]))[0])
else:
break
if len(pred_dict['pred_boxes']) > 0:
pred_dict['pred_boxes'] = torch.stack(pred_dict['pred_boxes'])
pred_dict['pred_scores'] = torch.stack(pred_dict['pred_scores'])
pred_dict['pred_labels'] = (torch.stack(pred_dict['pred_labels']) + 0.01).int()
else:
pred_dict['pred_boxes'] = torch.zeros((0, 7)).float().cuda()
pred_dict['pred_scores'] = torch.zeros((0, )).float().cuda()
pred_dict['pred_labels'] = torch.zeros((0,)).int().cuda()
pred_dicts.append(pred_dict)
annos = dataset.generate_prediction_dicts(
batch_dict, pred_dicts, class_names,
output_path=final_output_dir if save_to_file else None
)
det_annos += annos
if cfg.LOCAL_RANK == 0:
progress_bar.update()
if cfg.LOCAL_RANK == 0:
progress_bar.close()
logger.info('*************** Performance *****************')
sec_per_example = (time.time() - start_time) / len(dataloader.dataset)
    logger.info('Label generation finished (sec_per_example: %.4f second).' % sec_per_example)
if cfg.LOCAL_RANK != 0:
return result_dict
gt_num_cnt = metric['gt_num']
for cur_thresh in cfg.model.post_processing.recall_thresh_list:
cur_roi_recall = metric['recall_roi_%s' % str(cur_thresh)] / max(gt_num_cnt, 1)
cur_rcnn_recall = metric['recall_rcnn_%s' % str(cur_thresh)] / max(gt_num_cnt, 1)
logger.info('recall_roi_%s: %f' % (cur_thresh, cur_roi_recall))
logger.info('recall_rcnn_%s: %f' % (cur_thresh, cur_rcnn_recall))
total_pred_objects = 0
for anno in det_annos:
        total_pred_objects += len(anno['name'])
logger.info('Average predicted number of objects(%d samples): %.3f'
% (len(det_annos), total_pred_objects / max(1, len(det_annos))))
with open(result_dir / 'result.pkl', 'wb') as f:
pickle.dump(det_annos, f)
result_str, result_dict = dataset.evaluation(
det_annos, class_names,
eval_metric=cfg.model.post_processing.eval_metric,
output_path=final_output_dir
)
logger.info(result_str)
logger.info('**********Eval time per frame: %.3f ms**********' % (total_time / len(dataloader) * 1000))
    logger.info('Result is saved to %s' % result_dir)
logger.info('****************Evaluation done.*****************')
return result_dict
if __name__ == '__main__':
pass
| tao_pytorch_backend-main | nvidia_tao_pytorch/pointcloud/pointpillars/tools/eval_utils/eval_utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Visualization utils."""
| tao_pytorch_backend-main | nvidia_tao_pytorch/pointcloud/pointpillars/tools/visual_utils/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Visualization utils."""
import mayavi.mlab as mlab
import numpy as np
import torch
box_colormap = [
[1, 1, 1],
[0, 1, 0],
[0, 1, 1],
[1, 1, 0],
]
def check_numpy_to_torch(x):
"""Check and convert numpy array to torch tensor."""
if isinstance(x, np.ndarray):
return torch.from_numpy(x).float(), True
return x, False
def rotate_points_along_z(points, angle):
"""
Args:
points: (B, N, 3 + C)
angle: (B), angle along z-axis, angle increases x ==> y
Returns:
"""
points, is_numpy = check_numpy_to_torch(points)
angle, _ = check_numpy_to_torch(angle)
cosa = torch.cos(angle)
sina = torch.sin(angle)
zeros = angle.new_zeros(points.shape[0])
ones = angle.new_ones(points.shape[0])
rot_matrix = torch.stack((
cosa, sina, zeros,
-sina, cosa, zeros,
zeros, zeros, ones
), dim=1).view(-1, 3, 3).float()
points_rot = torch.matmul(points[:, :, 0:3], rot_matrix)
points_rot = torch.cat((points_rot, points[:, :, 3:]), dim=-1)
return points_rot.numpy() if is_numpy else points_rot
def boxes_to_corners_3d(boxes3d):
"""
7 -------- 4
/| /|
6 -------- 5 .
| | | |
. 3 -------- 0
|/ |/
2 -------- 1
Args:
boxes3d: (N, 7) [x, y, z, dx, dy, dz, heading], (x, y, z) is the box center
Returns:
"""
boxes3d, is_numpy = check_numpy_to_torch(boxes3d)
template = boxes3d.new_tensor((
[1, 1, -1], [1, -1, -1], [-1, -1, -1], [-1, 1, -1],
[1, 1, 1], [1, -1, 1], [-1, -1, 1], [-1, 1, 1],
)) / 2
corners3d = boxes3d[:, None, 3:6].repeat(1, 8, 1) * template[None, :, :]
corners3d = rotate_points_along_z(corners3d.view(-1, 8, 3), boxes3d[:, 6]).view(-1, 8, 3)
corners3d += boxes3d[:, None, 0:3]
return corners3d.numpy() if is_numpy else corners3d
def visualize_pts(pts, fig=None, bgcolor=(0, 0, 0), fgcolor=(1.0, 1.0, 1.0),
show_intensity=False, size=(600, 600), draw_origin=True):
"""Visualize points."""
if not isinstance(pts, np.ndarray):
pts = pts.cpu().numpy()
if fig is None:
fig = mlab.figure(figure=None, bgcolor=bgcolor, fgcolor=fgcolor, engine=None, size=size)
if show_intensity:
mlab.points3d(pts[:, 0], pts[:, 1], pts[:, 2], pts[:, 3], mode='point', colormap='gnuplot', scale_factor=1, figure=fig)
else:
mlab.points3d(pts[:, 0], pts[:, 1], pts[:, 2], mode='point', colormap='gnuplot', scale_factor=1, figure=fig)
if draw_origin:
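        # Mark the origin with a small cube and draw the x, y and z axes as blue, green and red segments.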
mlab.points3d(0, 0, 0, color=(1, 1, 1), mode='cube', scale_factor=0.2)
mlab.plot3d([0, 3], [0, 0], [0, 0], color=(0, 0, 1), tube_radius=0.1)
mlab.plot3d([0, 0], [0, 3], [0, 0], color=(0, 1, 0), tube_radius=0.1)
mlab.plot3d([0, 0], [0, 0], [0, 3], color=(1, 0, 0), tube_radius=0.1)
return fig
def draw_sphere_pts(pts, color=(0, 1, 0), fig=None, bgcolor=(0, 0, 0), scale_factor=0.2):
"""Draw sphere points."""
if not isinstance(pts, np.ndarray):
pts = pts.cpu().numpy()
if fig is None:
fig = mlab.figure(figure=None, bgcolor=bgcolor, fgcolor=None, engine=None, size=(600, 600))
if isinstance(color, np.ndarray) and color.shape[0] == 1:
color = color[0]
color = (color[0] / 255.0, color[1] / 255.0, color[2] / 255.0)
if isinstance(color, np.ndarray):
pts_color = np.zeros((pts.__len__(), 4), dtype=np.uint8)
pts_color[:, 0:3] = color
pts_color[:, 3] = 255
G = mlab.points3d(pts[:, 0], pts[:, 1], pts[:, 2], np.arange(0, pts_color.__len__()), mode='sphere',
scale_factor=scale_factor, figure=fig)
G.glyph.color_mode = 'color_by_scalar'
G.glyph.scale_mode = 'scale_by_vector'
G.module_manager.scalar_lut_manager.lut.table = pts_color
else:
mlab.points3d(pts[:, 0], pts[:, 1], pts[:, 2], mode='sphere', color=color,
colormap='gnuplot', scale_factor=scale_factor, figure=fig)
mlab.points3d(0, 0, 0, color=(1, 1, 1), mode='cube', scale_factor=0.2)
mlab.plot3d([0, 3], [0, 0], [0, 0], color=(0, 0, 1), line_width=3, tube_radius=None, figure=fig)
mlab.plot3d([0, 0], [0, 3], [0, 0], color=(0, 1, 0), line_width=3, tube_radius=None, figure=fig)
mlab.plot3d([0, 0], [0, 0], [0, 3], color=(1, 0, 0), line_width=3, tube_radius=None, figure=fig)
return fig
def draw_grid(x1, y1, x2, y2, fig, tube_radius=None, color=(0.5, 0.5, 0.5)):
"""Draw grid."""
mlab.plot3d([x1, x1], [y1, y2], [0, 0], color=color, tube_radius=tube_radius, line_width=1, figure=fig)
mlab.plot3d([x2, x2], [y1, y2], [0, 0], color=color, tube_radius=tube_radius, line_width=1, figure=fig)
mlab.plot3d([x1, x2], [y1, y1], [0, 0], color=color, tube_radius=tube_radius, line_width=1, figure=fig)
mlab.plot3d([x1, x2], [y2, y2], [0, 0], color=color, tube_radius=tube_radius, line_width=1, figure=fig)
return fig
def draw_multi_grid_range(fig, grid_size=20, bv_range=(-60, -60, 60, 60)):
"""Draw multiple grid range."""
for x in range(bv_range[0], bv_range[2], grid_size):
for y in range(bv_range[1], bv_range[3], grid_size):
fig = draw_grid(x, y, x + grid_size, y + grid_size, fig)
return fig
def draw_scenes(points, gt_boxes=None, ref_boxes=None, ref_scores=None, ref_labels=None):
"""Draw scenes."""
if not isinstance(points, np.ndarray):
points = points.cpu().numpy()
if ref_boxes is not None and not isinstance(ref_boxes, np.ndarray):
ref_boxes = ref_boxes.cpu().numpy()
if gt_boxes is not None and not isinstance(gt_boxes, np.ndarray):
gt_boxes = gt_boxes.cpu().numpy()
if ref_scores is not None and not isinstance(ref_scores, np.ndarray):
ref_scores = ref_scores.cpu().numpy()
if ref_labels is not None and not isinstance(ref_labels, np.ndarray):
ref_labels = ref_labels.cpu().numpy()
fig = visualize_pts(points)
fig = draw_multi_grid_range(fig, bv_range=(0, -40, 80, 40))
if gt_boxes is not None:
corners3d = boxes_to_corners_3d(gt_boxes)
fig = draw_corners3d(corners3d, fig=fig, color=(0, 0, 1), max_num=100)
if ref_boxes is not None:
ref_corners3d = boxes_to_corners_3d(ref_boxes)
if ref_labels is None:
fig = draw_corners3d(ref_corners3d, fig=fig, color=(0, 1, 0), cls=ref_scores, max_num=100)
else:
for k in range(ref_labels.min(), ref_labels.max() + 1):
cur_color = tuple(box_colormap[k % len(box_colormap)])
mask = (ref_labels == k)
fig = draw_corners3d(ref_corners3d[mask], fig=fig, color=cur_color, cls=ref_scores[mask], max_num=100)
mlab.view(azimuth=-179, elevation=54.0, distance=104.0, roll=90.0)
return fig
def draw_corners3d(corners3d, fig, color=(1, 1, 1), line_width=2, cls=None, tag='', max_num=500, tube_radius=None):
"""
:param corners3d: (N, 8, 3)
:param fig:
:param color:
:param line_width:
:param cls:
:param tag:
:param max_num:
:return:
"""
num = min(max_num, len(corners3d))
for n in range(num):
b = corners3d[n] # (8, 3)
if cls is not None:
if isinstance(cls, np.ndarray):
mlab.text3d(b[6, 0], b[6, 1], b[6, 2], '%.2f' % cls[n], scale=(0.3, 0.3, 0.3), color=color, figure=fig)
else:
mlab.text3d(b[6, 0], b[6, 1], b[6, 2], '%s' % cls[n], scale=(0.3, 0.3, 0.3), color=color, figure=fig)
for k in range(0, 4):
i, j = k, (k + 1) % 4
mlab.plot3d([b[i, 0], b[j, 0]], [b[i, 1], b[j, 1]], [b[i, 2], b[j, 2]], color=color, tube_radius=tube_radius,
line_width=line_width, figure=fig)
i, j = k + 4, (k + 1) % 4 + 4
mlab.plot3d([b[i, 0], b[j, 0]], [b[i, 1], b[j, 1]], [b[i, 2], b[j, 2]], color=color, tube_radius=tube_radius,
line_width=line_width, figure=fig)
i, j = k, k + 4
mlab.plot3d([b[i, 0], b[j, 0]], [b[i, 1], b[j, 1]], [b[i, 2], b[j, 2]], color=color, tube_radius=tube_radius,
line_width=line_width, figure=fig)
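        # The two crossed edges below (0-5 and 1-4) lie on the +x face and mark the box heading.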
i, j = 0, 5
mlab.plot3d([b[i, 0], b[j, 0]], [b[i, 1], b[j, 1]], [b[i, 2], b[j, 2]], color=color, tube_radius=tube_radius,
line_width=line_width, figure=fig)
i, j = 1, 4
mlab.plot3d([b[i, 0], b[j, 0]], [b[i, 1], b[j, 1]], [b[i, 2], b[j, 2]], color=color, tube_radius=tube_radius,
line_width=line_width, figure=fig)
return fig
| tao_pytorch_backend-main | nvidia_tao_pytorch/pointcloud/pointpillars/tools/visual_utils/visualize_utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Training utilities for PointPillars."""
import glob
import os
import struct
import tempfile
import time
from datetime import timedelta
import torch
from torch.nn.utils import clip_grad_norm_
import tqdm
from eff.core.codec import encrypt_stream
def train_one_epoch(model, optimizer, train_loader, model_func, lr_scheduler, accumulated_iter, optim_cfg,
rank, tbar, total_it_each_epoch, total_epochs, current_epoch, dataloader_iter, tb_log=None,
status_logging=None, leave_pbar=False):
"""Train for one epoch."""
if total_it_each_epoch == len(train_loader):
dataloader_iter = iter(train_loader)
tic = toc = 0
if rank == 0:
pbar = tqdm.tqdm(total=total_it_each_epoch, leave=leave_pbar, desc='train', dynamic_ncols=True)
tic = time.perf_counter()
for _ in range(total_it_each_epoch):
try:
batch = next(dataloader_iter)
except StopIteration:
dataloader_iter = iter(train_loader)
batch = next(dataloader_iter)
print('new iters')
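        # Advance the LR schedule by the global iteration count so warmup/decay follow total iterations, not epochs.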
lr_scheduler.step(accumulated_iter)
try:
cur_lr = float(optimizer.lr)
except: # noqa: E722
cur_lr = optimizer.param_groups[0]['lr']
if tb_log is not None:
tb_log.add_scalar('meta_data/learning_rate', cur_lr, accumulated_iter)
model.train()
optimizer.zero_grad()
loss, tb_dict, disp_dict = model_func(model, batch)
loss.backward()
clip_grad_norm_(model.parameters(), optim_cfg.grad_norm_clip)
optimizer.step()
accumulated_iter += 1
disp_dict.update({'loss': loss.item(), 'lr': cur_lr})
# log to console and tensorboard
if rank == 0:
pbar.update()
pbar.set_postfix(dict(total_it=accumulated_iter))
tbar.set_postfix(disp_dict)
tbar.refresh()
if tb_log is not None:
tb_log.add_scalar('train/loss', loss, accumulated_iter)
tb_log.add_scalar('meta_data/learning_rate', cur_lr, accumulated_iter)
for key, val in tb_dict.items():
tb_log.add_scalar('train/' + key, val, accumulated_iter)
if rank == 0:
pbar.close()
toc = time.perf_counter()
if status_logging is not None:
status_logging.get_status_logger().kpi = {
"learning_rate": cur_lr,
"loss": loss.item()
}
data = {
"epoch": current_epoch,
"time_per_epoch": str(timedelta(seconds=toc - tic)),
"max_epoch": total_epochs,
"eta": str(timedelta(seconds=(total_epochs - current_epoch) * (toc - tic)))
}
status_logging.get_status_logger().write(
data=data,
message="Train metrics generated.",
status_level=status_logging.Status.RUNNING
)
return accumulated_iter
def train_model(model, optimizer, train_loader, model_func, lr_scheduler, optim_cfg,
start_epoch, total_epochs, start_iter, rank, tb_log, status_logging,
ckpt_save_dir, key,
train_sampler=None, lr_warmup_scheduler=None, ckpt_save_interval=1,
max_ckpt_save_num=50, merge_all_iters_to_one_epoch=False):
"""Train model."""
accumulated_iter = start_iter
with tqdm.trange(start_epoch, total_epochs, desc='epochs', dynamic_ncols=True, leave=(rank == 0)) as tbar:
total_it_each_epoch = len(train_loader)
if merge_all_iters_to_one_epoch:
assert hasattr(train_loader.dataset, 'merge_all_iters_to_one_epoch')
train_loader.dataset.merge_all_iters_to_one_epoch(merge=True, epochs=total_epochs)
total_it_each_epoch = len(train_loader) // max(total_epochs, 1)
dataloader_iter = iter(train_loader)
for cur_epoch in tbar:
if train_sampler is not None:
train_sampler.set_epoch(cur_epoch)
# train one epoch
if lr_warmup_scheduler is not None and cur_epoch < optim_cfg.warmup_epoch:
cur_scheduler = lr_warmup_scheduler
else:
cur_scheduler = lr_scheduler
accumulated_iter = train_one_epoch(
model, optimizer, train_loader, model_func,
lr_scheduler=cur_scheduler,
accumulated_iter=accumulated_iter, optim_cfg=optim_cfg,
rank=rank, tbar=tbar, tb_log=tb_log,
status_logging=status_logging,
leave_pbar=(cur_epoch + 1 == total_epochs),
total_it_each_epoch=total_it_each_epoch,
total_epochs=total_epochs,
current_epoch=cur_epoch,
dataloader_iter=dataloader_iter
)
# save trained model
trained_epoch = cur_epoch + 1
if trained_epoch % ckpt_save_interval == 0 and rank == 0:
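                # Keep at most `max_ckpt_save_num` encrypted .tlt checkpoints; the oldest ones are removed first.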
ckpt_list = glob.glob(str(ckpt_save_dir / 'checkpoint_epoch_*.tlt'))
ckpt_list.sort(key=os.path.getmtime)
                if len(ckpt_list) >= max_ckpt_save_num:
for cur_file_idx in range(0, len(ckpt_list) - max_ckpt_save_num + 1):
os.remove(ckpt_list[cur_file_idx])
ckpt_name = ckpt_save_dir / ('checkpoint_epoch_%d.tlt' % trained_epoch)
save_checkpoint(
checkpoint_model(model, optimizer, trained_epoch, accumulated_iter),
ckpt_name,
key
)
def model_state_to_cpu(model_state):
"""Move model states to CPU."""
model_state_cpu = type(model_state)() # ordered dict
for key, val in model_state.items():
model_state_cpu[key] = val.cpu()
return model_state_cpu
def checkpoint_state(model=None, optimizer=None, epoch=None, it=None):
"""Get checkpoint states."""
optim_state = optimizer.state_dict() if optimizer is not None else None
if model is not None:
if isinstance(model, torch.nn.parallel.DistributedDataParallel):
model_state = model_state_to_cpu(model.module.state_dict())
else:
model_state = model.state_dict()
else:
model_state = None
try:
import pointcloud
version = 'pointcloud+' + pointcloud.__version__
except: # noqa: E722
version = 'none'
return {'epoch': epoch, 'it': it, 'model_state': model_state, 'optimizer_state': optim_state, 'version': version}
def checkpoint_model(model=None, optimizer=None, epoch=None, it=None):
"""Get checkpoint states from model."""
optim_state = optimizer.state_dict() if optimizer is not None else None
try:
import pointcloud
version = 'pointcloud+' + pointcloud.__version__
except: # noqa: E722
version = 'none'
if isinstance(model, torch.nn.parallel.DistributedDataParallel):
model = model.module
return {'epoch': epoch, 'it': it, 'model': model, 'optimizer_state': optim_state, 'version': version}
def encrypt_onnx(tmp_file_name, output_file_name, key):
"""Encrypt the onnx model"""
with open(tmp_file_name, "rb") as open_temp_file, open(output_file_name,
"wb") as open_encoded_file:
# set the input name magic number
open_encoded_file.write(struct.pack("<i", 0))
encrypt_stream(
input_stream=open_temp_file, output_stream=open_encoded_file,
passphrase=key, encryption=True
)
def encrypt_pytorch(tmp_file_name, output_file_name, key):
"""Encrypt the pytorch model"""
with open(tmp_file_name, "rb") as open_temp_file, open(output_file_name,
"wb") as open_encoded_file:
encrypt_stream(
input_stream=open_temp_file, output_stream=open_encoded_file,
passphrase=key, encryption=True
)
def save_checkpoint(state, filename, key):
"""Save the checkpoint."""
if False and 'optimizer_state' in state:
optimizer_state = state['optimizer_state']
state.pop('optimizer_state', None)
optimizer_filename = '{}_optim.pth'.format(filename)
torch.save({'optimizer_state': optimizer_state}, optimizer_filename)
handle, temp_name = tempfile.mkstemp(".tlt")
os.close(handle)
torch.save(state, temp_name)
encrypt_pytorch(temp_name, filename, key)
os.remove(temp_name)
| tao_pytorch_backend-main | nvidia_tao_pytorch/pointcloud/pointpillars/tools/train_utils/train_utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Training utils."""
| tao_pytorch_backend-main | nvidia_tao_pytorch/pointcloud/pointpillars/tools/train_utils/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Training optimization utilities."""
from functools import partial
import torch.nn as nn
import torch.optim as optim
import torch.optim.lr_scheduler as lr_sched
from .fastai_optim import OptimWrapper
from .learning_schedules_fastai import CosineWarmupLR, OneCycle
def build_optimizer(model, optim_cfg):
"""Build optimizer."""
if optim_cfg.optimizer == 'adam':
optimizer = optim.Adam(model.parameters(), lr=optim_cfg.lr, weight_decay=optim_cfg.weight_decay)
elif optim_cfg.optimizer == 'sgd':
optimizer = optim.SGD(
model.parameters(), lr=optim_cfg.lr, weight_decay=optim_cfg.weight_decay,
momentum=optim_cfg.momentum
)
elif optim_cfg.optimizer == 'adam_onecycle':
def children(m: nn.Module):
return list(m.children())
def num_children(m: nn.Module) -> int:
return len(children(m))
flatten_model = lambda m: sum(map(flatten_model, m.children()), []) if num_children(m) else [m] # noqa: E731
get_layer_groups = lambda m: [nn.Sequential(*flatten_model(m))] # noqa: E731
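        # Flatten the model into its leaf modules and treat them as a single layer group
        # for the fastai-style OptimWrapper below.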
optimizer_func = partial(optim.Adam, betas=(0.9, 0.99))
optimizer = OptimWrapper.create(
optimizer_func, 3e-3, get_layer_groups(model), wd=optim_cfg.weight_decay, true_wd=True, bn_wd=True
)
else:
raise NotImplementedError
return optimizer
def build_scheduler(optimizer, total_iters_each_epoch, total_epochs, last_epoch, optim_cfg):
"""Build learning rate scheduler."""
decay_steps = [x * total_iters_each_epoch for x in optim_cfg.decay_step_list]
def lr_lbmd(cur_epoch):
cur_decay = 1
for decay_step in decay_steps:
if cur_epoch >= decay_step:
cur_decay = cur_decay * optim_cfg.lr_decay
return max(cur_decay, optim_cfg.lr_clip / optim_cfg.lr)
lr_warmup_scheduler = None
total_steps = total_iters_each_epoch * total_epochs
if optim_cfg.optimizer == 'adam_onecycle':
lr_scheduler = OneCycle(
optimizer, total_steps, optim_cfg.lr, list(optim_cfg.moms), optim_cfg.div_factor, optim_cfg.pct_start
)
else:
lr_scheduler = lr_sched.LambdaLR(optimizer, lr_lbmd, last_epoch=last_epoch)
if optim_cfg.lr_warmup:
lr_warmup_scheduler = CosineWarmupLR(
            optimizer, T_max=optim_cfg.warmup_epoch * total_iters_each_epoch,
eta_min=optim_cfg.lr / optim_cfg.div_factor
)
return lr_scheduler, lr_warmup_scheduler
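# Minimal usage sketch (hedged): `cfg.train.optimization` and `cfg.train.num_epochs` are illustrative
# names, not part of this module; `optim_cfg` only needs the fields referenced above.
#
#   optimizer = build_optimizer(model, cfg.train.optimization)
#   lr_scheduler, lr_warmup_scheduler = build_scheduler(
#       optimizer,
#       total_iters_each_epoch=len(train_loader),
#       total_epochs=cfg.train.num_epochs,
#       last_epoch=-1,
#       optim_cfg=cfg.train.optimization,
#   )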
| tao_pytorch_backend-main | nvidia_tao_pytorch/pointcloud/pointpillars/tools/train_utils/optimization/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is modified from https://github.com/traveller59/second.pytorch
"""FastAI optimizer."""
from collections.abc import Iterable
import torch
from torch import nn
from torch._utils import _unflatten_dense_tensors
from torch.nn.utils import parameters_to_vector
bn_types = (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d, nn.SyncBatchNorm)
def split_bn_bias(layer_groups):
"""Split the layers in `layer_groups` into batchnorm (`bn_types`) and non-batchnorm groups."""
split_groups = []
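    # Each layer group is split into a non-BN sub-group and a BN sub-group so that weight decay
    # can be applied selectively (see OptimWrapper.step).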
for lg in layer_groups:
l1, l2 = [], []
for c in lg.children():
if isinstance(c, bn_types):
l2.append(c)
else:
l1.append(c)
split_groups += [nn.Sequential(*l1), nn.Sequential(*l2)]
return split_groups
def get_master(layer_groups, flat_master: bool = False):
"""Return two lists, one for the model parameters in FP16 and one for the master parameters in FP32."""
split_groups = split_bn_bias(layer_groups)
model_params = [[param for param in lg.parameters() if param.requires_grad] for lg in split_groups]
if flat_master:
master_params = []
for lg in model_params:
if len(lg) != 0:
mp = parameters_to_vector([param.data.float() for param in lg])
mp = torch.nn.Parameter(mp, requires_grad=True)
if mp.grad is None:
mp.grad = mp.new(*mp.size())
master_params.append([mp])
else:
master_params.append([])
return model_params, master_params
master_params = [[param.clone().float().detach() for param in lg] for lg in model_params]
for mp in master_params:
for param in mp:
param.requires_grad = True
return model_params, master_params
def model_g2master_g(model_params, master_params, flat_master: bool = False) -> None:
"""Copy the `model_params` gradients to `master_params` for the optimizer step."""
if flat_master:
for model_group, master_group in zip(model_params, master_params):
if len(master_group) != 0:
master_group[0].grad.data.copy_(parameters_to_vector([p.grad.data.float() for p in model_group]))
else:
for model_group, master_group in zip(model_params, master_params):
for model, master in zip(model_group, master_group):
if model.grad is not None:
if master.grad is None:
master.grad = master.data.new(*master.data.size())
master.grad.data.copy_(model.grad.data)
else:
master.grad = None
def master2model(model_params, master_params, flat_master: bool = False) -> None:
"""Copy `master_params` to `model_params`."""
if flat_master:
for model_group, master_group in zip(model_params, master_params):
if len(model_group) != 0:
for model, master in zip(model_group, _unflatten_dense_tensors(master_group[0].data, model_group)):
model.data.copy_(master)
else:
for model_group, master_group in zip(model_params, master_params):
for model, master in zip(model_group, master_group):
model.data.copy_(master.data)
def listify(p=None, q=None):
"""Make `p` listy and the same length as `q`."""
if p is None:
p = []
elif isinstance(p, str):
p = [p]
elif not isinstance(p, Iterable):
p = [p]
n = q if type(q) == int else len(p) if q is None else len(q)
if len(p) == 1:
p = p * n
assert len(p) == n, f'List len mismatch ({len(p)} vs {n})'
return list(p)
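# Example: listify(1e-3, 3) -> [1e-3, 1e-3, 1e-3]; listify([0.95, 0.85]) -> [0.95, 0.85]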
def trainable_params(m: nn.Module):
"""Return list of trainable params in `m`."""
res = filter(lambda p: p.requires_grad, m.parameters())
return res
def is_tuple(x) -> bool:
"""Is tuple or not."""
return isinstance(x, tuple)
# copy from fastai.
class OptimWrapper():
"""Basic wrapper around `opt` to simplify hyper-parameters changes."""
def __init__(self, opt, wd, true_wd: bool = False, bn_wd: bool = True):
"""Initialize."""
self.opt, self.true_wd, self.bn_wd = opt, true_wd, bn_wd
self.opt_keys = list(self.opt.param_groups[0].keys())
self.opt_keys.remove('params')
self.read_defaults()
self.wd = wd
@classmethod
def create(cls, opt_func, lr,
layer_groups, **kwargs):
"""Create an `optim.Optimizer` from `opt_func` with `lr`. Set lr on `layer_groups`."""
split_groups = split_bn_bias(layer_groups)
opt = opt_func([{'params': trainable_params(grp), 'lr': 0} for grp in split_groups])
opt = cls(opt, **kwargs)
opt.lr, opt.opt_func = listify(lr, layer_groups), opt_func
return opt
def new(self, layer_groups):
"""Create a new `OptimWrapper` from `self` with another `layer_groups` but the same hyper-parameters."""
opt_func = getattr(self, 'opt_func', self.opt.__class__)
split_groups = split_bn_bias(layer_groups)
opt_func([{'params': trainable_params(grp), 'lr': 0} for grp in split_groups])
return self.create(opt_func, self.lr, layer_groups, wd=self.wd, true_wd=self.true_wd, bn_wd=self.bn_wd)
def __repr__(self) -> str:
"""Format to string."""
return f'OptimWrapper over {repr(self.opt)}.\nTrue weight decay: {self.true_wd}'
# Pytorch optimizer methods
def step(self) -> None:
"""Set weight decay and step optimizer."""
# weight decay outside of optimizer step (AdamW)
if self.true_wd:
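            # param_groups alternate non-BN / BN groups (see split_bn_bias): even indices hold
            # regular weights, odd indices hold BN parameters.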
for lr, wd, pg1, pg2 in zip(self._lr, self._wd, self.opt.param_groups[::2], self.opt.param_groups[1::2]):
for p in pg1['params']:
# When some parameters are fixed: Shaoshuai Shi
if p.requires_grad is False:
continue
p.data.mul_(1 - wd * lr)
if self.bn_wd:
for p in pg2['params']:
# When some parameters are fixed: Shaoshuai Shi
if p.requires_grad is False:
continue
p.data.mul_(1 - wd * lr)
self.set_val('weight_decay', listify(0, self._wd))
self.opt.step()
def zero_grad(self) -> None:
"""Clear optimizer gradients."""
self.opt.zero_grad()
# Passthrough to the inner opt.
def __getattr__(self, k: str):
"""Get attribute."""
return getattr(self.opt, k, None)
def clear(self):
"""Reset the state of the inner optimizer."""
sd = self.state_dict()
sd['state'] = {}
self.load_state_dict(sd)
# Hyperparameters as properties
@property
def lr(self) -> float:
"""Learning rate."""
return self._lr[-1]
@lr.setter
def lr(self, val: float) -> None:
"""Learning rate."""
self._lr = self.set_val('lr', listify(val, self._lr))
@property
def mom(self) -> float:
"""Momentum."""
return self._mom[-1]
@mom.setter
def mom(self, val: float) -> None:
"""Momentum."""
if 'momentum' in self.opt_keys:
self.set_val('momentum', listify(val, self._mom))
elif 'betas' in self.opt_keys:
self.set_val('betas', (listify(val, self._mom), self._beta))
self._mom = listify(val, self._mom)
@property
def beta(self) -> float:
"""Beta."""
return None if self._beta is None else self._beta[-1]
@beta.setter
def beta(self, val: float) -> None:
"""Set beta (or alpha as makes sense for given optimizer)."""
if val is None:
return
if 'betas' in self.opt_keys:
self.set_val('betas', (self._mom, listify(val, self._beta)))
elif 'alpha' in self.opt_keys:
self.set_val('alpha', listify(val, self._beta))
self._beta = listify(val, self._beta)
@property
def wd(self) -> float:
"""weight decay."""
return self._wd[-1]
@wd.setter
def wd(self, val: float) -> None:
"""Set weight decay."""
if not self.true_wd:
self.set_val('weight_decay', listify(val, self._wd), bn_groups=self.bn_wd)
self._wd = listify(val, self._wd)
# Helper functions
def read_defaults(self) -> None:
"""Read the values inside the optimizer for the hyper-parameters."""
self._beta = None
if 'lr' in self.opt_keys:
self._lr = self.read_val('lr')
if 'momentum' in self.opt_keys:
self._mom = self.read_val('momentum')
if 'alpha' in self.opt_keys:
self._beta = self.read_val('alpha')
if 'betas' in self.opt_keys:
self._mom, self._beta = self.read_val('betas')
if 'weight_decay' in self.opt_keys:
self._wd = self.read_val('weight_decay')
def set_val(self, key: str, val, bn_groups: bool = True):
"""Set `val` inside the optimizer dictionary at `key`."""
if is_tuple(val):
val = [(v1, v2) for v1, v2 in zip(*val)]
for v, pg1, pg2 in zip(val, self.opt.param_groups[::2], self.opt.param_groups[1::2]):
pg1[key] = v
if bn_groups:
pg2[key] = v
return val
def read_val(self, key: str):
"""Read a hyperparameter `key` in the optimizer dictionary."""
val = [pg[key] for pg in self.opt.param_groups[::2]]
if is_tuple(val[0]):
val = [o[0] for o in val], [o[1] for o in val]
return val
class FastAIMixedOptim(OptimWrapper):
"""FastAI Mixed Optimizer."""
@classmethod
def create(cls, opt_func, lr,
layer_groups, model, flat_master=False, loss_scale=512.0, **kwargs):
"""Create an `optim.Optimizer` from `opt_func` with `lr`. Set lr on `layer_groups`."""
opt = OptimWrapper.create(opt_func, lr, layer_groups, **kwargs)
opt.model_params, opt.master_params = get_master(layer_groups, flat_master)
opt.flat_master = flat_master
opt.loss_scale = loss_scale
opt.model = model
# Changes the optimizer so that the optimization step is done in FP32.
# opt = self.learn.opt
mom, wd, beta = opt.mom, opt.wd, opt.beta
lrs = [lr for lr in opt._lr for _ in range(2)]
opt_params = [{'params': mp, 'lr': lr} for mp, lr in zip(opt.master_params, lrs)]
opt.opt = opt_func(opt_params)
opt.mom, opt.wd, opt.beta = mom, wd, beta
return opt
def step(self):
"""step."""
model_g2master_g(self.model_params, self.master_params, self.flat_master)
for group in self.master_params:
for param in group:
param.grad.div_(self.loss_scale)
super(FastAIMixedOptim, self).step()
self.model.zero_grad()
# Update the params from master to model.
master2model(self.model_params, self.master_params, self.flat_master)
| tao_pytorch_backend-main | nvidia_tao_pytorch/pointcloud/pointpillars/tools/train_utils/optimization/fastai_optim.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is modified from https://github.com/traveller59/second.pytorch
"""FastAI Learning Rate Scheduler."""
import math
from functools import partial
import numpy as np
import torch.optim.lr_scheduler as lr_sched
from .fastai_optim import OptimWrapper
class LRSchedulerStep(object):
"""Step LR scheduler"""
def __init__(self, fai_optimizer: OptimWrapper, total_step, lr_phases,
mom_phases):
"""Initialize."""
# if not isinstance(fai_optimizer, OptimWrapper):
# raise TypeError('{} is not a fastai OptimWrapper'.format(
# type(fai_optimizer).__name__))
self.optimizer = fai_optimizer
self.total_step = total_step
self.lr_phases = []
for i, (start, lambda_func) in enumerate(lr_phases):
if len(self.lr_phases) != 0:
assert self.lr_phases[-1][0] < start
if isinstance(lambda_func, str):
lambda_func = eval(lambda_func) # nosec
if i < len(lr_phases) - 1:
self.lr_phases.append((int(start * total_step), int(lr_phases[i + 1][0] * total_step), lambda_func))
else:
self.lr_phases.append((int(start * total_step), total_step, lambda_func))
assert self.lr_phases[0][0] == 0
self.mom_phases = []
for i, (start, lambda_func) in enumerate(mom_phases):
if len(self.mom_phases) != 0:
assert self.mom_phases[-1][0] < start
if isinstance(lambda_func, str):
lambda_func = eval(lambda_func) # nosec
if i < len(mom_phases) - 1:
self.mom_phases.append((int(start * total_step), int(mom_phases[i + 1][0] * total_step), lambda_func))
else:
self.mom_phases.append((int(start * total_step), total_step, lambda_func))
assert self.mom_phases[0][0] == 0
def step(self, step):
"""Step."""
for start, end, func in self.lr_phases:
if step >= start:
self.optimizer.lr = func((step - start) / (end - start))
for start, end, func in self.mom_phases:
if step >= start:
self.optimizer.mom = func((step - start) / (end - start))
def annealing_cos(start, end, pct):
"""Cosine Annealing."""
# print(pct, start, end)
"Cosine anneal from `start` to `end` as pct goes from 0.0 to 1.0."
cos_out = np.cos(np.pi * pct) + 1
return end + (start - end) / 2 * cos_out
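# Example: annealing_cos(0.1, 0.001, 0.0) ~= 0.1, annealing_cos(0.1, 0.001, 1.0) ~= 0.001,
# and pct=0.5 gives the midpoint (start + end) / 2.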
class OneCycle(LRSchedulerStep):
"""One Cycle LR scheduler."""
def __init__(self, fai_optimizer, total_step, lr_max, moms, div_factor,
pct_start):
"""Initialize."""
self.lr_max = lr_max
self.moms = moms
self.div_factor = div_factor
self.pct_start = pct_start
low_lr = self.lr_max / self.div_factor
lr_phases = ((0, partial(annealing_cos, low_lr, self.lr_max)),
(self.pct_start,
partial(annealing_cos, self.lr_max, low_lr / 1e4)))
mom_phases = ((0, partial(annealing_cos, *self.moms)),
(self.pct_start, partial(annealing_cos,
*self.moms[::-1])))
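        # The LR anneals from lr_max / div_factor up to lr_max over the first `pct_start` fraction of
        # steps, then back down to low_lr / 1e4; momentum moves from moms[0] to moms[1] and back.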
fai_optimizer.lr, fai_optimizer.mom = low_lr, self.moms[0]
super().__init__(fai_optimizer, total_step, lr_phases, mom_phases)
class CosineWarmupLR(lr_sched._LRScheduler):
"""Cosine warmup LR scheduler."""
def __init__(self, optimizer, T_max, eta_min=0, last_epoch=-1):
"""Initialize."""
self.T_max = T_max
self.eta_min = eta_min
super(CosineWarmupLR, self).__init__(optimizer, last_epoch)
def get_lr(self):
"""Get learning rate."""
return [self.eta_min + (base_lr - self.eta_min) *
(1 - math.cos(math.pi * self.last_epoch / self.T_max)) / 2
for base_lr in self.base_lrs]
class FakeOptim:
"""Fake optimizer."""
def __init__(self):
"""Initialize."""
self.lr = 0
self.mom = 0
if __name__ == "__main__":
import matplotlib.pyplot as plt
opt = FakeOptim() # 3e-3, wd=0.4, div_factor=10
schd = OneCycle(opt, 100, 3e-3, (0.95, 0.85), 10.0, 0.1)
lrs = []
moms = []
for i in range(100):
schd.step(i)
lrs.append(opt.lr)
moms.append(opt.mom)
plt.plot(lrs)
# plt.plot(moms)
plt.show()
plt.plot(moms)
plt.show()
| tao_pytorch_backend-main | nvidia_tao_pytorch/pointcloud/pointpillars/tools/train_utils/optimization/learning_schedules_fastai.py |