# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Segmentation Detector Representer."""
import cv2
import numpy as np
import pyclipper
from shapely.geometry import Polygon
def get_post_processing(config):
"""Get post processing."""
try:
cls = globals()[config['type']](**config['args'])
return cls
except Exception:
return None
class SegDetectorRepresenter():
"""SegDetector Representer class."""
def __init__(self, thresh=0.3, box_thresh=0.7, max_candidates=1000, unclip_ratio=1.5):
"""Initialize."""
self.min_size = 3
self.thresh = thresh
self.box_thresh = box_thresh
self.max_candidates = max_candidates
self.unclip_ratio = unclip_ratio
def __call__(self, batch, pred, is_output_polygon=False):
"""Generate final bbox and score.
Args:
            batch (dict): Produced by dataloaders. It is a dict containing:
image, a tensor of shape (N, C, H, W).
polygons, the polygons of objective regions, tensor of shape (N, K, 4, 2)
ignore_tags: tensor of shape (N, K), indicates whether a region is ignorable or not.
shape: the original shape of images.
filename: the original filenames of images.
pred (list): Prediction result.
"""
pred = pred[:, 0, :, :]
segmentation = self.binarize(pred)
boxes_batch = []
scores_batch = []
for batch_index in range(pred.shape[0]):
height = batch['img'].shape[1]
width = batch['img'].shape[2]
if is_output_polygon:
boxes, scores = self.polygons_from_bitmap(pred[batch_index], segmentation[batch_index], width, height)
else:
boxes, scores = self.boxes_from_bitmap(pred[batch_index], segmentation[batch_index], width, height)
boxes_batch.append(boxes)
scores_batch.append(scores)
return boxes_batch, scores_batch
def binarize(self, pred):
"""Binarize."""
return pred > self.thresh
def polygons_from_bitmap(self, pred, _bitmap, dest_width, dest_height):
"""Generate the polygon according to the approximate binary map."""
assert len(_bitmap.shape) == 2
bitmap = _bitmap # The first channel
pred_temp = pred
height, width = bitmap.shape
boxes = []
scores = []
contours, _ = cv2.findContours((bitmap * 255).astype(np.uint8), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
for contour in contours[:self.max_candidates]:
epsilon = 0.0001 * cv2.arcLength(contour, True)
approx = cv2.approxPolyDP(contour, epsilon, True)
points = approx.reshape((-1, 2))
if points.shape[0] < 4:
continue
# _, sside = self.get_mini_boxes(contour)
# if sside < self.min_size:
# continue
score = self.box_score_fast(pred_temp, contour.squeeze(1))
if self.box_thresh > score:
continue
if points.shape[0] > 2:
box = self.unclip(points, unclip_ratio=self.unclip_ratio)
if len(box) > 1:
continue
else:
continue
box = box.reshape(-1, 2)
_, sside = self.get_mini_boxes(box.reshape((-1, 1, 2)))
if sside < self.min_size + 2:
continue
if not isinstance(dest_width, int):
dest_width = dest_width.item()
dest_height = dest_height.item()
box[:, 0] = np.clip(np.round(box[:, 0] / width * dest_width), 0, dest_width)
box[:, 1] = np.clip(np.round(box[:, 1] / height * dest_height), 0, dest_height)
boxes.append(box)
scores.append(score)
return boxes, scores
def boxes_from_bitmap(self, pred, _bitmap, dest_width, dest_height):
"""Generate the bbox according to the approximate binary map."""
assert len(_bitmap.shape) == 2
bitmap = _bitmap
pred_temp = pred
height, width = bitmap.shape
contours, _ = cv2.findContours(
(bitmap * 255).astype(np.uint8),
cv2.RETR_LIST,
cv2.CHAIN_APPROX_SIMPLE)
num_contours = min(len(contours), self.max_candidates)
boxes = np.zeros((num_contours, 4, 2), dtype=np.int16)
scores = np.zeros((num_contours,), dtype=np.float32)
for index in range(num_contours):
contour = contours[index].squeeze(1)
points, sside = self.get_mini_boxes(contour)
if sside < self.min_size:
continue
points = np.array(points)
score = self.box_score_fast(pred_temp, points)
if self.box_thresh > score:
continue
box = self.unclip(points, unclip_ratio=self.unclip_ratio).reshape(-1, 1, 2)
box, sside = self.get_mini_boxes(box)
if sside < self.min_size + 2:
continue
box = np.array(box)
if not isinstance(dest_width, int):
dest_width = dest_width.item()
dest_height = dest_height.item()
box[:, 0] = np.clip(np.round(box[:, 0] / width * dest_width), 0, dest_width)
box[:, 1] = np.clip(np.round(box[:, 1] / height * dest_height), 0, dest_height)
boxes[index, :, :] = box.astype(np.int16)
scores[index] = score
return boxes, scores
def unclip(self, box, unclip_ratio=1.5):
"""Expand the bbox."""
poly = Polygon(box)
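        # The offset distance follows the DB unclip rule D' = A * r / L, where A is
        # the polygon area, L its perimeter, and r the unclip ratio.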
distance = poly.area * unclip_ratio / poly.length
offset = pyclipper.PyclipperOffset() # pylint: disable=I1101
offset.AddPath(box, pyclipper.JT_ROUND, pyclipper.ET_CLOSEDPOLYGON) # pylint: disable=I1101
expanded = np.array(offset.Execute(distance))
return expanded
def get_mini_boxes(self, contour):
"""Generate the bbox with minimum area, get its coordinates and the short size of height/width."""
bounding_box = cv2.minAreaRect(contour)
points = sorted(list(cv2.boxPoints(bounding_box)), key=lambda x: x[0])
index_1, index_2, index_3, index_4 = 0, 1, 2, 3
if points[1][1] > points[0][1]:
index_1 = 0
index_4 = 1
else:
index_1 = 1
index_4 = 0
if points[3][1] > points[2][1]:
index_2 = 2
index_3 = 3
else:
index_2 = 3
index_3 = 2
box = [points[index_1], points[index_2], points[index_3], points[index_4]]
return box, min(bounding_box[1])
def box_score_fast(self, bitmap, _box):
"""Calculate the bbox score according to bbox's coordinate and bitmap."""
h, w = bitmap.shape[:2]
box = _box.copy()
        xmin = np.clip(np.floor(box[:, 0].min()).astype(int), 0, w - 1)
        xmax = np.clip(np.ceil(box[:, 0].max()).astype(int), 0, w - 1)
        ymin = np.clip(np.floor(box[:, 1].min()).astype(int), 0, h - 1)
        ymax = np.clip(np.ceil(box[:, 1].max()).astype(int), 0, h - 1)
mask = np.zeros((ymax - ymin + 1, xmax - xmin + 1), dtype=np.uint8)
box[:, 0] = box[:, 0] - xmin
box[:, 1] = box[:, 1] - ymin
cv2.fillPoly(mask, box.reshape(1, -1, 2).astype(np.int32), 1)
return cv2.mean(bitmap[ymin:ymax + 1, xmin:xmax + 1], mask)[0]
| tao_deploy-main | nvidia_tao_deploy/cv/ocdnet/post_processing/seg_detector_representer.py |
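A minimal usage sketch for the SegDetectorRepresenter above, using a random probability map in place of a real OCDNet output; the 736x1280 resolution and the dummy batch layout are illustrative assumptions, not part of the module.

import numpy as np
from nvidia_tao_deploy.cv.ocdnet.post_processing.seg_detector_representer import SegDetectorRepresenter

representer = SegDetectorRepresenter(thresh=0.3, box_thresh=0.7)
# __call__ indexes pred as pred[:, 0, :, :], hence the singleton channel axis.
pred = np.random.rand(1, 1, 736, 1280).astype(np.float32)
# Only batch['img'].shape[1:3] (height, width) is read by __call__.
batch = {'img': np.zeros((1, 736, 1280, 3), dtype=np.float32)}
boxes_batch, scores_batch = representer(batch, pred, is_output_polygon=False)
print(len(boxes_batch[0]), 'boxes for the first image')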
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Init."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_deploy-main | nvidia_tao_deploy/cv/ocdnet/post_processing/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TAO Deploy OCDNet Hydra."""
| tao_deploy-main | nvidia_tao_deploy/cv/ocdnet/config/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Default config file"""
from typing import Optional, List
from dataclasses import dataclass, field
from omegaconf import MISSING
@dataclass
class ModelConfig:
"""Model config."""
backbone: str = "deformable_resnet18"
pretrained: bool = True
in_channels: int = 3
neck: str = "FPN"
inner_channels: int = 256
head: str = "DBHead"
out_channels: int = 2
k: int = 50
load_pruned_graph: bool = MISSING
pruned_graph_path: str = MISSING
@dataclass
class Optimargs:
"""Optimargs config."""
lr: float = 0.001
weight_decay: float = 0.0
amsgrad: bool = True
@dataclass
class Optimizer:
"""Optimizer config."""
type: str = "Adam"
args: Optimargs = Optimargs()
@dataclass
class Loss:
"""Loss config."""
type: str = "DBLoss"
alpha: int = 5
beta: int = 10
ohem_ratio: int = 3
@dataclass
class Postprocessingargs:
"""Postprocessingargs config."""
thresh: float = MISSING
box_thresh: float = MISSING
max_candidates: int = MISSING
unclip_ratio: float = MISSING
@dataclass
class Postprocessing:
"""Postprocessing config."""
type: str = "SegDetectorRepresenter"
args: Postprocessingargs = Postprocessingargs()
@dataclass
class Metricargs:
"""Metricargs config."""
is_output_polygon: bool = MISSING
@dataclass
class Metric:
"""Metric config."""
type: str = "QuadMetric"
args: Metricargs = Metricargs()
@dataclass
class LRSchedulerargs:
"""LRSchedulerargs config."""
warmup_epoch: int = MISSING
@dataclass
class LRScheduler:
"""LRScheduler config."""
type: str = "WarmupPolyLR"
args: LRSchedulerargs = LRSchedulerargs()
@dataclass
class Trainer:
"""Trainer config."""
is_output_polygon: bool = False
warmup_epoch: int = 3
seed: int = 2
log_iter: int = 10
clip_grad_norm: float = 5.0
show_images_iter: int = 50
tensorboard: bool = False
@dataclass
class Trainargs:
"""Train args config."""
img_mode: str = "BGR"
filter_keys: List[str] = field(default_factory=lambda: ['img_path', 'img_name', 'text_polys', 'texts', 'ignore_tags', 'shape'])
ignore_tags: List[str] = field(default_factory=lambda: ['*', '###'])
pre_processes: Optional[List[str]] = None
@dataclass
class Dataloader:
"""Train args config."""
batch_size: int = 32
shuffle: bool = True
pin_memory: bool = False
num_workers: int = 0
collate_fn: Optional[str] = ""
@dataclass
class TrainDataset:
"""Train Dataset config."""
data_name: str = "ICDAR2015Dataset"
data_path: List[str] = MISSING
args: Trainargs = Trainargs()
loader: Dataloader = Dataloader()
@dataclass
class Validateargs:
"""Validate args config."""
img_mode: str = "BGR"
filter_keys: List[str] = field(default_factory=lambda: [''])
ignore_tags: List[str] = field(default_factory=lambda: ['*', '###'])
pre_processes: Optional[List[str]] = None
@dataclass
class Validateloader:
"""Validate args config."""
batch_size: int = 1
shuffle: bool = False
pin_memory: bool = False
num_workers: int = 0
collate_fn: Optional[str] = "ICDARCollateFN"
@dataclass
class ValidateDataset:
"""Validate Dataset config."""
data_name: str = "ICDAR2015Dataset"
data_path: List[str] = MISSING
args: Validateargs = Validateargs()
loader: Validateloader = Validateloader()
@dataclass
class DataConfig:
"""Dataset config."""
train_dataset: TrainDataset = TrainDataset()
validate_dataset: ValidateDataset = ValidateDataset()
@dataclass
class TrainConfig:
"""Train experiment config."""
results_dir: Optional[str] = None
pretrained_model_path: str = ""
resume_training_checkpoint_path: str = ""
encryption_key: str = MISSING
num_epochs: int = 50
checkpoint_interval: int = 1
validation_interval: int = 1
gpu_id: List[int] = field(default_factory=lambda: [0])
post_processing: Postprocessing = Postprocessing()
metric: Metric = Metric()
trainer: Trainer = Trainer()
loss: Loss = Loss()
optimizer: Optimizer = Optimizer()
lr_scheduler: LRScheduler = LRScheduler()
@dataclass
class InferenceConfig:
"""Inference experiment config."""
results_dir: Optional[str] = None
checkpoint: str = MISSING
trt_engine: Optional[str] = None
input_folder: str = MISSING
width: int = MISSING
height: int = MISSING
img_mode: str = MISSING
polygon: bool = True
show: bool = False
gpu_id: int = 0
post_processing: Postprocessing = Postprocessing()
@dataclass
class EvalConfig:
"""Evaluation experiment config."""
results_dir: Optional[str] = None
checkpoint: str = MISSING
trt_engine: Optional[str] = None
gpu_id: int = 0
batch_size: int = 1
post_processing: Postprocessing = Postprocessing()
metric: Metric = Metric()
@dataclass
class PruneConfig:
"""Prune experiment config."""
results_dir: Optional[str] = None
checkpoint: str = MISSING
gpu_id: int = 0
batch_size: int = 1
pruning_thresh: float = MISSING
@dataclass
class ExportConfig:
"""Export experiment config."""
results_dir: Optional[str] = None
checkpoint: str = MISSING
gpu_id: int = 0
width: int = MISSING
height: int = MISSING
opset_version: int = 11
@dataclass
class CalibrationConfig:
"""Calibration config."""
cal_image_dir: str = MISSING
cal_cache_file: str = MISSING
cal_batch_size: int = 1
cal_num_batches: int = 1
@dataclass
class TrtConfig:
"""Trt config."""
data_type: str = "FP32"
workspace_size: int = 1024
min_batch_size: int = 1
opt_batch_size: int = 1
max_batch_size: int = 1
calibration: CalibrationConfig = CalibrationConfig()
@dataclass
class GenTrtEngineExpConfig:
"""Gen TRT Engine experiment config."""
results_dir: Optional[str] = None
gpu_id: int = 0
onnx_file: str = MISSING
trt_engine: str = MISSING
width: int = MISSING
height: int = MISSING
img_mode: str = "BGR"
tensorrt: TrtConfig = TrtConfig()
@dataclass
class ExperimentConfig:
"""Experiment config."""
train: TrainConfig = TrainConfig()
model: ModelConfig = ModelConfig()
evaluate: EvalConfig = EvalConfig()
dataset: DataConfig = DataConfig()
export: ExportConfig = ExportConfig()
gen_trt_engine: GenTrtEngineExpConfig = GenTrtEngineExpConfig()
inference: InferenceConfig = InferenceConfig()
prune: PruneConfig = PruneConfig()
encryption_key: str = MISSING
name: str = MISSING
num_gpus: int = 1
results_dir: str = "/results"
| tao_deploy-main | nvidia_tao_deploy/cv/ocdnet/config/default_config.py |
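A short sketch of how a structured config such as ExperimentConfig is typically materialized with OmegaConf; the override values and file paths below are illustrative assumptions only.

from omegaconf import OmegaConf
from nvidia_tao_deploy.cv.ocdnet.config.default_config import ExperimentConfig

schema = OmegaConf.structured(ExperimentConfig)
overrides = OmegaConf.create({
    "name": "ocdnet_experiment",
    "encryption_key": "nvidia_tao",  # placeholder value
    "gen_trt_engine": {
        "onnx_file": "/models/ocdnet.onnx",     # hypothetical path
        "trt_engine": "/models/ocdnet.engine",  # hypothetical path
        "width": 1280,
        "height": 736,
    },
})
cfg = OmegaConf.merge(schema, overrides)
print(cfg.gen_trt_engine.tensorrt.data_type)  # "FP32" by default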
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TAO Deploy OCDNet utils module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_deploy-main | nvidia_tao_deploy/cv/ocdnet/utils/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utils module."""
import os
import cv2
import glob
import numpy as np
from natsort import natsorted
import matplotlib.pyplot as plt
def show_img(imgs: np.ndarray, title='img'):
"""Show img."""
color = (len(imgs.shape) == 3 and imgs.shape[-1] == 3)
imgs = np.expand_dims(imgs, axis=0)
for i, img in enumerate(imgs):
plt.figure()
plt.title(f'{title}_{i}')
plt.imshow(img, cmap=None if color else 'gray')
plt.show()
def draw_bbox(img_path, result, color=(255, 0, 0), thickness=2):
"""Draw bbox."""
if isinstance(img_path, str):
img_path = cv2.imread(img_path)
# img_path = cv2.cvtColor(img_path, cv2.COLOR_BGR2RGB)
img_path = img_path.copy()
for point in result:
point = point.astype(int)
cv2.polylines(img_path, [point], True, color, thickness)
return img_path
def save_result(result_path, box_list, score_list, is_output_polygon):
"""Save result."""
if is_output_polygon:
with open(result_path, 'wt', encoding="utf-8") as res:
for i, box in enumerate(box_list):
box = box.reshape(-1).tolist()
result = ",".join([str(int(x)) for x in box])
score = score_list[i]
res.write(result + ',' + str(score) + "\n")
else:
with open(result_path, 'wt', encoding="utf-8") as res:
for i, box in enumerate(box_list):
score = score_list[i]
box = box.reshape(-1).tolist()
result = ",".join([str(int(x)) for x in box])
res.write(result + ',' + str(score) + "\n")
def get_file_list(folder_path: str, p_postfix: list = None, sub_dir: bool = True) -> list:
"""Get file list
Args:
folder_path: The path to the folder
p_postfix: postfix
sub_dir: check the subfolder
Returns:
Return file list
"""
assert os.path.exists(folder_path) and os.path.isdir(folder_path)
if p_postfix is None:
p_postfix = ['.jpg']
if isinstance(p_postfix, str):
p_postfix = [p_postfix]
file_list = [x for x in glob.glob(folder_path + '/*.*') if
os.path.splitext(x)[-1] in p_postfix or '.*' in p_postfix]
return natsorted(file_list)
| tao_deploy-main | nvidia_tao_deploy/cv/ocdnet/utils/utils.py |
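A brief sketch exercising get_file_list and draw_bbox from the module above; the image folder and the quad coordinates are made-up assumptions for illustration.

import numpy as np
from nvidia_tao_deploy.cv.ocdnet.utils.utils import draw_bbox, get_file_list

files = get_file_list("/data/test_images", p_postfix=[".jpg", ".png"])  # hypothetical folder
if files:
    quad = np.array([[[10, 10], [100, 10], [100, 50], [10, 50]]])
    annotated = draw_bbox(files[0], quad)  # accepts a path or an ndarray
    print(annotated.shape)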
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""get_metric module."""
| tao_deploy-main | nvidia_tao_deploy/cv/ocdnet/utils/ocr_metric/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""icdar2015 module."""
from .quad_metric import QuadMetric # noqa: F401
| tao_deploy-main | nvidia_tao_deploy/cv/ocdnet/utils/ocr_metric/icdar2015/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""quad_metric module."""
import numpy as np
from .detection.iou import DetectionIoUEvaluator
def get_metric(config):
"""get metric."""
try:
if 'args' not in config:
args = {}
else:
args = config['args']
if isinstance(args, dict):
cls = globals()[config['type']](**args)
else:
cls = globals()[config['type']](args)
return cls
except Exception:
return None
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
"""Initialize."""
self.reset()
def reset(self):
"""reset."""
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
"""update."""
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
return self
class QuadMetric():
"""QuadMetric class."""
def __init__(self, is_output_polygon=False):
"""Initialize."""
self.is_output_polygon = is_output_polygon
self.evaluator = DetectionIoUEvaluator(is_output_polygon=is_output_polygon)
def measure(self, batch, output, box_thresh=0.6):
"""Measure the quad metric
Args:
batch (dict): Produced by dataloaders. It is a dict of image, polygons and ignore_tags.
The image is a tensor with shape (N, C, H, W). The polygons is a tensor of
shape (N, K, 4, 2). The ignore_tags is a tensor of shape (N, K), indicates
whether a region is ignorable or not. The shape is the original shape of images.
The filename is the original filenames of images.
output: The prediction polygons and scores.
"""
results = []
gt_polyons_batch = batch['text_polys']
ignore_tags_batch = batch['ignore_tags']
pred_polygons_batch = np.array(output[0])
pred_scores_batch = np.array(output[1])
polygons = gt_polyons_batch
pred_polygons = pred_polygons_batch[0]
pred_scores = pred_scores_batch[0]
ignore_tags = ignore_tags_batch
gt = [{"points": np.int64(polygons[i]), "ignore": ignore_tags[i]} for i in range(len(polygons))]
if self.is_output_polygon:
pred = [{"points": pred_polygons[i]} for i in range(len(pred_polygons))]
else:
pred = []
for i in range(pred_polygons.shape[0]):
if pred_scores[i] >= box_thresh:
                    pred.append({"points": pred_polygons[i, :, :].astype(int)})
res = self.evaluator.evaluate_image(gt, pred)
results.append(res)
return results
def validate_measure(self, batch, output, box_thresh=0.6):
"""validate measure."""
return self.measure(batch, output, box_thresh)
def evaluate_measure(self, batch, output):
"""evaluate measure."""
return self.measure(batch, output), np.linspace(0, batch['image'].shape[0]).tolist()
def gather_measure(self, raw_metrics):
"""gather measure."""
raw_metrics = [image_metrics
for batch_metrics in raw_metrics
for image_metrics in batch_metrics]
result = self.evaluator.combine_results(raw_metrics)
precision = AverageMeter()
recall = AverageMeter()
fmeasure = AverageMeter()
precision.update(result['precision'], n=len(raw_metrics))
recall.update(result['recall'], n=len(raw_metrics))
fmeasure_score = 2 * precision.val * recall.val / (precision.val + recall.val + 1e-8)
fmeasure.update(fmeasure_score)
return {
'precision': precision,
'recall': recall,
'fmeasure': fmeasure
}
| tao_deploy-main | nvidia_tao_deploy/cv/ocdnet/utils/ocr_metric/icdar2015/quad_metric.py |
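A minimal, hedged example of QuadMetric on a single image with one ground-truth quad and one closely matching prediction; all coordinates and scores are fabricated for illustration.

import numpy as np
from nvidia_tao_deploy.cv.ocdnet.utils.ocr_metric.icdar2015.quad_metric import QuadMetric

metric = QuadMetric(is_output_polygon=False)
batch = {
    "text_polys": np.array([[[0, 0], [100, 0], [100, 30], [0, 30]]], dtype=np.float32),
    "ignore_tags": np.array([False]),
}
pred_boxes = np.array([[[[0, 0], [98, 0], [98, 32], [0, 32]]]], dtype=np.float32)  # (N, K, 4, 2)
pred_scores = np.array([[0.9]], dtype=np.float32)
raw = metric.validate_measure(batch, (pred_boxes, pred_scores))
print(metric.gather_measure([raw])["fmeasure"].avg)  # ~1.0 for this matching pair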
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""icdar2015 detection module."""
| tao_deploy-main | nvidia_tao_deploy/cv/ocdnet/utils/ocr_metric/icdar2015/detection/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Detection IOU module."""
# pylint: disable=W0612,R1705
import numpy as np
from shapely.geometry import Polygon
import cv2
def iou_rotate(box_a, box_b, method='union'):
"""iou rotate."""
rect_a = cv2.minAreaRect(box_a)
rect_b = cv2.minAreaRect(box_b)
r1 = cv2.rotatedRectangleIntersection(rect_a, rect_b)
if r1[0] == 0:
return 0
else:
inter_area = cv2.contourArea(r1[1])
area_a = cv2.contourArea(box_a)
area_b = cv2.contourArea(box_b)
union_area = area_a + area_b - inter_area
if union_area == 0 or inter_area == 0:
return 0
if method == 'union':
iou = inter_area / union_area
elif method == 'intersection':
iou = inter_area / min(area_a, area_b)
else:
raise NotImplementedError
return iou
class DetectionIoUEvaluator(object):
"""IOU evaluator."""
def __init__(self, is_output_polygon=False, iou_constraint=0.5, area_precision_constraint=0.5):
"""Initialize."""
self.is_output_polygon = is_output_polygon
self.iou_constraint = iou_constraint
self.area_precision_constraint = area_precision_constraint
def evaluate_image(self, gt, pred):
"""evaluate image."""
def get_union(pD, pG):
return Polygon(pD).union(Polygon(pG)).area
def get_intersection_over_union(pD, pG):
return get_intersection(pD, pG) / get_union(pD, pG)
def get_intersection(pD, pG):
return Polygon(pD).intersection(Polygon(pG)).area
def compute_ap(conf_list, match_list, num_gt_care):
correct = 0
AP = 0
if len(conf_list) > 0:
conf_list = np.array(conf_list)
match_list = np.array(match_list)
sorted_ind = np.argsort(-conf_list)
conf_list = conf_list[sorted_ind]
match_list = match_list[sorted_ind]
for n in range(len(conf_list)):
match = match_list[n]
if match:
correct += 1
AP += float(correct) / (n + 1)
if num_gt_care > 0:
AP /= num_gt_care
return AP
per_sample_metrics = {}
matched_sum = 0
num_globalcare_gt = 0
num_globalcare_det = 0
recall = 0
precision = 0
hmean = 0
det_matched = 0
iou_mat = np.empty([1, 1])
gt_pols = []
det_pols = []
gt_pol_points = []
det_pol_points = []
# Array of Ground Truth Polygons' keys marked as don't Care
gt_dontcare_pols_mum = []
# Array of Detected Polygons' matched with a don't Care GT
det_dontcare_pols_num = []
pairs = []
det_matched_nums = []
evaluation_log = ""
for n in range(len(gt)):
points = gt[n]['points']
# transcription = gt[n]['text']
dont_care = gt[n]['ignore']
if not Polygon(points).is_valid or not Polygon(points).is_simple:
continue
gt_pol = points
gt_pols.append(gt_pol)
gt_pol_points.append(points)
if dont_care:
gt_dontcare_pols_mum.append(len(gt_pols) - 1)
evaluation_log += "GT polygons: " + str(len(gt_pols)) + (" (" + str(len(
gt_dontcare_pols_mum)) + " don't care)\n" if len(gt_dontcare_pols_mum) > 0 else "\n")
for n in range(len(pred)):
points = pred[n]['points']
if not Polygon(points).is_valid or not Polygon(points).is_simple:
continue
det_pol = points
det_pols.append(det_pol)
det_pol_points.append(points)
if len(gt_dontcare_pols_mum) > 0:
for dontcare_pol in gt_dontcare_pols_mum:
dontcare_pol = gt_pols[dontcare_pol]
intersected_area = get_intersection(dontcare_pol, det_pol)
pd_dimensions = Polygon(det_pol).area
precision = 0 if pd_dimensions == 0 else intersected_area / pd_dimensions
if (precision > self.area_precision_constraint):
det_dontcare_pols_num.append(len(det_pols) - 1)
break
evaluation_log += "DET polygons: " + str(len(det_pols)) + (" (" + str(len(
det_dontcare_pols_num)) + " don't care)\n" if len(det_dontcare_pols_num) > 0 else "\n")
if len(gt_pols) > 0 and len(det_pols) > 0:
            # Calculate IoU and precision matrices
output_shape = [len(gt_pols), len(det_pols)]
iou_mat = np.empty(output_shape)
gt_rect_mat = np.zeros(len(gt_pols), np.int8)
det_rect_mat = np.zeros(len(det_pols), np.int8)
for gt_num in range(len(gt_pols)):
for det_num in range(len(det_pols)):
pG = gt_pols[gt_num]
pD = det_pols[det_num]
iou_mat[gt_num, det_num] = get_intersection_over_union(pD, pG)
for gt_num in range(len(gt_pols)):
for det_num in range(len(det_pols)):
if gt_rect_mat[gt_num] == 0 and det_rect_mat[det_num] == 0 and gt_num not in gt_dontcare_pols_mum and det_num not in det_dontcare_pols_num:
if iou_mat[gt_num, det_num] > self.iou_constraint:
gt_rect_mat[gt_num] = 1
det_rect_mat[det_num] = 1
det_matched += 1
pairs.append({'gt': gt_num, 'det': det_num})
det_matched_nums.append(det_num)
evaluation_log += "Match GT #" + \
str(gt_num) + " with Det #" + str(det_num) + "\n"
num_gt_care = (len(gt_pols) - len(gt_dontcare_pols_mum))
num_det_care = (len(det_pols) - len(det_dontcare_pols_num))
if num_gt_care == 0:
recall = float(1)
precision = float(0) if num_det_care > 0 else float(1)
else:
recall = float(det_matched) / num_gt_care
precision = 0 if num_det_care == 0 else float(
det_matched) / num_det_care
hmean = 0 if (precision + recall) == 0 else 2.0 * \
precision * recall / (precision + recall)
matched_sum += det_matched
num_globalcare_gt += num_gt_care
num_globalcare_det += num_det_care
per_sample_metrics = {
'precision': precision,
'recall': recall,
'hmean': hmean,
'pairs': pairs,
'iou_mat': [] if len(det_pols) > 100 else iou_mat.tolist(),
'gt_pol_points': gt_pol_points,
'det_pol_points': det_pol_points,
'gt_care': num_gt_care,
'det_care': num_det_care,
'gt_dontcare': gt_dontcare_pols_mum,
'det_dontcare': det_dontcare_pols_num,
'det_matched': det_matched,
'evaluation_log': evaluation_log
}
return per_sample_metrics
def combine_results(self, results):
"""combine results."""
num_globalcare_gt = 0
num_globalcare_det = 0
matched_sum = 0
for result in results:
num_globalcare_gt += result['gt_care']
num_globalcare_det += result['det_care']
matched_sum += result['det_matched']
method_recall = 0 if num_globalcare_gt == 0 else float(
matched_sum) / num_globalcare_gt
method_precision = 0 if num_globalcare_det == 0 else float(
matched_sum) / num_globalcare_det
method_hmean = 0 if method_recall + method_precision == 0 else 2 * \
method_recall * method_precision / (method_recall + method_precision)
method_metrics = {'precision': method_precision,
'recall': method_recall, 'hmean': method_hmean}
return method_metrics
| tao_deploy-main | nvidia_tao_deploy/cv/ocdnet/utils/ocr_metric/icdar2015/detection/iou.py |
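A small, hedged check of DetectionIoUEvaluator with two overlapping axis-aligned quads; the coordinates are illustrative only.

import numpy as np
from nvidia_tao_deploy.cv.ocdnet.utils.ocr_metric.icdar2015.detection.iou import DetectionIoUEvaluator

evaluator = DetectionIoUEvaluator(iou_constraint=0.5)
gt = [{"points": np.array([[0, 0], [50, 0], [50, 20], [0, 20]]), "ignore": False}]
pred = [{"points": np.array([[2, 0], [52, 0], [52, 20], [2, 20]])}]
res = evaluator.evaluate_image(gt, pred)
print(res["precision"], res["recall"], res["hmean"])  # 1.0 1.0 1.0 (IoU ~0.92)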
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TensorRT inference model builder for OCDnet."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
from io import open # Python 2/3 compatibility. pylint: disable=W0622
import logging
import numpy as np
import pycuda.autoinit # noqa pylint: disable=W0611
import pycuda.driver as cuda
import tensorrt as trt
from nvidia_tao_deploy.cv.ocdnet.tensorrt_utils.tensorrt import _create_tensorrt_logger
logging.basicConfig(format='%(asctime)s [%(levelname)s] %(name)s: %(message)s',
level='INFO')
logger = logging.getLogger(__name__)
BINDING_TO_DTYPE = {
"input": np.float32,
"pred": np.float32,
}
class Engine(object):
"""A class to represent a TensorRT engine.
This class provides utility functions for performing inference on
a TensorRT engine.
Args:
engine: the CUDA engine to wrap.
"""
def __init__(self, engine, batch_size):
"""Initialization routine."""
self._engine = engine
self._context = None
self._batch_size = batch_size
@contextlib.contextmanager
def _create_context(self):
"""Create an execution context and allocate input/output buffers."""
try:
with self._engine.create_execution_context() as self._context:
self._device_buffers = []
self._host_buffers = []
self._input_binding_ids = {}
self._context.active_optimization_profile = 0
self._input_shape = []
for binding in range(self._engine.num_bindings):
if self._engine.binding_is_input(binding):
self._input_shape = self._engine.get_binding_shape(binding)[-3:]
self._max_batch_size = self._engine.get_profile_shape(0, 0)[2][0]
for i in range(self._engine.num_bindings):
tensor_shape = self._engine.get_binding_shape(i)
elt_count = abs(trt.volume(tensor_shape)) * self._max_batch_size
binding_name = self._engine.get_binding_name(i)
dtype = BINDING_TO_DTYPE[binding_name]
if self._engine.binding_is_input(i):
self._context.set_binding_shape(0, [self._max_batch_size] + list(self._input_shape))
self._input_binding_ids[binding_name] = i
page_locked_mem = None
else:
page_locked_mem = cuda.pagelocked_empty(elt_count, dtype=dtype)
page_locked_mem = page_locked_mem.reshape(*tensor_shape)
# Allocate memory.
self._host_buffers.append(page_locked_mem)
_mem_alloced = cuda.mem_alloc(elt_count * np.dtype(dtype).itemsize)
self._device_buffers.append(_mem_alloced)
if not self._input_binding_ids:
raise RuntimeError("No input bindings detected.")
# Create stream and events to measure timings.
self._stream = cuda.Stream()
self._start = cuda.Event()
self._end = cuda.Event()
yield
finally:
# Release context and allocated memory.
self._release_context()
def _do_infer(self, batch):
# make sure it is contiguous array
bindings = [int(device_buffer) for device_buffer in self._device_buffers]
# Transfer input data to device.
for node_name, array in batch.items():
if node_name != "input":
continue
array = np.ascontiguousarray(array)
cuda.memcpy_htod_async(
self._device_buffers[self._input_binding_ids[node_name]],
array,
self._stream
)
# Execute model.
self._start.record(self._stream)
self._context.execute_async_v2(bindings, self._stream.handle, None)
self._end.record(self._stream)
self._end.synchronize()
# Transfer predictions back.
outputs = dict() # pylint: disable=R1735
for i in range(self._engine.num_bindings):
if not self._engine.binding_is_input(i):
cuda.memcpy_dtoh_async(self._host_buffers[i], self._device_buffers[i],
self._stream)
out = np.copy(self._host_buffers[i][:self._batch_size])
name = self._engine.get_binding_name(i)
outputs[name] = out
return outputs
def _release_context(self):
"""Release context and allocated memory."""
for device_buffer in self._device_buffers:
device_buffer.free()
del (device_buffer) # pylint: disable=W4701
for host_buffer in self._host_buffers:
del (host_buffer) # pylint: disable=W4701
del (self._start)
del (self._end)
del (self._stream)
def infer(self, batch):
"""Perform inference on a Numpy array.
Args:
batch (ndarray): array to perform inference on.
Returns:
A dictionary of outputs where keys are output names
and values are output tensors.
"""
with self._create_context():
outputs = self._do_infer(batch)
return outputs
def infer_iterator(self, iterator):
"""Perform inference on an iterator of Numpy arrays.
This method should be preferred to ``infer`` when performing
inference on multiple Numpy arrays since this will re-use
the allocated execution and memory.
Args:
iterator: an iterator that yields Numpy arrays.
Yields:
A dictionary of outputs where keys are output names
and values are output tensors, for each array returned
by the iterator.
Returns:
None.
"""
with self._create_context():
for batch in iterator:
outputs = self._do_infer(batch)
yield outputs
def save(self, filename):
"""Save serialized engine into specified file.
Args:
filename (str): name of file to save engine to.
"""
with open(filename, "wb") as outf:
outf.write(self._engine.serialize())
class TrtModel(object):
"""A TensorRT model builder for OCDNet model inference based on TensorRT.
    The TensorRT model builder loads a TensorRT engine from the engine file produced by
    tlt-converter and runs inference in TensorRT. We use this as a way to verify the
    TensorRT inference functionality of the OCDNet model.
"""
def __init__(self,
trt_engine_file,
batch_size):
"""Initialize the TensorRT model builder."""
self._trt_engine_file = trt_engine_file
self._batch_size = batch_size
self._trt_logger = _create_tensorrt_logger()
trt.init_libnvinfer_plugins(self._trt_logger, "")
def set_engine(self, trt_engine):
"""Set engine."""
self.engine = Engine(trt_engine,
self._batch_size)
def load_trt_engine_file(self):
"""load TensorRT engine file generated by tlt-converter."""
runtime = trt.Runtime(self._trt_logger)
with open(self._trt_engine_file, 'rb') as f:
_engine = f.read()
logger.info("Loading existing TensorRT engine and "
"ignoring the specified batch size and data type"
" information in spec file.")
self.engine = Engine(runtime.deserialize_cuda_engine(_engine),
self._batch_size)
def build_or_load_trt_engine(self):
"""Build engine or load engine depends on whether a trt engine is available."""
if self._trt_engine_file is not None:
# load engine
logger.info("Loading TensorRT engine file: %s for evaluation.", self._trt_engine_file)
self.load_trt_engine_file()
else:
raise ValueError("""A TensorRT engine file should
be provided for TensorRT based inference.""")
def predict(self, batch):
"""Do inference with TensorRT engine."""
return self.engine.infer(batch)
| tao_deploy-main | nvidia_tao_deploy/cv/ocdnet/tensorrt_utils/tensorrt_model.py |
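A hedged sketch of driving TrtModel with a pre-built OCDNet engine; the engine path and input resolution are assumptions and must match the optimization profile the engine was built with.

import numpy as np
from nvidia_tao_deploy.cv.ocdnet.tensorrt_utils.tensorrt_model import TrtModel

model = TrtModel("/models/ocdnet.engine", batch_size=1)  # hypothetical engine path
model.build_or_load_trt_engine()
dummy = np.random.rand(1, 3, 736, 1280).astype(np.float32)  # NCHW input
outputs = model.predict({"input": dummy})
print(outputs["pred"].shape)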
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TensorRT utils for OCDNet."""
| tao_deploy-main | nvidia_tao_deploy/cv/ocdnet/tensorrt_utils/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""OCDNet INT8 calibration APIs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import os
import pycuda.autoinit # noqa pylint: disable=W0611
import tensorrt as trt
"""Logger for data export APIs."""
logger = logging.getLogger(__name__)
# Array of TensorRT loggers. We need to keep global references to
# the TensorRT loggers that we create to prevent them from being
# garbage collected as those are referenced from C++ code without
# Python knowing about it.
tensorrt_loggers = []
def _create_tensorrt_logger(verbose=False):
"""Create a TensorRT logger.
Args:
verbose (bool): whether to make the logger verbose.
"""
if str(os.getenv('SUPPRES_VERBOSE_LOGGING', '0')) == '1':
# Do not print any warnings in TLT docker
trt_verbosity = trt.Logger.Severity.ERROR
elif verbose:
trt_verbosity = trt.Logger.Severity.INFO
else:
trt_verbosity = trt.Logger.Severity.WARNING
tensorrt_logger = trt.Logger(trt_verbosity)
tensorrt_loggers.append(tensorrt_logger)
return tensorrt_logger
| tao_deploy-main | nvidia_tao_deploy/cv/ocdnet/tensorrt_utils/tensorrt.py |
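A quick illustration of the environment switch honored by _create_tensorrt_logger above.

import os
from nvidia_tao_deploy.cv.ocdnet.tensorrt_utils.tensorrt import _create_tensorrt_logger

os.environ["SUPPRES_VERBOSE_LOGGING"] = "1"  # spelling as used in the module; silences TRT warnings
trt_logger = _create_tensorrt_logger(verbose=True)  # ERROR severity still wins when the env var is set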
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""OCDNet convert model to TRT engine."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import os
from nvidia_tao_deploy.utils.decoding import decode_model
from nvidia_tao_deploy.cv.common.decorators import monitor_status
from nvidia_tao_deploy.cv.common.hydra.hydra_runner import hydra_runner
from nvidia_tao_deploy.cv.ocdnet.config.default_config import ExperimentConfig
from nvidia_tao_deploy.cv.ocdnet.engine_builder import OCDNetEngineBuilder
logging.basicConfig(format='%(asctime)s [TAO Toolkit] [%(levelname)s] %(name)s %(lineno)d: %(message)s',
level="INFO")
logger = logging.getLogger(__name__)
spec_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
@hydra_runner(
config_path=os.path.join(spec_root, "specs"),
config_name="gen_trt_engine", schema=ExperimentConfig
)
@monitor_status(name="ocdnet", mode="gen_trt_engine")
def main(cfg: ExperimentConfig) -> None:
"""Convert the onnx model to TRT engine."""
if cfg.gen_trt_engine.results_dir is not None:
results_dir = cfg.gen_trt_engine.results_dir
else:
results_dir = os.path.join(cfg.results_dir, "gen_trt_engine")
os.makedirs(results_dir, exist_ok=True)
tmp_onnx_file, file_format = decode_model(cfg['gen_trt_engine']['onnx_file'])
engine_file = cfg['gen_trt_engine']['trt_engine']
data_type = cfg['gen_trt_engine']['tensorrt']['data_type']
workspace_size = cfg['gen_trt_engine']['tensorrt']['workspace_size']
min_batch_size = cfg['gen_trt_engine']['tensorrt']['min_batch_size']
opt_batch_size = cfg['gen_trt_engine']['tensorrt']['opt_batch_size']
max_batch_size = cfg['gen_trt_engine']['tensorrt']['max_batch_size']
input_height = cfg['gen_trt_engine']['height']
input_width = cfg['gen_trt_engine']['width']
img_mode = cfg['gen_trt_engine']['img_mode']
cal_image_dir = cfg['gen_trt_engine']['tensorrt']['calibration']['cal_image_dir']
cal_cache_file = cfg['gen_trt_engine']['tensorrt']['calibration']['cal_cache_file']
cal_batch_size = cfg['gen_trt_engine']['tensorrt']['calibration']['cal_batch_size']
cal_num_batches = cfg['gen_trt_engine']['tensorrt']['calibration']['cal_num_batches']
if engine_file:
if data_type == "int8":
if not os.path.isdir(cal_image_dir):
raise FileNotFoundError(
f"Calibration image directory {cal_image_dir} not found."
)
if len(os.listdir(cal_image_dir)) == 0:
raise FileNotFoundError(
f"Calibration image directory {cal_image_dir} is empty."
)
if cal_num_batches <= 0:
raise ValueError(
f"Calibration number of batches {cal_num_batches} is non-positive."
)
if cal_batch_size <= 0:
raise ValueError(
f"Calibration batch size {cal_batch_size} is non-positive."
)
if len(os.listdir(cal_image_dir)) < cal_num_batches * cal_batch_size:
raise ValueError(
f"Calibration images should be large than {cal_num_batches} * {cal_batch_size}."
)
builder = OCDNetEngineBuilder(input_width,
input_height,
img_mode,
workspace=workspace_size,
min_batch_size=min_batch_size,
opt_batch_size=opt_batch_size,
max_batch_size=max_batch_size,
)
builder.create_network(tmp_onnx_file, file_format)
builder.create_engine(
engine_file,
data_type,
calib_data_file=None,
calib_input=cal_image_dir,
calib_cache=cal_cache_file,
calib_num_images=cal_batch_size * cal_num_batches,
calib_batch_size=cal_batch_size)
logging.info("Generate TensorRT engine and calibration cache file successfully.")
if __name__ == '__main__':
main()
| tao_deploy-main | nvidia_tao_deploy/cv/ocdnet/scripts/gen_trt_engine.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TAO Deploy OCDNet scripts module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_deploy-main | nvidia_tao_deploy/cv/ocdnet/scripts/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Inference module."""
import os
import sys
import pathlib
import time
import cv2
import numpy as np
from tqdm import tqdm
import matplotlib.pyplot as plt
from nvidia_tao_deploy.cv.ocdnet.config.default_config import ExperimentConfig
from nvidia_tao_deploy.cv.ocdnet.post_processing.seg_detector_representer import get_post_processing
from nvidia_tao_deploy.cv.common.decorators import monitor_status
from nvidia_tao_deploy.cv.common.hydra.hydra_runner import hydra_runner
from nvidia_tao_deploy.cv.ocdnet.utils.utils import show_img, draw_bbox, save_result, get_file_list
from nvidia_tao_deploy.cv.ocdnet.tensorrt_utils.tensorrt_model import TrtModel
__dir__ = pathlib.Path(os.path.abspath(__file__))
sys.path.append(str(__dir__))
sys.path.append(str(__dir__.parent.parent))
def resize_image(img, image_size):
"""Resize image"""
resized_img = cv2.resize(img, image_size)
return resized_img
class Inferencer:
"""Infer class."""
def __init__(self, model_path, config, post_p_thre=0.7, gpu_id=None):
"""Init model."""
self.gpu_id = gpu_id
self.post_process = get_post_processing(config['inference']['post_processing'])
self.post_process.box_thresh = post_p_thre
self.img_mode = config['inference']['img_mode']
self.model = TrtModel(model_path, 1)
self.model.build_or_load_trt_engine()
self.is_trt = True
def predict(self, img_path: str, image_size, is_output_polygon=False):
"""Run prediction."""
        assert os.path.exists(img_path), 'file does not exist'
img = cv2.imread(img_path, 1 if self.img_mode != 'GRAY' else 0).astype(np.float32)
if self.img_mode == 'RGB':
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = resize_image(img, image_size)
rgb_mean = np.array([122.67891434, 116.66876762, 104.00698793])
image = img
image -= rgb_mean
image /= 255.
image = np.transpose(image, (2, 0, 1)).astype(np.float32)
batch = {'img': image}
start = time.time()
if self.is_trt:
preds = self.model.predict({"input": image})["pred"]
box_list, score_list = self.post_process(batch, preds, is_output_polygon=is_output_polygon)
box_list, score_list = box_list[0], score_list[0]
if len(box_list) > 0:
if is_output_polygon:
idx = [x.sum() > 0 for x in box_list]
box_list = [box_list[i] for i, v in enumerate(idx) if v]
score_list = [score_list[i] for i, v in enumerate(idx) if v]
else:
idx = box_list.reshape(box_list.shape[0], -1).sum(axis=1) > 0
box_list, score_list = box_list[idx], score_list[idx]
else:
box_list, score_list = [], []
t = time.time() - start
return preds[0, 0, :, :], box_list, score_list, t
def run_experiment(experiment_config, model_path, post_p_thre, input_folder, output_folder,
width, height, polygon, show):
"""Run experiment."""
experiment_config['model']['pretrained'] = False
# Init the network
infer_model = Inferencer(
model_path,
experiment_config,
post_p_thre,
gpu_id=0
)
for img_path in tqdm(get_file_list(input_folder, p_postfix=['.jpg', '.png', '.jpeg', '.JPG', '.PNG', '.JPEG', '.bmp'])):
preds, boxes_list, score_list, _ = infer_model.predict(
img_path,
(width, height),
is_output_polygon=polygon
)
im = cv2.imread(img_path)
h_scale = im.shape[0] / float(height)
w_scale = im.shape[1] / float(width)
if np.shape(boxes_list) != (0,):
boxes_list[:, :, 0] = boxes_list[:, :, 0] * w_scale
boxes_list[:, :, 1] = boxes_list[:, :, 1] * h_scale
img = draw_bbox(im[:, :, ::-1], boxes_list)
if show:
show_img(preds)
show_img(img, title=os.path.basename(img_path))
plt.show()
# save result
img_path = pathlib.Path(img_path)
output_path = os.path.join(output_folder, img_path.stem + '_result.jpg')
pred_path = os.path.join(output_folder, img_path.stem + '_pred.jpg')
cv2.imwrite(output_path, img[:, :, ::-1])
cv2.imwrite(pred_path, preds * 255)
save_result(output_path.replace('_result.jpg', '.txt'), boxes_list, score_list, polygon)
spec_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
@hydra_runner(
config_path=os.path.join(spec_root, "specs"), config_name="inference", schema=ExperimentConfig
)
@monitor_status(name="ocdnet", mode="inference")
def main(cfg: ExperimentConfig) -> None:
"""Run the inference process."""
if cfg.inference.results_dir is not None:
results_dir = cfg.inference.results_dir
else:
results_dir = os.path.join(cfg.results_dir, "inference")
os.makedirs(results_dir, exist_ok=True)
run_experiment(experiment_config=cfg,
model_path=cfg.inference.trt_engine,
post_p_thre=cfg.inference.post_processing.args.box_thresh,
input_folder=cfg.inference.input_folder,
output_folder=results_dir,
width=cfg.inference.width,
height=cfg.inference.height,
polygon=cfg.inference.polygon,
show=cfg.inference.show
)
if __name__ == "__main__":
main()
| tao_deploy-main | nvidia_tao_deploy/cv/ocdnet/scripts/inference.py |
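A standalone restatement of the preprocessing performed inside Inferencer.predict above, assuming a BGR uint8 frame; an explicit batch dimension is added here, which the script itself leaves implicit.

import cv2
import numpy as np

RGB_MEAN = np.array([122.67891434, 116.66876762, 104.00698793])

def preprocess(frame, width, height):
    """Resize, mean-subtract, scale, and convert an image to (1, 3, H, W) float32."""
    img = cv2.resize(frame, (width, height)).astype(np.float32)
    img -= RGB_MEAN
    img /= 255.
    return np.transpose(img, (2, 0, 1))[None].astype(np.float32)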
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Evaluate a trained ocdnet model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import logging
from tqdm import tqdm
from omegaconf import OmegaConf
from nvidia_tao_deploy.cv.ocdnet.config.default_config import ExperimentConfig
from nvidia_tao_deploy.cv.ocdnet.data_loader.icdar_uber import get_dataloader
from nvidia_tao_deploy.cv.ocdnet.post_processing.seg_detector_representer import get_post_processing
from nvidia_tao_deploy.cv.ocdnet.utils.ocr_metric.icdar2015.quad_metric import get_metric
from nvidia_tao_deploy.cv.common.decorators import monitor_status
from nvidia_tao_deploy.cv.common.hydra.hydra_runner import hydra_runner
from nvidia_tao_deploy.cv.ocdnet.tensorrt_utils.tensorrt_model import TrtModel
class Evaluate():
"""Eval class."""
def __init__(self, model_path, config_file, gpu_id=0):
"""Initialize."""
config = config_file
config['model']['pretrained'] = False
self.validate_loader = get_dataloader(config['dataset']['validate_dataset'], False)
self.post_process = get_post_processing(config['evaluate']['post_processing'])
self.metric_cls = get_metric(config['evaluate']['metric'])
self.box_thresh = config['evaluate']['post_processing']["args"]["box_thresh"]
self.trt_model = None
if model_path.split(".")[-1] in ["trt", "engine"]:
self.trt_model = TrtModel(model_path, 1)
self.trt_model.build_or_load_trt_engine()
def eval(self):
"""eval function."""
raw_metrics = []
for _, batch in tqdm(enumerate(self.validate_loader), total=len(self.validate_loader), desc='test model'):
if _ >= len(self.validate_loader):
break
img = batch["img"]
preds = self.trt_model.predict({"input": img})["pred"]
boxes, scores = self.post_process(batch, preds, is_output_polygon=self.metric_cls.is_output_polygon)
raw_metric = self.metric_cls.validate_measure(batch, (boxes, scores), box_thresh=self.box_thresh)
raw_metrics.append(raw_metric)
metrics = self.metric_cls.gather_measure(raw_metrics)
return metrics['recall'].avg, metrics['precision'].avg, metrics['fmeasure'].avg
def run_experiment(experiment_config, model_path):
"""Run experiment."""
experiment_config = OmegaConf.to_container(experiment_config)
evaluation = Evaluate(model_path, experiment_config)
result = evaluation.eval()
print("Precision: ", result[1])
print("Recall: ", result[0])
print("F-measure: ", result[2])
logging.basicConfig(format='%(asctime)s [TAO Toolkit] [%(levelname)s] %(name)s %(lineno)d: %(message)s',
level="INFO")
logger = logging.getLogger(__name__)
spec_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
@hydra_runner(
config_path=os.path.join(spec_root, "specs"), config_name="evaluate", schema=ExperimentConfig
)
@monitor_status(name="ocdnet", mode="evaluation")
def main(cfg: ExperimentConfig) -> None:
"""Run the evaluation process."""
if cfg.evaluate.results_dir is not None:
results_dir = cfg.evaluate.results_dir
else:
results_dir = os.path.join(cfg.results_dir, "evaluate")
os.makedirs(results_dir, exist_ok=True)
run_experiment(experiment_config=cfg,
model_path=cfg.evaluate.trt_engine)
if __name__ == "__main__":
main()
| tao_deploy-main | nvidia_tao_deploy/cv/ocdnet/scripts/evaluate.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TAO Deploy command line wrapper to invoke CLI scripts."""
import argparse
from nvidia_tao_deploy.cv.ocdnet import scripts
from nvidia_tao_deploy.cv.common.entrypoint.entrypoint_hydra import get_subtasks, launch
def main():
"""Main entrypoint wrapper."""
# Create parser for a given task.
parser = argparse.ArgumentParser(
"ocdnet",
add_help=True,
description="Train Adapt Optimize Deploy entrypoint for OCDNet"
)
# Build list of subtasks by inspecting the scripts package.
subtasks = get_subtasks(scripts)
# Parse the arguments and launch the subtask.
launch(parser, subtasks, network="ocdnet")
if __name__ == '__main__':
main()
| tao_deploy-main | nvidia_tao_deploy/cv/ocdnet/entrypoint/ocdnet.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Entrypoint module for ocdnet."""
| tao_deploy-main | nvidia_tao_deploy/cv/ocdnet/entrypoint/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dataloader Init."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_deploy-main | nvidia_tao_deploy/cv/ocdnet/data_loader/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dataset module."""
import copy
import cv2
import json
import multiprocessing
import numpy as np
import os
import pathlib
from nvidia_tao_deploy.cv.ocdnet.data_loader import icdar_uber
from nvidia_tao_deploy.utils.path_utils import expand_path
def get_dataset(data_path, module_name, transform, dataset_args):
"""Get dataset.
Args:
data_path: dataset file list.
        module_name: custom dataset name; supports the dataset classes defined in icdar_uber.
        dataset_args: keyword arguments passed to the dataset class.
    Returns:
        Dataset object.
"""
s_dataset = getattr(icdar_uber, module_name)(transform=transform, data_path=data_path,
**dataset_args)
return s_dataset
class ICDARCollateFN:
"""ICDAR Collation."""
def __init__(self, *args, **kwargs):
"""Initialize."""
pass
def __call__(self, batch):
"""Call fn."""
pass
def get_dataloader(module_config, distributed=False):
"""Get dataloader."""
if module_config is None:
return None
config = copy.deepcopy(module_config)
dataset_args = config['args']
dataset_name = config['data_name']
data_path = config['data_path']
if data_path is None:
return None
data_path = [x for x in data_path if x is not None]
if len(data_path) == 0:
return None
if 'collate_fn' not in config['loader'] or config['loader']['collate_fn'] is None or len(config['loader']['collate_fn']) == 0:
config['loader']['collate_fn'] = None
else:
config['loader']['collate_fn'] = globals()[config['loader']['collate_fn']]()
_dataset = get_dataset(data_path=data_path, module_name=dataset_name, transform=None, dataset_args=dataset_args)
return _dataset
def _load_txt(file_path: str):
with open(file_path, 'r', encoding='utf8') as f:
content = [x.strip().strip('\ufeff').strip('\xef\xbb\xbf') for x in f.readlines()]
return content
def _load_json(file_path: str):
with open(file_path, 'r', encoding='utf8') as f:
content = json.load(f)
return content
def load(file_path: str):
"""load file."""
file_path = pathlib.Path(file_path)
func_dict = {'.txt': _load_txt, '.json': _load_json, '.list': _load_txt}
assert file_path.suffix in func_dict
return func_dict[file_path.suffix](file_path)
def order_points_clockwise(pts):
"""order points clockwise."""
rect = np.zeros((4, 2), dtype="float32")
s = pts.sum(axis=1)
rect[0] = pts[np.argmin(s)]
rect[2] = pts[np.argmax(s)]
diff = np.diff(pts, axis=1)
rect[1] = pts[np.argmin(diff)]
rect[3] = pts[np.argmax(diff)]
return rect
def get_datalist(train_data_path):
"""Get train data list and val data list"""
train_data = []
for p in train_data_path:
# use list file
if os.path.isfile(p):
with open(p, 'r', encoding='utf-8') as f:
for line in f.readlines():
line = line.strip('\n').replace('.jpg ', '.jpg\t').split('\t')
if len(line) > 1:
img_path = pathlib.Path(expand_path(line[0].strip(' ')))
label_path = pathlib.Path(expand_path(line[1].strip(' ')))
if img_path.exists() and img_path.stat().st_size > 0 and label_path.exists() and label_path.stat().st_size > 0:
train_data.append((str(img_path), str(label_path)))
# use standard directory structure
else:
img_dir = os.path.join(p, "img")
label_dir = os.path.join(p, "gt")
for img in os.listdir(img_dir):
img_file = os.path.join(img_dir, img)
label = "gt_" + img.split('.')[0] + ".txt"
label_file = os.path.join(label_dir, label)
assert os.path.exists(label_file), (
f"Cannot find label file for image: {img_file}"
)
train_data.append((img_file, label_file))
return sorted(train_data)
def get_datalist_uber(train_data_path):
"""Get uber train data list and val data list"""
train_data = []
for p in train_data_path:
# use list file
if os.path.isfile(p):
with open(p, 'r', encoding='utf-8') as f:
for line in f.readlines():
line = line.strip('\n').replace('.jpg ', '.jpg\t').split('\t')
if len(line) > 1:
img_path = pathlib.Path(expand_path(line[0].strip(' ')))
label_path = pathlib.Path(expand_path(line[1].strip(' ')))
if img_path.exists() and img_path.stat().st_size > 0 and label_path.exists() and label_path.stat().st_size > 0:
train_data.append((str(img_path), str(label_path)))
# use standard directory structure
else:
img_dir = os.path.join(p, "img")
label_dir = os.path.join(p, "gt")
for img in os.listdir(img_dir):
img_file = os.path.join(img_dir, img)
label = "truth_" + img.split('.')[0] + ".txt"
label_file = os.path.join(label_dir, label)
assert os.path.exists(label_file), (
f"Cannot find label file for image: {img_file}"
)
train_data.append((img_file, label_file))
return sorted(train_data)
def expand_polygon(polygon):
"""expand bbox which has only one character."""
(x, y), (w, h), angle = cv2.minAreaRect(np.float32(polygon))
if angle < -45:
w, h = h, w
angle += 90
new_w = w + h
box = ((x, y), (new_w, h), angle)
points = cv2.boxPoints(box)
return order_points_clockwise(points)
class Resize2D:
"""Resize 2D."""
def __init__(self, short_size, resize_text_polys=True):
"""Initialize."""
self.short_size = short_size
self.resize_text_polys = resize_text_polys
def __call__(self, data: dict) -> dict:
"""Resize images and texts"""
im = data['img']
text_polys = data['text_polys']
h, w, _ = im.shape
if isinstance(self.short_size, (list, tuple)):
target_width = self.short_size[0]
target_height = self.short_size[1]
scale = (target_width / w, target_height / h)
im = cv2.resize(im, dsize=None, fx=scale[0], fy=scale[1])
if self.resize_text_polys:
text_polys[:, :, 0] *= scale[0]
text_polys[:, :, 1] *= scale[1]
else:
short_edge = min(h, w)
if short_edge < self.short_size:
# make sure shorter edge >= short_size
scale = self.short_size / short_edge
im = cv2.resize(im, dsize=None, fx=scale, fy=scale)
scale = (scale, scale)
if self.resize_text_polys:
text_polys[:, :, 0] *= scale[0]
text_polys[:, :, 1] *= scale[1]
data['img'] = im
data['text_polys'] = text_polys
return data
class BaseDataSet():
"""BaseDataSet class."""
def __init__(self, data_path: str, img_mode, pre_processes, filter_keys, ignore_tags, transform=None,
target_transform=None):
"""Initialize."""
assert img_mode in ['RGB', 'BGR', 'GRAY']
self.ignore_tags = ignore_tags
self.data_list = self.load_data(data_path)
item_keys = ['img_path', 'img_name', 'text_polys', 'texts', 'ignore_tags']
for item in item_keys:
            assert item in self.data_list[0], f'data_list from load_data must contain {item_keys}'
self.img_mode = img_mode
self.filter_keys = filter_keys
self.transform = transform
self.target_transform = target_transform
self._init_pre_processes(pre_processes)
def _init_pre_processes(self, pre_processes):
self.aug = []
if pre_processes is not None:
for aug in pre_processes:
if 'args' not in aug:
args = {}
else:
args = aug['args']
if isinstance(args, dict):
cls = globals()[aug['type']](**args)
else:
cls = globals()[aug['type']](args)
self.aug.append(cls)
def load_data(self, data_path: str) -> list:
"""Load data to a list
Args:
data_path (str): file or folder
Returns:
A dict (dict): contains 'img_path','img_name','text_polys','texts','ignore_tags'
"""
raise NotImplementedError
def apply_pre_processes(self, data):
"""Apply pre_processing."""
for aug in self.aug:
data = aug(data)
return data
def __getitem__(self, index):
"""getitem function."""
try:
data = copy.deepcopy(self.data_list[index])
im = cv2.imread(data['img_path'], 1 if self.img_mode != 'GRAY' else 0).astype("float32")
if self.img_mode == 'RGB':
im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
data['img'] = im
data['shape'] = [im.shape[0], im.shape[1]]
data = self.apply_pre_processes(data)
rgb_mean = np.array([122.67891434, 116.66876762, 104.00698793])
image = data['img']
image -= rgb_mean
image /= 255.
image = np.transpose(image, (2, 0, 1)).astype(np.float32)
data['img'] = image
data['text_polys'] = data['text_polys'].tolist()
if len(self.filter_keys):
data_dict = {}
for k, v in data.items():
if k not in self.filter_keys:
data_dict[k] = v
return data_dict
return data
        except Exception:
            # Fall back to a random sample if this item fails to load or parse.
            return self.__getitem__(np.random.randint(self.__len__()))
def __len__(self):
"""len functin."""
return len(self.data_list)
class UberDataset(BaseDataSet):
"""Uber Dataset class."""
def __init__(self, data_path: str, img_mode, pre_processes, filter_keys, ignore_tags, transform=None, **kwargs):
"""Initialize."""
super().__init__(data_path, img_mode, pre_processes, filter_keys, ignore_tags, transform)
def load_data(self, data_path: str) -> list:
"""Load data."""
pool = multiprocessing.Pool(processes=4) # pylint: disable=R1732
data_list = pool.apply_async(get_datalist_uber, args=(data_path,)).get()
pool.close()
pool.join()
t_data_list = []
pool = multiprocessing.Pool(processes=4) # pylint: disable=R1732
for img_path, label_path in data_list:
tmp = pool.apply_async(self._get_annotation, args=(label_path,))
data = tmp.get()
if len(data['text_polys']) > 0:
item = {'img_path': img_path, 'img_name': pathlib.Path(img_path).stem}
item.update(data)
t_data_list.append(item)
else:
                print(f'there is no suitable bbox in {label_path}')
pool.close()
pool.join()
return t_data_list
def _get_annotation(self, label_path: str) -> dict:
polys = []
texts = []
ignores = []
with open(label_path, encoding='utf-8', mode='r') as f:
for line in f:
content = line.strip().strip('\ufeff').strip('\xef\xbb\xbf').split('\t')
params = content[0].split(" ")[:-2]
try:
poly = np.array(list(map(float, params))).reshape(-1, 2).astype(np.float32)
if cv2.contourArea(poly) > 0:
polys.append(poly)
label = content[1]
if len(label.split(" ")) > 1:
label = "###"
texts.append(label)
ignores.append(label in self.ignore_tags)
except Exception:
print(f'load label failed on {label_path}')
data = {
'text_polys': np.array(polys),
'texts': texts,
'ignore_tags': ignores,
}
return data
class ICDAR2015Dataset(BaseDataSet):
"""ICDAR2015 Dataset."""
def __init__(self, data_path: str, img_mode, pre_processes, filter_keys, ignore_tags, transform=None, **kwargs):
"""Initialize."""
super().__init__(data_path, img_mode, pre_processes, filter_keys, ignore_tags, transform)
def load_data(self, data_path: str) -> list:
"""Load data."""
data_list = get_datalist(data_path)
t_data_list = []
for img_path, label_path in data_list:
data = self._get_annotation(label_path)
if len(data['text_polys']) > 0:
item = {'img_path': img_path, 'img_name': pathlib.Path(img_path).stem}
item.update(data)
t_data_list.append(item)
else:
                print(f'there is no suitable bbox in {label_path}')
return t_data_list
def _get_annotation(self, label_path: str) -> dict:
boxes = []
texts = []
ignores = []
with open(label_path, encoding='utf-8', mode='r') as f:
for line in f.readlines():
params = line.strip().strip('\ufeff').strip('\xef\xbb\xbf').split(',')
try:
box = order_points_clockwise(np.array(list(map(float, params[:8]))).reshape(-1, 2))
if cv2.contourArea(box) > 0:
boxes.append(box)
label = params[8]
texts.append(label)
ignores.append(label in self.ignore_tags)
except Exception:
print(f'load label failed on {label_path}')
data = {
'text_polys': np.array(boxes),
'texts': texts,
'ignore_tags': ignores,
}
return data
| tao_deploy-main | nvidia_tao_deploy/cv/ocdnet/data_loader/icdar_uber.py |
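# A quick, self-contained check of order_points_clockwise defined above: the
# scrambled corners of an axis-aligned box come back in TL, TR, BR, BL order.
import numpy as np
from nvidia_tao_deploy.cv.ocdnet.data_loader.icdar_uber import order_points_clockwise

pts = np.array([[10., 40.], [10., 10.], [50., 40.], [50., 10.]], dtype=np.float32)
print(order_points_clockwise(pts))
# [[10. 10.]
#  [50. 10.]
#  [50. 40.]
#  [10. 40.]]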
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility class for performing TensorRT image inference."""
import numpy as np
import tensorrt as trt
from nvidia_tao_deploy.inferencer.trt_inferencer import TRTInferencer
from nvidia_tao_deploy.inferencer.utils import allocate_buffers, do_inference
def trt_output_process_fn(y_encoded, height, width):
"""function to process TRT model output."""
det_out, keep_k = y_encoded
result = []
for idx, k in enumerate(keep_k.reshape(-1)):
det = det_out[idx].reshape(-1, 7)[:k]
xmin = det[:, 3] * width
ymin = det[:, 4] * height
xmax = det[:, 5] * width
ymax = det[:, 6] * height
cls_id = det[:, 1]
conf = det[:, 2]
result.append(np.stack((cls_id, conf, xmin, ymin, xmax, ymax), axis=-1))
return result
class RetinaNetInferencer(TRTInferencer):
"""Manages TensorRT objects for model inference."""
def __init__(self, engine_path, input_shape=None, batch_size=None, data_format="channel_first"):
"""Initializes TensorRT objects needed for model inference.
Args:
engine_path (str): path where TensorRT engine should be stored
input_shape (tuple): (batch, channel, height, width) for dynamic shape engine
batch_size (int): batch size for dynamic shape engine
data_format (str): either channel_first or channel_last
"""
# Load TRT engine
super().__init__(engine_path)
self.max_batch_size = self.engine.max_batch_size
self.execute_v2 = False
# Execution context is needed for inference
self.context = None
# Allocate memory for multiple usage [e.g. multiple batch inference]
self._input_shape = []
for binding in range(self.engine.num_bindings):
if self.engine.binding_is_input(binding):
binding_shape = self.engine.get_binding_shape(binding)
self._input_shape = binding_shape[-3:]
if len(binding_shape) == 4:
self.etlt_type = "onnx"
else:
self.etlt_type = "uff"
assert len(self._input_shape) == 3, "Engine doesn't have valid input dimensions"
if data_format == "channel_first":
self.height = self._input_shape[1]
self.width = self._input_shape[2]
else:
self.height = self._input_shape[0]
self.width = self._input_shape[1]
# set binding_shape for dynamic input
# do not override if the original model was uff
if (input_shape is not None or batch_size is not None) and (self.etlt_type != "uff"):
self.context = self.engine.create_execution_context()
if input_shape is not None:
self.context.set_binding_shape(0, input_shape)
self.max_batch_size = input_shape[0]
else:
self.context.set_binding_shape(0, [batch_size] + list(self._input_shape))
self.max_batch_size = batch_size
self.execute_v2 = True
# This allocates memory for network inputs/outputs on both CPU and GPU
self.inputs, self.outputs, self.bindings, self.stream = allocate_buffers(self.engine,
self.context)
if self.context is None:
self.context = self.engine.create_execution_context()
input_volume = trt.volume(self._input_shape)
self.numpy_array = np.zeros((self.max_batch_size, input_volume))
def infer(self, imgs):
"""Infers model on batch of same sized images resized to fit the model.
Args:
            imgs (np.ndarray): batch of images, already resized and preprocessed,
                that will be packed together and fed into the model.
"""
# Verify if the supplied batch size is not too big
max_batch_size = self.max_batch_size
actual_batch_size = len(imgs)
if actual_batch_size > max_batch_size:
raise ValueError(f"image_paths list bigger ({actual_batch_size}) than \
engine max batch size ({max_batch_size})")
self.numpy_array[:actual_batch_size] = imgs.reshape(actual_batch_size, -1)
# ...copy them into appropriate place into memory...
# (self.inputs was returned earlier by allocate_buffers())
np.copyto(self.inputs[0].host, self.numpy_array.ravel())
# ...fetch model outputs...
results = do_inference(
self.context, bindings=self.bindings, inputs=self.inputs,
outputs=self.outputs, stream=self.stream,
batch_size=max_batch_size,
execute_v2=self.execute_v2)
# ...and return results up to the actual batch size.
y_pred = [i.reshape(max_batch_size, -1)[:actual_batch_size] for i in results]
# Process TRT outputs to proper format
return trt_output_process_fn(y_pred, self.height, self.width)
def __del__(self):
"""Clear things up on object deletion."""
# Clear session and buffer
if self.trt_runtime:
del self.trt_runtime
if self.context:
del self.context
if self.engine:
del self.engine
if self.stream:
del self.stream
# Loop through inputs and free inputs.
for inp in self.inputs:
inp.device.free()
# Loop through outputs and free them.
for out in self.outputs:
out.device.free()
| tao_deploy-main | nvidia_tao_deploy/cv/retinanet/inferencer.py |
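# A synthetic example of trt_output_process_fn above: NMS rows are
# [image_id, class, confidence, x1, y1, x2, y2] with normalized coordinates,
# and the function rescales them to pixel-space (class, conf, box) rows.
import numpy as np
from nvidia_tao_deploy.cv.retinanet.inferencer import trt_output_process_fn

det_out = np.array([[[0, 1, 0.9, 0.10, 0.20, 0.30, 0.40],
                     [0, 2, 0.8, 0.50, 0.50, 0.90, 0.95]]], dtype=np.float32)
keep_k = np.array([[2]], dtype=np.int32)
print(trt_output_process_fn((det_out, keep_k), height=480, width=640)[0])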
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""RetinaNet TensorRT engine builder."""
import logging
import os
import random
from six.moves import xrange
import sys
import traceback
from tqdm import tqdm
try:
from uff.model.uff_pb2 import MetaGraph
except ImportError:
print("Loading uff directly from the package source code")
    # @scha: load uff_pb2 directly from source to avoid the TensorFlow import pulled in by the uff package
import importlib
import types
import pkgutil
package = pkgutil.get_loader("uff")
# Returns __init__.py path
src_code = package.get_filename().replace('__init__.py', 'model/uff_pb2.py')
loader = importlib.machinery.SourceFileLoader('helper', src_code)
helper = types.ModuleType(loader.name)
loader.exec_module(helper)
MetaGraph = helper.MetaGraph
import numpy as np
import onnx
import tensorrt as trt
from nvidia_tao_deploy.engine.builder import EngineBuilder
from nvidia_tao_deploy.engine.tensorfile import TensorFile
from nvidia_tao_deploy.engine.tensorfile_calibrator import TensorfileCalibrator
from nvidia_tao_deploy.engine.utils import generate_random_tensorfile, prepare_chunk
logging.basicConfig(format='%(asctime)s [TAO Toolkit] [%(levelname)s] %(name)s %(lineno)d: %(message)s',
level="INFO")
logger = logging.getLogger(__name__)
class RetinaNetEngineBuilder(EngineBuilder):
"""Parses an UFF graph and builds a TensorRT engine from it."""
def __init__(
self,
data_format="channels_first",
**kwargs
):
"""Init.
Args:
data_format (str): data_format.
"""
super().__init__(**kwargs)
self._data_format = data_format
def set_input_output_node_names(self):
"""Set input output node names."""
self._output_node_names = ["NMS"]
self._input_node_names = ["Input"]
def get_input_dims(self, model_path):
"""Get input dimension of UFF model."""
metagraph = MetaGraph()
with open(model_path, "rb") as f:
metagraph.ParseFromString(f.read())
for node in metagraph.graphs[0].nodes:
# if node.operation == "MarkOutput":
# print(f"Output: {node.inputs[0]}")
if node.operation == "Input":
return np.array(node.fields['shape'].i_list.val)[1:]
raise ValueError("Input dimension is not found in the UFF metagraph.")
def get_onnx_input_dims(self, model_path):
"""Get input dimension of ONNX model."""
onnx_model = onnx.load(model_path)
onnx_inputs = onnx_model.graph.input
logger.info('List inputs:')
for i, inputs in enumerate(onnx_inputs):
logger.info('Input %s -> %s.', i, inputs.name)
logger.info('%s.', [i.dim_value for i in inputs.type.tensor_type.shape.dim][1:])
logger.info('%s.', [i.dim_value for i in inputs.type.tensor_type.shape.dim][0])
return [i.dim_value for i in inputs.type.tensor_type.shape.dim][:]
def create_network(self, model_path, file_format="uff"):
"""Parse the ONNX graph and create the corresponding TensorRT network definition.
Args:
model_path: The path to the UFF/ONNX graph to load.
file_format: The file format of the decrypted etlt file (default: uff).
"""
if file_format == "uff":
logger.info("Parsing UFF model")
self.network = self.builder.create_network()
self.parser = trt.UffParser()
self.set_input_output_node_names()
in_tensor_name = self._input_node_names[0]
self._input_dims = self.get_input_dims(model_path)
input_dict = {in_tensor_name: self._input_dims}
for key, value in input_dict.items():
if self._data_format == "channels_first":
self.parser.register_input(key, value, trt.UffInputOrder(0))
else:
self.parser.register_input(key, value, trt.UffInputOrder(1))
for name in self._output_node_names:
self.parser.register_output(name)
self.builder.max_batch_size = self.max_batch_size
try:
assert self.parser.parse(model_path, self.network, trt.DataType.FLOAT)
except AssertionError as e:
logger.error("Failed to parse UFF File")
_, _, tb = sys.exc_info()
traceback.print_tb(tb) # Fixed format
tb_info = traceback.extract_tb(tb)
_, line, _, text = tb_info[-1]
raise AssertionError(
f"UFF parsing failed on line {line} in statement {text}"
) from e
else:
logger.info("Parsing ONNX model")
self._input_dims = self.get_onnx_input_dims(model_path)
self.batch_size = self._input_dims[0]
self._input_dims = self._input_dims[1:]
network_flags = (1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
self.network = self.builder.create_network(network_flags)
self.parser = trt.OnnxParser(self.network, self.trt_logger)
model_path = os.path.realpath(model_path)
with open(model_path, "rb") as f:
if not self.parser.parse(f.read()):
logger.error("Failed to load ONNX file: %s", model_path)
for error in range(self.parser.num_errors):
logger.error(self.parser.get_error(error))
sys.exit(1)
inputs = [self.network.get_input(i) for i in range(self.network.num_inputs)]
outputs = [self.network.get_output(i) for i in range(self.network.num_outputs)]
logger.info("Network Description")
for input in inputs: # noqa pylint: disable=W0622
logger.info("Input '%s' with shape %s and dtype %s", input.name, input.shape, input.dtype)
for output in outputs:
logger.info("Output '%s' with shape %s and dtype %s", output.name, output.shape, output.dtype)
if self.batch_size <= 0: # dynamic batch size
logger.info("dynamic batch size handling")
opt_profile = self.builder.create_optimization_profile()
model_input = self.network.get_input(0)
input_shape = model_input.shape
input_name = model_input.name
real_shape_min = (self.min_batch_size, input_shape[1],
input_shape[2], input_shape[3])
real_shape_opt = (self.opt_batch_size, input_shape[1],
input_shape[2], input_shape[3])
real_shape_max = (self.max_batch_size, input_shape[1],
input_shape[2], input_shape[3])
opt_profile.set_shape(input=input_name,
min=real_shape_min,
opt=real_shape_opt,
max=real_shape_max)
self.config.add_optimization_profile(opt_profile)
def set_calibrator(self,
inputs=None,
calib_cache=None,
calib_input=None,
calib_num_images=5000,
calib_batch_size=8,
calib_data_file=None,
image_mean=None):
"""Simple function to set an Tensorfile based int8 calibrator.
Args:
calib_data_file: Path to the TensorFile. If the tensorfile doesn't exist
at this path, then one is created with either n_batches
of random tensors, images from the file in calib_input of dimensions
(batch_size,) + (input_dims).
calib_input: The path to a directory holding the calibration images.
calib_cache: The path where to write the calibration cache to,
or if it already exists, load it from.
calib_num_images: The maximum number of images to use for calibration.
calib_batch_size: The batch size to use for the calibration process.
image_mean: Image mean per channel.
Returns:
No explicit returns.
"""
logger.info("Calibrating using TensorfileCalibrator")
n_batches = calib_num_images // calib_batch_size
if not os.path.exists(calib_data_file):
self.generate_tensor_file(calib_data_file,
calib_input,
self._input_dims,
n_batches=n_batches,
batch_size=calib_batch_size,
image_mean=image_mean)
self.config.int8_calibrator = TensorfileCalibrator(calib_data_file,
calib_cache,
n_batches,
calib_batch_size)
def generate_tensor_file(self, data_file_name,
calibration_images_dir,
input_dims, n_batches=10,
batch_size=1, image_mean=None):
"""Generate calibration Tensorfile for int8 calibrator.
This function generates a calibration tensorfile from a directory of images, or dumps
n_batches of random numpy arrays of shape (batch_size,) + (input_dims).
Args:
data_file_name (str): Path to the output tensorfile to be saved.
calibration_images_dir (str): Path to the images to generate a tensorfile from.
input_dims (list): Input shape in CHW order.
n_batches (int): Number of batches to be saved.
batch_size (int): Number of images per batch.
image_mean (list): Image mean per channel.
Returns:
No explicit returns.
"""
if not os.path.exists(calibration_images_dir):
logger.info("Generating a tensorfile with random tensor images. This may work well as "
"a profiling tool, however, it may result in inaccurate results at "
"inference. Please generate a tensorfile using the tlt-int8-tensorfile, "
"or provide a custom directory of images for best performance.")
generate_random_tensorfile(data_file_name,
input_dims,
n_batches=n_batches,
batch_size=batch_size)
else:
# Preparing the list of images to be saved.
num_images = n_batches * batch_size
valid_image_ext = ['jpg', 'jpeg', 'png']
image_list = [os.path.join(calibration_images_dir, image)
for image in os.listdir(calibration_images_dir)
if image.split('.')[-1] in valid_image_ext]
if len(image_list) < num_images:
                raise ValueError('Not enough calibration images provided:'
                                 f' {len(image_list)} < {num_images}')
image_idx = random.sample(xrange(len(image_list)), num_images)
self.set_data_preprocessing_parameters(input_dims, image_mean)
# Writing out processed dump.
with TensorFile(data_file_name, 'w') as f:
for chunk in tqdm(image_idx[x:x + batch_size] for x in xrange(0, len(image_idx),
batch_size)):
dump_data = prepare_chunk(chunk, image_list,
image_width=input_dims[2],
image_height=input_dims[1],
channels=input_dims[0],
batch_size=batch_size,
**self.preprocessing_arguments)
f.write(dump_data)
def set_data_preprocessing_parameters(self, input_dims, image_mean=None):
"""Set data pre-processing parameters for the int8 calibration."""
num_channels = input_dims[0]
if num_channels == 3:
if not image_mean:
means = [103.939, 116.779, 123.68]
else:
assert len(image_mean) == 3, "Image mean should have 3 values for RGB inputs."
means = image_mean
elif num_channels == 1:
if not image_mean:
means = [117.3786]
else:
assert len(image_mean) == 1, "Image mean should have 1 value for grayscale inputs."
means = image_mean
else:
raise NotImplementedError(
f"Invalid number of dimensions {num_channels}.")
self.preprocessing_arguments = {"scale": 1.0,
"means": means,
"flip_channel": True}
def _set_tensor_dynamic_ranges(self, network, tensor_scale_dict):
"""Set the scaling factors obtained from quantization-aware training.
Args:
network: TensorRT network object.
tensor_scale_dict (dict): Dictionary mapping names to tensor scaling factors.
"""
tensors_in_dict = tensor_scale_dict.keys()
tensors_found = []
for idx in range(network.num_inputs):
input_tensor = network.get_input(idx)
for k in tensor_scale_dict.keys():
if k.startswith(str(input_tensor.name).split('/', maxsplit=1)[0]):
cal_scale = tensor_scale_dict.pop(k)
tensors_found.append(k)
input_tensor.dynamic_range = (-cal_scale, cal_scale)
break
for layer in network:
found_all_outputs = True
for idx in range(layer.num_outputs):
output_tensor = layer.get_output(idx)
for k in tensor_scale_dict.keys():
if k.startswith(str(output_tensor.name).split('/', maxsplit=1)[0]):
cal_scale = tensor_scale_dict.pop(k)
tensors_found.append(k)
output_tensor.dynamic_range = (-cal_scale, cal_scale)
break
found_all_outputs = False
if found_all_outputs:
layer.precision = trt.int8
if set(tensors_in_dict) != set(tensors_found):
logger.info("Tensors in scale dictionary but not in network: %s",
set(tensors_in_dict) - set(tensors_found))
| tao_deploy-main | nvidia_tao_deploy/cv/retinanet/engine_builder.py |
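# A framework-free sketch of the prefix matching used by
# _set_tensor_dynamic_ranges above: a tensor named "block_1/conv/Relu" is
# claimed by the first scale-dict key that starts with its leading path
# component, and the scale becomes a symmetric int8 dynamic range.
tensor_scale_dict = {"block_1/conv": 2.5, "block_2/conv": 4.0}
tensor_name = "block_1/conv/Relu"
prefix = tensor_name.split('/', maxsplit=1)[0]
for k in list(tensor_scale_dict):
    if k.startswith(prefix):
        cal_scale = tensor_scale_dict.pop(k)
        print((-cal_scale, cal_scale))  # (-2.5, 2.5)
        break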
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TAO Deploy RetinaNet."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_deploy-main | nvidia_tao_deploy/cv/retinanet/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""RetinaNet loader."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from PIL import Image
from nvidia_tao_deploy.dataloader.kitti import KITTILoader
class RetinaNetKITTILoader(KITTILoader):
"""RetinaNet Dataloader."""
def __init__(self,
keep_aspect_ratio=False,
**kwargs):
"""Init.
Args:
keep_aspect_ratio (bool): keep aspect ratio of the image.
"""
super().__init__(**kwargs)
self.keep_aspect_ratio = keep_aspect_ratio
def preprocessing(self, image, label):
"""The image preprocessor loads an image from disk and prepares it as needed for batching.
This includes padding, resizing, normalization, data type casting, and transposing.
Args:
image (PIL.image): The Pillow image on disk to load.
label (np.array): labels
Returns:
image (np.array): A numpy array holding the image sample, ready to be concatenated
into the rest of the batch
label (np.array): labels
"""
def resize_pad(image, pad_color=(0, 0, 0)):
"""Resize and Pad.
A subroutine to implement padding and resizing. This will resize the image to fit
            fully within the input size, and pad the remaining bottom-right portion with
            the value provided.
Args:
image (PIL.Image): The PIL image object
pad_color (list): The RGB values to use for the padded area. Default: Black/Zeros.
Returns:
                pad (PIL.Image): The PIL image object, already resized and padded.
                scale (list): the resize scale used.
"""
width, height = image.size
width_scale = width / self.width
height_scale = height / self.height
if not self.keep_aspect_ratio:
image = image.resize(
(self.width, self.height),
resample=Image.BILINEAR)
return image, [height_scale, width_scale]
scale = 1.0 / max(width_scale, height_scale)
image = image.resize(
(round(width * scale), round(height * scale)),
resample=Image.BILINEAR)
if self.num_channels == 1:
pad = Image.new("L", (self.width, self.height))
pad.paste(0, [0, 0, self.width, self.height])
else:
pad = Image.new("RGB", (self.width, self.height))
pad.paste(pad_color, [0, 0, self.width, self.height])
pad.paste(image)
return pad, [scale, scale]
image, scale = resize_pad(image, (124, 116, 104))
image = np.asarray(image, dtype=self.dtype)
# Handle Grayscale
if self.num_channels == 1:
image = np.expand_dims(image, axis=2)
label[:, 2] /= scale[1]
label[:, 3] /= scale[0]
label[:, 4] /= scale[1]
label[:, 5] /= scale[0]
# Round
label = np.round(label, decimals=0)
# Filter out invalid labels
label = self._filter_invalid_labels(label)
return image, label
| tao_deploy-main | nvidia_tao_deploy/cv/retinanet/dataloader.py |
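# A standalone sketch of the keep_aspect_ratio branch of resize_pad above:
# scale by the tighter dimension, then paste onto a fixed-size canvas filled
# with the pad color, leaving the bottom-right as padding.
from PIL import Image

target_w, target_h = 640, 480
image = Image.new("RGB", (1000, 500))  # stand-in for a real input image
scale = 1.0 / max(image.width / target_w, image.height / target_h)
resized = image.resize((round(image.width * scale), round(image.height * scale)),
                       resample=Image.BILINEAR)
canvas = Image.new("RGB", (target_w, target_h), (124, 116, 104))
canvas.paste(resized)  # top-left aligned
print(scale, resized.size)  # 0.64 (640, 320)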
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_deploy/cv/retinanet/proto/retinanet_config.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='nvidia_tao_deploy/cv/retinanet/proto/retinanet_config.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n;nvidia_tao_deploy/cv/retinanet/proto/retinanet_config.proto\"\xfa\x03\n\x0fRetinaNetConfig\x12\x15\n\raspect_ratios\x18\x01 \x01(\t\x12\x1c\n\x14\x61spect_ratios_global\x18\x02 \x01(\t\x12\x0e\n\x06scales\x18\x03 \x01(\t\x12\x11\n\tmin_scale\x18\x04 \x01(\x02\x12\x11\n\tmax_scale\x18\x05 \x01(\x02\x12\x19\n\x11two_boxes_for_ar1\x18\x06 \x01(\x08\x12\r\n\x05steps\x18\x07 \x01(\t\x12\x12\n\nclip_boxes\x18\x08 \x01(\x08\x12\x11\n\tvariances\x18\t \x01(\t\x12\x0f\n\x07offsets\x18\n \x01(\t\x12\x12\n\nmean_color\x18\x0b \x01(\t\x12\x0c\n\x04\x61rch\x18\x0c \x01(\t\x12\x17\n\x0floss_loc_weight\x18\r \x01(\x02\x12\x18\n\x10\x66ocal_loss_alpha\x18\x0e \x01(\x02\x12\x18\n\x10\x66ocal_loss_gamma\x18\x0f \x01(\x02\x12\x15\n\rfreeze_blocks\x18\x10 \x03(\x02\x12\x11\n\tfreeze_bn\x18\x11 \x01(\x08\x12\x0f\n\x07nlayers\x18\x12 \x01(\r\x12\x11\n\tn_kernels\x18\x13 \x01(\r\x12\x14\n\x0c\x66\x65\x61ture_size\x18\x14 \x01(\r\x12\x16\n\x0epos_iou_thresh\x18\x15 \x01(\x02\x12\x16\n\x0eneg_iou_thresh\x18\x16 \x01(\x02\x12\x17\n\x0fn_anchor_levels\x18\x17 \x01(\rb\x06proto3')
)
_RETINANETCONFIG = _descriptor.Descriptor(
name='RetinaNetConfig',
full_name='RetinaNetConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='aspect_ratios', full_name='RetinaNetConfig.aspect_ratios', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='aspect_ratios_global', full_name='RetinaNetConfig.aspect_ratios_global', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='scales', full_name='RetinaNetConfig.scales', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='min_scale', full_name='RetinaNetConfig.min_scale', index=3,
number=4, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='max_scale', full_name='RetinaNetConfig.max_scale', index=4,
number=5, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='two_boxes_for_ar1', full_name='RetinaNetConfig.two_boxes_for_ar1', index=5,
number=6, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='steps', full_name='RetinaNetConfig.steps', index=6,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='clip_boxes', full_name='RetinaNetConfig.clip_boxes', index=7,
number=8, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='variances', full_name='RetinaNetConfig.variances', index=8,
number=9, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='offsets', full_name='RetinaNetConfig.offsets', index=9,
number=10, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='mean_color', full_name='RetinaNetConfig.mean_color', index=10,
number=11, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='arch', full_name='RetinaNetConfig.arch', index=11,
number=12, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='loss_loc_weight', full_name='RetinaNetConfig.loss_loc_weight', index=12,
number=13, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='focal_loss_alpha', full_name='RetinaNetConfig.focal_loss_alpha', index=13,
number=14, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='focal_loss_gamma', full_name='RetinaNetConfig.focal_loss_gamma', index=14,
number=15, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='freeze_blocks', full_name='RetinaNetConfig.freeze_blocks', index=15,
number=16, type=2, cpp_type=6, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='freeze_bn', full_name='RetinaNetConfig.freeze_bn', index=16,
number=17, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='nlayers', full_name='RetinaNetConfig.nlayers', index=17,
number=18, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='n_kernels', full_name='RetinaNetConfig.n_kernels', index=18,
number=19, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='feature_size', full_name='RetinaNetConfig.feature_size', index=19,
number=20, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='pos_iou_thresh', full_name='RetinaNetConfig.pos_iou_thresh', index=20,
number=21, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='neg_iou_thresh', full_name='RetinaNetConfig.neg_iou_thresh', index=21,
number=22, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='n_anchor_levels', full_name='RetinaNetConfig.n_anchor_levels', index=22,
number=23, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=64,
serialized_end=570,
)
DESCRIPTOR.message_types_by_name['RetinaNetConfig'] = _RETINANETCONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
RetinaNetConfig = _reflection.GeneratedProtocolMessageType('RetinaNetConfig', (_message.Message,), dict(
DESCRIPTOR = _RETINANETCONFIG,
__module__ = 'nvidia_tao_deploy.cv.retinanet.proto.retinanet_config_pb2'
# @@protoc_insertion_point(class_scope:RetinaNetConfig)
))
_sym_db.RegisterMessage(RetinaNetConfig)
# @@protoc_insertion_point(module_scope)
| tao_deploy-main | nvidia_tao_deploy/cv/retinanet/proto/retinanet_config_pb2.py |
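# A small usage example for the generated message above: scalar fields are set
# directly, and the repeated freeze_blocks field (float-typed here) via extend.
from nvidia_tao_deploy.cv.retinanet.proto.retinanet_config_pb2 import RetinaNetConfig

cfg = RetinaNetConfig()
cfg.arch = "resnet"
cfg.nlayers = 18
cfg.n_anchor_levels = 3
cfg.freeze_blocks.extend([0.0, 1.0])
print(cfg)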
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TAO Deploy RetinaNet Proto."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_deploy-main | nvidia_tao_deploy/cv/retinanet/proto/__init__.py |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_deploy/cv/retinanet/proto/experiment.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from nvidia_tao_deploy.cv.ssd.proto import augmentation_config_pb2 as nvidia__tao__deploy_dot_cv_dot_ssd_dot_proto_dot_augmentation__config__pb2
from nvidia_tao_deploy.cv.common.proto import detection_sequence_dataset_config_pb2 as nvidia__tao__deploy_dot_cv_dot_common_dot_proto_dot_detection__sequence__dataset__config__pb2
from nvidia_tao_deploy.cv.common.proto import training_config_pb2 as nvidia__tao__deploy_dot_cv_dot_common_dot_proto_dot_training__config__pb2
from nvidia_tao_deploy.cv.common.proto import nms_config_pb2 as nvidia__tao__deploy_dot_cv_dot_common_dot_proto_dot_nms__config__pb2
from nvidia_tao_deploy.cv.common.proto import class_weighting_config_pb2 as nvidia__tao__deploy_dot_cv_dot_common_dot_proto_dot_class__weighting__config__pb2
from nvidia_tao_deploy.cv.retinanet.proto import eval_config_pb2 as nvidia__tao__deploy_dot_cv_dot_retinanet_dot_proto_dot_eval__config__pb2
from nvidia_tao_deploy.cv.retinanet.proto import retinanet_config_pb2 as nvidia__tao__deploy_dot_cv_dot_retinanet_dot_proto_dot_retinanet__config__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='nvidia_tao_deploy/cv/retinanet/proto/experiment.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n5nvidia_tao_deploy/cv/retinanet/proto/experiment.proto\x1a\x38nvidia_tao_deploy/cv/ssd/proto/augmentation_config.proto\x1aInvidia_tao_deploy/cv/common/proto/detection_sequence_dataset_config.proto\x1a\x37nvidia_tao_deploy/cv/common/proto/training_config.proto\x1a\x32nvidia_tao_deploy/cv/common/proto/nms_config.proto\x1a>nvidia_tao_deploy/cv/common/proto/class_weighting_config.proto\x1a\x36nvidia_tao_deploy/cv/retinanet/proto/eval_config.proto\x1a;nvidia_tao_deploy/cv/retinanet/proto/retinanet_config.proto\"\xca\x02\n\nExperiment\x12\x13\n\x0brandom_seed\x18\x01 \x01(\r\x12&\n\x0e\x64\x61taset_config\x18\x02 \x01(\x0b\x32\x0e.DatasetConfig\x12\x30\n\x13\x61ugmentation_config\x18\x03 \x01(\x0b\x32\x13.AugmentationConfig\x12(\n\x0ftraining_config\x18\x04 \x01(\x0b\x32\x0f.TrainingConfig\x12 \n\x0b\x65val_config\x18\x05 \x01(\x0b\x32\x0b.EvalConfig\x12\x1e\n\nnms_config\x18\x06 \x01(\x0b\x32\n.NMSConfig\x12*\n\x10retinanet_config\x18\x07 \x01(\x0b\x32\x10.RetinaNetConfig\x12\x35\n\x16\x63lass_weighting_config\x18\x08 \x01(\x0b\x32\x15.ClassWeightingConfigb\x06proto3')
,
dependencies=[nvidia__tao__deploy_dot_cv_dot_ssd_dot_proto_dot_augmentation__config__pb2.DESCRIPTOR,nvidia__tao__deploy_dot_cv_dot_common_dot_proto_dot_detection__sequence__dataset__config__pb2.DESCRIPTOR,nvidia__tao__deploy_dot_cv_dot_common_dot_proto_dot_training__config__pb2.DESCRIPTOR,nvidia__tao__deploy_dot_cv_dot_common_dot_proto_dot_nms__config__pb2.DESCRIPTOR,nvidia__tao__deploy_dot_cv_dot_common_dot_proto_dot_class__weighting__config__pb2.DESCRIPTOR,nvidia__tao__deploy_dot_cv_dot_retinanet_dot_proto_dot_eval__config__pb2.DESCRIPTOR,nvidia__tao__deploy_dot_cv_dot_retinanet_dot_proto_dot_retinanet__config__pb2.DESCRIPTOR,])
_EXPERIMENT = _descriptor.Descriptor(
name='Experiment',
full_name='Experiment',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='random_seed', full_name='Experiment.random_seed', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='dataset_config', full_name='Experiment.dataset_config', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='augmentation_config', full_name='Experiment.augmentation_config', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='training_config', full_name='Experiment.training_config', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='eval_config', full_name='Experiment.eval_config', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='nms_config', full_name='Experiment.nms_config', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='retinanet_config', full_name='Experiment.retinanet_config', index=6,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='class_weighting_config', full_name='Experiment.class_weighting_config', index=7,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=481,
serialized_end=811,
)
_EXPERIMENT.fields_by_name['dataset_config'].message_type = nvidia__tao__deploy_dot_cv_dot_common_dot_proto_dot_detection__sequence__dataset__config__pb2._DATASETCONFIG
_EXPERIMENT.fields_by_name['augmentation_config'].message_type = nvidia__tao__deploy_dot_cv_dot_ssd_dot_proto_dot_augmentation__config__pb2._AUGMENTATIONCONFIG
_EXPERIMENT.fields_by_name['training_config'].message_type = nvidia__tao__deploy_dot_cv_dot_common_dot_proto_dot_training__config__pb2._TRAININGCONFIG
_EXPERIMENT.fields_by_name['eval_config'].message_type = nvidia__tao__deploy_dot_cv_dot_retinanet_dot_proto_dot_eval__config__pb2._EVALCONFIG
_EXPERIMENT.fields_by_name['nms_config'].message_type = nvidia__tao__deploy_dot_cv_dot_common_dot_proto_dot_nms__config__pb2._NMSCONFIG
_EXPERIMENT.fields_by_name['retinanet_config'].message_type = nvidia__tao__deploy_dot_cv_dot_retinanet_dot_proto_dot_retinanet__config__pb2._RETINANETCONFIG
_EXPERIMENT.fields_by_name['class_weighting_config'].message_type = nvidia__tao__deploy_dot_cv_dot_common_dot_proto_dot_class__weighting__config__pb2._CLASSWEIGHTINGCONFIG
DESCRIPTOR.message_types_by_name['Experiment'] = _EXPERIMENT
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Experiment = _reflection.GeneratedProtocolMessageType('Experiment', (_message.Message,), dict(
DESCRIPTOR = _EXPERIMENT,
__module__ = 'nvidia_tao_deploy.cv.retinanet.proto.experiment_pb2'
# @@protoc_insertion_point(class_scope:Experiment)
))
_sym_db.RegisterMessage(Experiment)
# @@protoc_insertion_point(module_scope)
| tao_deploy-main | nvidia_tao_deploy/cv/retinanet/proto/experiment_pb2.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TAO Deploy Config Base Utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from google.protobuf.text_format import Merge as merge_text_proto
from nvidia_tao_deploy.cv.retinanet.proto.experiment_pb2 import Experiment
def load_proto(config):
"""Load the experiment proto."""
proto = Experiment()
def _load_from_file(filename, pb2):
if not os.path.exists(filename):
raise IOError(f"Specfile not found at: {filename}")
with open(filename, "r", encoding="utf-8") as f:
merge_text_proto(f.read(), pb2)
_load_from_file(config, proto)
return proto
| tao_deploy-main | nvidia_tao_deploy/cv/retinanet/proto/utils.py |
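# A minimal exercise of load_proto above, assuming only that Experiment has
# the random_seed field declared in experiment_pb2: a one-line text proto is
# enough for merge_text_proto to parse.
import tempfile
from nvidia_tao_deploy.cv.retinanet.proto.utils import load_proto

with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as f:
    f.write("random_seed: 42\n")
    spec_path = f.name
print(load_proto(spec_path).random_seed)  # 42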
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_deploy/cv/retinanet/proto/eval_config.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='nvidia_tao_deploy/cv/retinanet/proto/eval_config.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n6nvidia_tao_deploy/cv/retinanet/proto/eval_config.proto\"\xc6\x01\n\nEvalConfig\x12)\n!validation_period_during_training\x18\x01 \x01(\r\x12\x33\n\x16\x61verage_precision_mode\x18\x02 \x01(\x0e\x32\x13.EvalConfig.AP_MODE\x12\x12\n\nbatch_size\x18\x03 \x01(\r\x12\x1e\n\x16matching_iou_threshold\x18\x04 \x01(\x02\"$\n\x07\x41P_MODE\x12\n\n\x06SAMPLE\x10\x00\x12\r\n\tINTEGRATE\x10\x01\x62\x06proto3')
)
_EVALCONFIG_AP_MODE = _descriptor.EnumDescriptor(
name='AP_MODE',
full_name='EvalConfig.AP_MODE',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='SAMPLE', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INTEGRATE', index=1, number=1,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=221,
serialized_end=257,
)
_sym_db.RegisterEnumDescriptor(_EVALCONFIG_AP_MODE)
_EVALCONFIG = _descriptor.Descriptor(
name='EvalConfig',
full_name='EvalConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='validation_period_during_training', full_name='EvalConfig.validation_period_during_training', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='average_precision_mode', full_name='EvalConfig.average_precision_mode', index=1,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='batch_size', full_name='EvalConfig.batch_size', index=2,
number=3, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='matching_iou_threshold', full_name='EvalConfig.matching_iou_threshold', index=3,
number=4, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
_EVALCONFIG_AP_MODE,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=59,
serialized_end=257,
)
_EVALCONFIG.fields_by_name['average_precision_mode'].enum_type = _EVALCONFIG_AP_MODE
_EVALCONFIG_AP_MODE.containing_type = _EVALCONFIG
DESCRIPTOR.message_types_by_name['EvalConfig'] = _EVALCONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
EvalConfig = _reflection.GeneratedProtocolMessageType('EvalConfig', (_message.Message,), dict(
DESCRIPTOR = _EVALCONFIG,
__module__ = 'nvidia_tao_deploy.cv.retinanet.proto.eval_config_pb2'
# @@protoc_insertion_point(class_scope:EvalConfig)
))
_sym_db.RegisterMessage(EvalConfig)
# @@protoc_insertion_point(module_scope)
| tao_deploy-main | nvidia_tao_deploy/cv/retinanet/proto/eval_config_pb2.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""RetinaNet convert etlt/onnx model to TRT engine."""
import argparse
import logging
import os
import tempfile
from nvidia_tao_deploy.cv.common.decorators import monitor_status
from nvidia_tao_deploy.cv.retinanet.proto.utils import load_proto
from nvidia_tao_deploy.cv.retinanet.engine_builder import RetinaNetEngineBuilder
from nvidia_tao_deploy.utils.decoding import decode_model
logging.basicConfig(format='%(asctime)s [TAO Toolkit] [%(levelname)s] %(name)s %(lineno)d: %(message)s',
level="INFO")
logger = logging.getLogger(__name__)
@monitor_status(name='retinanet', mode='gen_trt_engine')
def main(args):
"""RetinaNet TRT convert."""
# decrypt etlt
tmp_onnx_file, file_format = decode_model(args.model_path, args.key)
# Load from proto-based spec file
es = load_proto(args.experiment_spec)
    if args.engine_file is None:
        # Fall back to a temporary file so ``output_engine_path`` is always
        # defined before the engine build below.
        engine_handle, temp_engine_path = tempfile.mkstemp()
        os.close(engine_handle)
        output_engine_path = temp_engine_path
    else:
        output_engine_path = args.engine_file
builder = RetinaNetEngineBuilder(verbose=args.verbose,
is_qat=es.training_config.enable_qat,
workspace=args.max_workspace_size,
max_batch_size=args.max_batch_size,
min_batch_size=args.min_batch_size,
opt_batch_size=args.opt_batch_size,
strict_type_constraints=args.strict_type_constraints,
force_ptq=args.force_ptq)
builder.create_network(tmp_onnx_file, file_format)
builder.create_engine(
output_engine_path,
args.data_type,
calib_data_file=args.cal_data_file,
calib_input=args.cal_image_dir,
calib_cache=args.cal_cache_file,
calib_num_images=args.batch_size * args.batches,
calib_batch_size=args.batch_size,
calib_json_file=args.cal_json_file)
logging.info("Export finished successfully.")
def build_command_line_parser(parser=None):
"""Build the command line parser using argparse.
Args:
parser (subparser): Provided from the wrapper script to build a chained
parser mechanism.
Returns:
parser
"""
if parser is None:
parser = argparse.ArgumentParser(prog='gen_trt_engine', description='Generate TRT engine of RetinaNet model.')
parser.add_argument(
'-m',
'--model_path',
type=str,
required=True,
help='Path to a RetinaNet .etlt or .onnx model file.'
)
parser.add_argument(
'-k',
'--key',
type=str,
required=False,
help='Key to save or load a .etlt model.'
)
parser.add_argument(
'-e',
'--experiment_spec',
type=str,
required=True,
help='Path to the experiment spec file.'
)
parser.add_argument(
"--data_type",
type=str,
default="fp32",
help="Data type for the TensorRT export.",
choices=["fp32", "fp16", "int8"])
parser.add_argument(
"--cal_image_dir",
default="",
type=str,
help="Directory of images to run int8 calibration.")
parser.add_argument(
"--cal_data_file",
default="",
type=str,
help="Tensorfile to run calibration for int8 optimization.")
parser.add_argument(
'--cal_cache_file',
default=None,
type=str,
help='Calibration cache file to write to.')
parser.add_argument(
'--cal_json_file',
default=None,
type=str,
help='Dictionary containing tensor scale for QAT models.')
parser.add_argument(
"--engine_file",
type=str,
default=None,
help="Path to the exported TRT engine.")
parser.add_argument(
"--max_batch_size",
type=int,
default=1,
help="Max batch size for TensorRT engine builder.")
parser.add_argument(
"--opt_batch_size",
type=int,
default=1,
help="Optimal batch size for TensorRT engine builder.")
parser.add_argument(
"--min_batch_size",
type=int,
default=1,
help="Min batch size for TensorRT engine builder.")
parser.add_argument(
"--batch_size",
type=int,
default=1,
help="Number of images per batch for calibration.")
parser.add_argument(
"--batches",
type=int,
default=10,
help="Number of batches to calibrate over.")
parser.add_argument(
"--max_workspace_size",
type=int,
default=2,
help="Max memory workspace size to allow in Gb for TensorRT engine builder (default: 2).")
parser.add_argument(
"-s",
"--strict_type_constraints",
action="store_true",
default=False,
help="A Boolean flag indicating whether to apply the \
TensorRT strict type constraints when building the TensorRT engine.")
parser.add_argument(
"--force_ptq",
action="store_true",
default=False,
help="Flag to force post training quantization for QAT models.")
parser.add_argument(
"-v",
"--verbose",
action="store_true",
default=False,
help="Verbosity of the logger.")
parser.add_argument(
'-r',
'--results_dir',
type=str,
required=True,
default=None,
help='Output directory where the log is saved.'
)
return parser
def parse_command_line_arguments(args=None):
"""Simple function to parse command line arguments."""
parser = build_command_line_parser(args)
return parser.parse_args(args)
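# --- Illustrative sketch (not part of the original script) ---
# Demonstrates driving the parser above programmatically; every path below is
# a placeholder, and only required/representative flags are shown.
def _example_parse_args():
    """Parse a sample argument list for an FP16 engine build."""
    return parse_command_line_arguments([
        '-m', '/workspace/models/retinanet.onnx',     # placeholder model path
        '-e', '/workspace/specs/retinanet_spec.txt',  # placeholder spec path
        '-r', '/workspace/results',                   # placeholder results dir
        '--data_type', 'fp16',
        '--engine_file', '/workspace/engines/retinanet_fp16.engine',
    ])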
if __name__ == '__main__':
args = parse_command_line_arguments()
main(args)
| tao_deploy-main | nvidia_tao_deploy/cv/retinanet/scripts/gen_trt_engine.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TAO Deploy RetinaNet scripts module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_deploy-main | nvidia_tao_deploy/cv/retinanet/scripts/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Standalone TensorRT Inference."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import numpy as np
from PIL import Image
from tqdm.auto import tqdm
import logging
from nvidia_tao_deploy.cv.common.decorators import monitor_status
from nvidia_tao_deploy.cv.retinanet.dataloader import RetinaNetKITTILoader
from nvidia_tao_deploy.cv.retinanet.inferencer import RetinaNetInferencer
from nvidia_tao_deploy.cv.retinanet.proto.utils import load_proto
logging.getLogger('PIL').setLevel(logging.WARNING)
logging.basicConfig(format='%(asctime)s [TAO Toolkit] [%(levelname)s] %(name)s %(lineno)d: %(message)s',
level="INFO")
logger = logging.getLogger(__name__)
@monitor_status(name='retinanet', mode='inference')
def main(args):
"""RetinaNet TRT inference."""
# Load from proto-based spec file
es = load_proto(args.experiment_spec)
conf_thres = es.nms_config.confidence_threshold if es.nms_config.confidence_threshold else 0.01
batch_size = args.batch_size if args.batch_size else es.eval_config.batch_size
trt_infer = RetinaNetInferencer(args.model_path, batch_size=batch_size)
c, h, w = trt_infer._input_shape
img_mean = es.augmentation_config.image_mean
if c == 3:
if img_mean:
img_mean = [img_mean['b'], img_mean['g'], img_mean['r']]
else:
img_mean = [103.939, 116.779, 123.68]
else:
if img_mean:
img_mean = [img_mean['l']]
else:
img_mean = [117.3786]
# Override path if provided through command line args
if args.image_dir:
image_dirs = [args.image_dir]
else:
image_dirs = [d.image_directory_path for d in es.dataset_config.validation_data_sources]
# Load mapping_dict from the spec file
mapping_dict = dict(es.dataset_config.target_class_mapping)
dl = RetinaNetKITTILoader(
shape=(c, h, w),
image_dirs=image_dirs,
label_dirs=[None],
mapping_dict=mapping_dict,
exclude_difficult=True,
batch_size=batch_size,
is_inference=True,
image_mean=img_mean,
keep_aspect_ratio=False,
dtype=trt_infer.inputs[0].host.dtype)
inv_classes = {v: k for k, v in dl.classes.items()}
if args.results_dir is None:
results_dir = os.path.dirname(args.model_path)
else:
results_dir = args.results_dir
os.makedirs(results_dir, exist_ok=True)
output_annotate_root = os.path.join(results_dir, "images_annotated")
output_label_root = os.path.join(results_dir, "labels")
os.makedirs(output_annotate_root, exist_ok=True)
os.makedirs(output_label_root, exist_ok=True)
    for i, (imgs, _) in tqdm(enumerate(dl), total=len(dl), desc="Producing predictions"):
        y_pred = trt_infer.infer(imgs)
        image_paths = dl.image_paths[np.arange(batch_size) + batch_size * i]
        for j in range(len(y_pred)):
            # Load image
            img = Image.open(image_paths[j])
            orig_width, orig_height = img.size
            width_scale = orig_width / trt_infer.width
            height_scale = orig_height / trt_infer.height
            # Filter by confidence and scale back to the original image resolution
            y_pred_valid = y_pred[j][y_pred[j][:, 1] > conf_thres]
            y_pred_valid[..., 2] = np.clip(y_pred_valid[..., 2].round(), 0.0, w) * width_scale
            y_pred_valid[..., 3] = np.clip(y_pred_valid[..., 3].round(), 0.0, h) * height_scale
            y_pred_valid[..., 4] = np.clip(y_pred_valid[..., 4].round(), 0.0, w) * width_scale
            y_pred_valid[..., 5] = np.clip(y_pred_valid[..., 5].round(), 0.0, h) * height_scale
            # Store annotated images
            bbox_img, label_strings = trt_infer.draw_bbox(img, y_pred_valid, inv_classes, args.threshold)
            img_filename = os.path.basename(image_paths[j])
            bbox_img.save(os.path.join(output_annotate_root, img_filename))
            # Store labels
            filename, _ = os.path.splitext(img_filename)
            label_file_name = os.path.join(output_label_root, filename + ".txt")
            with open(label_file_name, "w", encoding="utf-8") as f:
                for l_s in label_strings:
                    f.write(l_s)
logging.info("Finished inference.")
def build_command_line_parser(parser=None):
"""Build the command line parser using argparse.
Args:
parser (subparser): Provided from the wrapper script to build a chained
parser mechanism.
Returns:
parser
"""
if parser is None:
parser = argparse.ArgumentParser(prog='infer', description='Inference with a RetinaNet TRT model.')
parser.add_argument(
'-i',
'--image_dir',
type=str,
required=False,
default=None,
help='Input directory of images')
parser.add_argument(
'-e',
'--experiment_spec',
type=str,
required=True,
help='Path to the experiment spec file.'
)
parser.add_argument(
'-m',
'--model_path',
type=str,
required=True,
help='Path to the RetinaNet TensorRT engine.'
)
parser.add_argument(
'-b',
'--batch_size',
type=int,
required=False,
default=1,
help='Batch size.')
parser.add_argument(
'-t',
'--threshold',
type=float,
default=0.3,
help='Confidence threshold for inference.')
parser.add_argument(
'-r',
'--results_dir',
type=str,
required=True,
default=None,
help='Output directory where the log is saved.'
)
return parser
def parse_command_line_arguments(args=None):
"""Simple function to parse command line arguments."""
parser = build_command_line_parser(args)
return parser.parse_args(args)
if __name__ == '__main__':
args = parse_command_line_arguments()
main(args)
| tao_deploy-main | nvidia_tao_deploy/cv/retinanet/scripts/inference.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Standalone TensorRT evaluation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import json
import numpy as np
from tqdm.auto import tqdm
import logging
from nvidia_tao_deploy.cv.common.decorators import monitor_status
from nvidia_tao_deploy.cv.retinanet.dataloader import RetinaNetKITTILoader
from nvidia_tao_deploy.cv.retinanet.inferencer import RetinaNetInferencer
from nvidia_tao_deploy.cv.retinanet.proto.utils import load_proto
from nvidia_tao_deploy.metrics.kitti_metric import KITTIMetric
logging.getLogger('PIL').setLevel(logging.WARNING)
logging.basicConfig(format='%(asctime)s [TAO Toolkit] [%(levelname)s] %(name)s %(lineno)d: %(message)s',
level="INFO")
logger = logging.getLogger(__name__)
@monitor_status(name='retinanet', mode='evaluation')
def main(args):
"""RetinaNet TRT evaluation."""
# Load from proto-based spec file
es = load_proto(args.experiment_spec)
matching_iou_threshold = es.eval_config.matching_iou_threshold if es.eval_config.matching_iou_threshold else 0.5
conf_thres = es.nms_config.confidence_threshold if es.nms_config.confidence_threshold else 0.01
batch_size = args.batch_size if args.batch_size else es.eval_config.batch_size
trt_infer = RetinaNetInferencer(args.model_path, batch_size=batch_size)
c, h, w = trt_infer._input_shape
ap_mode = es.eval_config.average_precision_mode
ap_mode_dict = {0: "sample", 1: "integrate"}
img_mean = es.augmentation_config.image_mean
if c == 3:
if img_mean:
img_mean = [img_mean['b'], img_mean['g'], img_mean['r']]
else:
img_mean = [103.939, 116.779, 123.68]
else:
if img_mean:
img_mean = [img_mean['l']]
else:
img_mean = [117.3786]
# Override path if provided through command line args
if args.image_dir:
image_dirs = [args.image_dir]
else:
image_dirs = [d.image_directory_path for d in es.dataset_config.validation_data_sources]
if args.label_dir:
label_dirs = [args.label_dir]
else:
label_dirs = [d.label_directory_path for d in es.dataset_config.validation_data_sources]
# Load mapping_dict from the spec file
mapping_dict = dict(es.dataset_config.target_class_mapping)
dl = RetinaNetKITTILoader(
shape=(c, h, w),
image_dirs=image_dirs,
label_dirs=label_dirs,
mapping_dict=mapping_dict,
exclude_difficult=True,
batch_size=batch_size,
image_mean=img_mean,
keep_aspect_ratio=False,
dtype=trt_infer.inputs[0].host.dtype)
eval_metric = KITTIMetric(n_classes=len(dl.classes) + 1,
matching_iou_threshold=matching_iou_threshold,
conf_thres=conf_thres,
average_precision_mode=ap_mode_dict[ap_mode])
gt_labels = []
pred_labels = []
    for imgs, labels in tqdm(dl, total=len(dl), desc="Producing predictions"):
        gt_labels.extend(labels)
        y_pred = trt_infer.infer(imgs)
        for j in range(len(y_pred)):
            y_pred_valid = y_pred[j][y_pred[j][:, 1] > eval_metric.conf_thres]
            y_pred_valid[..., 2] = np.clip(y_pred_valid[..., 2].round(), 0.0, w)
            y_pred_valid[..., 3] = np.clip(y_pred_valid[..., 3].round(), 0.0, h)
            y_pred_valid[..., 4] = np.clip(y_pred_valid[..., 4].round(), 0.0, w)
            y_pred_valid[..., 5] = np.clip(y_pred_valid[..., 5].round(), 0.0, h)
            pred_labels.append(y_pred_valid)
    _, ap = eval_metric(gt_labels, pred_labels, verbose=True)
    # Recompute mAP over foreground classes only (index 0 is the background class).
    m_ap = np.mean(ap[1:])
logging.info("*******************************")
class_mapping = {v: k for k, v in dl.classes.items()}
eval_results = {}
for i in range(len(dl.classes)):
eval_results['AP_' + class_mapping[i + 1]] = np.float64(ap[i + 1])
logging.info("{:<14}{:<6}{}".format(class_mapping[i + 1], 'AP', round(ap[i + 1], 5))) # noqa pylint: disable=C0209
logging.info("{:<14}{:<6}{}".format('', 'mAP', round(m_ap, 3))) # noqa pylint: disable=C0209
logging.info("*******************************")
# Store evaluation results into JSON
    if args.results_dir is None:
        results_dir = os.path.dirname(args.model_path)
    else:
        results_dir = args.results_dir
    os.makedirs(results_dir, exist_ok=True)
    with open(os.path.join(results_dir, "results.json"), "w", encoding="utf-8") as f:
json.dump(eval_results, f)
logging.info("Finished evaluation.")
def build_command_line_parser(parser=None):
"""Build the command line parser using argparse.
Args:
parser (subparser): Provided from the wrapper script to build a chained
parser mechanism.
Returns:
parser
"""
if parser is None:
parser = argparse.ArgumentParser(prog='eval', description='Evaluate with a RetinaNet TRT model.')
parser.add_argument(
'-i',
'--image_dir',
type=str,
required=False,
default=None,
help='Input directory of images')
parser.add_argument(
'-e',
'--experiment_spec',
type=str,
required=True,
help='Path to the experiment spec file.'
)
parser.add_argument(
'-m',
'--model_path',
type=str,
required=True,
help='Path to the RetinaNet TensorRT engine.'
)
parser.add_argument(
'-l',
'--label_dir',
type=str,
required=False,
help='Label directory.')
parser.add_argument(
'-b',
'--batch_size',
type=int,
required=False,
default=1,
help='Batch size.')
parser.add_argument(
'-r',
'--results_dir',
type=str,
required=True,
default=None,
help='Output directory where the log is saved.'
)
return parser
def parse_command_line_arguments(args=None):
"""Simple function to parse command line arguments."""
parser = build_command_line_parser(args)
return parser.parse_args(args)
if __name__ == '__main__':
args = parse_command_line_arguments()
main(args)
| tao_deploy-main | nvidia_tao_deploy/cv/retinanet/scripts/evaluate.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Entrypoint module for retinanet."""
| tao_deploy-main | nvidia_tao_deploy/cv/retinanet/entrypoint/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TAO Deploy command line wrapper to invoke CLI scripts."""
import sys
from nvidia_tao_deploy.cv.common.entrypoint.entrypoint_proto import launch_job
import nvidia_tao_deploy.cv.retinanet.scripts
def main():
"""Function to launch the job."""
launch_job(nvidia_tao_deploy.cv.retinanet.scripts, "retinanet", sys.argv[1:])
if __name__ == "__main__":
main()
| tao_deploy-main | nvidia_tao_deploy/cv/retinanet/entrypoint/retinanet.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility class for performing TensorRT image inference."""
import numpy as np
import tensorrt as trt
from nvidia_tao_deploy.inferencer.trt_inferencer import TRTInferencer
from nvidia_tao_deploy.inferencer.utils import allocate_buffers, do_inference
def trt_output_process_fn(y_encoded):
"""Function to process TRT model output."""
    nms_out, _ = y_encoded
    # Box coordinates (x1, y1, x2, y2), shape = (N, R, 4) after slicing
    nmsed_boxes = nms_out[:, 0, :, 3:7]
    # Confidence scores, shape = (N, R)
    nmsed_scores = nms_out[:, 0, :, 2]
    # Class ids, shape = (N, R)
    nmsed_classes = nms_out[:, 0, :, 1]
    result = []
    # Assemble per-image detections as (class_id, confidence, x1, y1, x2, y2)
for idx in range(nmsed_boxes.shape[0]):
loc = nmsed_boxes[idx].reshape(-1, 4)
cid = nmsed_classes[idx].reshape(-1, 1)
conf = nmsed_scores[idx].reshape(-1, 1)
result.append(np.concatenate((cid, conf, loc), axis=-1))
return result
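# --- Illustrative sketch (not part of the original module) ---
# Demonstrates trt_output_process_fn on fabricated NMS output: a tensor of
# shape (N, 1, R, 7) whose last axis holds (image_id, class_id, confidence,
# x1, y1, x2, y2), plus a keep-count tensor that the function ignores.
def _example_trt_output_process():
    """Run the post-processing on zeroed dummy tensors."""
    dummy_nms = np.zeros((2, 1, 100, 7), dtype=np.float32)  # N=2 images, R=100 boxes
    dummy_keep_count = np.zeros((2, 1), dtype=np.int32)
    detections = trt_output_process_fn([dummy_nms, dummy_keep_count])
    assert len(detections) == 2
    assert detections[0].shape == (100, 6)  # (class_id, confidence, x1, y1, x2, y2)
    return detections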
class FRCNNInferencer(TRTInferencer):
"""Manages TensorRT objects for model inference."""
def __init__(self, engine_path, input_shape=None, batch_size=None, data_format="channel_first"):
"""Initializes TensorRT objects needed for model inference.
Args:
            engine_path (str): path to the serialized TensorRT engine to load
input_shape (tuple): (batch, channel, height, width) for dynamic shape engine
batch_size (int): batch size for dynamic shape engine
data_format (str): either channel_first or channel_last
"""
# Load TRT engine
super().__init__(engine_path)
self.execute_v2 = False
# Execution context is needed for inference
self.context = None
# Allocate memory for multiple usage [e.g. multiple batch inference]
self._input_shape = []
for binding in range(self.engine.num_bindings):
if self.engine.binding_is_input(binding):
binding_shape = self.engine.get_binding_shape(binding)
self._input_shape = binding_shape[-3:]
if len(binding_shape) == 4:
self.etlt_type = "onnx"
else:
self.etlt_type = "uff"
assert len(self._input_shape) == 3, "Engine doesn't have valid input dimensions"
if data_format == "channel_first":
self.height = self._input_shape[1]
self.width = self._input_shape[2]
else:
self.height = self._input_shape[0]
self.width = self._input_shape[1]
# set binding_shape for dynamic input
if self.engine.has_implicit_batch_dimension:
self.max_batch_size = self.engine.max_batch_size
# do not override if the original model was uff
elif (input_shape is not None or batch_size is not None) and (self.etlt_type != "uff"):
self.context = self.engine.create_execution_context()
if input_shape is not None:
self.context.set_binding_shape(0, input_shape)
self.max_batch_size = input_shape[0]
else:
self.context.set_binding_shape(0, [batch_size] + list(self._input_shape))
self.max_batch_size = batch_size
self.execute_v2 = True
# This allocates memory for network inputs/outputs on both CPU and GPU
self.inputs, self.outputs, self.bindings, self.stream = allocate_buffers(self.engine,
self.context,
True)
if self.context is None:
self.context = self.engine.create_execution_context()
input_volume = trt.volume(self._input_shape)
self.numpy_array = np.zeros((self.max_batch_size, input_volume))
def infer(self, imgs):
"""Infers model on batch of same sized images resized to fit the model.
        Args:
            imgs (np.ndarray): preprocessed image batch that will be fed
                into the model.
        """
# Verify if the supplied batch size is not too big
max_batch_size = self.max_batch_size
actual_batch_size = len(imgs)
if actual_batch_size > max_batch_size:
raise ValueError(f"image_paths list bigger ({actual_batch_size}) than \
engine max batch size ({max_batch_size})")
self.numpy_array[:actual_batch_size] = imgs.reshape(actual_batch_size, -1)
# ...copy them into appropriate place into memory...
# (self.inputs was returned earlier by allocate_buffers())
np.copyto(self.inputs[0].host, self.numpy_array.ravel())
# ...fetch model outputs...
results = do_inference(
self.context, bindings=self.bindings, inputs=self.inputs,
outputs=self.outputs, stream=self.stream,
batch_size=max_batch_size,
execute_v2=self.execute_v2)
# ...and return results up to the actual batch size.
y_pred = [i[:actual_batch_size] for i in results]
# Process TRT outputs to proper format
return trt_output_process_fn(y_pred)
def __del__(self):
"""Clear things up on object deletion."""
# Clear session and buffer
if self.trt_runtime:
del self.trt_runtime
if self.context:
del self.context
if self.engine:
del self.engine
if self.stream:
del self.stream
# Loop through inputs and free inputs.
for inp in self.inputs:
inp.device.free()
# Loop through outputs and free them.
for out in self.outputs:
out.device.free()
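# --- Illustrative usage sketch (not part of the original module) ---
# Shows the intended call pattern for FRCNNInferencer; the engine path is a
# placeholder, and the random batch stands in for preprocessed images.
def _example_frcnn_inference():
    """Run one dummy batch through a (hypothetical) FasterRCNN engine."""
    inferencer = FRCNNInferencer('/workspace/engines/frcnn.engine', batch_size=1)
    batch = np.random.rand(1, *inferencer._input_shape).astype(np.float32)
    detections = inferencer.infer(batch)  # list of (R, 6) arrays, one per image
    return detections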
| tao_deploy-main | nvidia_tao_deploy/cv/faster_rcnn/inferencer.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""FRCNN TensorRT engine builder."""
import logging
import os
import random
from six.moves import xrange
import sys
import traceback
from tqdm import tqdm
try:
from uff.model.uff_pb2 import MetaGraph
except ImportError:
print("Loading uff directly from the package source code")
# @scha: To disable tensorflow import issue
import importlib
import types
import pkgutil
package = pkgutil.get_loader("uff")
# Returns __init__.py path
src_code = package.get_filename().replace('__init__.py', 'model/uff_pb2.py')
loader = importlib.machinery.SourceFileLoader('helper', src_code)
helper = types.ModuleType(loader.name)
loader.exec_module(helper)
MetaGraph = helper.MetaGraph
import numpy as np
import onnx
import tensorrt as trt
from nvidia_tao_deploy.engine.builder import EngineBuilder
from nvidia_tao_deploy.engine.tensorfile import TensorFile
from nvidia_tao_deploy.engine.tensorfile_calibrator import TensorfileCalibrator
from nvidia_tao_deploy.engine.utils import generate_random_tensorfile, prepare_chunk
logging.basicConfig(format='%(asctime)s [TAO Toolkit] [%(levelname)s] %(name)s %(lineno)d: %(message)s',
level="INFO")
logger = logging.getLogger(__name__)
class FRCNNEngineBuilder(EngineBuilder):
"""Parses an UFF graph and builds a TensorRT engine from it."""
def __init__(
self,
data_format="channels_first",
**kwargs
):
"""Init.
Args:
data_format (str): data_format.
"""
super().__init__(**kwargs)
self._data_format = data_format
def set_input_output_node_names(self):
"""Set input output node names."""
self._output_node_names = ["NMS"]
self._input_node_names = ["input_image"]
def get_input_dims(self, model_path):
"""Get input dimension of UFF model."""
metagraph = MetaGraph()
with open(model_path, "rb") as f:
metagraph.ParseFromString(f.read())
for node in metagraph.graphs[0].nodes:
if node.operation == "Input":
return np.array(node.fields['shape'].i_list.val)[1:]
raise ValueError("Input dimension is not found in the UFF metagraph.")
def get_onnx_input_dims(self, model_path):
"""Get input dimension of ONNX model."""
onnx_model = onnx.load(model_path)
onnx_inputs = onnx_model.graph.input
logger.info('ONNX model inputs: ')
for i, inputs in enumerate(onnx_inputs):
logger.info('Input %s: %s.', i, inputs.name)
logger.info('%s.', [i.dim_value for i in inputs.type.tensor_type.shape.dim])
return [i.dim_value for i in inputs.type.tensor_type.shape.dim][:]
def create_network(self, model_path, file_format="uff"):
"""Parse the ONNX graph and create the corresponding TensorRT network definition.
Args:
model_path: The path to the UFF/ONNX graph to load.
"""
if file_format == "uff":
logger.info("Parsing UFF model")
self.network = self.builder.create_network()
self.parser = trt.UffParser()
self.set_input_output_node_names()
in_tensor_name = self._input_node_names[0]
self._input_dims = self.get_input_dims(model_path)
input_dict = {in_tensor_name: self._input_dims}
for key, value in input_dict.items():
if self._data_format == "channels_first":
self.parser.register_input(key, value, trt.UffInputOrder(0))
else:
self.parser.register_input(key, value, trt.UffInputOrder(1))
for name in self._output_node_names:
self.parser.register_output(name)
self.builder.max_batch_size = self.max_batch_size
try:
assert self.parser.parse(model_path, self.network, trt.DataType.FLOAT)
except AssertionError as e:
logger.error("Failed to parse UFF File")
_, _, tb = sys.exc_info()
                traceback.print_tb(tb)
tb_info = traceback.extract_tb(tb)
_, line, _, text = tb_info[-1]
raise AssertionError(
f"UFF parsing failed on line {line} in statement {text}"
) from e
elif file_format == "onnx":
logger.info("Parsing ONNX model")
self._input_dims = self.get_onnx_input_dims(model_path)
self.batch_size = self._input_dims[0]
self._input_dims = self._input_dims[1:]
network_flags = (1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
self.network = self.builder.create_network(network_flags)
self.parser = trt.OnnxParser(self.network, self.trt_logger)
model_path = os.path.realpath(model_path)
with open(model_path, "rb") as f:
if not self.parser.parse(f.read()):
logger.error("Failed to load ONNX file: %s", model_path)
for error in range(self.parser.num_errors):
logger.error(self.parser.get_error(error))
sys.exit(1)
inputs = [self.network.get_input(i) for i in range(self.network.num_inputs)]
outputs = [self.network.get_output(i) for i in range(self.network.num_outputs)]
logger.info("Network Description")
for input in inputs: # noqa pylint: disable=W0622
logger.info("Input '%s' with shape %s and dtype %s", input.name, input.shape, input.dtype)
for output in outputs:
logger.info("Output '%s' with shape %s and dtype %s", output.name, output.shape, output.dtype)
if self.batch_size <= 0: # dynamic batch size
logger.info("dynamic batch size handling")
opt_profile = self.builder.create_optimization_profile()
model_input = self.network.get_input(0)
input_shape = model_input.shape
input_name = model_input.name
real_shape_min = (self.min_batch_size, input_shape[1],
input_shape[2], input_shape[3])
real_shape_opt = (self.opt_batch_size, input_shape[1],
input_shape[2], input_shape[3])
real_shape_max = (self.max_batch_size, input_shape[1],
input_shape[2], input_shape[3])
opt_profile.set_shape(input=input_name,
min=real_shape_min,
opt=real_shape_opt,
max=real_shape_max)
self.config.add_optimization_profile(opt_profile)
else:
raise NotImplementedError(f"Model format {file_format} for FasterRCNN is not supported")
def set_calibrator(self,
inputs=None,
calib_cache=None,
calib_input=None,
calib_num_images=5000,
calib_batch_size=8,
calib_data_file=None,
image_mean=None):
"""Simple function to set an Tensorfile based int8 calibrator.
Args:
calib_data_file: Path to the TensorFile. If the tensorfile doesn't exist
at this path, then one is created with either n_batches
of random tensors, images from the file in calib_input of dimensions
(batch_size,) + (input_dims).
calib_input: The path to a directory holding the calibration images.
calib_cache: The path where to write the calibration cache to,
or if it already exists, load it from.
calib_num_images: The maximum number of images to use for calibration.
calib_batch_size: The batch size to use for the calibration process.
image_mean: Image mean per channel.
Returns:
No explicit returns.
"""
logger.info("Calibrating using TensorfileCalibrator")
n_batches = calib_num_images // calib_batch_size
if not os.path.exists(calib_data_file):
self.generate_tensor_file(calib_data_file,
calib_input,
self._input_dims,
n_batches=n_batches,
batch_size=calib_batch_size,
image_mean=image_mean)
self.config.int8_calibrator = TensorfileCalibrator(calib_data_file,
calib_cache,
n_batches,
calib_batch_size)
def generate_tensor_file(self, data_file_name,
calibration_images_dir,
input_dims, n_batches=10,
batch_size=1, image_mean=None):
"""Generate calibration Tensorfile for int8 calibrator.
This function generates a calibration tensorfile from a directory of images, or dumps
n_batches of random numpy arrays of shape (batch_size,) + (input_dims).
Args:
data_file_name (str): Path to the output tensorfile to be saved.
calibration_images_dir (str): Path to the images to generate a tensorfile from.
input_dims (list): Input shape in CHW order.
n_batches (int): Number of batches to be saved.
batch_size (int): Number of images per batch.
image_mean (list): Image mean per channel.
Returns:
No explicit returns.
"""
if not os.path.exists(calibration_images_dir):
logger.info("Generating a tensorfile with random tensor images. This may work well as "
"a profiling tool, however, it may result in inaccurate results at "
"inference. Please generate a tensorfile using the tlt-int8-tensorfile, "
"or provide a custom directory of images for best performance.")
generate_random_tensorfile(data_file_name,
input_dims,
n_batches=n_batches,
batch_size=batch_size)
else:
# Preparing the list of images to be saved.
num_images = n_batches * batch_size
valid_image_ext = ('jpg', 'jpeg', 'png')
image_list = [os.path.join(calibration_images_dir, image)
for image in os.listdir(calibration_images_dir)
if image.lower().endswith(valid_image_ext)]
if len(image_list) < num_images:
raise ValueError('Not enough number of images provided:'
f' {len(image_list)} < {num_images}')
image_idx = random.sample(xrange(len(image_list)), num_images)
self.set_data_preprocessing_parameters(input_dims, image_mean)
# Writing out processed dump.
with TensorFile(data_file_name, 'w') as f:
for chunk in tqdm(image_idx[x:x + batch_size] for x in xrange(0, len(image_idx),
batch_size)):
dump_data = prepare_chunk(chunk, image_list,
image_width=input_dims[2],
image_height=input_dims[1],
channels=input_dims[0],
batch_size=batch_size,
**self.preprocessing_arguments)
f.write(dump_data)
def set_data_preprocessing_parameters(self, input_dims, image_mean=None):
"""Set data pre-processing parameters for the int8 calibration."""
num_channels = input_dims[0]
if num_channels == 3:
if not image_mean:
means = [103.939, 116.779, 123.68]
else:
assert len(image_mean) == 3, "Image mean should have 3 values for RGB inputs."
means = image_mean
elif num_channels == 1:
if not image_mean:
means = [117.3786]
else:
assert len(image_mean) == 1, "Image mean should have 1 value for grayscale inputs."
means = image_mean
else:
raise NotImplementedError(
f"Invalid number of dimensions {num_channels}.")
self.preprocessing_arguments = {"scale": 1.0,
"means": means,
"flip_channel": True}
| tao_deploy-main | nvidia_tao_deploy/cv/faster_rcnn/engine_builder.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TAO Deploy FRCNN."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_deploy-main | nvidia_tao_deploy/cv/faster_rcnn/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""FRCNN loader."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import cv2
import numpy as np
from nvidia_tao_deploy.dataloader.kitti import KITTILoader
logging.basicConfig(format='%(asctime)s [%(levelname)s] %(name)s: %(message)s',
level="DEBUG")
logger = logging.getLogger(__name__)
def aug_letterbox_resize(img, boxes, num_channels=3, resize_shape=(512, 512)):
"""Apply letter box. resize image to resize_shape, not changing aspect ratio.
Args:
img (PIL.Image): RGB image
boxes (np.array): (N, 4) numpy arrays (xmin, ymin, xmax, ymax) containing bboxes. {x,y}{min,max} is
in [0, 1] range.
resize_shape (int, int): (w, h) of new image
Returns:
aug_img: img after resize
aug_boxes: boxes after resize
"""
img = np.array(img).astype(np.float32)
    if num_channels == 1:
        new_img = np.zeros((resize_shape[1], resize_shape[0]), dtype=np.float32)
    else:
        new_img = np.zeros((resize_shape[1], resize_shape[0], 3), dtype=np.float32)
new_img += np.mean(img, axis=(0, 1), keepdims=True)
h, w = img.shape[0], img.shape[1]
ratio = min(float(resize_shape[1]) / h, float(resize_shape[0]) / w)
new_h = int(round(ratio * h))
new_w = int(round(ratio * w))
l_shift = (resize_shape[0] - new_w) // 2
t_shift = (resize_shape[1] - new_h) // 2
img = cv2.resize(img, (new_w, new_h), cv2.INTER_LINEAR)
    new_img[t_shift: t_shift + new_h, l_shift: l_shift + new_w] = img.astype(np.float32)
xmin = (boxes[:, 0] * new_w + l_shift) / float(resize_shape[0])
xmax = (boxes[:, 2] * new_w + l_shift) / float(resize_shape[0])
ymin = (boxes[:, 1] * new_h + t_shift) / float(resize_shape[1])
ymax = (boxes[:, 3] * new_h + t_shift) / float(resize_shape[1])
return new_img, np.stack([xmin, ymin, xmax, ymax], axis=-1), \
[l_shift, t_shift, l_shift + new_w, t_shift + new_h]
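# --- Illustrative sketch (not part of the original module) ---
# Demonstrates aug_letterbox_resize on a fabricated image: a 100x50 RGB image
# letterboxed into a 512x512 canvas, with one normalized box mapped along.
def _example_letterbox():
    """Letterbox a dummy image and a single full-image box."""
    img = np.ones((50, 100, 3), dtype=np.uint8) * 128  # (H=50, W=100) gray image
    boxes = np.array([[0.0, 0.0, 1.0, 1.0]])           # one box covering the image
    new_img, new_boxes, shifts = aug_letterbox_resize(
        img, boxes, num_channels=3, resize_shape=(512, 512))
    # Aspect ratio 2:1 is preserved: content occupies 512x256, centered vertically.
    return new_img.shape, new_boxes, shifts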
class FRCNNKITTILoader(KITTILoader):
"""FRCNN Dataloader."""
def __init__(self,
**kwargs):
"""Init."""
super().__init__(**kwargs)
# FRCNN starts label index from 0
classes = sorted({str(x).lower() for x in self.mapping_dict.values()})
self.classes = dict(zip(classes, range(len(classes))))
self.class_mapping = {key.lower(): self.classes[str(val.lower())]
for key, val in self.mapping_dict.items()}
def _filter_invalid_labels(self, labels):
"""filter out invalid labels.
Arg:
labels: size (N, 6), where bboxes is normalized to 0~1.
Returns:
labels: size (M, 6), filtered bboxes with clipped boxes.
"""
labels[:, -4:] = np.clip(labels[:, -4:], 0, 1)
# exclude invalid boxes
difficult_cond = (labels[:, 1] < 0.5) | (not self.exclude_difficult)
if np.any(difficult_cond == 0):
logger.warning(
"Got label marked as difficult(occlusion > 0), "
"please set occlusion field in KITTI label to 0 "
"or set `dataset_config.include_difficult_in_training` to True "
"in spec file, if you want to include it in training."
)
x_cond = labels[:, 4] - labels[:, 2] > 1e-3
y_cond = labels[:, 5] - labels[:, 3] > 1e-3
return labels[difficult_cond & x_cond & y_cond]
def preprocessing(self, image, label):
"""The image preprocessor loads an image from disk and prepares it as needed for batching.
This includes padding, resizing, normalization, data type casting, and transposing.
Args:
image (PIL.image): The Pillow image on disk to load.
label (np.array): labels
Returns:
image (np.array): A numpy array holding the image sample, ready to be concatenated
into the rest of the batch
label (np.array): labels
"""
# change bbox to 0~1
w, h = image.size
label[:, 2] /= w
label[:, 3] /= h
label[:, 4] /= w
label[:, 5] /= h
bboxes = label[:, -4:]
image, bboxes, _ = aug_letterbox_resize(image,
bboxes,
num_channels=self.num_channels,
resize_shape=(self.width, self.height))
label[:, -4:] = bboxes
# Handle Grayscale
if self.num_channels == 1:
image = np.expand_dims(image, axis=2)
# Filter invalid labels
label = self._filter_invalid_labels(label)
return image, label
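# --- Illustrative sketch (not part of the original module) ---
# Reproduces the validity conditions from _filter_invalid_labels on fabricated
# label rows (class_id, occlusion, xmin, ymin, xmax, ymax), assuming difficult
# boxes (occlusion >= 0.5) are excluded.
def _example_label_filtering():
    """Keep only non-difficult rows with positive box extent."""
    labels = np.array([
        [0, 0.0, 0.1, 0.1, 0.5, 0.5],   # valid
        [1, 1.0, 0.1, 0.1, 0.5, 0.5],   # difficult -> dropped
        [2, 0.0, 0.4, 0.4, 0.4, 0.4],   # zero-area box -> dropped
    ])
    difficult_cond = labels[:, 1] < 0.5
    x_cond = labels[:, 4] - labels[:, 2] > 1e-3
    y_cond = labels[:, 5] - labels[:, 3] > 1e-3
    return labels[difficult_cond & x_cond & y_cond]  # only the first row survives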
| tao_deploy-main | nvidia_tao_deploy/cv/faster_rcnn/dataloader.py |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_deploy/cv/faster_rcnn/proto/training.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from nvidia_tao_deploy.cv.common.proto import visualizer_config_pb2 as nvidia__tao__deploy_dot_cv_dot_common_dot_proto_dot_visualizer__config__pb2
from nvidia_tao_deploy.cv.faster_rcnn.proto import regularizer_config_pb2 as nvidia__tao__deploy_dot_cv_dot_faster__rcnn_dot_proto_dot_regularizer__config__pb2
from nvidia_tao_deploy.cv.faster_rcnn.proto import optimizer_pb2 as nvidia__tao__deploy_dot_cv_dot_faster__rcnn_dot_proto_dot_optimizer__pb2
from nvidia_tao_deploy.cv.faster_rcnn.proto import learning_rate_pb2 as nvidia__tao__deploy_dot_cv_dot_faster__rcnn_dot_proto_dot_learning__rate__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='nvidia_tao_deploy/cv/faster_rcnn/proto/training.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n5nvidia_tao_deploy/cv/faster_rcnn/proto/training.proto\x1a\x39nvidia_tao_deploy/cv/common/proto/visualizer_config.proto\x1a?nvidia_tao_deploy/cv/faster_rcnn/proto/regularizer_config.proto\x1a\x36nvidia_tao_deploy/cv/faster_rcnn/proto/optimizer.proto\x1a:nvidia_tao_deploy/cv/faster_rcnn/proto/learning_rate.proto\"E\n\rEarlyStopping\x12\x0f\n\x07monitor\x18\x01 \x01(\t\x12\x11\n\tmin_delta\x18\x02 \x01(\x02\x12\x10\n\x08patience\x18\x03 \x01(\r\"\xc3\x07\n\x0eTrainingConfig\x12\x1b\n\x13\x65nable_augmentation\x18\x30 \x01(\x08\x12\x1c\n\x14retrain_pruned_model\x18\x03 \x01(\t\x12\x1a\n\x12pretrained_weights\x18\n \x01(\t\x12\x19\n\x11resume_from_model\x18/ \x01(\t\x12\x17\n\x0frpn_min_overlap\x18\x04 \x01(\x02\x12\x17\n\x0frpn_max_overlap\x18\x05 \x01(\x02\x12\x1e\n\x16\x63lassifier_min_overlap\x18\x06 \x01(\x02\x12\x1e\n\x16\x63lassifier_max_overlap\x18\x07 \x01(\x02\x12\x11\n\tgt_as_roi\x18& \x01(\x08\x12\x13\n\x0bstd_scaling\x18\x08 \x01(\x02\x12\x43\n\x13\x63lassifier_regr_std\x18\t \x03(\x0b\x32&.TrainingConfig.ClassifierRegrStdEntry\x12\x1a\n\x12\x62\x61tch_size_per_gpu\x18. \x01(\r\x12\x12\n\nnum_epochs\x18\x0b \x01(\r\x12\x14\n\x0coutput_model\x18\r \x01(\t\x12\x1b\n\x13\x63heckpoint_interval\x18\x31 \x01(\r\x12\x19\n\x11rpn_pre_nms_top_N\x18# \x01(\r\x12\x16\n\x0erpn_mini_batch\x18$ \x01(\r\x12\x19\n\x11rpn_nms_max_boxes\x18\x10 \x01(\r\x12!\n\x19rpn_nms_overlap_threshold\x18\x11 \x01(\x02\x12\'\n\x0bregularizer\x18\x14 \x01(\x0b\x32\x12.RegularizerConfig\x12#\n\toptimizer\x18+ \x01(\x0b\x32\x10.OptimizerConfig\x12 \n\rlearning_rate\x18- \x01(\x0b\x32\t.LRConfig\x12\x17\n\x0flambda_rpn_regr\x18\' \x01(\x02\x12\x18\n\x10lambda_rpn_class\x18( \x01(\x02\x12\x17\n\x0flambda_cls_regr\x18) \x01(\x02\x12\x18\n\x10lambda_cls_class\x18* \x01(\x02\x12\x12\n\nenable_qat\x18\x32 \x01(\x08\x12\x19\n\x11model_parallelism\x18\x33 \x03(\x02\x12&\n\x0e\x65\x61rly_stopping\x18\x34 \x01(\x0b\x32\x0e.EarlyStopping\x12%\n\nvisualizer\x18\x35 \x01(\x0b\x32\x11.VisualizerConfig\x1a\x38\n\x16\x43lassifierRegrStdEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x02:\x02\x38\x01\x62\x06proto3')
,
dependencies=[nvidia__tao__deploy_dot_cv_dot_common_dot_proto_dot_visualizer__config__pb2.DESCRIPTOR,nvidia__tao__deploy_dot_cv_dot_faster__rcnn_dot_proto_dot_regularizer__config__pb2.DESCRIPTOR,nvidia__tao__deploy_dot_cv_dot_faster__rcnn_dot_proto_dot_optimizer__pb2.DESCRIPTOR,nvidia__tao__deploy_dot_cv_dot_faster__rcnn_dot_proto_dot_learning__rate__pb2.DESCRIPTOR,])
_EARLYSTOPPING = _descriptor.Descriptor(
name='EarlyStopping',
full_name='EarlyStopping',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='monitor', full_name='EarlyStopping.monitor', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='min_delta', full_name='EarlyStopping.min_delta', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='patience', full_name='EarlyStopping.patience', index=2,
number=3, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=297,
serialized_end=366,
)
_TRAININGCONFIG_CLASSIFIERREGRSTDENTRY = _descriptor.Descriptor(
name='ClassifierRegrStdEntry',
full_name='TrainingConfig.ClassifierRegrStdEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='TrainingConfig.ClassifierRegrStdEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='TrainingConfig.ClassifierRegrStdEntry.value', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=_b('8\001'),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1276,
serialized_end=1332,
)
_TRAININGCONFIG = _descriptor.Descriptor(
name='TrainingConfig',
full_name='TrainingConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='enable_augmentation', full_name='TrainingConfig.enable_augmentation', index=0,
number=48, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='retrain_pruned_model', full_name='TrainingConfig.retrain_pruned_model', index=1,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='pretrained_weights', full_name='TrainingConfig.pretrained_weights', index=2,
number=10, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='resume_from_model', full_name='TrainingConfig.resume_from_model', index=3,
number=47, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='rpn_min_overlap', full_name='TrainingConfig.rpn_min_overlap', index=4,
number=4, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='rpn_max_overlap', full_name='TrainingConfig.rpn_max_overlap', index=5,
number=5, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='classifier_min_overlap', full_name='TrainingConfig.classifier_min_overlap', index=6,
number=6, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='classifier_max_overlap', full_name='TrainingConfig.classifier_max_overlap', index=7,
number=7, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='gt_as_roi', full_name='TrainingConfig.gt_as_roi', index=8,
number=38, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='std_scaling', full_name='TrainingConfig.std_scaling', index=9,
number=8, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='classifier_regr_std', full_name='TrainingConfig.classifier_regr_std', index=10,
number=9, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='batch_size_per_gpu', full_name='TrainingConfig.batch_size_per_gpu', index=11,
number=46, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='num_epochs', full_name='TrainingConfig.num_epochs', index=12,
number=11, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='output_model', full_name='TrainingConfig.output_model', index=13,
number=13, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='checkpoint_interval', full_name='TrainingConfig.checkpoint_interval', index=14,
number=49, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='rpn_pre_nms_top_N', full_name='TrainingConfig.rpn_pre_nms_top_N', index=15,
number=35, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='rpn_mini_batch', full_name='TrainingConfig.rpn_mini_batch', index=16,
number=36, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='rpn_nms_max_boxes', full_name='TrainingConfig.rpn_nms_max_boxes', index=17,
number=16, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='rpn_nms_overlap_threshold', full_name='TrainingConfig.rpn_nms_overlap_threshold', index=18,
number=17, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='regularizer', full_name='TrainingConfig.regularizer', index=19,
number=20, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='optimizer', full_name='TrainingConfig.optimizer', index=20,
number=43, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='learning_rate', full_name='TrainingConfig.learning_rate', index=21,
number=45, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='lambda_rpn_regr', full_name='TrainingConfig.lambda_rpn_regr', index=22,
number=39, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='lambda_rpn_class', full_name='TrainingConfig.lambda_rpn_class', index=23,
number=40, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='lambda_cls_regr', full_name='TrainingConfig.lambda_cls_regr', index=24,
number=41, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='lambda_cls_class', full_name='TrainingConfig.lambda_cls_class', index=25,
number=42, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='enable_qat', full_name='TrainingConfig.enable_qat', index=26,
number=50, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='model_parallelism', full_name='TrainingConfig.model_parallelism', index=27,
number=51, type=2, cpp_type=6, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='early_stopping', full_name='TrainingConfig.early_stopping', index=28,
number=52, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='visualizer', full_name='TrainingConfig.visualizer', index=29,
number=53, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_TRAININGCONFIG_CLASSIFIERREGRSTDENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=369,
serialized_end=1332,
)
_TRAININGCONFIG_CLASSIFIERREGRSTDENTRY.containing_type = _TRAININGCONFIG
_TRAININGCONFIG.fields_by_name['classifier_regr_std'].message_type = _TRAININGCONFIG_CLASSIFIERREGRSTDENTRY
_TRAININGCONFIG.fields_by_name['regularizer'].message_type = nvidia__tao__deploy_dot_cv_dot_faster__rcnn_dot_proto_dot_regularizer__config__pb2._REGULARIZERCONFIG
_TRAININGCONFIG.fields_by_name['optimizer'].message_type = nvidia__tao__deploy_dot_cv_dot_faster__rcnn_dot_proto_dot_optimizer__pb2._OPTIMIZERCONFIG
_TRAININGCONFIG.fields_by_name['learning_rate'].message_type = nvidia__tao__deploy_dot_cv_dot_faster__rcnn_dot_proto_dot_learning__rate__pb2._LRCONFIG
_TRAININGCONFIG.fields_by_name['early_stopping'].message_type = _EARLYSTOPPING
_TRAININGCONFIG.fields_by_name['visualizer'].message_type = nvidia__tao__deploy_dot_cv_dot_common_dot_proto_dot_visualizer__config__pb2._VISUALIZERCONFIG
DESCRIPTOR.message_types_by_name['EarlyStopping'] = _EARLYSTOPPING
DESCRIPTOR.message_types_by_name['TrainingConfig'] = _TRAININGCONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
EarlyStopping = _reflection.GeneratedProtocolMessageType('EarlyStopping', (_message.Message,), dict(
DESCRIPTOR = _EARLYSTOPPING,
__module__ = 'nvidia_tao_deploy.cv.faster_rcnn.proto.training_pb2'
# @@protoc_insertion_point(class_scope:EarlyStopping)
))
_sym_db.RegisterMessage(EarlyStopping)
TrainingConfig = _reflection.GeneratedProtocolMessageType('TrainingConfig', (_message.Message,), dict(
ClassifierRegrStdEntry = _reflection.GeneratedProtocolMessageType('ClassifierRegrStdEntry', (_message.Message,), dict(
DESCRIPTOR = _TRAININGCONFIG_CLASSIFIERREGRSTDENTRY,
__module__ = 'nvidia_tao_deploy.cv.faster_rcnn.proto.training_pb2'
# @@protoc_insertion_point(class_scope:TrainingConfig.ClassifierRegrStdEntry)
))
,
DESCRIPTOR = _TRAININGCONFIG,
__module__ = 'nvidia_tao_deploy.cv.faster_rcnn.proto.training_pb2'
# @@protoc_insertion_point(class_scope:TrainingConfig)
))
_sym_db.RegisterMessage(TrainingConfig)
_sym_db.RegisterMessage(TrainingConfig.ClassifierRegrStdEntry)
_TRAININGCONFIG_CLASSIFIERREGRSTDENTRY._options = None
# @@protoc_insertion_point(module_scope)
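# --- Usage sketch -------------------------------------------------------------
# A minimal, hypothetical example of populating the generated TrainingConfig
# message; the numeric values are illustrative, not recommended defaults:
#
#     from nvidia_tao_deploy.cv.faster_rcnn.proto.training_pb2 import TrainingConfig
#
#     cfg = TrainingConfig()
#     cfg.enable_qat = True                      # bool field (number 50)
#     cfg.lambda_rpn_regr = 1.0                  # float loss weight
#     cfg.model_parallelism.extend([0.5, 0.5])   # repeated float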
| tao_deploy-main | nvidia_tao_deploy/cv/faster_rcnn/proto/training_pb2.py |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_deploy/cv/faster_rcnn/proto/trt_config.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='nvidia_tao_deploy/cv/faster_rcnn/proto/trt_config.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n7nvidia_tao_deploy/cv/faster_rcnn/proto/trt_config.proto\"\"\n\x0cTrtInference\x12\x12\n\ntrt_engine\x18\x01 \x01(\tb\x06proto3')
)
_TRTINFERENCE = _descriptor.Descriptor(
name='TrtInference',
full_name='TrtInference',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='trt_engine', full_name='TrtInference.trt_engine', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=59,
serialized_end=93,
)
DESCRIPTOR.message_types_by_name['TrtInference'] = _TRTINFERENCE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
TrtInference = _reflection.GeneratedProtocolMessageType('TrtInference', (_message.Message,), dict(
DESCRIPTOR = _TRTINFERENCE,
__module__ = 'nvidia_tao_deploy.cv.faster_rcnn.proto.trt_config_pb2'
# @@protoc_insertion_point(class_scope:TrtInference)
))
_sym_db.RegisterMessage(TrtInference)
# @@protoc_insertion_point(module_scope)
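# --- Usage sketch -------------------------------------------------------------
# TrtInference carries a single string field, trt_engine. A minimal round-trip
# example (the engine path is purely illustrative):
#
#     from nvidia_tao_deploy.cv.faster_rcnn.proto.trt_config_pb2 import TrtInference
#
#     ti = TrtInference(trt_engine="/workspace/models/frcnn_fp16.engine")
#     restored = TrtInference.FromString(ti.SerializeToString())
#     assert restored.trt_engine == ti.trt_engine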
| tao_deploy-main | nvidia_tao_deploy/cv/faster_rcnn/proto/trt_config_pb2.py |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_deploy/cv/faster_rcnn/proto/input_image.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='nvidia_tao_deploy/cv/faster_rcnn/proto/input_image.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n8nvidia_tao_deploy/cv/faster_rcnn/proto/input_image.proto\"!\n\x12ImageSizeConfigMin\x12\x0b\n\x03min\x18\x01 \x01(\r\";\n\x1aImageSizeConfigHeightWidth\x12\x0e\n\x06height\x18\x01 \x01(\r\x12\r\n\x05width\x18\x02 \x01(\r\"\x86\x03\n\x10InputImageConfig\x12\x1e\n\nimage_type\x18\x06 \x01(\x0e\x32\n.ImageType\x12\'\n\x08size_min\x18\x01 \x01(\x0b\x32\x13.ImageSizeConfigMinH\x00\x12\x38\n\x11size_height_width\x18\x02 \x01(\x0b\x32\x1b.ImageSizeConfigHeightWidthH\x00\x12\x1b\n\x13image_channel_order\x18\x05 \x01(\t\x12\x43\n\x12image_channel_mean\x18\x03 \x03(\x0b\x32\'.InputImageConfig.ImageChannelMeanEntry\x12\x1c\n\x14image_scaling_factor\x18\x04 \x01(\x02\x12!\n\x19max_objects_num_per_image\x18\x07 \x01(\r\x1a\x37\n\x15ImageChannelMeanEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x02:\x02\x38\x01\x42\x13\n\x11image_size_config*$\n\tImageType\x12\x07\n\x03RGB\x10\x00\x12\x0e\n\nGRAY_SCALE\x10\x01\x62\x06proto3')
)
_IMAGETYPE = _descriptor.EnumDescriptor(
name='ImageType',
full_name='ImageType',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='RGB', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='GRAY_SCALE', index=1, number=1,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=549,
serialized_end=585,
)
_sym_db.RegisterEnumDescriptor(_IMAGETYPE)
ImageType = enum_type_wrapper.EnumTypeWrapper(_IMAGETYPE)
RGB = 0
GRAY_SCALE = 1
_IMAGESIZECONFIGMIN = _descriptor.Descriptor(
name='ImageSizeConfigMin',
full_name='ImageSizeConfigMin',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='min', full_name='ImageSizeConfigMin.min', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=60,
serialized_end=93,
)
_IMAGESIZECONFIGHEIGHTWIDTH = _descriptor.Descriptor(
name='ImageSizeConfigHeightWidth',
full_name='ImageSizeConfigHeightWidth',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='height', full_name='ImageSizeConfigHeightWidth.height', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='width', full_name='ImageSizeConfigHeightWidth.width', index=1,
number=2, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=95,
serialized_end=154,
)
_INPUTIMAGECONFIG_IMAGECHANNELMEANENTRY = _descriptor.Descriptor(
name='ImageChannelMeanEntry',
full_name='InputImageConfig.ImageChannelMeanEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='InputImageConfig.ImageChannelMeanEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='InputImageConfig.ImageChannelMeanEntry.value', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=_b('8\001'),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=471,
serialized_end=526,
)
_INPUTIMAGECONFIG = _descriptor.Descriptor(
name='InputImageConfig',
full_name='InputImageConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='image_type', full_name='InputImageConfig.image_type', index=0,
number=6, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='size_min', full_name='InputImageConfig.size_min', index=1,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='size_height_width', full_name='InputImageConfig.size_height_width', index=2,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='image_channel_order', full_name='InputImageConfig.image_channel_order', index=3,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='image_channel_mean', full_name='InputImageConfig.image_channel_mean', index=4,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='image_scaling_factor', full_name='InputImageConfig.image_scaling_factor', index=5,
number=4, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='max_objects_num_per_image', full_name='InputImageConfig.max_objects_num_per_image', index=6,
number=7, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_INPUTIMAGECONFIG_IMAGECHANNELMEANENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='image_size_config', full_name='InputImageConfig.image_size_config',
index=0, containing_type=None, fields=[]),
],
serialized_start=157,
serialized_end=547,
)
_INPUTIMAGECONFIG_IMAGECHANNELMEANENTRY.containing_type = _INPUTIMAGECONFIG
_INPUTIMAGECONFIG.fields_by_name['image_type'].enum_type = _IMAGETYPE
_INPUTIMAGECONFIG.fields_by_name['size_min'].message_type = _IMAGESIZECONFIGMIN
_INPUTIMAGECONFIG.fields_by_name['size_height_width'].message_type = _IMAGESIZECONFIGHEIGHTWIDTH
_INPUTIMAGECONFIG.fields_by_name['image_channel_mean'].message_type = _INPUTIMAGECONFIG_IMAGECHANNELMEANENTRY
_INPUTIMAGECONFIG.oneofs_by_name['image_size_config'].fields.append(
_INPUTIMAGECONFIG.fields_by_name['size_min'])
_INPUTIMAGECONFIG.fields_by_name['size_min'].containing_oneof = _INPUTIMAGECONFIG.oneofs_by_name['image_size_config']
_INPUTIMAGECONFIG.oneofs_by_name['image_size_config'].fields.append(
_INPUTIMAGECONFIG.fields_by_name['size_height_width'])
_INPUTIMAGECONFIG.fields_by_name['size_height_width'].containing_oneof = _INPUTIMAGECONFIG.oneofs_by_name['image_size_config']
DESCRIPTOR.message_types_by_name['ImageSizeConfigMin'] = _IMAGESIZECONFIGMIN
DESCRIPTOR.message_types_by_name['ImageSizeConfigHeightWidth'] = _IMAGESIZECONFIGHEIGHTWIDTH
DESCRIPTOR.message_types_by_name['InputImageConfig'] = _INPUTIMAGECONFIG
DESCRIPTOR.enum_types_by_name['ImageType'] = _IMAGETYPE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
ImageSizeConfigMin = _reflection.GeneratedProtocolMessageType('ImageSizeConfigMin', (_message.Message,), dict(
DESCRIPTOR = _IMAGESIZECONFIGMIN,
__module__ = 'nvidia_tao_deploy.cv.faster_rcnn.proto.input_image_pb2'
# @@protoc_insertion_point(class_scope:ImageSizeConfigMin)
))
_sym_db.RegisterMessage(ImageSizeConfigMin)
ImageSizeConfigHeightWidth = _reflection.GeneratedProtocolMessageType('ImageSizeConfigHeightWidth', (_message.Message,), dict(
DESCRIPTOR = _IMAGESIZECONFIGHEIGHTWIDTH,
__module__ = 'nvidia_tao_deploy.cv.faster_rcnn.proto.input_image_pb2'
# @@protoc_insertion_point(class_scope:ImageSizeConfigHeightWidth)
))
_sym_db.RegisterMessage(ImageSizeConfigHeightWidth)
InputImageConfig = _reflection.GeneratedProtocolMessageType('InputImageConfig', (_message.Message,), dict(
ImageChannelMeanEntry = _reflection.GeneratedProtocolMessageType('ImageChannelMeanEntry', (_message.Message,), dict(
DESCRIPTOR = _INPUTIMAGECONFIG_IMAGECHANNELMEANENTRY,
__module__ = 'nvidia_tao_deploy.cv.faster_rcnn.proto.input_image_pb2'
# @@protoc_insertion_point(class_scope:InputImageConfig.ImageChannelMeanEntry)
))
,
DESCRIPTOR = _INPUTIMAGECONFIG,
__module__ = 'nvidia_tao_deploy.cv.faster_rcnn.proto.input_image_pb2'
# @@protoc_insertion_point(class_scope:InputImageConfig)
))
_sym_db.RegisterMessage(InputImageConfig)
_sym_db.RegisterMessage(InputImageConfig.ImageChannelMeanEntry)
_INPUTIMAGECONFIG_IMAGECHANNELMEANENTRY._options = None
# @@protoc_insertion_point(module_scope)
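# --- Usage sketch -------------------------------------------------------------
# InputImageConfig keeps size_min and size_height_width in the oneof
# image_size_config, so writing one arm clears the other. The dimensions and
# channel-mean key below are illustrative only:
#
#     from nvidia_tao_deploy.cv.faster_rcnn.proto import input_image_pb2
#
#     cfg = input_image_pb2.InputImageConfig()
#     cfg.image_type = input_image_pb2.RGB
#     cfg.size_height_width.height = 544
#     cfg.size_height_width.width = 960
#     cfg.size_min.min = 600   # overwrites the oneof; size_height_width is cleared
#     assert cfg.WhichOneof('image_size_config') == 'size_min'
#     cfg.image_channel_mean['r'] = 123.68   # map<string, float>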
| tao_deploy-main | nvidia_tao_deploy/cv/faster_rcnn/proto/input_image_pb2.py |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_deploy/cv/faster_rcnn/proto/regularizer_config.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='nvidia_tao_deploy/cv/faster_rcnn/proto/regularizer_config.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n?nvidia_tao_deploy/cv/faster_rcnn/proto/regularizer_config.proto\"\x8a\x01\n\x11RegularizerConfig\x12\x33\n\x04type\x18\x01 \x01(\x0e\x32%.RegularizerConfig.RegularizationType\x12\x0e\n\x06weight\x18\x02 \x01(\x02\"0\n\x12RegularizationType\x12\n\n\x06NO_REG\x10\x00\x12\x06\n\x02L1\x10\x01\x12\x06\n\x02L2\x10\x02\x62\x06proto3')
)
_REGULARIZERCONFIG_REGULARIZATIONTYPE = _descriptor.EnumDescriptor(
name='RegularizationType',
full_name='RegularizerConfig.RegularizationType',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='NO_REG', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='L1', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='L2', index=2, number=2,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=158,
serialized_end=206,
)
_sym_db.RegisterEnumDescriptor(_REGULARIZERCONFIG_REGULARIZATIONTYPE)
_REGULARIZERCONFIG = _descriptor.Descriptor(
name='RegularizerConfig',
full_name='RegularizerConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='type', full_name='RegularizerConfig.type', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='weight', full_name='RegularizerConfig.weight', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
_REGULARIZERCONFIG_REGULARIZATIONTYPE,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=68,
serialized_end=206,
)
_REGULARIZERCONFIG.fields_by_name['type'].enum_type = _REGULARIZERCONFIG_REGULARIZATIONTYPE
_REGULARIZERCONFIG_REGULARIZATIONTYPE.containing_type = _REGULARIZERCONFIG
DESCRIPTOR.message_types_by_name['RegularizerConfig'] = _REGULARIZERCONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
RegularizerConfig = _reflection.GeneratedProtocolMessageType('RegularizerConfig', (_message.Message,), dict(
DESCRIPTOR = _REGULARIZERCONFIG,
__module__ = 'nvidia_tao_deploy.cv.faster_rcnn.proto.regularizer_config_pb2'
# @@protoc_insertion_point(class_scope:RegularizerConfig)
))
_sym_db.RegisterMessage(RegularizerConfig)
# @@protoc_insertion_point(module_scope)
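# --- Usage sketch -------------------------------------------------------------
# RegularizationType is nested inside RegularizerConfig, so its values are
# reachable as class attributes. The weight shown is illustrative:
#
#     from nvidia_tao_deploy.cv.faster_rcnn.proto.regularizer_config_pb2 import RegularizerConfig
#
#     reg = RegularizerConfig(type=RegularizerConfig.L2, weight=1e-4)
#     assert reg.type == RegularizerConfig.L2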
| tao_deploy-main | nvidia_tao_deploy/cv/faster_rcnn/proto/regularizer_config_pb2.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TAO Deploy FRCNN Proto."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_deploy-main | nvidia_tao_deploy/cv/faster_rcnn/proto/__init__.py |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_deploy/cv/faster_rcnn/proto/augmentation_config.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='nvidia_tao_deploy/cv/faster_rcnn/proto/augmentation_config.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n@nvidia_tao_deploy/cv/faster_rcnn/proto/augmentation_config.proto\"\xb2\x07\n\x12\x41ugmentationConfig\x12\x38\n\rpreprocessing\x18\x01 \x01(\x0b\x32!.AugmentationConfig.Preprocessing\x12\x45\n\x14spatial_augmentation\x18\x02 \x01(\x0b\x32\'.AugmentationConfig.SpatialAugmentation\x12\x41\n\x12\x63olor_augmentation\x18\x03 \x01(\x0b\x32%.AugmentationConfig.ColorAugmentation\x1a\xe0\x02\n\rPreprocessing\x12\x1a\n\x12output_image_width\x18\x01 \x01(\r\x12\x1b\n\x13output_image_height\x18\x02 \x01(\r\x12\x18\n\x10output_image_min\x18\x0e \x01(\r\x12\x18\n\x10output_image_max\x18\x0f \x01(\r\x12\x1a\n\x12\x65nable_auto_resize\x18\x10 \x01(\x08\x12\x1c\n\x14output_image_channel\x18\r \x01(\r\x12\x11\n\tcrop_left\x18\x04 \x01(\r\x12\x10\n\x08\x63rop_top\x18\x05 \x01(\r\x12\x12\n\ncrop_right\x18\x06 \x01(\r\x12\x13\n\x0b\x63rop_bottom\x18\x07 \x01(\r\x12\x16\n\x0emin_bbox_width\x18\x08 \x01(\x02\x12\x17\n\x0fmin_bbox_height\x18\t \x01(\x02\x12\x13\n\x0bscale_width\x18\n \x01(\x02\x12\x14\n\x0cscale_height\x18\x0b \x01(\x02\x1a\xd5\x01\n\x13SpatialAugmentation\x12\x19\n\x11hflip_probability\x18\x01 \x01(\x02\x12\x19\n\x11vflip_probability\x18\x02 \x01(\x02\x12\x10\n\x08zoom_min\x18\x03 \x01(\x02\x12\x10\n\x08zoom_max\x18\x04 \x01(\x02\x12\x17\n\x0ftranslate_max_x\x18\x05 \x01(\x02\x12\x17\n\x0ftranslate_max_y\x18\x06 \x01(\x02\x12\x16\n\x0erotate_rad_max\x18\x07 \x01(\x02\x12\x1a\n\x12rotate_probability\x18\x08 \x01(\x02\x1a\x9c\x01\n\x11\x43olorAugmentation\x12\x1a\n\x12\x63olor_shift_stddev\x18\x01 \x01(\x02\x12\x18\n\x10hue_rotation_max\x18\x02 \x01(\x02\x12\x1c\n\x14saturation_shift_max\x18\x03 \x01(\x02\x12\x1a\n\x12\x63ontrast_scale_max\x18\x05 \x01(\x02\x12\x17\n\x0f\x63ontrast_center\x18\x08 \x01(\x02\x62\x06proto3')
)
_AUGMENTATIONCONFIG_PREPROCESSING = _descriptor.Descriptor(
name='Preprocessing',
full_name='AugmentationConfig.Preprocessing',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='output_image_width', full_name='AugmentationConfig.Preprocessing.output_image_width', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='output_image_height', full_name='AugmentationConfig.Preprocessing.output_image_height', index=1,
number=2, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='output_image_min', full_name='AugmentationConfig.Preprocessing.output_image_min', index=2,
number=14, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='output_image_max', full_name='AugmentationConfig.Preprocessing.output_image_max', index=3,
number=15, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='enable_auto_resize', full_name='AugmentationConfig.Preprocessing.enable_auto_resize', index=4,
number=16, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='output_image_channel', full_name='AugmentationConfig.Preprocessing.output_image_channel', index=5,
number=13, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='crop_left', full_name='AugmentationConfig.Preprocessing.crop_left', index=6,
number=4, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='crop_top', full_name='AugmentationConfig.Preprocessing.crop_top', index=7,
number=5, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='crop_right', full_name='AugmentationConfig.Preprocessing.crop_right', index=8,
number=6, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='crop_bottom', full_name='AugmentationConfig.Preprocessing.crop_bottom', index=9,
number=7, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='min_bbox_width', full_name='AugmentationConfig.Preprocessing.min_bbox_width', index=10,
number=8, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='min_bbox_height', full_name='AugmentationConfig.Preprocessing.min_bbox_height', index=11,
number=9, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='scale_width', full_name='AugmentationConfig.Preprocessing.scale_width', index=12,
number=10, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='scale_height', full_name='AugmentationConfig.Preprocessing.scale_height', index=13,
number=11, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=288,
serialized_end=640,
)
_AUGMENTATIONCONFIG_SPATIALAUGMENTATION = _descriptor.Descriptor(
name='SpatialAugmentation',
full_name='AugmentationConfig.SpatialAugmentation',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='hflip_probability', full_name='AugmentationConfig.SpatialAugmentation.hflip_probability', index=0,
number=1, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='vflip_probability', full_name='AugmentationConfig.SpatialAugmentation.vflip_probability', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='zoom_min', full_name='AugmentationConfig.SpatialAugmentation.zoom_min', index=2,
number=3, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='zoom_max', full_name='AugmentationConfig.SpatialAugmentation.zoom_max', index=3,
number=4, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='translate_max_x', full_name='AugmentationConfig.SpatialAugmentation.translate_max_x', index=4,
number=5, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='translate_max_y', full_name='AugmentationConfig.SpatialAugmentation.translate_max_y', index=5,
number=6, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='rotate_rad_max', full_name='AugmentationConfig.SpatialAugmentation.rotate_rad_max', index=6,
number=7, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='rotate_probability', full_name='AugmentationConfig.SpatialAugmentation.rotate_probability', index=7,
number=8, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=643,
serialized_end=856,
)
_AUGMENTATIONCONFIG_COLORAUGMENTATION = _descriptor.Descriptor(
name='ColorAugmentation',
full_name='AugmentationConfig.ColorAugmentation',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='color_shift_stddev', full_name='AugmentationConfig.ColorAugmentation.color_shift_stddev', index=0,
number=1, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='hue_rotation_max', full_name='AugmentationConfig.ColorAugmentation.hue_rotation_max', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='saturation_shift_max', full_name='AugmentationConfig.ColorAugmentation.saturation_shift_max', index=2,
number=3, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='contrast_scale_max', full_name='AugmentationConfig.ColorAugmentation.contrast_scale_max', index=3,
number=5, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='contrast_center', full_name='AugmentationConfig.ColorAugmentation.contrast_center', index=4,
number=8, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=859,
serialized_end=1015,
)
_AUGMENTATIONCONFIG = _descriptor.Descriptor(
name='AugmentationConfig',
full_name='AugmentationConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='preprocessing', full_name='AugmentationConfig.preprocessing', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='spatial_augmentation', full_name='AugmentationConfig.spatial_augmentation', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='color_augmentation', full_name='AugmentationConfig.color_augmentation', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_AUGMENTATIONCONFIG_PREPROCESSING, _AUGMENTATIONCONFIG_SPATIALAUGMENTATION, _AUGMENTATIONCONFIG_COLORAUGMENTATION, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=69,
serialized_end=1015,
)
_AUGMENTATIONCONFIG_PREPROCESSING.containing_type = _AUGMENTATIONCONFIG
_AUGMENTATIONCONFIG_SPATIALAUGMENTATION.containing_type = _AUGMENTATIONCONFIG
_AUGMENTATIONCONFIG_COLORAUGMENTATION.containing_type = _AUGMENTATIONCONFIG
_AUGMENTATIONCONFIG.fields_by_name['preprocessing'].message_type = _AUGMENTATIONCONFIG_PREPROCESSING
_AUGMENTATIONCONFIG.fields_by_name['spatial_augmentation'].message_type = _AUGMENTATIONCONFIG_SPATIALAUGMENTATION
_AUGMENTATIONCONFIG.fields_by_name['color_augmentation'].message_type = _AUGMENTATIONCONFIG_COLORAUGMENTATION
DESCRIPTOR.message_types_by_name['AugmentationConfig'] = _AUGMENTATIONCONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
AugmentationConfig = _reflection.GeneratedProtocolMessageType('AugmentationConfig', (_message.Message,), dict(
Preprocessing = _reflection.GeneratedProtocolMessageType('Preprocessing', (_message.Message,), dict(
DESCRIPTOR = _AUGMENTATIONCONFIG_PREPROCESSING,
__module__ = 'nvidia_tao_deploy.cv.faster_rcnn.proto.augmentation_config_pb2'
# @@protoc_insertion_point(class_scope:AugmentationConfig.Preprocessing)
))
,
SpatialAugmentation = _reflection.GeneratedProtocolMessageType('SpatialAugmentation', (_message.Message,), dict(
DESCRIPTOR = _AUGMENTATIONCONFIG_SPATIALAUGMENTATION,
__module__ = 'nvidia_tao_deploy.cv.faster_rcnn.proto.augmentation_config_pb2'
# @@protoc_insertion_point(class_scope:AugmentationConfig.SpatialAugmentation)
))
,
ColorAugmentation = _reflection.GeneratedProtocolMessageType('ColorAugmentation', (_message.Message,), dict(
DESCRIPTOR = _AUGMENTATIONCONFIG_COLORAUGMENTATION,
__module__ = 'nvidia_tao_deploy.cv.faster_rcnn.proto.augmentation_config_pb2'
# @@protoc_insertion_point(class_scope:AugmentationConfig.ColorAugmentation)
))
,
DESCRIPTOR = _AUGMENTATIONCONFIG,
__module__ = 'nvidia_tao_deploy.cv.faster_rcnn.proto.augmentation_config_pb2'
# @@protoc_insertion_point(class_scope:AugmentationConfig)
))
_sym_db.RegisterMessage(AugmentationConfig)
_sym_db.RegisterMessage(AugmentationConfig.Preprocessing)
_sym_db.RegisterMessage(AugmentationConfig.SpatialAugmentation)
_sym_db.RegisterMessage(AugmentationConfig.ColorAugmentation)
# @@protoc_insertion_point(module_scope)
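# --- Usage sketch -------------------------------------------------------------
# Singular submessages such as preprocessing are created lazily on first write,
# so nested fields can be assigned directly. The sizes and probability below
# are illustrative:
#
#     from nvidia_tao_deploy.cv.faster_rcnn.proto.augmentation_config_pb2 import AugmentationConfig
#
#     aug = AugmentationConfig()
#     aug.preprocessing.output_image_width = 1280
#     aug.preprocessing.output_image_height = 384
#     aug.preprocessing.enable_auto_resize = True
#     aug.spatial_augmentation.hflip_probability = 0.5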
| tao_deploy-main | nvidia_tao_deploy/cv/faster_rcnn/proto/augmentation_config_pb2.py |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_deploy/cv/faster_rcnn/proto/inference.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from nvidia_tao_deploy.cv.faster_rcnn.proto import trt_config_pb2 as nvidia__tao__deploy_dot_cv_dot_faster__rcnn_dot_proto_dot_trt__config__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='nvidia_tao_deploy/cv/faster_rcnn/proto/inference.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n6nvidia_tao_deploy/cv/faster_rcnn/proto/inference.proto\x1a\x37nvidia_tao_deploy/cv/faster_rcnn/proto/trt_config.proto\"\xc4\x03\n\x0fInferenceConfig\x12\x12\n\nimages_dir\x18\x01 \x01(\t\x12\r\n\x05model\x18\x02 \x01(\t\x12\x12\n\nbatch_size\x18\x0f \x01(\r\x12\x19\n\x11rpn_pre_nms_top_N\x18\n \x01(\r\x12\x19\n\x11rpn_nms_max_boxes\x18\x07 \x01(\r\x12!\n\x19rpn_nms_overlap_threshold\x18\x08 \x01(\x02\x12 \n\x18\x62\x62ox_visualize_threshold\x18\x05 \x01(\x02\x12\x1f\n\x17object_confidence_thres\x18\x10 \x01(\x02\x12 \n\x18\x63lassifier_nms_max_boxes\x18\t \x01(\r\x12(\n classifier_nms_overlap_threshold\x18\x06 \x01(\x02\x12\"\n\x1a\x64\x65tection_image_output_dir\x18\x0b \x01(\t\x12\x17\n\x0f\x62\x62ox_caption_on\x18\x0c \x01(\x08\x12\x17\n\x0flabels_dump_dir\x18\r \x01(\t\x12$\n\rtrt_inference\x18\x0e \x01(\x0b\x32\r.TrtInference\x12\x16\n\x0enms_score_bits\x18\x11 \x01(\rb\x06proto3')
,
dependencies=[nvidia__tao__deploy_dot_cv_dot_faster__rcnn_dot_proto_dot_trt__config__pb2.DESCRIPTOR,])
_INFERENCECONFIG = _descriptor.Descriptor(
name='InferenceConfig',
full_name='InferenceConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='images_dir', full_name='InferenceConfig.images_dir', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='model', full_name='InferenceConfig.model', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='batch_size', full_name='InferenceConfig.batch_size', index=2,
number=15, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='rpn_pre_nms_top_N', full_name='InferenceConfig.rpn_pre_nms_top_N', index=3,
number=10, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='rpn_nms_max_boxes', full_name='InferenceConfig.rpn_nms_max_boxes', index=4,
number=7, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='rpn_nms_overlap_threshold', full_name='InferenceConfig.rpn_nms_overlap_threshold', index=5,
number=8, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='bbox_visualize_threshold', full_name='InferenceConfig.bbox_visualize_threshold', index=6,
number=5, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='object_confidence_thres', full_name='InferenceConfig.object_confidence_thres', index=7,
number=16, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='classifier_nms_max_boxes', full_name='InferenceConfig.classifier_nms_max_boxes', index=8,
number=9, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='classifier_nms_overlap_threshold', full_name='InferenceConfig.classifier_nms_overlap_threshold', index=9,
number=6, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='detection_image_output_dir', full_name='InferenceConfig.detection_image_output_dir', index=10,
number=11, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='bbox_caption_on', full_name='InferenceConfig.bbox_caption_on', index=11,
number=12, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='labels_dump_dir', full_name='InferenceConfig.labels_dump_dir', index=12,
number=13, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='trt_inference', full_name='InferenceConfig.trt_inference', index=13,
number=14, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='nms_score_bits', full_name='InferenceConfig.nms_score_bits', index=14,
number=17, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=116,
serialized_end=568,
)
_INFERENCECONFIG.fields_by_name['trt_inference'].message_type = nvidia__tao__deploy_dot_cv_dot_faster__rcnn_dot_proto_dot_trt__config__pb2._TRTINFERENCE
DESCRIPTOR.message_types_by_name['InferenceConfig'] = _INFERENCECONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
InferenceConfig = _reflection.GeneratedProtocolMessageType('InferenceConfig', (_message.Message,), dict(
DESCRIPTOR = _INFERENCECONFIG,
__module__ = 'nvidia_tao_deploy.cv.faster_rcnn.proto.inference_pb2'
# @@protoc_insertion_point(class_scope:InferenceConfig)
))
_sym_db.RegisterMessage(InferenceConfig)
# @@protoc_insertion_point(module_scope)
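# --- Usage sketch -------------------------------------------------------------
# InferenceConfig embeds TrtInference from trt_config_pb2 as its trt_inference
# field. The directory and engine path below are illustrative:
#
#     from nvidia_tao_deploy.cv.faster_rcnn.proto.inference_pb2 import InferenceConfig
#
#     infer = InferenceConfig()
#     infer.images_dir = "/workspace/data/images"
#     infer.batch_size = 1
#     infer.trt_inference.trt_engine = "/workspace/models/frcnn.engine"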
| tao_deploy-main | nvidia_tao_deploy/cv/faster_rcnn/proto/inference_pb2.py |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_deploy/cv/faster_rcnn/proto/experiment.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from nvidia_tao_deploy.cv.faster_rcnn.proto import augmentation_config_pb2 as nvidia__tao__deploy_dot_cv_dot_faster__rcnn_dot_proto_dot_augmentation__config__pb2
from nvidia_tao_deploy.cv.faster_rcnn.proto import dataset_config_pb2 as nvidia__tao__deploy_dot_cv_dot_faster__rcnn_dot_proto_dot_dataset__config__pb2
from nvidia_tao_deploy.cv.faster_rcnn.proto import training_pb2 as nvidia__tao__deploy_dot_cv_dot_faster__rcnn_dot_proto_dot_training__pb2
from nvidia_tao_deploy.cv.faster_rcnn.proto import model_pb2 as nvidia__tao__deploy_dot_cv_dot_faster__rcnn_dot_proto_dot_model__pb2
from nvidia_tao_deploy.cv.faster_rcnn.proto import inference_pb2 as nvidia__tao__deploy_dot_cv_dot_faster__rcnn_dot_proto_dot_inference__pb2
from nvidia_tao_deploy.cv.faster_rcnn.proto import evaluation_pb2 as nvidia__tao__deploy_dot_cv_dot_faster__rcnn_dot_proto_dot_evaluation__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='nvidia_tao_deploy/cv/faster_rcnn/proto/experiment.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n7nvidia_tao_deploy/cv/faster_rcnn/proto/experiment.proto\x1a@nvidia_tao_deploy/cv/faster_rcnn/proto/augmentation_config.proto\x1a;nvidia_tao_deploy/cv/faster_rcnn/proto/dataset_config.proto\x1a\x35nvidia_tao_deploy/cv/faster_rcnn/proto/training.proto\x1a\x32nvidia_tao_deploy/cv/faster_rcnn/proto/model.proto\x1a\x36nvidia_tao_deploy/cv/faster_rcnn/proto/inference.proto\x1a\x37nvidia_tao_deploy/cv/faster_rcnn/proto/evaluation.proto\"\xc5\x02\n\nExperiment\x12\x13\n\x0brandom_seed\x18\x01 \x01(\r\x12\x0f\n\x07verbose\x18\x02 \x01(\x08\x12\x0f\n\x07\x65nc_key\x18\x06 \x01(\t\x12&\n\x0e\x64\x61taset_config\x18\x05 \x01(\x0b\x32\x0e.DatasetConfig\x12\x30\n\x13\x61ugmentation_config\x18\x07 \x01(\x0b\x32\x13.AugmentationConfig\x12\"\n\x0cmodel_config\x18\x03 \x01(\x0b\x32\x0c.ModelConfig\x12(\n\x0ftraining_config\x18\x04 \x01(\x0b\x32\x0f.TrainingConfig\x12*\n\x10inference_config\x18\x08 \x01(\x0b\x32\x10.InferenceConfig\x12,\n\x11\x65valuation_config\x18\t \x01(\x0b\x32\x11.EvaluationConfigb\x06proto3')
,
dependencies=[nvidia__tao__deploy_dot_cv_dot_faster__rcnn_dot_proto_dot_augmentation__config__pb2.DESCRIPTOR,nvidia__tao__deploy_dot_cv_dot_faster__rcnn_dot_proto_dot_dataset__config__pb2.DESCRIPTOR,nvidia__tao__deploy_dot_cv_dot_faster__rcnn_dot_proto_dot_training__pb2.DESCRIPTOR,nvidia__tao__deploy_dot_cv_dot_faster__rcnn_dot_proto_dot_model__pb2.DESCRIPTOR,nvidia__tao__deploy_dot_cv_dot_faster__rcnn_dot_proto_dot_inference__pb2.DESCRIPTOR,nvidia__tao__deploy_dot_cv_dot_faster__rcnn_dot_proto_dot_evaluation__pb2.DESCRIPTOR,])
_EXPERIMENT = _descriptor.Descriptor(
name='Experiment',
full_name='Experiment',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='random_seed', full_name='Experiment.random_seed', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='verbose', full_name='Experiment.verbose', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='enc_key', full_name='Experiment.enc_key', index=2,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='dataset_config', full_name='Experiment.dataset_config', index=3,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='augmentation_config', full_name='Experiment.augmentation_config', index=4,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='model_config', full_name='Experiment.model_config', index=5,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='training_config', full_name='Experiment.training_config', index=6,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='inference_config', full_name='Experiment.inference_config', index=7,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='evaluation_config', full_name='Experiment.evaluation_config', index=8,
number=9, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=407,
serialized_end=732,
)
_EXPERIMENT.fields_by_name['dataset_config'].message_type = nvidia__tao__deploy_dot_cv_dot_faster__rcnn_dot_proto_dot_dataset__config__pb2._DATASETCONFIG
_EXPERIMENT.fields_by_name['augmentation_config'].message_type = nvidia__tao__deploy_dot_cv_dot_faster__rcnn_dot_proto_dot_augmentation__config__pb2._AUGMENTATIONCONFIG
_EXPERIMENT.fields_by_name['model_config'].message_type = nvidia__tao__deploy_dot_cv_dot_faster__rcnn_dot_proto_dot_model__pb2._MODELCONFIG
_EXPERIMENT.fields_by_name['training_config'].message_type = nvidia__tao__deploy_dot_cv_dot_faster__rcnn_dot_proto_dot_training__pb2._TRAININGCONFIG
_EXPERIMENT.fields_by_name['inference_config'].message_type = nvidia__tao__deploy_dot_cv_dot_faster__rcnn_dot_proto_dot_inference__pb2._INFERENCECONFIG
_EXPERIMENT.fields_by_name['evaluation_config'].message_type = nvidia__tao__deploy_dot_cv_dot_faster__rcnn_dot_proto_dot_evaluation__pb2._EVALUATIONCONFIG
DESCRIPTOR.message_types_by_name['Experiment'] = _EXPERIMENT
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Experiment = _reflection.GeneratedProtocolMessageType('Experiment', (_message.Message,), dict(
DESCRIPTOR = _EXPERIMENT,
__module__ = 'nvidia_tao_deploy.cv.faster_rcnn.proto.experiment_pb2'
# @@protoc_insertion_point(class_scope:Experiment)
))
_sym_db.RegisterMessage(Experiment)
# @@protoc_insertion_point(module_scope)
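# --- Usage sketch -------------------------------------------------------------
# Experiment is the top-level container that spec files are parsed into (see
# proto/utils.py). A minimal text-format round trip; the field values are
# illustrative:
#
#     from google.protobuf import text_format
#     from nvidia_tao_deploy.cv.faster_rcnn.proto.experiment_pb2 import Experiment
#
#     exp = text_format.Merge('random_seed: 42 verbose: true', Experiment())
#     assert exp.random_seed == 42 and exp.verbose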
| tao_deploy-main | nvidia_tao_deploy/cv/faster_rcnn/proto/experiment_pb2.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TAO Deploy Config Base Utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from google.protobuf.text_format import Merge as merge_text_proto
from nvidia_tao_deploy.cv.faster_rcnn.proto.experiment_pb2 import Experiment
def load_proto(config):
"""Load the experiment proto."""
proto = Experiment()
def _load_from_file(filename, pb2):
if not os.path.exists(filename):
raise IOError(f"Specfile not found at: {filename}")
with open(filename, "r", encoding="utf-8") as f:
merge_text_proto(f.read(), pb2)
_load_from_file(config, proto)
return proto
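# A minimal usage sketch for load_proto; the spec path is hypothetical:
#
#     experiment = load_proto("/workspace/specs/frcnn_spec.txt")
#     print(experiment.random_seed)
#     print(experiment.training_config.enable_qat)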
| tao_deploy-main | nvidia_tao_deploy/cv/faster_rcnn/proto/utils.py |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_deploy/cv/faster_rcnn/proto/model.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from nvidia_tao_deploy.cv.faster_rcnn.proto import input_image_pb2 as nvidia__tao__deploy_dot_cv_dot_faster__rcnn_dot_proto_dot_input__image__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='nvidia_tao_deploy/cv/faster_rcnn/proto/model.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n2nvidia_tao_deploy/cv/faster_rcnn/proto/model.proto\x1a\x38nvidia_tao_deploy/cv/faster_rcnn/proto/input_image.proto\"/\n\x0f\x41nchorBoxConfig\x12\r\n\x05scale\x18\x01 \x03(\x02\x12\r\n\x05ratio\x18\x02 \x03(\x02\";\n\x10RoiPoolingConfig\x12\x11\n\tpool_size\x18\x01 \x01(\r\x12\x14\n\x0cpool_size_2x\x18\x02 \x01(\x08\"\xa8\x01\n\nActivation\x12\x17\n\x0f\x61\x63tivation_type\x18\x01 \x01(\t\x12\x44\n\x15\x61\x63tivation_parameters\x18\x02 \x03(\x0b\x32%.Activation.ActivationParametersEntry\x1a;\n\x19\x41\x63tivationParametersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x02:\x02\x38\x01\"\xdd\x03\n\x0bModelConfig\x12-\n\x12input_image_config\x18\x01 \x01(\x0b\x32\x11.InputImageConfig\x12\x0c\n\x04\x61rch\x18\x02 \x01(\t\x12+\n\x11\x61nchor_box_config\x18\x03 \x01(\x0b\x32\x10.AnchorBoxConfig\x12\x16\n\x0eroi_mini_batch\x18\x04 \x01(\r\x12\x12\n\nrpn_stride\x18\x05 \x01(\r\x12\x11\n\tfreeze_bn\x18\x06 \x01(\x08\x12\x14\n\x0c\x64ropout_rate\x18\x11 \x01(\x02\x12\x19\n\x11\x64rop_connect_rate\x18\x12 \x01(\x02\x12\x1f\n\x17rpn_cls_activation_type\x18\x07 \x01(\t\x12\x15\n\rfreeze_blocks\x18\t \x03(\x02\x12\x10\n\x08use_bias\x18\n \x01(\x08\x12-\n\x12roi_pooling_config\x18\x0b \x01(\x0b\x32\x11.RoiPoolingConfig\x12\x11\n\trfcn_mode\x18\x0c \x01(\x08\x12\x19\n\x11tf_proposal_layer\x18\r \x01(\x08\x12\x17\n\x0f\x61ll_projections\x18\x0e \x01(\x08\x12\x13\n\x0buse_pooling\x18\x0f \x01(\x08\x12\x1f\n\nactivation\x18\x13 \x01(\x0b\x32\x0b.Activationb\x06proto3')
,
dependencies=[nvidia__tao__deploy_dot_cv_dot_faster__rcnn_dot_proto_dot_input__image__pb2.DESCRIPTOR,])
_ANCHORBOXCONFIG = _descriptor.Descriptor(
name='AnchorBoxConfig',
full_name='AnchorBoxConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='scale', full_name='AnchorBoxConfig.scale', index=0,
number=1, type=2, cpp_type=6, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='ratio', full_name='AnchorBoxConfig.ratio', index=1,
number=2, type=2, cpp_type=6, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=112,
serialized_end=159,
)
_ROIPOOLINGCONFIG = _descriptor.Descriptor(
name='RoiPoolingConfig',
full_name='RoiPoolingConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='pool_size', full_name='RoiPoolingConfig.pool_size', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='pool_size_2x', full_name='RoiPoolingConfig.pool_size_2x', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=161,
serialized_end=220,
)
_ACTIVATION_ACTIVATIONPARAMETERSENTRY = _descriptor.Descriptor(
name='ActivationParametersEntry',
full_name='Activation.ActivationParametersEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='Activation.ActivationParametersEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='Activation.ActivationParametersEntry.value', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=_b('8\001'),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=332,
serialized_end=391,
)
_ACTIVATION = _descriptor.Descriptor(
name='Activation',
full_name='Activation',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='activation_type', full_name='Activation.activation_type', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='activation_parameters', full_name='Activation.activation_parameters', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_ACTIVATION_ACTIVATIONPARAMETERSENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=223,
serialized_end=391,
)
_MODELCONFIG = _descriptor.Descriptor(
name='ModelConfig',
full_name='ModelConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='input_image_config', full_name='ModelConfig.input_image_config', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='arch', full_name='ModelConfig.arch', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='anchor_box_config', full_name='ModelConfig.anchor_box_config', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='roi_mini_batch', full_name='ModelConfig.roi_mini_batch', index=3,
number=4, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='rpn_stride', full_name='ModelConfig.rpn_stride', index=4,
number=5, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='freeze_bn', full_name='ModelConfig.freeze_bn', index=5,
number=6, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='dropout_rate', full_name='ModelConfig.dropout_rate', index=6,
number=17, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='drop_connect_rate', full_name='ModelConfig.drop_connect_rate', index=7,
number=18, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='rpn_cls_activation_type', full_name='ModelConfig.rpn_cls_activation_type', index=8,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='freeze_blocks', full_name='ModelConfig.freeze_blocks', index=9,
number=9, type=2, cpp_type=6, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='use_bias', full_name='ModelConfig.use_bias', index=10,
number=10, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='roi_pooling_config', full_name='ModelConfig.roi_pooling_config', index=11,
number=11, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='rfcn_mode', full_name='ModelConfig.rfcn_mode', index=12,
number=12, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='tf_proposal_layer', full_name='ModelConfig.tf_proposal_layer', index=13,
number=13, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='all_projections', full_name='ModelConfig.all_projections', index=14,
number=14, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='use_pooling', full_name='ModelConfig.use_pooling', index=15,
number=15, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='activation', full_name='ModelConfig.activation', index=16,
number=19, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=394,
serialized_end=871,
)
_ACTIVATION_ACTIVATIONPARAMETERSENTRY.containing_type = _ACTIVATION
_ACTIVATION.fields_by_name['activation_parameters'].message_type = _ACTIVATION_ACTIVATIONPARAMETERSENTRY
_MODELCONFIG.fields_by_name['input_image_config'].message_type = nvidia__tao__deploy_dot_cv_dot_faster__rcnn_dot_proto_dot_input__image__pb2._INPUTIMAGECONFIG
_MODELCONFIG.fields_by_name['anchor_box_config'].message_type = _ANCHORBOXCONFIG
_MODELCONFIG.fields_by_name['roi_pooling_config'].message_type = _ROIPOOLINGCONFIG
_MODELCONFIG.fields_by_name['activation'].message_type = _ACTIVATION
DESCRIPTOR.message_types_by_name['AnchorBoxConfig'] = _ANCHORBOXCONFIG
DESCRIPTOR.message_types_by_name['RoiPoolingConfig'] = _ROIPOOLINGCONFIG
DESCRIPTOR.message_types_by_name['Activation'] = _ACTIVATION
DESCRIPTOR.message_types_by_name['ModelConfig'] = _MODELCONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
AnchorBoxConfig = _reflection.GeneratedProtocolMessageType('AnchorBoxConfig', (_message.Message,), dict(
DESCRIPTOR = _ANCHORBOXCONFIG,
__module__ = 'nvidia_tao_deploy.cv.faster_rcnn.proto.model_pb2'
# @@protoc_insertion_point(class_scope:AnchorBoxConfig)
))
_sym_db.RegisterMessage(AnchorBoxConfig)
RoiPoolingConfig = _reflection.GeneratedProtocolMessageType('RoiPoolingConfig', (_message.Message,), dict(
DESCRIPTOR = _ROIPOOLINGCONFIG,
__module__ = 'nvidia_tao_deploy.cv.faster_rcnn.proto.model_pb2'
# @@protoc_insertion_point(class_scope:RoiPoolingConfig)
))
_sym_db.RegisterMessage(RoiPoolingConfig)
Activation = _reflection.GeneratedProtocolMessageType('Activation', (_message.Message,), dict(
ActivationParametersEntry = _reflection.GeneratedProtocolMessageType('ActivationParametersEntry', (_message.Message,), dict(
DESCRIPTOR = _ACTIVATION_ACTIVATIONPARAMETERSENTRY,
__module__ = 'nvidia_tao_deploy.cv.faster_rcnn.proto.model_pb2'
# @@protoc_insertion_point(class_scope:Activation.ActivationParametersEntry)
))
,
DESCRIPTOR = _ACTIVATION,
__module__ = 'nvidia_tao_deploy.cv.faster_rcnn.proto.model_pb2'
# @@protoc_insertion_point(class_scope:Activation)
))
_sym_db.RegisterMessage(Activation)
_sym_db.RegisterMessage(Activation.ActivationParametersEntry)
ModelConfig = _reflection.GeneratedProtocolMessageType('ModelConfig', (_message.Message,), dict(
DESCRIPTOR = _MODELCONFIG,
__module__ = 'nvidia_tao_deploy.cv.faster_rcnn.proto.model_pb2'
# @@protoc_insertion_point(class_scope:ModelConfig)
))
_sym_db.RegisterMessage(ModelConfig)
_ACTIVATION_ACTIVATIONPARAMETERSENTRY._options = None
# @@protoc_insertion_point(module_scope)
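# --- Illustrative usage sketch (documentation only; not part of the protoc output) ---
# A minimal, hedged example of building a ModelConfig message with the classes
# generated above. All values below are placeholders, not recommended settings.
if __name__ == '__main__':
    cfg = ModelConfig()
    cfg.arch = 'resnet'
    cfg.rpn_stride = 16
    cfg.anchor_box_config.scale.extend([64.0, 128.0, 256.0])   # repeated float
    cfg.anchor_box_config.ratio.extend([0.5, 1.0, 2.0])
    cfg.roi_pooling_config.pool_size = 7
    cfg.activation.activation_parameters['alpha'] = 0.2        # map<string, float>
    print(cfg)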
| tao_deploy-main | nvidia_tao_deploy/cv/faster_rcnn/proto/model_pb2.py |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_deploy/cv/faster_rcnn/proto/optimizer.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='nvidia_tao_deploy/cv/faster_rcnn/proto/optimizer.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n6nvidia_tao_deploy/cv/faster_rcnn/proto/optimizer.proto\"r\n\x13\x41\x64\x61mOptimizerConfig\x12\n\n\x02lr\x18\x01 \x01(\x02\x12\x0e\n\x06\x62\x65ta_1\x18\x02 \x01(\x02\x12\x0e\n\x06\x62\x65ta_2\x18\x03 \x01(\x02\x12\x0f\n\x07\x65psilon\x18\x04 \x01(\x02\x12\r\n\x05\x64\x65\x63\x61y\x18\x05 \x01(\x02\x12\x0f\n\x07\x61msgrad\x18\x06 \x01(\x08\"S\n\x12SgdOptimizerConfig\x12\n\n\x02lr\x18\x01 \x01(\x02\x12\x10\n\x08momentum\x18\x02 \x01(\x02\x12\r\n\x05\x64\x65\x63\x61y\x18\x03 \x01(\x02\x12\x10\n\x08nesterov\x18\x04 \x01(\x08\"$\n\x16RmspropOptimizerConfig\x12\n\n\x02lr\x18\x01 \x01(\x02\"\x90\x01\n\x0fOptimizerConfig\x12$\n\x04\x61\x64\x61m\x18\x01 \x01(\x0b\x32\x14.AdamOptimizerConfigH\x00\x12\"\n\x03sgd\x18\x02 \x01(\x0b\x32\x13.SgdOptimizerConfigH\x00\x12*\n\x07rmsprop\x18\x03 \x01(\x0b\x32\x17.RmspropOptimizerConfigH\x00\x42\x07\n\x05optimb\x06proto3')
)
_ADAMOPTIMIZERCONFIG = _descriptor.Descriptor(
name='AdamOptimizerConfig',
full_name='AdamOptimizerConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='lr', full_name='AdamOptimizerConfig.lr', index=0,
number=1, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='beta_1', full_name='AdamOptimizerConfig.beta_1', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='beta_2', full_name='AdamOptimizerConfig.beta_2', index=2,
number=3, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='epsilon', full_name='AdamOptimizerConfig.epsilon', index=3,
number=4, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='decay', full_name='AdamOptimizerConfig.decay', index=4,
number=5, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='amsgrad', full_name='AdamOptimizerConfig.amsgrad', index=5,
number=6, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=58,
serialized_end=172,
)
_SGDOPTIMIZERCONFIG = _descriptor.Descriptor(
name='SgdOptimizerConfig',
full_name='SgdOptimizerConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='lr', full_name='SgdOptimizerConfig.lr', index=0,
number=1, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='momentum', full_name='SgdOptimizerConfig.momentum', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='decay', full_name='SgdOptimizerConfig.decay', index=2,
number=3, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='nesterov', full_name='SgdOptimizerConfig.nesterov', index=3,
number=4, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=174,
serialized_end=257,
)
_RMSPROPOPTIMIZERCONFIG = _descriptor.Descriptor(
name='RmspropOptimizerConfig',
full_name='RmspropOptimizerConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='lr', full_name='RmspropOptimizerConfig.lr', index=0,
number=1, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=259,
serialized_end=295,
)
_OPTIMIZERCONFIG = _descriptor.Descriptor(
name='OptimizerConfig',
full_name='OptimizerConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='adam', full_name='OptimizerConfig.adam', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='sgd', full_name='OptimizerConfig.sgd', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='rmsprop', full_name='OptimizerConfig.rmsprop', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='optim', full_name='OptimizerConfig.optim',
index=0, containing_type=None, fields=[]),
],
serialized_start=298,
serialized_end=442,
)
_OPTIMIZERCONFIG.fields_by_name['adam'].message_type = _ADAMOPTIMIZERCONFIG
_OPTIMIZERCONFIG.fields_by_name['sgd'].message_type = _SGDOPTIMIZERCONFIG
_OPTIMIZERCONFIG.fields_by_name['rmsprop'].message_type = _RMSPROPOPTIMIZERCONFIG
_OPTIMIZERCONFIG.oneofs_by_name['optim'].fields.append(
_OPTIMIZERCONFIG.fields_by_name['adam'])
_OPTIMIZERCONFIG.fields_by_name['adam'].containing_oneof = _OPTIMIZERCONFIG.oneofs_by_name['optim']
_OPTIMIZERCONFIG.oneofs_by_name['optim'].fields.append(
_OPTIMIZERCONFIG.fields_by_name['sgd'])
_OPTIMIZERCONFIG.fields_by_name['sgd'].containing_oneof = _OPTIMIZERCONFIG.oneofs_by_name['optim']
_OPTIMIZERCONFIG.oneofs_by_name['optim'].fields.append(
_OPTIMIZERCONFIG.fields_by_name['rmsprop'])
_OPTIMIZERCONFIG.fields_by_name['rmsprop'].containing_oneof = _OPTIMIZERCONFIG.oneofs_by_name['optim']
DESCRIPTOR.message_types_by_name['AdamOptimizerConfig'] = _ADAMOPTIMIZERCONFIG
DESCRIPTOR.message_types_by_name['SgdOptimizerConfig'] = _SGDOPTIMIZERCONFIG
DESCRIPTOR.message_types_by_name['RmspropOptimizerConfig'] = _RMSPROPOPTIMIZERCONFIG
DESCRIPTOR.message_types_by_name['OptimizerConfig'] = _OPTIMIZERCONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
AdamOptimizerConfig = _reflection.GeneratedProtocolMessageType('AdamOptimizerConfig', (_message.Message,), dict(
DESCRIPTOR = _ADAMOPTIMIZERCONFIG,
__module__ = 'nvidia_tao_deploy.cv.faster_rcnn.proto.optimizer_pb2'
# @@protoc_insertion_point(class_scope:AdamOptimizerConfig)
))
_sym_db.RegisterMessage(AdamOptimizerConfig)
SgdOptimizerConfig = _reflection.GeneratedProtocolMessageType('SgdOptimizerConfig', (_message.Message,), dict(
DESCRIPTOR = _SGDOPTIMIZERCONFIG,
__module__ = 'nvidia_tao_deploy.cv.faster_rcnn.proto.optimizer_pb2'
# @@protoc_insertion_point(class_scope:SgdOptimizerConfig)
))
_sym_db.RegisterMessage(SgdOptimizerConfig)
RmspropOptimizerConfig = _reflection.GeneratedProtocolMessageType('RmspropOptimizerConfig', (_message.Message,), dict(
DESCRIPTOR = _RMSPROPOPTIMIZERCONFIG,
__module__ = 'nvidia_tao_deploy.cv.faster_rcnn.proto.optimizer_pb2'
# @@protoc_insertion_point(class_scope:RmspropOptimizerConfig)
))
_sym_db.RegisterMessage(RmspropOptimizerConfig)
OptimizerConfig = _reflection.GeneratedProtocolMessageType('OptimizerConfig', (_message.Message,), dict(
DESCRIPTOR = _OPTIMIZERCONFIG,
__module__ = 'nvidia_tao_deploy.cv.faster_rcnn.proto.optimizer_pb2'
# @@protoc_insertion_point(class_scope:OptimizerConfig)
))
_sym_db.RegisterMessage(OptimizerConfig)
# @@protoc_insertion_point(module_scope)
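# --- Illustrative usage sketch (documentation only; not part of the protoc output) ---
# OptimizerConfig keeps its three optimizers in the 'optim' oneof, so setting
# one branch clears the others. The values below are placeholders.
if __name__ == '__main__':
    opt = OptimizerConfig()
    opt.adam.lr = 1e-4
    opt.adam.beta_1 = 0.9
    assert opt.WhichOneof('optim') == 'adam'
    opt.sgd.lr = 0.01  # switching to sgd clears the adam branch
    assert opt.WhichOneof('optim') == 'sgd'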
| tao_deploy-main | nvidia_tao_deploy/cv/faster_rcnn/proto/optimizer_pb2.py |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_deploy/cv/faster_rcnn/proto/learning_rate.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='nvidia_tao_deploy/cv/faster_rcnn/proto/learning_rate.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n:nvidia_tao_deploy/cv/faster_rcnn/proto/learning_rate.proto\"\x86\x01\n\x18SoftStartAnnealingConfig\x12\x0f\n\x07\x62\x61se_lr\x18\x01 \x01(\x02\x12\x10\n\x08start_lr\x18\x02 \x01(\x02\x12\x12\n\nsoft_start\x18\x03 \x01(\x02\x12\x18\n\x10\x61nnealing_points\x18\x04 \x03(\x02\x12\x19\n\x11\x61nnealing_divider\x18\x05 \x01(\x02\"A\n\x0cStepLrConfig\x12\x0f\n\x07\x62\x61se_lr\x18\x01 \x01(\x02\x12\r\n\x05gamma\x18\x02 \x01(\x02\x12\x11\n\tstep_size\x18\x03 \x01(\x02\"g\n\x08LRConfig\x12/\n\nsoft_start\x18\x01 \x01(\x0b\x32\x19.SoftStartAnnealingConfigH\x00\x12\x1d\n\x04step\x18\x02 \x01(\x0b\x32\r.StepLrConfigH\x00\x42\x0b\n\tlr_configb\x06proto3')
)
_SOFTSTARTANNEALINGCONFIG = _descriptor.Descriptor(
name='SoftStartAnnealingConfig',
full_name='SoftStartAnnealingConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='base_lr', full_name='SoftStartAnnealingConfig.base_lr', index=0,
number=1, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='start_lr', full_name='SoftStartAnnealingConfig.start_lr', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='soft_start', full_name='SoftStartAnnealingConfig.soft_start', index=2,
number=3, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='annealing_points', full_name='SoftStartAnnealingConfig.annealing_points', index=3,
number=4, type=2, cpp_type=6, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='annealing_divider', full_name='SoftStartAnnealingConfig.annealing_divider', index=4,
number=5, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=63,
serialized_end=197,
)
_STEPLRCONFIG = _descriptor.Descriptor(
name='StepLrConfig',
full_name='StepLrConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='base_lr', full_name='StepLrConfig.base_lr', index=0,
number=1, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='gamma', full_name='StepLrConfig.gamma', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='step_size', full_name='StepLrConfig.step_size', index=2,
number=3, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=199,
serialized_end=264,
)
_LRCONFIG = _descriptor.Descriptor(
name='LRConfig',
full_name='LRConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='soft_start', full_name='LRConfig.soft_start', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='step', full_name='LRConfig.step', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='lr_config', full_name='LRConfig.lr_config',
index=0, containing_type=None, fields=[]),
],
serialized_start=266,
serialized_end=369,
)
_LRCONFIG.fields_by_name['soft_start'].message_type = _SOFTSTARTANNEALINGCONFIG
_LRCONFIG.fields_by_name['step'].message_type = _STEPLRCONFIG
_LRCONFIG.oneofs_by_name['lr_config'].fields.append(
_LRCONFIG.fields_by_name['soft_start'])
_LRCONFIG.fields_by_name['soft_start'].containing_oneof = _LRCONFIG.oneofs_by_name['lr_config']
_LRCONFIG.oneofs_by_name['lr_config'].fields.append(
_LRCONFIG.fields_by_name['step'])
_LRCONFIG.fields_by_name['step'].containing_oneof = _LRCONFIG.oneofs_by_name['lr_config']
DESCRIPTOR.message_types_by_name['SoftStartAnnealingConfig'] = _SOFTSTARTANNEALINGCONFIG
DESCRIPTOR.message_types_by_name['StepLrConfig'] = _STEPLRCONFIG
DESCRIPTOR.message_types_by_name['LRConfig'] = _LRCONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
SoftStartAnnealingConfig = _reflection.GeneratedProtocolMessageType('SoftStartAnnealingConfig', (_message.Message,), dict(
DESCRIPTOR = _SOFTSTARTANNEALINGCONFIG,
__module__ = 'nvidia_tao_deploy.cv.faster_rcnn.proto.learning_rate_pb2'
# @@protoc_insertion_point(class_scope:SoftStartAnnealingConfig)
))
_sym_db.RegisterMessage(SoftStartAnnealingConfig)
StepLrConfig = _reflection.GeneratedProtocolMessageType('StepLrConfig', (_message.Message,), dict(
DESCRIPTOR = _STEPLRCONFIG,
__module__ = 'nvidia_tao_deploy.cv.faster_rcnn.proto.learning_rate_pb2'
# @@protoc_insertion_point(class_scope:StepLrConfig)
))
_sym_db.RegisterMessage(StepLrConfig)
LRConfig = _reflection.GeneratedProtocolMessageType('LRConfig', (_message.Message,), dict(
DESCRIPTOR = _LRCONFIG,
__module__ = 'nvidia_tao_deploy.cv.faster_rcnn.proto.learning_rate_pb2'
# @@protoc_insertion_point(class_scope:LRConfig)
))
_sym_db.RegisterMessage(LRConfig)
# @@protoc_insertion_point(module_scope)
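# --- Illustrative usage sketch (documentation only; not part of the protoc output) ---
# LRConfig holds exactly one schedule through the 'lr_config' oneof; the
# values below are placeholders.
if __name__ == '__main__':
    lr = LRConfig()
    lr.soft_start.base_lr = 2e-5
    lr.soft_start.start_lr = 2e-6
    lr.soft_start.soft_start = 0.3
    lr.soft_start.annealing_points.extend([0.8, 0.9])
    lr.soft_start.annealing_divider = 10.0
    print(lr.WhichOneof('lr_config'))  # -> 'soft_start'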
| tao_deploy-main | nvidia_tao_deploy/cv/faster_rcnn/proto/learning_rate_pb2.py |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_deploy/cv/faster_rcnn/proto/evaluation.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from nvidia_tao_deploy.cv.faster_rcnn.proto import trt_config_pb2 as nvidia__tao__deploy_dot_cv_dot_faster__rcnn_dot_proto_dot_trt__config__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='nvidia_tao_deploy/cv/faster_rcnn/proto/evaluation.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n7nvidia_tao_deploy/cv/faster_rcnn/proto/evaluation.proto\x1a\x37nvidia_tao_deploy/cv/faster_rcnn/proto/trt_config.proto\"=\n\x11IoUThresholdRange\x12\r\n\x05start\x18\x01 \x01(\x02\x12\x0b\n\x03\x65nd\x18\x02 \x01(\x02\x12\x0c\n\x04step\x18\x03 \x01(\x02\"\x87\x04\n\x10\x45valuationConfig\x12\r\n\x05model\x18\x03 \x01(\t\x12\x19\n\x11rpn_pre_nms_top_N\x18\x0c \x01(\r\x12\x19\n\x11rpn_nms_max_boxes\x18\x06 \x01(\r\x12!\n\x19rpn_nms_overlap_threshold\x18\x07 \x01(\x02\x12 \n\x18\x63lassifier_nms_max_boxes\x18\x08 \x01(\r\x12(\n classifier_nms_overlap_threshold\x18\t \x01(\x02\x12\x1f\n\x17object_confidence_thres\x18\x0b \x01(\x02\x12 \n\x18use_voc07_11point_metric\x18\r \x01(\x08\x12)\n!validation_period_during_training\x18\x0f \x01(\r\x12\x12\n\nbatch_size\x18\x10 \x01(\r\x12%\n\x0etrt_evaluation\x18\x11 \x01(\x0b\x32\r.TrtInference\x12=\n\x1fgt_matching_iou_threshold_range\x18\x12 \x01(\x0b\x32\x12.IoUThresholdRangeH\x00\x12#\n\x19gt_matching_iou_threshold\x18\x13 \x01(\x02H\x00\x12\x1a\n\x12visualize_pr_curve\x18\x14 \x01(\x08\x42\x16\n\x14iou_threshold_configb\x06proto3')
,
dependencies=[nvidia__tao__deploy_dot_cv_dot_faster__rcnn_dot_proto_dot_trt__config__pb2.DESCRIPTOR,])
_IOUTHRESHOLDRANGE = _descriptor.Descriptor(
name='IoUThresholdRange',
full_name='IoUThresholdRange',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='start', full_name='IoUThresholdRange.start', index=0,
number=1, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='end', full_name='IoUThresholdRange.end', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='step', full_name='IoUThresholdRange.step', index=2,
number=3, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=116,
serialized_end=177,
)
_EVALUATIONCONFIG = _descriptor.Descriptor(
name='EvaluationConfig',
full_name='EvaluationConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='model', full_name='EvaluationConfig.model', index=0,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='rpn_pre_nms_top_N', full_name='EvaluationConfig.rpn_pre_nms_top_N', index=1,
number=12, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='rpn_nms_max_boxes', full_name='EvaluationConfig.rpn_nms_max_boxes', index=2,
number=6, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='rpn_nms_overlap_threshold', full_name='EvaluationConfig.rpn_nms_overlap_threshold', index=3,
number=7, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='classifier_nms_max_boxes', full_name='EvaluationConfig.classifier_nms_max_boxes', index=4,
number=8, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='classifier_nms_overlap_threshold', full_name='EvaluationConfig.classifier_nms_overlap_threshold', index=5,
number=9, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='object_confidence_thres', full_name='EvaluationConfig.object_confidence_thres', index=6,
number=11, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='use_voc07_11point_metric', full_name='EvaluationConfig.use_voc07_11point_metric', index=7,
number=13, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='validation_period_during_training', full_name='EvaluationConfig.validation_period_during_training', index=8,
number=15, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='batch_size', full_name='EvaluationConfig.batch_size', index=9,
number=16, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='trt_evaluation', full_name='EvaluationConfig.trt_evaluation', index=10,
number=17, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='gt_matching_iou_threshold_range', full_name='EvaluationConfig.gt_matching_iou_threshold_range', index=11,
number=18, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='gt_matching_iou_threshold', full_name='EvaluationConfig.gt_matching_iou_threshold', index=12,
number=19, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='visualize_pr_curve', full_name='EvaluationConfig.visualize_pr_curve', index=13,
number=20, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='iou_threshold_config', full_name='EvaluationConfig.iou_threshold_config',
index=0, containing_type=None, fields=[]),
],
serialized_start=180,
serialized_end=699,
)
_EVALUATIONCONFIG.fields_by_name['trt_evaluation'].message_type = nvidia__tao__deploy_dot_cv_dot_faster__rcnn_dot_proto_dot_trt__config__pb2._TRTINFERENCE
_EVALUATIONCONFIG.fields_by_name['gt_matching_iou_threshold_range'].message_type = _IOUTHRESHOLDRANGE
_EVALUATIONCONFIG.oneofs_by_name['iou_threshold_config'].fields.append(
_EVALUATIONCONFIG.fields_by_name['gt_matching_iou_threshold_range'])
_EVALUATIONCONFIG.fields_by_name['gt_matching_iou_threshold_range'].containing_oneof = _EVALUATIONCONFIG.oneofs_by_name['iou_threshold_config']
_EVALUATIONCONFIG.oneofs_by_name['iou_threshold_config'].fields.append(
_EVALUATIONCONFIG.fields_by_name['gt_matching_iou_threshold'])
_EVALUATIONCONFIG.fields_by_name['gt_matching_iou_threshold'].containing_oneof = _EVALUATIONCONFIG.oneofs_by_name['iou_threshold_config']
DESCRIPTOR.message_types_by_name['IoUThresholdRange'] = _IOUTHRESHOLDRANGE
DESCRIPTOR.message_types_by_name['EvaluationConfig'] = _EVALUATIONCONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
IoUThresholdRange = _reflection.GeneratedProtocolMessageType('IoUThresholdRange', (_message.Message,), dict(
DESCRIPTOR = _IOUTHRESHOLDRANGE,
__module__ = 'nvidia_tao_deploy.cv.faster_rcnn.proto.evaluation_pb2'
# @@protoc_insertion_point(class_scope:IoUThresholdRange)
))
_sym_db.RegisterMessage(IoUThresholdRange)
EvaluationConfig = _reflection.GeneratedProtocolMessageType('EvaluationConfig', (_message.Message,), dict(
DESCRIPTOR = _EVALUATIONCONFIG,
__module__ = 'nvidia_tao_deploy.cv.faster_rcnn.proto.evaluation_pb2'
# @@protoc_insertion_point(class_scope:EvaluationConfig)
))
_sym_db.RegisterMessage(EvaluationConfig)
# @@protoc_insertion_point(module_scope)
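# --- Illustrative usage sketch (documentation only; not part of the protoc output) ---
# EvaluationConfig is typically populated from a text-format spec file; the
# fragment below is a made-up example, not a shipped spec.
if __name__ == '__main__':
    from google.protobuf import text_format
    spec = 'batch_size: 8\ngt_matching_iou_threshold: 0.5\n'
    ec = text_format.Merge(spec, EvaluationConfig())
    # The oneof records which IoU-threshold variant the spec selected.
    print(ec.WhichOneof('iou_threshold_config'))  # -> 'gt_matching_iou_threshold'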
| tao_deploy-main | nvidia_tao_deploy/cv/faster_rcnn/proto/evaluation_pb2.py |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: nvidia_tao_deploy/cv/faster_rcnn/proto/dataset_config.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='nvidia_tao_deploy/cv/faster_rcnn/proto/dataset_config.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n;nvidia_tao_deploy/cv/faster_rcnn/proto/dataset_config.proto\"Y\n\nDataSource\x12\x16\n\x0etfrecords_path\x18\x01 \x01(\t\x12\x1c\n\x14image_directory_path\x18\x02 \x01(\t\x12\x15\n\rsource_weight\x18\x03 \x01(\x02\"\x99\x04\n\rDatasetConfig\x12!\n\x0c\x64\x61ta_sources\x18\x01 \x03(\x0b\x32\x0b.DataSource\x12\x17\n\x0fimage_extension\x18\x02 \x01(\t\x12\x44\n\x14target_class_mapping\x18\x03 \x03(\x0b\x32&.DatasetConfig.TargetClassMappingEntry\x12\x19\n\x0fvalidation_fold\x18\x04 \x01(\rH\x00\x12-\n\x16validation_data_source\x18\x05 \x01(\x0b\x32\x0b.DataSourceH\x00\x12\x37\n\x0f\x64\x61taloader_mode\x18\x06 \x01(\x0e\x32\x1e.DatasetConfig.DATALOADER_MODE\x12\x33\n\rsampling_mode\x18\x07 \x01(\x0e\x32\x1c.DatasetConfig.SAMPLING_MODE\x1a\x39\n\x17TargetClassMappingEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\";\n\x0f\x44\x41TALOADER_MODE\x12\x0f\n\x0bMULTISOURCE\x10\x00\x12\n\n\x06LEGACY\x10\x01\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x02\"@\n\rSAMPLING_MODE\x12\x10\n\x0cUSER_DEFINED\x10\x00\x12\x10\n\x0cPROPORTIONAL\x10\x01\x12\x0b\n\x07UNIFORM\x10\x02\x42\x14\n\x12\x64\x61taset_split_typeb\x06proto3')
)
_DATASETCONFIG_DATALOADER_MODE = _descriptor.EnumDescriptor(
name='DATALOADER_MODE',
full_name='DatasetConfig.DATALOADER_MODE',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='MULTISOURCE', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='LEGACY', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DEFAULT', index=2, number=2,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=545,
serialized_end=604,
)
_sym_db.RegisterEnumDescriptor(_DATASETCONFIG_DATALOADER_MODE)
_DATASETCONFIG_SAMPLING_MODE = _descriptor.EnumDescriptor(
name='SAMPLING_MODE',
full_name='DatasetConfig.SAMPLING_MODE',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='USER_DEFINED', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='PROPORTIONAL', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='UNIFORM', index=2, number=2,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=606,
serialized_end=670,
)
_sym_db.RegisterEnumDescriptor(_DATASETCONFIG_SAMPLING_MODE)
_DATASOURCE = _descriptor.Descriptor(
name='DataSource',
full_name='DataSource',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='tfrecords_path', full_name='DataSource.tfrecords_path', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='image_directory_path', full_name='DataSource.image_directory_path', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='source_weight', full_name='DataSource.source_weight', index=2,
number=3, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=63,
serialized_end=152,
)
_DATASETCONFIG_TARGETCLASSMAPPINGENTRY = _descriptor.Descriptor(
name='TargetClassMappingEntry',
full_name='DatasetConfig.TargetClassMappingEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='DatasetConfig.TargetClassMappingEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='DatasetConfig.TargetClassMappingEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=_b('8\001'),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=486,
serialized_end=543,
)
_DATASETCONFIG = _descriptor.Descriptor(
name='DatasetConfig',
full_name='DatasetConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='data_sources', full_name='DatasetConfig.data_sources', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='image_extension', full_name='DatasetConfig.image_extension', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='target_class_mapping', full_name='DatasetConfig.target_class_mapping', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='validation_fold', full_name='DatasetConfig.validation_fold', index=3,
number=4, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='validation_data_source', full_name='DatasetConfig.validation_data_source', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='dataloader_mode', full_name='DatasetConfig.dataloader_mode', index=5,
number=6, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='sampling_mode', full_name='DatasetConfig.sampling_mode', index=6,
number=7, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_DATASETCONFIG_TARGETCLASSMAPPINGENTRY, ],
enum_types=[
_DATASETCONFIG_DATALOADER_MODE,
_DATASETCONFIG_SAMPLING_MODE,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='dataset_split_type', full_name='DatasetConfig.dataset_split_type',
index=0, containing_type=None, fields=[]),
],
serialized_start=155,
serialized_end=692,
)
_DATASETCONFIG_TARGETCLASSMAPPINGENTRY.containing_type = _DATASETCONFIG
_DATASETCONFIG.fields_by_name['data_sources'].message_type = _DATASOURCE
_DATASETCONFIG.fields_by_name['target_class_mapping'].message_type = _DATASETCONFIG_TARGETCLASSMAPPINGENTRY
_DATASETCONFIG.fields_by_name['validation_data_source'].message_type = _DATASOURCE
_DATASETCONFIG.fields_by_name['dataloader_mode'].enum_type = _DATASETCONFIG_DATALOADER_MODE
_DATASETCONFIG.fields_by_name['sampling_mode'].enum_type = _DATASETCONFIG_SAMPLING_MODE
_DATASETCONFIG_DATALOADER_MODE.containing_type = _DATASETCONFIG
_DATASETCONFIG_SAMPLING_MODE.containing_type = _DATASETCONFIG
_DATASETCONFIG.oneofs_by_name['dataset_split_type'].fields.append(
_DATASETCONFIG.fields_by_name['validation_fold'])
_DATASETCONFIG.fields_by_name['validation_fold'].containing_oneof = _DATASETCONFIG.oneofs_by_name['dataset_split_type']
_DATASETCONFIG.oneofs_by_name['dataset_split_type'].fields.append(
_DATASETCONFIG.fields_by_name['validation_data_source'])
_DATASETCONFIG.fields_by_name['validation_data_source'].containing_oneof = _DATASETCONFIG.oneofs_by_name['dataset_split_type']
DESCRIPTOR.message_types_by_name['DataSource'] = _DATASOURCE
DESCRIPTOR.message_types_by_name['DatasetConfig'] = _DATASETCONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
DataSource = _reflection.GeneratedProtocolMessageType('DataSource', (_message.Message,), dict(
DESCRIPTOR = _DATASOURCE,
__module__ = 'nvidia_tao_deploy.cv.faster_rcnn.proto.dataset_config_pb2'
# @@protoc_insertion_point(class_scope:DataSource)
))
_sym_db.RegisterMessage(DataSource)
DatasetConfig = _reflection.GeneratedProtocolMessageType('DatasetConfig', (_message.Message,), dict(
TargetClassMappingEntry = _reflection.GeneratedProtocolMessageType('TargetClassMappingEntry', (_message.Message,), dict(
DESCRIPTOR = _DATASETCONFIG_TARGETCLASSMAPPINGENTRY,
__module__ = 'nvidia_tao_deploy.cv.faster_rcnn.proto.dataset_config_pb2'
# @@protoc_insertion_point(class_scope:DatasetConfig.TargetClassMappingEntry)
))
,
DESCRIPTOR = _DATASETCONFIG,
__module__ = 'nvidia_tao_deploy.cv.faster_rcnn.proto.dataset_config_pb2'
# @@protoc_insertion_point(class_scope:DatasetConfig)
))
_sym_db.RegisterMessage(DatasetConfig)
_sym_db.RegisterMessage(DatasetConfig.TargetClassMappingEntry)
_DATASETCONFIG_TARGETCLASSMAPPINGENTRY._options = None
# @@protoc_insertion_point(module_scope)
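# --- Illustrative usage sketch (documentation only; not part of the protoc output) ---
# DatasetConfig combines a repeated message (data_sources), a map field
# (target_class_mapping), nested enums, and a oneof. Paths and class names
# below are placeholders.
if __name__ == '__main__':
    dc = DatasetConfig()
    src = dc.data_sources.add()
    src.tfrecords_path = '/data/tfrecords/kitti-*'
    src.image_directory_path = '/data/images'
    dc.target_class_mapping['automobile'] = 'car'
    dc.sampling_mode = DatasetConfig.UNIFORM
    dc.validation_fold = 0  # selects a branch of the 'dataset_split_type' oneof
    print(dc.WhichOneof('dataset_split_type'))  # -> 'validation_fold'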
| tao_deploy-main | nvidia_tao_deploy/cv/faster_rcnn/proto/dataset_config_pb2.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""FRCNN convert etlt/onnx model to TRT engine."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import logging
import os
import tempfile
from nvidia_tao_deploy.cv.common.decorators import monitor_status
from nvidia_tao_deploy.cv.faster_rcnn.proto.utils import load_proto
from nvidia_tao_deploy.cv.faster_rcnn.engine_builder import FRCNNEngineBuilder
from nvidia_tao_deploy.utils.decoding import decode_model
logging.basicConfig(format='%(asctime)s [TAO Toolkit] [%(levelname)s] %(name)s %(lineno)d: %(message)s',
level="INFO")
logger = logging.getLogger(__name__)
DEFAULT_MAX_BATCH_SIZE = 1
DEFAULT_MIN_BATCH_SIZE = 1
DEFAULT_OPT_BATCH_SIZE = 1
@monitor_status(name='faster_rcnn', mode='gen_trt_engine')
def main(args):
"""FRCNN TRT convert."""
# decrypt etlt
tmp_onnx_file, file_format = decode_model(args.model_path, args.key)
# Load from proto-based spec file
es = load_proto(args.experiment_spec)
if args.engine_file is not None or args.data_type == 'int8':
if args.engine_file is None:
engine_handle, temp_engine_path = tempfile.mkstemp()
os.close(engine_handle)
output_engine_path = temp_engine_path
else:
output_engine_path = args.engine_file
builder = FRCNNEngineBuilder(verbose=args.verbose,
is_qat=es.training_config.enable_qat,
workspace=args.max_workspace_size,
min_batch_size=args.min_batch_size,
opt_batch_size=args.opt_batch_size,
max_batch_size=args.max_batch_size,
strict_type_constraints=args.strict_type_constraints,
force_ptq=args.force_ptq)
builder.create_network(tmp_onnx_file, file_format)
builder.create_engine(
output_engine_path,
args.data_type,
calib_data_file=args.cal_data_file,
calib_input=args.cal_image_dir,
calib_cache=args.cal_cache_file,
calib_num_images=args.batch_size * args.batches,
calib_batch_size=args.batch_size,
calib_json_file=args.cal_json_file)
logging.info("Export finished successfully.")
def build_command_line_parser(parser=None):
"""Build the command line parser using argparse.
Args:
        parser (argparse.ArgumentParser): Optional parser provided by the
            wrapper script to build a chained parser mechanism.
Returns:
parser
"""
if parser is None:
parser = argparse.ArgumentParser(prog='gen_trt_engine', description='Generate TRT engine of FRCNN model.')
parser.add_argument(
'-m',
'--model_path',
type=str,
required=True,
        help='Path to an FRCNN .etlt or .onnx model file.'
)
parser.add_argument(
'-k',
'--key',
type=str,
required=False,
help='Key to save or load a .etlt model.'
)
parser.add_argument(
'-e',
'--experiment_spec',
type=str,
required=True,
help='Path to the experiment spec file.'
)
parser.add_argument(
"--data_type",
type=str,
default="fp32",
help="Data type for the TensorRT export.",
choices=["fp32", "fp16", "int8"])
parser.add_argument(
"--cal_image_dir",
default="",
type=str,
help="Directory of images to run int8 calibration.")
parser.add_argument(
"--cal_data_file",
default=None,
type=str,
help="Tensorfile to run calibration for int8 optimization.")
parser.add_argument(
'--cal_cache_file',
default=None,
type=str,
help='Calibration cache file to write to.')
parser.add_argument(
'--cal_json_file',
default=None,
type=str,
        help='Path to a JSON file containing the tensor scales for QAT models.')
parser.add_argument(
"--engine_file",
type=str,
default=None,
help="Path to the exported TRT engine.")
parser.add_argument(
"--max_batch_size",
type=int,
default=DEFAULT_MAX_BATCH_SIZE,
help="Max batch size for TensorRT engine builder.")
parser.add_argument(
"--min_batch_size",
type=int,
default=DEFAULT_MIN_BATCH_SIZE,
help="Min batch size for TensorRT engine builder.")
parser.add_argument(
"--opt_batch_size",
type=int,
default=DEFAULT_OPT_BATCH_SIZE,
help="Opt batch size for TensorRT engine builder.")
parser.add_argument(
"--batch_size",
type=int,
default=1,
help="Number of images per batch.")
parser.add_argument(
"--batches",
type=int,
default=10,
help="Number of batches to calibrate over.")
parser.add_argument(
"--max_workspace_size",
type=int,
default=2,
help="Max memory workspace size to allow in Gb for TensorRT engine builder (default: 2).")
parser.add_argument(
"-s",
"--strict_type_constraints",
action="store_true",
default=False,
help="A Boolean flag indicating whether to apply the \
TensorRT strict type constraints when building the TensorRT engine.")
parser.add_argument(
"--force_ptq",
action="store_true",
default=False,
help="Flag to force post training quantization for QAT models.")
parser.add_argument(
"-v",
"--verbose",
action="store_true",
default=False,
help="Verbosity of the logger.")
parser.add_argument(
'-r',
'--results_dir',
type=str,
required=True,
default=None,
help='Output directory where the log is saved.'
)
return parser
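# Example invocation (all paths below are placeholders, not shipped files):
#   python gen_trt_engine.py \
#       -m /workspace/frcnn.onnx \
#       -e /workspace/experiment_spec.txt \
#       -r /workspace/results \
#       --data_type fp16 \
#       --engine_file /workspace/frcnn.fp16.engine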
def parse_command_line_arguments(args=None):
"""Simple function to parse command line arguments."""
parser = build_command_line_parser(args)
return parser.parse_args(args)
if __name__ == '__main__':
args = parse_command_line_arguments()
main(args)
| tao_deploy-main | nvidia_tao_deploy/cv/faster_rcnn/scripts/gen_trt_engine.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TAO Deploy FRCNN scripts module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_deploy-main | nvidia_tao_deploy/cv/faster_rcnn/scripts/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Standalone TensorRT inference."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
from PIL import Image
import numpy as np
from tqdm.auto import tqdm
import logging
from nvidia_tao_deploy.cv.common.decorators import monitor_status
from nvidia_tao_deploy.cv.faster_rcnn.dataloader import FRCNNKITTILoader, aug_letterbox_resize
from nvidia_tao_deploy.cv.faster_rcnn.inferencer import FRCNNInferencer
from nvidia_tao_deploy.cv.faster_rcnn.proto.utils import load_proto
logging.getLogger('PIL').setLevel(logging.WARNING)
logging.basicConfig(format='%(asctime)s [TAO Toolkit] [%(levelname)s] %(name)s %(lineno)d: %(message)s',
level="INFO")
logger = logging.getLogger(__name__)
@monitor_status(name='faster_rcnn', mode='inference')
def main(args):
"""FRCNN TRT inference."""
# Load from proto-based spec file
es = load_proto(args.experiment_spec)
infer_config = es.inference_config
dataset_config = es.dataset_config
batch_size = args.batch_size if args.batch_size else infer_config.batch_size
if batch_size <= 0:
raise ValueError(f"Inference batch size should be >=1, got {batch_size}, please check inference_config.batch_size")
trt_infer = FRCNNInferencer(args.model_path, batch_size=batch_size)
c, h, w = trt_infer._input_shape
img_mean = es.model_config.input_image_config.image_channel_mean
if c == 3:
if img_mean:
img_mean = [img_mean['b'], img_mean['g'], img_mean['r']]
else:
img_mean = [103.939, 116.779, 123.68]
else:
if img_mean:
img_mean = [img_mean['l']]
else:
img_mean = [117.3786]
# Load mapping_dict from the spec file
mapping_dict = dict(dataset_config.target_class_mapping)
# Override eval_dataset_path from spec file if image directory is provided
if args.image_dir:
image_dirs = args.image_dir
else:
image_dirs = infer_config.images_dir
dl = FRCNNKITTILoader(
shape=(c, h, w),
image_dirs=[image_dirs],
label_dirs=[None],
mapping_dict=mapping_dict,
exclude_difficult=True,
batch_size=batch_size,
is_inference=True,
image_mean=img_mean,
dtype=trt_infer.inputs[0].host.dtype)
inv_classes = {v: k for k, v in dl.classes.items()}
inv_classes[-1] = "background" # Dummy class to filter backgrounds
if args.results_dir is None:
results_dir = os.path.dirname(args.model_path)
else:
results_dir = args.results_dir
os.makedirs(results_dir, exist_ok=True)
if infer_config.detection_image_output_dir:
output_annotate_root = infer_config.detection_image_output_dir
else:
output_annotate_root = os.path.join(results_dir, "images_annotated")
os.makedirs(output_annotate_root, exist_ok=True)
if infer_config.labels_dump_dir:
output_label_root = infer_config.labels_dump_dir
else:
output_label_root = os.path.join(results_dir, "labels")
os.makedirs(output_label_root, exist_ok=True)
for i, (imgs, _) in tqdm(enumerate(dl), total=len(dl), desc="Producing predictions"):
image_paths = dl.image_paths[np.arange(batch_size) + batch_size * i]
y_pred = trt_infer.infer(imgs)
        for j in range(len(y_pred)):
            y_pred_valid = y_pred[j]
target_size = np.array([w, h, w, h])
# Scale back bounding box coordinates
y_pred_valid[:, 2:6] *= target_size[None, :]
# Load image
            img = Image.open(image_paths[j])
orig_width, orig_height = img.size
img, _, crop_coord = aug_letterbox_resize(img,
y_pred_valid[:, 2:6],
num_channels=c,
resize_shape=(trt_infer.width, trt_infer.height))
img = Image.fromarray(img.astype('uint8'))
# Store images
bbox_img, label_strings = trt_infer.draw_bbox(img, y_pred_valid, inv_classes, infer_config.bbox_visualize_threshold)
bbox_img = bbox_img.crop((crop_coord[0], crop_coord[1], crop_coord[2], crop_coord[3]))
bbox_img = bbox_img.resize((orig_width, orig_height))
            img_filename = os.path.basename(image_paths[j])
bbox_img.save(os.path.join(output_annotate_root, img_filename))
# Store labels
filename, _ = os.path.splitext(img_filename)
label_file_name = os.path.join(output_label_root, filename + ".txt")
with open(label_file_name, "w", encoding="utf-8") as f:
for l_s in label_strings:
f.write(l_s)
logging.info("Finished inference.")
def build_command_line_parser(parser=None):
"""Build the command line parser using argparse.
Args:
parser (subparser): Provided from the wrapper script to build a chained
parser mechanism.
Returns:
parser
"""
if parser is None:
parser = argparse.ArgumentParser(prog='infer', description='Inference with a FRCNN TRT model.')
parser.add_argument(
'-i',
'--image_dir',
type=str,
required=False,
default=None,
help='Input directory of images')
parser.add_argument(
'-e',
'--experiment_spec',
type=str,
required=True,
help='Path to the experiment spec file.'
)
parser.add_argument(
'-m',
'--model_path',
type=str,
required=True,
help='Path to the FRCNN TensorRT engine.'
)
parser.add_argument(
'-b',
'--batch_size',
type=int,
required=False,
default=None,
help='Batch size.')
parser.add_argument(
'-r',
'--results_dir',
type=str,
required=True,
default=None,
help='Output directory where the log is saved.'
)
return parser
def parse_command_line_arguments(args=None):
"""Simple function to parse command line arguments."""
parser = build_command_line_parser(args)
return parser.parse_args(args)
if __name__ == '__main__':
args = parse_command_line_arguments()
main(args)
| tao_deploy-main | nvidia_tao_deploy/cv/faster_rcnn/scripts/inference.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Standalone TensorRT evaluation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import json
import numpy as np
from tqdm.auto import tqdm
import logging
from nvidia_tao_deploy.cv.common.decorators import monitor_status
from nvidia_tao_deploy.cv.faster_rcnn.dataloader import FRCNNKITTILoader
from nvidia_tao_deploy.cv.faster_rcnn.inferencer import FRCNNInferencer
from nvidia_tao_deploy.cv.faster_rcnn.proto.utils import load_proto
from nvidia_tao_deploy.metrics.kitti_metric import KITTIMetric
logging.getLogger('PIL').setLevel(logging.WARNING)
logging.basicConfig(format='%(asctime)s [TAO Toolkit] [%(levelname)s] %(name)s %(lineno)d: %(message)s',
level="INFO")
logger = logging.getLogger(__name__)
@monitor_status(name='faster_rcnn', mode='evaluation')
def main(args):
"""FRCNN TRT evaluation."""
# Load from proto-based spec file
es = load_proto(args.experiment_spec)
eval_config = es.evaluation_config
dataset_config = es.dataset_config
batch_size = args.batch_size if args.batch_size else eval_config.batch_size
trt_infer = FRCNNInferencer(args.model_path, batch_size=batch_size)
c, h, w = trt_infer._input_shape
conf_thres = eval_config.object_confidence_thres if eval_config.object_confidence_thres else 0.0001
matching_iou_threshold = eval_config.gt_matching_iou_threshold if eval_config.gt_matching_iou_threshold else 0.5
img_mean = es.model_config.input_image_config.image_channel_mean
if c == 3:
if img_mean:
img_mean = [img_mean['b'], img_mean['g'], img_mean['r']]
else:
img_mean = [103.939, 116.779, 123.68]
else:
if img_mean:
img_mean = [img_mean['l']]
else:
img_mean = [117.3786]
# Load mapping_dict from the spec file
mapping_dict = dict(dataset_config.target_class_mapping)
# Override eval_dataset_path from spec file if image directory is provided
if args.image_dir:
image_dirs = args.image_dir
else:
image_dirs = dataset_config.validation_data_source.image_directory_path
dl = FRCNNKITTILoader(
shape=(c, h, w),
image_dirs=[image_dirs],
label_dirs=[args.label_dir],
mapping_dict=mapping_dict,
exclude_difficult=True,
batch_size=batch_size,
image_mean=img_mean,
dtype=trt_infer.inputs[0].host.dtype)
eval_metric = KITTIMetric(n_classes=len(dl.classes),
matching_iou_threshold=matching_iou_threshold,
conf_thres=conf_thres)
gt_labels = []
pred_labels = []
    for imgs, labels in tqdm(dl, total=len(dl), desc="Producing predictions"):
gt_labels.extend(labels)
y_pred = trt_infer.infer(imgs)
for i in range(len(y_pred)):
y_pred_valid = y_pred[i][y_pred[i][:, 1] > eval_metric.conf_thres]
pred_labels.append(y_pred_valid)
m_ap, ap = eval_metric(gt_labels, pred_labels, verbose=True)
m_ap = np.mean(ap)
logging.info("*******************************")
class_mapping = {v: k for k, v in dl.classes.items()}
eval_results = {}
for i in range(len(dl.classes)):
eval_results['AP_' + class_mapping[i]] = np.float64(ap[i])
logging.info("{:<14}{:<6}{}".format(class_mapping[i], 'AP', round(ap[i], 5))) # noqa pylint: disable=C0209
logging.info("{:<14}{:<6}{}".format('', 'mAP', round(m_ap, 3))) # noqa pylint: disable=C0209
logging.info("*******************************")
# Store evaluation results into JSON
if args.results_dir is None:
results_dir = os.path.dirname(args.model_path)
else:
results_dir = args.results_dir
with open(os.path.join(results_dir, "results.json"), "w", encoding="utf-8") as f:
json.dump(eval_results, f)
logging.info("Finished evaluation.")
def build_command_line_parser(parser=None):
"""Build the command line parser using argparse.
Args:
parser (subparser): Provided from the wrapper script to build a chained
parser mechanism.
Returns:
parser
"""
if parser is None:
parser = argparse.ArgumentParser(prog='eval', description='Evaluate with a FRCNN TRT model.')
parser.add_argument(
'-i',
'--image_dir',
type=str,
required=False,
default=None,
help='Input directory of images')
parser.add_argument(
'-e',
'--experiment_spec',
type=str,
required=True,
help='Path to the experiment spec file.'
)
parser.add_argument(
'-m',
'--model_path',
type=str,
required=True,
help='Path to the FRCNN TensorRT engine.'
)
parser.add_argument(
'-l',
'--label_dir',
type=str,
required=True,
help='Label directory.')
parser.add_argument(
'-b',
'--batch_size',
type=int,
required=False,
default=None,
help='Batch size.')
parser.add_argument(
'-r',
'--results_dir',
type=str,
required=True,
default=None,
help='Output directory where the log is saved.'
)
return parser
def parse_command_line_arguments(args=None):
"""Simple function to parse command line arguments."""
parser = build_command_line_parser(args)
return parser.parse_args(args)
if __name__ == '__main__':
args = parse_command_line_arguments()
main(args)
| tao_deploy-main | nvidia_tao_deploy/cv/faster_rcnn/scripts/evaluate.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TAO Deploy command line wrapper to invoke CLI scripts."""
import sys
from nvidia_tao_deploy.cv.common.entrypoint.entrypoint_proto import launch_job
import nvidia_tao_deploy.cv.faster_rcnn.scripts
def main():
"""Function to launch the job."""
launch_job(nvidia_tao_deploy.cv.faster_rcnn.scripts, "faster_rcnn", sys.argv[1:])
if __name__ == "__main__":
main()
| tao_deploy-main | nvidia_tao_deploy/cv/faster_rcnn/entrypoint/faster_rcnn.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Entrypoint module for faster rcnn."""
| tao_deploy-main | nvidia_tao_deploy/cv/faster_rcnn/entrypoint/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility class for performing TensorRT image inference."""
import numpy as np
import tensorrt as trt
from nvidia_tao_deploy.inferencer.trt_inferencer import TRTInferencer
from nvidia_tao_deploy.inferencer.utils import allocate_buffers, do_inference
class OCRNetInferencer(TRTInferencer):
"""Manages TensorRT objects for model inference."""
def __init__(self, engine_path, input_shape=None, batch_size=None, data_format="channel_first"):
"""Initializes TensorRT objects needed for model inference.
Args:
            engine_path (str): path to the serialized TensorRT engine
input_shape (tuple): (batch, channel, height, width) for dynamic shape engine
batch_size (int): batch size for dynamic shape engine
data_format (str): either channel_first or channel_last
"""
# Load TRT engine
super().__init__(engine_path)
self.max_batch_size = self.engine.max_batch_size
self.execute_v2 = False
# Execution context is needed for inference
self.context = None
# Allocate memory for multiple usage [e.g. multiple batch inference]
self._input_shape = []
for binding in range(self.engine.num_bindings):
if self.engine.binding_is_input(binding):
self._input_shape = self.engine.get_binding_shape(binding)[-3:]
assert len(self._input_shape) == 3, "Engine doesn't have valid input dimensions"
if data_format == "channel_first":
self.height = self._input_shape[1]
self.width = self._input_shape[2]
else:
self.height = self._input_shape[0]
self.width = self._input_shape[1]
# set binding_shape for dynamic input
if (input_shape is not None) or (batch_size is not None):
self.context = self.engine.create_execution_context()
if input_shape is not None:
self.context.set_binding_shape(0, input_shape)
self.max_batch_size = input_shape[0]
else:
self.context.set_binding_shape(0, [batch_size] + list(self._input_shape))
self.max_batch_size = batch_size
self.execute_v2 = True
# This allocates memory for network inputs/outputs on both CPU and GPU
self.inputs, self.outputs, self.bindings, self.stream = allocate_buffers(self.engine,
self.context)
if self.context is None:
self.context = self.engine.create_execution_context()
input_volume = trt.volume(self._input_shape)
self.numpy_array = np.zeros((self.max_batch_size, input_volume))
def infer(self, imgs):
"""Infers model on batch of same sized images resized to fit the model.
Args:
            imgs (np.ndarray): batch of preprocessed images to be fed into
                the model
"""
# Verify if the supplied batch size is not too big
max_batch_size = self.max_batch_size
actual_batch_size = len(imgs)
if actual_batch_size > max_batch_size:
raise ValueError(f"image_paths list bigger ({actual_batch_size}) than \
engine max batch size ({max_batch_size})")
self.numpy_array[:actual_batch_size] = imgs.reshape(actual_batch_size, -1)
# ...copy them into appropriate place into memory...
# (self.inputs was returned earlier by allocate_buffers())
np.copyto(self.inputs[0].host, self.numpy_array.ravel())
# ...fetch model outputs...
results = do_inference(
self.context, bindings=self.bindings, inputs=self.inputs,
outputs=self.outputs, stream=self.stream,
batch_size=max_batch_size,
execute_v2=self.execute_v2)
# ...and return results up to the actual batch size.
return [i.reshape(max_batch_size, -1)[:actual_batch_size] for i in results]
def __del__(self):
"""Clear things up on object deletion."""
# Clear session and buffer
if self.trt_runtime:
del self.trt_runtime
if self.context:
del self.context
if self.engine:
del self.engine
if self.stream:
del self.stream
# Loop through inputs and free inputs.
for inp in self.inputs:
inp.device.free()
# Loop through outputs and free them.
for out in self.outputs:
out.device.free()
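# Minimal usage sketch for OCRNetInferencer (the engine path is hypothetical;
# assumes an engine built for a 1x32x100 grayscale input whose outputs unpack
# as id/prob/extra, the way the OCRNet scripts consume them):
#
#   import numpy as np
#   ocr = OCRNetInferencer("/path/to/ocrnet.engine", batch_size=2)
#   batch = np.random.rand(2, 1, 32, 100).astype(ocr.inputs[0].host.dtype)
#   output_ids, output_probs, _ = ocr.infer(batch)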
| tao_deploy-main | nvidia_tao_deploy/cv/ocrnet/inferencer.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""OCRNet TensorRT engine builder."""
import logging
import os
import sys
import onnx
import tensorrt as trt
from nvidia_tao_deploy.engine.builder import EngineBuilder
logging.basicConfig(format='%(asctime)s [TAO Toolkit] [%(levelname)s] %(name)s %(lineno)d: %(message)s',
level="INFO")
logger = logging.getLogger(__name__)
class OCRNetEngineBuilder(EngineBuilder):
"""Parses an ONNX graph and builds a TensorRT engine from it."""
def __init__(
self,
data_format="channels_first",
**kwargs
):
"""Init.
Args:
data_format (str): data_format.
"""
super().__init__(**kwargs)
self._data_format = data_format
def set_input_output_node_names(self):
"""Set input output node names."""
self._output_node_names = ["output_prob", "output_id"]
self._input_node_names = ["input"]
def get_onnx_input_dims(self, model_path):
"""Get input dimension of ONNX model."""
onnx_model = onnx.load(model_path)
onnx_inputs = onnx_model.graph.input
logger.info('List inputs:')
for i, inputs in enumerate(onnx_inputs):
logger.info('Input %s -> %s.', i, inputs.name)
logger.info('%s.', [i.dim_value for i in inputs.type.tensor_type.shape.dim][1:])
logger.info('%s.', [i.dim_value for i in inputs.type.tensor_type.shape.dim][0])
        return [i.dim_value for i in inputs.type.tensor_type.shape.dim]
def create_network(self, model_path, file_format="onnx"):
"""Parse the UFF/ONNX graph and create the corresponding TensorRT network definition.
Args:
model_path: The path to the UFF/ONNX graph to load.
file_format: The file format of the decrypted etlt file (default: onnx).
"""
if file_format == "onnx":
logger.info("Parsing ONNX model")
self._input_dims = self.get_onnx_input_dims(model_path)
self.batch_size = self._input_dims[0]
self._input_dims = self._input_dims[1:]
network_flags = 1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
network_flags = network_flags | (1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_PRECISION))
self.network = self.builder.create_network(network_flags)
self.parser = trt.OnnxParser(self.network, self.trt_logger)
model_path = os.path.realpath(model_path)
with open(model_path, "rb") as f:
if not self.parser.parse(f.read()):
logger.error("Failed to load ONNX file: %s", model_path)
for error in range(self.parser.num_errors):
logger.error(self.parser.get_error(error))
sys.exit(1)
inputs = [self.network.get_input(i) for i in range(self.network.num_inputs)]
outputs = [self.network.get_output(i) for i in range(self.network.num_outputs)]
logger.info("Network Description")
for input in inputs: # noqa pylint: disable=W0622
logger.info("Input '%s' with shape %s and dtype %s", input.name, input.shape, input.dtype)
for output in outputs:
logger.info("Output '%s' with shape %s and dtype %s", output.name, output.shape, output.dtype)
if self.batch_size <= 0: # dynamic batch size
logger.info("dynamic batch size handling")
opt_profile = self.builder.create_optimization_profile()
model_input = self.network.get_input(0)
input_shape = model_input.shape
input_name = model_input.name
real_shape_min = (self.min_batch_size, input_shape[1],
input_shape[2], input_shape[3])
real_shape_opt = (self.opt_batch_size, input_shape[1],
input_shape[2], input_shape[3])
real_shape_max = (self.max_batch_size, input_shape[1],
input_shape[2], input_shape[3])
opt_profile.set_shape(input=input_name,
min=real_shape_min,
opt=real_shape_opt,
max=real_shape_max)
self.config.add_optimization_profile(opt_profile)
else:
logger.info("Parsing UFF model")
raise NotImplementedError("UFF for LPRNet is not supported")
def create_engine(self, engine_path, precision,
calib_input=None, calib_cache=None, calib_num_images=5000,
calib_batch_size=8, calib_data_file=None):
"""Build the TensorRT engine and serialize it to disk.
Args:
engine_path: The path where to serialize the engine to.
precision: The datatype to use for the engine, either 'fp32', 'fp16' or 'int8'.
calib_input: The path to a directory holding the calibration images.
calib_cache: The path where to write the calibration cache to,
or if it already exists, load it from.
calib_num_images: The maximum number of images to use for calibration.
calib_batch_size: The batch size to use for the calibration process.
"""
engine_path = os.path.realpath(engine_path)
engine_dir = os.path.dirname(engine_path)
os.makedirs(engine_dir, exist_ok=True)
logger.debug("Building %s Engine in %s", precision, engine_path)
if self.batch_size is None:
self.batch_size = calib_batch_size
self.builder.max_batch_size = self.batch_size
if precision == "fp16":
if not self.builder.platform_has_fast_fp16:
logger.warning("FP16 is not supported natively on this platform/device")
else:
self.config.set_flag(trt.BuilderFlag.FP16)
elif precision == "int8":
raise NotImplementedError("INT8 is not supported for LPRNet!")
with self.builder.build_engine(self.network, self.config) as engine, \
open(engine_path, "wb") as f:
logger.debug("Serializing engine to file: %s", engine_path)
f.write(engine.serialize())
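# Hedged build sketch (paths are placeholders; the min/opt/max batch sizes
# only take effect when the ONNX model has a dynamic batch dimension, as
# handled in create_network above):
#
#   builder = OCRNetEngineBuilder(verbose=True, min_batch_size=1,
#                                 opt_batch_size=4, max_batch_size=16)
#   builder.create_network("/path/to/ocrnet.onnx", file_format="onnx")
#   builder.create_engine("/path/to/ocrnet.engine", precision="fp16")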
| tao_deploy-main | nvidia_tao_deploy/cv/ocrnet/engine_builder.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TAO Deploy OCRNet."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_deploy-main | nvidia_tao_deploy/cv/ocrnet/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper function to decode OCRNet model's output."""
import numpy as np
def decode_ctc(output_id, output_prob, character_list, blank_id=0):
"""Decode the raw CTC output to string."""
prob = np.cumprod(output_prob)[-1]
seq = np.squeeze(output_id)
prev = seq[0]
tmp_seq = [prev]
for idx in range(1, len(seq)):
if seq[idx] != prev:
tmp_seq.append(seq[idx])
prev = seq[idx]
text = ""
for idx in tmp_seq:
if idx != blank_id:
text += character_list[idx]
return text, prob
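# Worked example of the greedy CTC decode above: repeated ids are collapsed
# first, then blanks (id 0) are dropped. With a hypothetical 5-step output:
#
#   import numpy as np
#   ids = np.array([[1, 1, 0, 2, 2]])             # collapses to [1, 0, 2]
#   probs = np.array([0.9, 0.9, 0.8, 0.95, 0.95])
#   chars = ["CTCBlank", "a", "b"]
#   text, conf = decode_ctc(ids, probs, chars)    # text == "ab"
#   # conf is the product of the per-step probabilities (~0.585)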
| tao_deploy-main | nvidia_tao_deploy/cv/ocrnet/utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""LPRNet loader."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from abc import ABC
import numpy as np
from PIL import Image
from nvidia_tao_deploy.cv.common.constants import VALID_IMAGE_EXTENSIONS
from nvidia_tao_deploy.utils.path_utils import expand_path
class OCRNetLoader(ABC):
"""OCRNet Dataloader."""
def __init__(self,
shape,
image_dirs,
batch_size=10,
                 label_txts=None,
dtype=None):
"""Init.
        Args:
            shape (tuple): (channels, height, width) of the model input.
            image_dirs (list): list of image directories.
            batch_size (int): size of the batch.
            label_txts (list): list of ground truth files.
            dtype (str): data type to cast to
        """
self.image_paths = []
self.labels = []
        label_txts = label_txts or []
        if len(label_txts) != 0:
self.is_inference = False
assert len(label_txts) == len(image_dirs)
for image_dir, label_txt in zip(image_dirs, label_txts):
self._add_source(image_dir, label_txt)
else:
self.is_inference = True
for image_dir in image_dirs:
self._add_source(image_dir)
self.image_paths = np.array(self.image_paths)
self.data_inds = np.arange(len(self.image_paths))
self.num_channels, self.height, self.width = shape
self.batch_size = batch_size
self.n_samples = len(self.data_inds)
self.dtype = dtype
self.n_batches = int(len(self.image_paths) // self.batch_size)
assert self.n_batches > 0, "empty image dir or batch size too large!"
def _add_source(self, image_folder, label_txt=None):
"""Add image/label paths."""
if not self.is_inference:
with open(label_txt, "r", encoding="utf-8") as f:
label_list = f.readlines()
for label_meta in label_list:
img_file, label = label_meta.split()
label = label.strip()
img_path = expand_path(f"{image_folder}/{img_file}")
if img_file.lower().endswith(VALID_IMAGE_EXTENSIONS) and os.path.exists(expand_path(img_path)):
self.image_paths.append(img_path)
self.labels.append(label)
else:
img_files = os.listdir(image_folder)
for img_file in img_files:
if img_file.lower().endswith(VALID_IMAGE_EXTENSIONS):
self.image_paths.append(os.path.join(image_folder,
img_file))
def __len__(self):
"""Get length of Sequence."""
return self.n_batches
def _load_gt_image(self, image_path):
"""Load GT image from file."""
img = Image.open(image_path).convert("L")
return img
def __iter__(self):
"""Iterate."""
self.n = 0
return self
def __next__(self):
"""Load a full batch."""
images = []
labels = []
if self.n < self.n_batches:
for idx in range(self.n * self.batch_size,
(self.n + 1) * self.batch_size):
image, label = self._get_single_processed_item(idx)
images.append(image)
labels.append(label)
self.n += 1
return self._batch_post_processing(images, labels)
raise StopIteration
def _batch_post_processing(self, images, labels):
"""Post processing for a batch."""
images = np.array(images)
return images, labels
def _get_single_processed_item(self, idx):
"""Load and process single image and its label."""
image, label = self._get_single_item_raw(idx)
image = self.preprocessing(image)
return image, label
def _get_single_item_raw(self, idx):
"""Load single image and its label.
Returns:
image (np.array): image object in original resolution
            label (str): ground-truth text label, or "NO-LABEL" when running inference
"""
image = self._load_gt_image(self.image_paths[self.data_inds[idx]])
if self.is_inference:
label = "NO-LABEL" # Fake label
else:
label = self.labels[self.data_inds[idx]]
return image, label
def preprocessing(self, image):
"""The image preprocessor loads an image from disk and prepares it as needed for batching.
This includes padding, resizing, normalization, data type casting, and transposing.
Args:
            image (PIL.Image.Image): the loaded image to preprocess.
Returns:
image (np.array): A numpy array holding the image sample, ready to be concatenated
into the rest of the batch
"""
image = image.resize((self.width, self.height), resample=Image.BICUBIC)
image = (np.array(image, dtype=self.dtype) / 255.0 - 0.5) / 0.5
# @TODO(tylerz): No need to transpose for gray input
# # transpose image from HWC (0, 1, 2) to CHW (2, 0, 1)
# image = image.transpose(2, 0, 1)
return image
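# The normalization above maps pixel intensities from [0, 255] to [-1, 1]:
#   (0 / 255.0 - 0.5) / 0.5 == -1.0
#   (127.5 / 255.0 - 0.5) / 0.5 == 0.0
#   (255 / 255.0 - 0.5) / 0.5 == 1.0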
| tao_deploy-main | nvidia_tao_deploy/cv/ocrnet/dataloader.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TAO Deploy OCRNet config."""
| tao_deploy-main | nvidia_tao_deploy/cv/ocrnet/config/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Default config file."""
from typing import Optional, List
from dataclasses import dataclass, field
from omegaconf import MISSING
@dataclass
class PruneConfig:
"""Prune config."""
mode: str = "amount" # [amount, threshold, experimental_hybrid]
amount: Optional[float] = None
threshold: Optional[float] = None
granularity: int = 8
raw_prune_score: str = "L1" # [L1, L2]
@dataclass
class OCRNetModelConfig:
"""OCRNet model config."""
TPS: bool = False # Thin-Plate-Spline interpolation
num_fiducial: int = 20 # number of keypoints for TPS
backbone: str = "ResNet" # [ResNet]
feature_channel: int = 512
sequence: str = "BiLSTM" # [BiLSTM]
hidden_size: int = 256
prediction: str = "CTC" # [Attn, CTC]
quantize: bool = False
input_width: int = 100
input_height: int = 32
input_channel: int = 1
@dataclass
class OptimConfig:
"""Optimizer config."""
name: str = "adadelta" # [adam, adadelta]
lr: float = 1.0 # default value = 1.0 for adadelta
momentum: float = 0.9
weight_decay: float = 5e-4
lr_scheduler: str = "MultiStep" # {AutoReduce, MultiStep}
lr_monitor: str = "val_loss" # {val_loss, train_loss}
patience: int = 1
min_lr: float = 1e-4
lr_steps: List[int] = field(default_factory=lambda: [15, 25])
lr_decay: float = 0.1
# TODO(tylerz): no augmentation from original implementation
@dataclass
class OCRNetAugmentationConfig:
"""Augmentation config."""
keep_aspect_ratio: bool = False
@dataclass
class OCRNetDatasetConfig:
"""Dataset config."""
train_dataset_dir: Optional[List[str]] = None
val_dataset_dir: Optional[str] = None
character_list_file: Optional[str] = None
    max_label_length: int = 25  # TODO: validate against the output sequence length
batch_size: int = 32
workers: int = 8
augmentation: OCRNetAugmentationConfig = OCRNetAugmentationConfig()
@dataclass
class CalibrationConfig:
"""Calibration config."""
cal_image_dir: List[str] = MISSING
cal_cache_file: str = MISSING
cal_batch_size: int = 1
cal_batches: int = 1
@dataclass
class TrtConfig:
"""Trt config."""
data_type: str = "fp16"
workspace_size: int = 1024
min_batch_size: int = 1
opt_batch_size: int = 1
max_batch_size: int = 1
calibration: CalibrationConfig = CalibrationConfig()
@dataclass
class OCRNetGenTrtEngineExpConfig:
"""Gen TRT Engine experiment config."""
results_dir: Optional[str] = None
gpu_id: int = 0
onnx_file: str = MISSING
trt_engine: Optional[str] = None
input_channel: int = 1
input_width: int = 100
input_height: int = 32
opset_version: int = 12
batch_size: int = -1
verbose: bool = False
tensorrt: TrtConfig = TrtConfig()
@dataclass
class OCRNetTrainExpConfig:
"""Train experiment config."""
results_dir: Optional[str] = None
seed: int = 1111
# TODO(tylerz): Update to use torch.distributed.launch for multi gpu training.
gpu_ids: List[int] = field(default_factory=lambda: [0])
resume_training_checkpoint_path: Optional[str] = None
pretrained_model_path: Optional[str] = None
quantize_model_path: Optional[str] = None
optim: OptimConfig = OptimConfig()
num_epochs: int = 10
clip_grad_norm: float = 5.0 # default = 5.0 for adadelta
checkpoint_interval: int = 2
validation_interval: int = 1
distributed_strategy: str = "ddp"
@dataclass
class OCRNetInferenceExpConfig:
"""Inference experiment config."""
checkpoint: str = MISSING
trt_engine: Optional[str] = None
results_dir: Optional[str] = None
gpu_id: int = 0
inference_dataset_dir: str = MISSING
batch_size: int = 1
input_width: int = 100
input_height: int = 32
@dataclass
class OCRNetEvalExpConfig:
"""Evaluation experiment config."""
checkpoint: str = MISSING
trt_engine: Optional[str] = None
gpu_id: int = 0
test_dataset_dir: str = MISSING
test_dataset_gt_file: Optional[str] = None
results_dir: Optional[str] = None
batch_size: int = 1
input_width: int = 100
input_height: int = 32
@dataclass
class OCRNetExportExpConfig:
"""Export experiment config."""
checkpoint: str = MISSING
results_dir: Optional[str] = None
onnx_file: Optional[str] = None
gpu_id: int = 0
@dataclass
class OCRNetPruneExpConfig:
"""Prune experiment config."""
checkpoint: str = MISSING
results_dir: Optional[str] = None
pruned_file: Optional[str] = None
gpu_id: int = 0
prune_setting: PruneConfig = PruneConfig()
@dataclass
class OCRNetConvertDatasetExpConfig:
"""Convert_dataset experiment config."""
input_img_dir: str = MISSING
gt_file: str = MISSING
results_dir: Optional[str] = None
@dataclass
class ExperimentConfig:
"""Experiment config."""
model: OCRNetModelConfig = OCRNetModelConfig()
dataset: OCRNetDatasetConfig = OCRNetDatasetConfig()
train: OCRNetTrainExpConfig = OCRNetTrainExpConfig()
evaluate: OCRNetEvalExpConfig = OCRNetEvalExpConfig()
export: OCRNetExportExpConfig = OCRNetExportExpConfig()
inference: OCRNetInferenceExpConfig = OCRNetInferenceExpConfig()
prune: OCRNetPruneExpConfig = OCRNetPruneExpConfig()
dataset_convert: OCRNetConvertDatasetExpConfig = OCRNetConvertDatasetExpConfig()
gen_trt_engine: OCRNetGenTrtEngineExpConfig = OCRNetGenTrtEngineExpConfig()
encryption_key: Optional[str] = None
results_dir: str = MISSING
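# Hedged sketch of consuming these dataclasses through OmegaConf (the actual
# wiring goes through hydra_runner; values below are illustrative, and any
# MISSING field must be filled before it is accessed):
#
#   from omegaconf import OmegaConf
#   cfg = OmegaConf.structured(ExperimentConfig)
#   cfg.results_dir = "/tmp/ocrnet"
#   cfg.gen_trt_engine.onnx_file = "/path/to/ocrnet.onnx"
#   print(cfg.gen_trt_engine.tensorrt.data_type)  # "fp16"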
| tao_deploy-main | nvidia_tao_deploy/cv/ocrnet/config/default_config.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""OCRNet convert etlt model to TRT engine."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import os
import tempfile
from nvidia_tao_deploy.cv.common.decorators import monitor_status
from nvidia_tao_deploy.cv.ocrnet.engine_builder import OCRNetEngineBuilder
from nvidia_tao_deploy.cv.common.hydra.hydra_runner import hydra_runner
from nvidia_tao_deploy.cv.ocrnet.config.default_config import ExperimentConfig
from nvidia_tao_deploy.utils.decoding import decode_model
logging.basicConfig(format='%(asctime)s [TAO Toolkit] [%(levelname)s] %(name)s %(lineno)d: %(message)s',
level="INFO")
logger = logging.getLogger(__name__)
spec_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
@hydra_runner(
config_path=os.path.join(spec_root, "specs"),
config_name="experiment", schema=ExperimentConfig
)
@monitor_status(name="ocrnet", mode="gen_trt_engine")
def main(cfg: ExperimentConfig) -> None:
"""Convert encrypted uff or onnx model to TRT engine."""
trt_cfg = cfg.gen_trt_engine
# decrypt onnx or etlt
tmp_onnx_file, file_format = decode_model(trt_cfg.onnx_file, cfg['encryption_key'])
engine_file = trt_cfg.trt_engine
data_type = trt_cfg.tensorrt['data_type']
workspace_size = trt_cfg.tensorrt['workspace_size']
min_batch_size = trt_cfg.tensorrt['min_batch_size']
opt_batch_size = trt_cfg.tensorrt['opt_batch_size']
max_batch_size = trt_cfg.tensorrt['max_batch_size']
if engine_file is None:
engine_handle, temp_engine_path = tempfile.mkstemp()
os.close(engine_handle)
output_engine_path = temp_engine_path
else:
output_engine_path = engine_file
builder = OCRNetEngineBuilder(verbose=trt_cfg.verbose,
workspace=workspace_size,
min_batch_size=min_batch_size,
opt_batch_size=opt_batch_size,
max_batch_size=max_batch_size)
builder.create_network(tmp_onnx_file, file_format)
builder.create_engine(
output_engine_path,
data_type)
logging.info("Engine generation finished successfully.")
if __name__ == '__main__':
main()
| tao_deploy-main | nvidia_tao_deploy/cv/ocrnet/scripts/gen_trt_engine.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TAO Deploy OCRNet scripts module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_deploy-main | nvidia_tao_deploy/cv/ocrnet/scripts/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""OCRNet TensorRT inference."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import os
from nvidia_tao_deploy.cv.common.decorators import monitor_status
from nvidia_tao_deploy.cv.ocrnet.dataloader import OCRNetLoader
from nvidia_tao_deploy.cv.ocrnet.inferencer import OCRNetInferencer
from nvidia_tao_deploy.cv.common.hydra.hydra_runner import hydra_runner
from nvidia_tao_deploy.cv.ocrnet.config.default_config import ExperimentConfig
from nvidia_tao_deploy.cv.ocrnet.utils import decode_ctc
logging.basicConfig(format='%(asctime)s [TAO Toolkit] [%(levelname)s] %(name)s %(lineno)d: %(message)s',
level="INFO")
logger = logging.getLogger(__name__)
spec_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
@hydra_runner(
config_path=os.path.join(spec_root, "specs"),
config_name="experiment", schema=ExperimentConfig
)
@monitor_status(name="ocrnet", mode="inference")
def main(cfg: ExperimentConfig) -> None:
"""Convert encrypted uff or onnx model to TRT engine."""
engine_file = cfg.inference.trt_engine
batch_size = cfg.inference.batch_size
img_dirs = cfg.inference.inference_dataset_dir
character_list_file = cfg.dataset.character_list_file
img_width = cfg.inference.input_width
img_height = cfg.inference.input_height
img_channel = cfg.model.input_channel
shape = [img_channel, img_height, img_width]
ocrnet_engine = OCRNetInferencer(engine_path=engine_file,
batch_size=batch_size)
character_list = ["CTCBlank"]
with open(character_list_file, "r", encoding="utf-8") as f:
for ch in f.readlines():
ch = ch.strip()
character_list.append(ch)
inf_dl = OCRNetLoader(shape=shape,
image_dirs=[img_dirs],
batch_size=batch_size,
dtype=ocrnet_engine.inputs[0].host.dtype)
for idx, (imgs, _) in enumerate(inf_dl):
y_preds = ocrnet_engine.infer(imgs)
output_ids, output_probs, _ = y_preds
img_paths = inf_dl.image_paths[idx * batch_size: (idx + 1) * batch_size]
assert len(output_ids) == len(output_probs) == len(img_paths)
for img_path, output_id, output_prob in zip(img_paths, output_ids, output_probs):
text, conf = decode_ctc(output_id, output_prob, character_list=character_list)
print(f"{img_path}: {text} {conf}")
logging.info("TensorRT engine inference finished successfully.")
if __name__ == '__main__':
main()
| tao_deploy-main | nvidia_tao_deploy/cv/ocrnet/scripts/inference.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""OCRNet TensorRT inference."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import os
from tqdm import tqdm
from nvidia_tao_deploy.cv.common.decorators import monitor_status
from nvidia_tao_deploy.cv.ocrnet.dataloader import OCRNetLoader
from nvidia_tao_deploy.cv.ocrnet.inferencer import OCRNetInferencer
from nvidia_tao_deploy.cv.common.hydra.hydra_runner import hydra_runner
from nvidia_tao_deploy.cv.ocrnet.config.default_config import ExperimentConfig
from nvidia_tao_deploy.cv.ocrnet.utils import decode_ctc
logging.basicConfig(format='%(asctime)s [TAO Toolkit] [%(levelname)s] %(name)s %(lineno)d: %(message)s',
level="INFO")
logger = logging.getLogger(__name__)
spec_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
@hydra_runner(
config_path=os.path.join(spec_root, "specs"),
config_name="experiment", schema=ExperimentConfig
)
@monitor_status(name="ocrnet", mode="evaluation")
def main(cfg: ExperimentConfig) -> None:
"""Convert encrypted uff or onnx model to TRT engine."""
engine_file = cfg.evaluate.trt_engine
batch_size = cfg.evaluate.batch_size
img_dirs = cfg.evaluate.test_dataset_dir
gt_list = cfg.evaluate.test_dataset_gt_file
character_list_file = cfg.dataset.character_list_file
img_width = cfg.evaluate.input_width
img_height = cfg.evaluate.input_height
img_channel = cfg.model.input_channel
shape = [img_channel, img_height, img_width]
ocrnet_engine = OCRNetInferencer(engine_path=engine_file,
batch_size=batch_size)
character_list = ["CTCBlank"]
with open(character_list_file, "r", encoding="utf-8") as f:
for ch in f.readlines():
ch = ch.strip()
character_list.append(ch)
inf_dl = OCRNetLoader(shape=shape,
image_dirs=[img_dirs],
label_txts=[gt_list],
batch_size=batch_size,
dtype=ocrnet_engine.inputs[0].host.dtype)
total_cnt = 0
acc_cnt = 0
for imgs, labels in tqdm(inf_dl):
y_preds = ocrnet_engine.infer(imgs)
output_ids, output_probs, _ = y_preds
total_cnt += len(output_ids)
for output_id, output_prob, label in zip(output_ids, output_probs, labels):
text, _ = decode_ctc(output_id, output_prob, character_list=character_list)
if text == label:
acc_cnt += 1
log_info = f"Accuracy: {acc_cnt}/{total_cnt} {float(acc_cnt)/float(total_cnt)}"
# logging.info("Accuracy: {}/{} {}".format(acc_cnt, total_cnt, float(acc_cnt)/float(total_cnt)))
logging.info(log_info)
logging.info("TensorRT engine evaluation finished successfully.")
if __name__ == '__main__':
main()
| tao_deploy-main | nvidia_tao_deploy/cv/ocrnet/scripts/evaluate.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Entrypoint module for ocrnet."""
| tao_deploy-main | nvidia_tao_deploy/cv/ocrnet/entrypoint/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TAO Deploy command line wrapper to invoke CLI scripts."""
import argparse
from nvidia_tao_deploy.cv.ocrnet import scripts
from nvidia_tao_deploy.cv.common.entrypoint.entrypoint_hydra import get_subtasks, launch
def main():
"""Main entrypoint wrapper."""
# Create parser for a given task.
parser = argparse.ArgumentParser(
"ocrnet",
add_help=True,
description="Train Adapt Optimize Deploy entrypoint for OCRNet"
)
# Build list of subtasks by inspecting the scripts package.
subtasks = get_subtasks(scripts)
# Parse the arguments and launch the subtask.
launch(parser, subtasks)
if __name__ == '__main__':
main()
| tao_deploy-main | nvidia_tao_deploy/cv/ocrnet/entrypoint/ocrnet.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TAO Deploy TF2 Classification."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_deploy-main | nvidia_tao_deploy/cv/classification_tf2/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TAO Deploy Classification Hydra."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_deploy-main | nvidia_tao_deploy/cv/classification_tf2/hydra_config/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Default config file"""
from typing import List, Optional
from dataclasses import dataclass, field
from omegaconf import MISSING
from nvidia_tao_deploy.cv.common.config.mlops import ClearMLConfig, WandBConfig
@dataclass
class RegConfig:
"""Regularizer config."""
type: str = 'L2'
scope: List[str] = field(default_factory=lambda: ['conv2d', 'dense'])
weight_decay: float = 0.000015
@dataclass
class BNConfig:
"""Batchnorm config."""
momentum: float = 0.9
epsilon: float = 1e-5
@dataclass
class OptimConfig:
"""Optimizer config."""
optimizer: str = 'sgd'
lr: float = 0.05
decay: float = 0.0001
epsilon: float = 0.0001
rho: float = 0.5
beta_1: float = 0.99
beta_2: float = 0.99
momentum: float = 0.99
nesterov: bool = True
@dataclass
class LRConfig:
"""Learning rate config."""
scheduler: str = 'cosine' # soft_anneal, step
learning_rate: float = 0.05
soft_start: float = 0.05
annealing_points: List[float] = field(default_factory=lambda: [0.33, 0.66, 0.88])
annealing_divider: float = 10
min_lr_ratio: float = 0.00003
gamma: float = 0.000015
step_size: int = 10
@dataclass
class TrainConfig:
"""Train config."""
qat: bool = False
checkpoint: str = ''
checkpoint_interval: int = 1
batch_size_per_gpu: int = 64
num_epochs: int = 100
n_workers: int = 10
random_seed: int = 42
label_smoothing: float = 0.01
reg_config: RegConfig = RegConfig()
bn_config: BNConfig = BNConfig()
lr_config: LRConfig = LRConfig()
optim_config: OptimConfig = OptimConfig()
wandb: WandBConfig = WandBConfig(
name="classification",
tags=["classification", "training", "tao-toolkit"]
)
clearml: ClearMLConfig = ClearMLConfig(
task="classification_train",
tags=["classification", "training", "tao-toolkit"]
)
results_dir: Optional[str] = None
@dataclass
class AugmentConfig:
"""Augment config."""
enable_random_crop: bool = True
enable_center_crop: bool = True
enable_color_augmentation: bool = False
disable_horizontal_flip: bool = False
mixup_alpha: float = 0
@dataclass
class DataConfig:
"""Data config."""
train_dataset_path: str = MISSING
val_dataset_path: str = MISSING
preprocess_mode: str = 'caffe'
image_mean: List[float] = field(default_factory=lambda: [103.939, 116.779, 123.68])
augmentation: AugmentConfig = AugmentConfig()
num_classes: int = MISSING
@dataclass
class ModelConfig:
"""Model config."""
backbone: str = 'resnet_18'
input_width: int = 224
input_height: int = 224
input_channels: int = 3
input_image_depth: int = 8
use_batch_norm: bool = True
use_bias: bool = False
use_pooling: bool = True
all_projections: bool = False
freeze_bn: bool = False
freeze_blocks: List[int] = field(default_factory=lambda: [])
retain_head: bool = False
dropout: float = 0.0
resize_interpolation_method: str = 'bilinear' # 'bicubic'
activation_type: Optional[str] = None # only used in efficientnets
byom_model: str = ''
@dataclass
class EvalConfig:
"""Eval config."""
dataset_path: str = MISSING
checkpoint: str = MISSING
trt_engine: Optional[str] = None
batch_size: int = 64
n_workers: int = 64
top_k: int = 3
classmap: str = ""
results_dir: Optional[str] = None
@dataclass
class ExportConfig:
"""Export config."""
checkpoint: str = MISSING
onnx_file: str = MISSING
results_dir: Optional[str] = None
@dataclass
class CalibrationConfig:
"""Calibration config."""
cal_image_dir: str = ""
cal_cache_file: str = ""
cal_batch_size: int = 1
cal_batches: int = 1
cal_data_file: str = ""
@dataclass
class TrtConfig:
"""Trt config."""
data_type: str = "fp32"
max_workspace_size: int = 2 # in Gb
min_batch_size: int = 1
opt_batch_size: int = 1
max_batch_size: int = 1
calibration: CalibrationConfig = CalibrationConfig()
@dataclass
class GenTrtEngineConfig:
"""Gen TRT Engine experiment config."""
results_dir: Optional[str] = None
onnx_file: str = MISSING
trt_engine: Optional[str] = None
tensorrt: TrtConfig = TrtConfig()
@dataclass
class InferConfig:
"""Inference config."""
checkpoint: str = MISSING
trt_engine: Optional[str] = None
image_dir: str = MISSING
classmap: str = MISSING
results_dir: Optional[str] = None
@dataclass
class PruneConfig:
"""Pruning config."""
checkpoint: str = MISSING
byom_model_path: Optional[str] = None
normalizer: str = 'max'
results_dir: Optional[str] = None
equalization_criterion: str = 'union'
granularity: int = 8
threshold: float = MISSING
min_num_filters: int = 16
excluded_layers: List[str] = field(default_factory=lambda: [])
@dataclass
class ExperimentConfig:
"""Experiment config."""
train: TrainConfig = TrainConfig()
dataset: DataConfig = DataConfig()
model: ModelConfig = ModelConfig()
evaluate: EvalConfig = EvalConfig()
export: ExportConfig = ExportConfig()
inference: InferConfig = InferConfig()
prune: PruneConfig = PruneConfig()
gen_trt_engine: GenTrtEngineConfig = GenTrtEngineConfig()
results_dir: str = MISSING
encryption_key: Optional[str] = None
data_format: str = 'channels_first'
| tao_deploy-main | nvidia_tao_deploy/cv/classification_tf2/hydra_config/default_config.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classification convert etlt model to TRT engine."""
import logging
import os
import tempfile
from nvidia_tao_deploy.utils.decoding import decode_model
from nvidia_tao_deploy.cv.classification_tf1.engine_builder import ClassificationEngineBuilder
from nvidia_tao_deploy.cv.classification_tf2.hydra_config.default_config import ExperimentConfig
from nvidia_tao_deploy.cv.common.decorators import monitor_status
from nvidia_tao_deploy.cv.common.hydra.hydra_runner import hydra_runner
from nvidia_tao_deploy.cv.common.utils import update_results_dir
logging.basicConfig(format='%(asctime)s [TAO Toolkit] [%(levelname)s] %(name)s %(lineno)d: %(message)s',
level="INFO")
logger = logging.getLogger(__name__)
spec_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
@hydra_runner(
config_path=os.path.join(spec_root, "specs"),
config_name="experiment_spec", schema=ExperimentConfig
)
def main(cfg: ExperimentConfig) -> None:
"""Wrapper function for TRT engine generation."""
cfg = update_results_dir(cfg, 'gen_trt_engine')
run_conversion(cfg=cfg)
@monitor_status(name='classification_tf2', mode='gen_trt_engine')
def run_conversion(cfg: ExperimentConfig) -> None:
"""Classification TRT convert."""
# decrypt etlt or use onnx
tmp_onnx_file, file_format = decode_model(cfg.gen_trt_engine.onnx_file, cfg.encryption_key)
if cfg.gen_trt_engine.trt_engine is not None or cfg.gen_trt_engine.tensorrt.data_type == 'int8':
if cfg.gen_trt_engine.trt_engine is None:
engine_handle, temp_engine_path = tempfile.mkstemp()
os.close(engine_handle)
output_engine_path = temp_engine_path
else:
output_engine_path = cfg.gen_trt_engine.trt_engine
builder = ClassificationEngineBuilder(verbose=True,
workspace=cfg.gen_trt_engine.tensorrt.max_workspace_size,
min_batch_size=cfg.gen_trt_engine.tensorrt.min_batch_size,
opt_batch_size=cfg.gen_trt_engine.tensorrt.opt_batch_size,
max_batch_size=cfg.gen_trt_engine.tensorrt.max_batch_size,
is_qat=cfg.train.qat,
data_format=cfg.data_format,
preprocess_mode=cfg.dataset.preprocess_mode)
builder.create_network(tmp_onnx_file, file_format)
builder.create_engine(
output_engine_path,
cfg.gen_trt_engine.tensorrt.data_type,
calib_data_file=cfg.gen_trt_engine.tensorrt.calibration.cal_data_file,
calib_input=cfg.gen_trt_engine.tensorrt.calibration.cal_image_dir,
calib_cache=cfg.gen_trt_engine.tensorrt.calibration.cal_cache_file,
calib_num_images=cfg.gen_trt_engine.tensorrt.calibration.cal_batch_size * cfg.gen_trt_engine.tensorrt.calibration.cal_batches,
calib_batch_size=cfg.gen_trt_engine.tensorrt.calibration.cal_batch_size)
logging.info("Export finished successfully.")
if __name__ == '__main__':
main()
| tao_deploy-main | nvidia_tao_deploy/cv/classification_tf2/scripts/gen_trt_engine.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TAO Deploy Classification TF2 scripts module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_deploy-main | nvidia_tao_deploy/cv/classification_tf2/scripts/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Standalone TensorRT inference."""
import logging
import os
import json
import pandas as pd
import numpy as np
from tqdm.auto import tqdm
from nvidia_tao_deploy.cv.classification_tf1.inferencer import ClassificationInferencer
from nvidia_tao_deploy.cv.classification_tf1.dataloader import ClassificationLoader
from nvidia_tao_deploy.cv.classification_tf2.hydra_config.default_config import ExperimentConfig
from nvidia_tao_deploy.cv.common.decorators import monitor_status
from nvidia_tao_deploy.cv.common.hydra.hydra_runner import hydra_runner
from nvidia_tao_deploy.cv.common.utils import update_results_dir
logging.getLogger('PIL').setLevel(logging.WARNING)
logging.basicConfig(format='%(asctime)s [TAO Toolkit] [%(levelname)s] %(name)s %(lineno)d: %(message)s',
level="INFO")
logger = logging.getLogger(__name__)
spec_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
@hydra_runner(
config_path=os.path.join(spec_root, "specs"),
config_name="experiment_spec", schema=ExperimentConfig
)
def main(cfg: ExperimentConfig) -> None:
"""Wrapper function for TRT engine inference."""
cfg = update_results_dir(cfg, 'inference')
run_inference(cfg=cfg)
@monitor_status(name='classification_tf2', mode='inference')
def run_inference(cfg: ExperimentConfig) -> None:
"""Classification TRT inference."""
classmap = cfg.inference.classmap
if classmap:
# if classmap is provided, we explicitly set the mapping from the json file
if not os.path.exists(classmap):
raise FileNotFoundError(f"{classmap} does not exist!")
with open(classmap, "r", encoding="utf-8") as f:
mapping_dict = json.load(f)
else:
# If not, the order of the classes is alphanumeric, as defined by Keras
# Ref: https://github.com/keras-team/keras/blob/07e13740fd181fc3ddec7d9a594d8a08666645f6/keras/preprocessing/image.py#L507
mapping_dict = {}
for idx, subdir in enumerate(sorted(os.listdir(cfg.evaluate.dataset_path))):
if os.path.isdir(os.path.join(cfg.evaluate.dataset_path, subdir)):
mapping_dict[subdir] = idx
mode = cfg.dataset.preprocess_mode
interpolation_method = cfg.model.resize_interpolation_method
crop = "center" if cfg.dataset.augmentation.enable_center_crop else None
data_format = cfg.data_format
image_mean = cfg.dataset.image_mean
batch_size = cfg.evaluate.batch_size
trt_infer = ClassificationInferencer(cfg.inference.trt_engine, data_format=data_format, batch_size=batch_size)
dl = ClassificationLoader(
trt_infer._input_shape,
[cfg.inference.image_dir],
mapping_dict,
is_inference=True,
data_format=data_format,
interpolation_method=interpolation_method,
mode=mode,
crop=crop,
batch_size=cfg.evaluate.batch_size,
image_mean=image_mean,
image_depth=cfg.model.input_image_depth,
dtype=trt_infer.inputs[0].host.dtype)
os.makedirs(cfg.results_dir, exist_ok=True)
result_csv_path = os.path.join(cfg.results_dir, 'result.csv')
with open(result_csv_path, 'w', encoding="utf-8") as csv_f:
for i, (imgs, _) in tqdm(enumerate(dl), total=len(dl), desc="Producing predictions"):
image_paths = dl.image_paths[np.arange(batch_size) + batch_size * i]
y_pred = trt_infer.infer(imgs)
# Class output from softmax layer
class_indices = np.argmax(y_pred, axis=1)
# Map label index to label name
class_labels = map(lambda i: list(mapping_dict.keys())
[list(mapping_dict.values()).index(i)],
class_indices)
conf = np.max(y_pred, axis=1)
# Write predictions to file
df = pd.DataFrame(zip(image_paths, class_labels, conf))
df.to_csv(csv_f, header=False, index=False)
logging.info("Finished inference.")
if __name__ == '__main__':
main()
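# Sketch of the classmap JSON consumed above: a mapping from class name to
# the index of the corresponding softmax output (values are illustrative):
#   {"cat": 0, "dog": 1, "horse": 2}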
| tao_deploy-main | nvidia_tao_deploy/cv/classification_tf2/scripts/inference.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Standalone TensorRT evaluation."""
import logging
import os
import json
import numpy as np
from tqdm.auto import tqdm
from sklearn.metrics import classification_report, confusion_matrix, top_k_accuracy_score
from nvidia_tao_deploy.cv.classification_tf1.inferencer import ClassificationInferencer
from nvidia_tao_deploy.cv.classification_tf1.dataloader import ClassificationLoader
from nvidia_tao_deploy.cv.classification_tf2.hydra_config.default_config import ExperimentConfig
from nvidia_tao_deploy.cv.common.decorators import monitor_status
from nvidia_tao_deploy.cv.common.hydra.hydra_runner import hydra_runner
from nvidia_tao_deploy.cv.common.utils import update_results_dir
logging.getLogger('PIL').setLevel(logging.WARNING)
logging.basicConfig(format='%(asctime)s [TAO Toolkit] [%(levelname)s] %(name)s %(lineno)d: %(message)s',
level="INFO")
logger = logging.getLogger(__name__)
spec_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
@hydra_runner(
config_path=os.path.join(spec_root, "specs"),
config_name="experiment_spec", schema=ExperimentConfig
)
def main(cfg: ExperimentConfig) -> None:
"""Wrapper function for TRT engine evaluation."""
cfg = update_results_dir(cfg, 'evaluate')
run_evaluation(cfg=cfg)
@monitor_status(name='classification_tf2', mode='evaluation')
def run_evaluation(cfg: ExperimentConfig) -> None:
"""Classification TRT evaluation."""
classmap = cfg.evaluate.classmap
if classmap:
# if classmap is provided, we explicitly set the mapping from the json file
if not os.path.exists(classmap):
raise FileNotFoundError(f"{classmap} does not exist!")
with open(classmap, "r", encoding="utf-8") as f:
mapping_dict = json.load(f)
else:
# If not, the order of the classes is alphanumeric, as defined by Keras
# Ref: https://github.com/keras-team/keras/blob/07e13740fd181fc3ddec7d9a594d8a08666645f6/keras/preprocessing/image.py#L507
mapping_dict = {}
for idx, subdir in enumerate(sorted(os.listdir(cfg.evaluate.dataset_path))):
if os.path.isdir(os.path.join(cfg.evaluate.dataset_path, subdir)):
mapping_dict[subdir] = idx
# Load hparams
target_names = [c[0] for c in sorted(mapping_dict.items(), key=lambda x:x[1])]
top_k = cfg.evaluate.top_k
mode = cfg.dataset.preprocess_mode
interpolation_method = cfg.model.resize_interpolation_method
crop = "center" if cfg.dataset.augmentation.enable_center_crop else None
data_format = cfg.data_format
image_mean = cfg.dataset.image_mean
batch_size = cfg.evaluate.batch_size
trt_infer = ClassificationInferencer(cfg.evaluate.trt_engine, data_format=data_format, batch_size=batch_size)
dl = ClassificationLoader(
trt_infer._input_shape,
[cfg.evaluate.dataset_path],
mapping_dict,
data_format=data_format,
interpolation_method=interpolation_method,
mode=mode,
crop=crop,
batch_size=cfg.evaluate.batch_size,
image_mean=image_mean,
image_depth=cfg.model.input_image_depth,
dtype=trt_infer.inputs[0].host.dtype)
gt_labels = []
pred_labels = []
for imgs, labels in tqdm(dl, total=len(dl), desc="Producing predictions"):
gt_labels.extend(labels)
y_pred = trt_infer.infer(imgs)
pred_labels.extend(y_pred)
# Check output classes
output_num_classes = pred_labels[0].shape[0]
if len(mapping_dict) != output_num_classes:
raise ValueError(f"Provided class map has {len(mapping_dict)} classes while the engine expects {output_num_classes} classes.")
gt_labels = np.array(gt_labels)
pred_labels = np.array(pred_labels)
# Metric calculation
target_names = [c[0] for c in sorted(mapping_dict.items(), key=lambda x:x[1])]
target_labels = [c[1] for c in sorted(mapping_dict.items(), key=lambda x:x[1])]
if len(target_labels) == 2:
# If there are only two classes, sklearn treats the problem as binary classification
# and requires predictions of shape (num_samples,) rather than (num_samples, num_classes)
scores = top_k_accuracy_score(gt_labels, pred_labels[:, 1], k=top_k, labels=target_labels)
else:
scores = top_k_accuracy_score(gt_labels, pred_labels, k=top_k, labels=target_labels)
logging.info("Top %s scores: %s", top_k, scores)
logging.info("Confusion Matrix")
y_predictions = np.argmax(pred_labels, axis=1)
print(confusion_matrix(gt_labels, y_predictions))
logging.info("Classification Report")
print(classification_report(gt_labels, y_predictions, labels=target_labels, target_names=target_names))
# Store evaluation results into JSON
eval_results = {"top_k_accuracy": scores}
with open(os.path.join(cfg.results_dir, "results.json"), "w", encoding="utf-8") as f:
json.dump(eval_results, f)
logging.info("Finished evaluation.")
if __name__ == '__main__':
main()
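# Sketch of the binary special case handled above: with exactly two classes,
# sklearn expects the positive-class scores of shape (num_samples,):
#   probs = np.array([[0.9, 0.1], [0.2, 0.8]])
#   top_k_accuracy_score([0, 1], probs[:, 1], k=1, labels=[0, 1])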
| tao_deploy-main | nvidia_tao_deploy/cv/classification_tf2/scripts/evaluate.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Entrypoint module for classification."""
| tao_deploy-main | nvidia_tao_deploy/cv/classification_tf2/entrypoint/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TAO Deploy command line wrapper to invoke CLI scripts."""
import argparse
from nvidia_tao_deploy.cv.classification_tf2 import scripts
from nvidia_tao_deploy.cv.common.entrypoint.entrypoint_hydra import get_subtasks, launch
def main():
"""Main entrypoint wrapper."""
# Create parser for a given task.
parser = argparse.ArgumentParser(
"classification_tf2",
add_help=True,
description="Train Adapt Optimize Deploy entrypoint for TF2 classification"
)
# Build list of subtasks by inspecting the scripts package.
subtasks = get_subtasks(scripts)
# Parse the arguments and launch the subtask.
launch(parser,
subtasks,
override_results_dir="results_dir",
override_threshold=None, # No threshold in Classification
override_key="key",
network="classification_tf2")
if __name__ == '__main__':
main()
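# Example invocation (sketch; subtask names are discovered from the scripts
# package, and the -e/--experiment-spec convention is assumed):
#   classification_tf2 gen_trt_engine -e /path/to/experiment_spec.yaml
#   classification_tf2 evaluate -e /path/to/experiment_spec.yaml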
| tao_deploy-main | nvidia_tao_deploy/cv/classification_tf2/entrypoint/classification_tf2.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""EfficientDet TensorRT inferencer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import pycuda.autoinit # noqa pylint: disable=unused-import
import pycuda.driver as cuda
import tensorrt as trt
from nvidia_tao_deploy.inferencer.trt_inferencer import TRTInferencer
class EfficientDetInferencer(TRTInferencer):
"""Implements inference for the EfficientDet TensorRT engine."""
def __init__(self, engine_path, max_detections_per_image=100, batch_size=1):
"""Init.
Args:
engine_path (str): The path to the serialized engine to load from disk.
max_detections_per_image (int): The maximum number of detections to keep per image.
batch_size (int): Batch size to use when the engine has a dynamic batch dimension.
"""
# Load TRT engine
super().__init__(engine_path)
self.max_detections_per_image = max_detections_per_image
# Setup I/O bindings
self.inputs = []
self.outputs = []
self.allocations = []
for i in range(self.engine.num_bindings):
name = self.engine.get_binding_name(i)
dtype = self.engine.get_binding_dtype(i)
shape = self.engine.get_binding_shape(i)
self.batch_size = shape[0] if shape[0] > 0 else batch_size
size = np.dtype(trt.nptype(dtype)).itemsize
for s in shape[1:]:
size *= s
size *= self.batch_size
allocation = cuda.mem_alloc(size)
binding = {
'index': i,
'name': name,
'dtype': np.dtype(trt.nptype(dtype)),
'shape': list(shape),
'allocation': allocation,
}
self.allocations.append(allocation)
if self.engine.binding_is_input(i):
self.inputs.append(binding)
self._input_shape = (self.batch_size, shape[1], shape[2], shape[3])
self.context.set_binding_shape(0, self._input_shape)
else:
self.outputs.append(binding)
assert self.batch_size > 0
assert len(self.inputs) > 0
assert len(self.outputs) > 0
assert len(self.allocations) > 0
def input_spec(self):
"""Get the specs for the input tensor of the network. Useful to prepare memory allocations.
Args:
None
Returns:
the shape of the input tensor.
(numpy) datatype of the input tensor.
"""
return self.inputs[0]['shape'], self.inputs[0]['dtype']
def output_spec(self):
"""Get the specs for the output tensors of the network. Useful to prepare memory allocations.
Args:
None
Returns:
specs: A list with two items per element, the shape and (numpy) datatype of each output tensor.
"""
specs = []
for o in self.outputs:
specs.append(([self.batch_size, ] + o['shape'][1:], o['dtype']))
return specs
def infer(self, imgs, scales=None):
"""Execute inference on a batch of images.
The images should already be batched and preprocessed, as prepared by
the ImageBatcher class. Memory copying to and from the GPU device will be performed here.
Args:
imgs: A numpy array holding the image batch.
scales: The image resize scales for each image in this batch.
Default: No scale postprocessing applied.
Returns:
detections: A nested list for each image in the batch and each detection in the list.
"""
# Prepare the output data
outputs = []
for shape, dtype in self.output_spec():
outputs.append(np.zeros(shape, dtype))
# Process I/O and execute the network
cuda.memcpy_htod(self.inputs[0]['allocation'], np.ascontiguousarray(imgs))
self.context.execute_v2(self.allocations)
for o in range(len(outputs)):
cuda.memcpy_dtoh(outputs[o], self.outputs[o]['allocation'])
nums = self.max_detections_per_image
boxes = outputs[1][:, :nums, :]
scores = outputs[2][:, :nums]
classes = outputs[3][:, :nums]
# Reorganize from y1, x1, y2, x2 to x1, y1, x2, y2
boxes[:, :, [0, 1]] = boxes[:, :, [1, 0]]
boxes[:, :, [2, 3]] = boxes[:, :, [3, 2]]
# convert x2, y2 to w, h
boxes[:, :, 2] -= boxes[:, :, 0]
boxes[:, :, 3] -= boxes[:, :, 1]
# Scale the box
for i in range(len(boxes)):
boxes[i] /= scales[i]
detections = {}
detections['num_detections'] = np.array([nums] * self.batch_size).astype(np.int32)
detections['detection_classes'] = classes + 1
detections['detection_scores'] = scores
detections['detection_boxes'] = boxes
return detections
def __del__(self):
"""Simple function to destroy tensorrt handlers."""
if self.context:
del self.context
if self.engine:
del self.engine
if self.allocations:
self.allocations.clear()
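# --- Usage sketch (not part of the upstream file) ---
# Demonstrates the call pattern on a zero-filled batch; the engine path is a
# placeholder and the batch stands in for preprocessed images.
def _example_run_efficientdet(engine_path="model.engine"):
    infer = EfficientDetInferencer(engine_path, max_detections_per_image=100)
    shape, dtype = infer.input_spec()
    batch = np.zeros(shape, dtype=dtype)  # placeholder image batch
    dets = infer.infer(batch, scales=[1.0] * shape[0])
    return dets["detection_boxes"]  # (batch, max_detections, 4) in x1, y1, w, h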
| tao_deploy-main | nvidia_tao_deploy/cv/efficientdet_tf2/inferencer.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""EfficientDet TensorRT engine builder."""
import logging
import os
import sys
import onnx
import tensorrt as trt
from nvidia_tao_deploy.engine.builder import EngineBuilder
logging.basicConfig(format='%(asctime)s [TAO Toolkit] [%(levelname)s] %(name)s %(lineno)d: %(message)s',
level="INFO")
logger = logging.getLogger(__name__)
class EfficientDetEngineBuilder(EngineBuilder):
"""Parses an ONNX graph and builds a TensorRT engine from it."""
def get_input_dims(self, model_path):
"""Get input dimension of UFF model."""
onnx_model = onnx.load(model_path)
onnx_inputs = onnx_model.graph.input
logger.info('List inputs:')
for i, inputs in enumerate(onnx_inputs):
logger.info('Input %s -> %s.', i, inputs.name)
logger.info('%s.', [i.dim_value for i in inputs.type.tensor_type.shape.dim][1:])
logger.info('%s.', [i.dim_value for i in inputs.type.tensor_type.shape.dim][0])
def create_network(self, model_path, dynamic_batch_size=None, file_format="onnx"):
"""Parse the ONNX graph and create the corresponding TensorRT network definition.
Args:
model_path: The path to the ONNX graph to load.
"""
if file_format == "onnx":
self.get_input_dims(model_path)
network_flags = (1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
self.network = self.builder.create_network(network_flags)
self.parser = trt.OnnxParser(self.network, self.trt_logger)
model_path = os.path.realpath(model_path)
with open(model_path, "rb") as f:
if not self.parser.parse(f.read()):
logger.error("Failed to load ONNX file: %s", model_path)
for error in range(self.parser.num_errors):
logger.error(self.parser.get_error(error))
sys.exit(1)
inputs = [self.network.get_input(i) for i in range(self.network.num_inputs)]
# self.batch_size = self.max_batch_size
logger.info("Network Description")
profile = self.builder.create_optimization_profile()
dynamic_inputs = False
for inp in inputs:
logger.info("Input '{}' with shape {} and dtype {}".format(inp.name, inp.shape, inp.dtype)) # noqa pylint: disable=C0209
if inp.shape[0] == -1:
dynamic_inputs = True
if dynamic_batch_size:
if isinstance(dynamic_batch_size, str):
dynamic_batch_size = [int(v) for v in dynamic_batch_size.split(",")]
assert len(dynamic_batch_size) == 3
min_shape = [dynamic_batch_size[0]] + list(inp.shape[1:])
opt_shape = [dynamic_batch_size[1]] + list(inp.shape[1:])
max_shape = [dynamic_batch_size[2]] + list(inp.shape[1:])
profile.set_shape(inp.name, min_shape, opt_shape, max_shape)
logger.info("Input '{}' Optimization Profile with shape MIN {} / OPT {} / MAX {}".format( # noqa pylint: disable=C0209
inp.name, min_shape, opt_shape, max_shape))
else:
shape = [self.batch_size] + list(inp.shape[1:])
profile.set_shape(inp.name, shape, shape, shape)
logger.info("Input '{}' Optimization Profile with shape {}".format(inp.name, shape)) # noqa pylint: disable=C0209
if dynamic_inputs:
self.config.add_optimization_profile(profile)
else:
logger.info("Parsing UFF model")
raise NotImplementedError("UFF for EfficientDet is not supported")
def create_engine(self, engine_path, precision,
calib_input=None, calib_cache=None, calib_num_images=5000,
calib_batch_size=8, calib_data_file=None):
"""Build the TensorRT engine and serialize it to disk.
Args:
engine_path: The path where to serialize the engine to.
precision: The datatype to use for the engine, either 'fp32', 'fp16' or 'int8'.
calib_input: The path to a directory holding the calibration images.
calib_cache: The path where to write the calibration cache to,
or if it already exists, load it from.
calib_num_images: The maximum number of images to use for calibration.
calib_batch_size: The batch size to use for the calibration process.
calib_data_file: Path to a file of pre-generated calibration data, if available.
"""
engine_path = os.path.realpath(engine_path)
engine_dir = os.path.dirname(engine_path)
os.makedirs(engine_dir, exist_ok=True)
logger.debug("Building %s Engine in %s", precision, engine_path)
inputs = [self.network.get_input(i) for i in range(self.network.num_inputs)]
if self.batch_size is None:
self.batch_size = calib_batch_size
self.builder.max_batch_size = self.batch_size
if self.batch_size != calib_batch_size:
warning_msg = "For ONNX models with static batch size, " \
"calibration is done using the original batch size " \
f"of the ONNX model which is {self.batch_size}. " \
f"Overriding the provided calibration batch size {calib_batch_size}" \
f" to {self.batch_size}"
logger.warning(warning_msg)
calib_batch_size = self.batch_size
if self._is_qat and precision != "int8":
raise ValueError(f"QAT model only supports data_type int8 but {precision} was provided.")
if precision == "fp16":
if not self.builder.platform_has_fast_fp16:
logger.warning("FP16 is not supported natively on this platform/device")
else:
self.config.set_flag(trt.BuilderFlag.FP16)
elif precision == "int8":
if not self.builder.platform_has_fast_int8:
logger.warning("INT8 is not supported natively on this platform/device")
elif self._is_qat:
# TF2 embeds QAT scales into the ONNX directly.
# Hence, no need to set dynamic range of tensors.
self.config.set_flag(trt.BuilderFlag.INT8)
else:
if self.builder.platform_has_fast_fp16:
# Also enable fp16, as some layers may be even more efficient in fp16 than int8
self.config.set_flag(trt.BuilderFlag.FP16)
self.config.set_flag(trt.BuilderFlag.INT8)
# Set ImageBatcher based calibrator
self.set_calibrator(inputs=inputs,
calib_cache=calib_cache,
calib_input=calib_input,
calib_num_images=calib_num_images,
calib_batch_size=calib_batch_size,
calib_data_file=calib_data_file)
with self.builder.build_engine(self.network, self.config) as engine, \
open(engine_path, "wb") as f:
logger.debug("Serializing engine to file: %s", engine_path)
f.write(engine.serialize())
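# --- Usage sketch (not part of the upstream file) ---
# Builds an FP16 engine with a dynamic batch profile of min/opt/max = 1/4/8;
# the ONNX and engine paths are placeholders.
def _example_build_engine():
    builder = EfficientDetEngineBuilder(verbose=True, workspace=2, max_batch_size=8)
    builder.create_network("model.onnx", dynamic_batch_size="1,4,8", file_format="onnx")
    builder.create_engine("model.engine", "fp16")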
| tao_deploy-main | nvidia_tao_deploy/cv/efficientdet_tf2/engine_builder.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TAO Deploy EfficientDet."""
| tao_deploy-main | nvidia_tao_deploy/cv/efficientdet_tf2/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""EfficientDet Utilities."""
import yaml
coco = {
# 0: 'background',
1: 'person',
2: 'bicycle',
3: 'car',
4: 'motorcycle',
5: 'airplane',
6: 'bus',
7: 'train',
8: 'truck',
9: 'boat',
10: 'traffic light',
11: 'fire hydrant',
13: 'stop sign',
14: 'parking meter',
15: 'bench',
16: 'bird',
17: 'cat',
18: 'dog',
19: 'horse',
20: 'sheep',
21: 'cow',
22: 'elephant',
23: 'bear',
24: 'zebra',
25: 'giraffe',
27: 'backpack',
28: 'umbrella',
31: 'handbag',
32: 'tie',
33: 'suitcase',
34: 'frisbee',
35: 'skis',
36: 'snowboard',
37: 'sports ball',
38: 'kite',
39: 'baseball bat',
40: 'baseball glove',
41: 'skateboard',
42: 'surfboard',
43: 'tennis racket',
44: 'bottle',
46: 'wine glass',
47: 'cup',
48: 'fork',
49: 'knife',
50: 'spoon',
51: 'bowl',
52: 'banana',
53: 'apple',
54: 'sandwich',
55: 'orange',
56: 'broccoli',
57: 'carrot',
58: 'hot dog',
59: 'pizza',
60: 'donut',
61: 'cake',
62: 'chair',
63: 'couch',
64: 'potted plant',
65: 'bed',
67: 'dining table',
70: 'toilet',
72: 'tv',
73: 'laptop',
74: 'mouse',
75: 'remote',
76: 'keyboard',
77: 'cell phone',
78: 'microwave',
79: 'oven',
80: 'toaster',
81: 'sink',
82: 'refrigerator',
84: 'book',
85: 'clock',
86: 'vase',
87: 'scissors',
88: 'teddy bear',
89: 'hair drier',
90: 'toothbrush',
}
voc = {
# 0: 'background',
1: 'aeroplane',
2: 'bicycle',
3: 'bird',
4: 'boat',
5: 'bottle',
6: 'bus',
7: 'car',
8: 'cat',
9: 'chair',
10: 'cow',
11: 'diningtable',
12: 'dog',
13: 'horse',
14: 'motorbike',
15: 'person',
16: 'pottedplant',
17: 'sheep',
18: 'sofa',
19: 'train',
20: 'tvmonitor',
}
waymo = {
# 0: 'background',
1: 'vehicle',
2: 'pedestrian',
3: 'cyclist',
}
def get_label_dict(label_txt):
"""Create label dict from txt file."""
with open(label_txt, 'r', encoding="utf-8") as f:
labels = f.readlines()
result = {i + 1: label.strip() for i, label in enumerate(labels)}
result[-1] = "background"
return result
def get_label_map(mapping):
"""Get label id map based on the name, filename, or dict."""
# case 1: if it is None or dict, just return it.
if not mapping or isinstance(mapping, dict):
return mapping
# case 2: if it is a yaml file, load it to a dict and return the dict.
assert isinstance(mapping, str), 'mapping must be dict or str.'
if mapping.endswith('.yaml'):
with open(mapping, "r", encoding="utf-8") as f:
return yaml.safe_load(f)
# case 3: it is a name of a predefined dataset.
return {'coco': coco, 'voc': voc, 'waymo': waymo}[mapping]
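# Usage sketch for the helpers above (the .txt path is a placeholder):
#   get_label_map("voc")          # predefined VOC id -> name dict
#   get_label_map({1: "car"})     # dicts are returned unchanged
#   get_label_dict("labels.txt")  # {1: <line 1>, 2: <line 2>, ..., -1: "background"}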
| tao_deploy-main | nvidia_tao_deploy/cv/efficientdet_tf2/utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TAO Deploy EfficientDet Hydra."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_deploy-main | nvidia_tao_deploy/cv/efficientdet_tf2/hydra_config/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Default config file"""
from typing import List, Optional
from dataclasses import dataclass, field
from omegaconf import MISSING
from nvidia_tao_deploy.cv.common.config.mlops import ClearMLConfig, WandBConfig
@dataclass
class LoaderConfig:
"""Dataloader config."""
shuffle_buffer: int = 10000
cycle_length: int = 32
block_length: int = 16
shuffle_file: bool = True
prefetch_size: int = 2
@dataclass
class LRConfig:
"""LR config."""
name: str = 'cosine' # soft_anneal
warmup_epoch: int = 5
warmup_init: float = 0.0001
learning_rate: float = 0.2
annealing_epoch: int = 10
@dataclass
class OptConfig:
"""Optimizer config."""
name: str = 'sgd'
momentum: float = 0.9
@dataclass
class TrainConfig:
"""Train config."""
init_epoch: int = 0
optimizer: OptConfig = OptConfig()
lr_schedule: LRConfig = LRConfig()
num_examples_per_epoch: int = 120000
batch_size: int = 8
num_epochs: int = 300
checkpoint: str = ""
random_seed: int = 42
l1_weight_decay: float = 0.0
l2_weight_decay: float = 0.00004
amp: bool = False
pruned_model_path: str = ''
moving_average_decay: float = 0.9999
clip_gradients_norm: float = 10.0
skip_checkpoint_variables: str = ''
checkpoint_interval: int = 10
image_preview: bool = True
qat: bool = False
label_smoothing: float = 0.0
box_loss_weight: float = 50.0
iou_loss_type: str = ""
iou_loss_weight: float = 1.0
wandb: WandBConfig = WandBConfig(
name="efficientdet",
tags=["efficientdet", "training", "tao-toolkit"]
)
clearml: ClearMLConfig = ClearMLConfig(
task="efficientdet_train",
tags=["efficientdet", "training", "tao-toolkit"]
)
results_dir: Optional[str] = None
@dataclass
class ModelConfig:
"""Model config."""
name: str = 'efficientdet-d0'
aspect_ratios: str = '[(1.0, 1.0), (1.4, 0.7), (0.7, 1.4)]'
anchor_scale: int = 4
min_level: int = 3
max_level: int = 7
num_scales: int = 3
freeze_bn: bool = False
freeze_blocks: List[int] = field(default_factory=lambda: [])
input_width: int = 512
input_height: int = 512
@dataclass
class AugmentationConfig:
"""Augmentation config."""
rand_hflip: bool = True
random_crop_min_scale: float = 0.1
random_crop_max_scale: float = 2.0
auto_color_distortion: bool = False
auto_translate_xy: bool = False
@dataclass
class DataConfig:
"""Data config."""
train_tfrecords: List[str] = field(default_factory=lambda: [])
train_dirs: List[str] = field(default_factory=lambda: []) # TODO
val_tfrecords: List[str] = field(default_factory=lambda: [])
val_dirs: List[str] = field(default_factory=lambda: []) # TODO
val_json_file: str = ""
num_classes: int = 91
max_instances_per_image: int = 200
skip_crowd_during_training: bool = True
use_fake_data: bool = False
loader: LoaderConfig = LoaderConfig()
augmentation: AugmentationConfig = AugmentationConfig()
@dataclass
class EvalConfig:
"""Eval config."""
batch_size: int = 8
num_samples: int = 5000
max_detections_per_image: int = 100
label_map: str = ''
max_nms_inputs: int = 5000
checkpoint: str = ''
trt_engine: Optional[str] = None
start_eval_epoch: int = 1
sigma: float = 0.5
results_dir: Optional[str] = None
@dataclass
class ExportConfig:
"""Export config."""
batch_size: int = 8
dynamic_batch_size: bool = True
min_score_thresh: float = 0.01
checkpoint: str = MISSING
onnx_file: str = MISSING
results_dir: Optional[str] = None
@dataclass
class CalibrationConfig:
"""Calibration config."""
cal_image_dir: str = ""
cal_cache_file: str = ""
cal_batch_size: int = 1
cal_batches: int = 1
@dataclass
class TrtConfig:
"""Trt config."""
data_type: str = "fp32"
max_workspace_size: int = 2 # in Gb
min_batch_size: int = 1
opt_batch_size: int = 1
max_batch_size: int = 1
calibration: CalibrationConfig = CalibrationConfig()
@dataclass
class GenTrtEngineConfig:
"""Gen TRT Engine experiment config."""
results_dir: Optional[str] = None
onnx_file: str = MISSING
trt_engine: Optional[str] = None
tensorrt: TrtConfig = TrtConfig()
@dataclass
class InferenceConfig:
"""Inference config."""
checkpoint: str = MISSING
trt_engine: Optional[str] = None
image_dir: str = MISSING
results_dir: Optional[str] = None
dump_label: bool = False
batch_size: int = 1
min_score_thresh: float = 0.3
label_map: str = ''
max_boxes_to_draw: int = 100
@dataclass
class PruneConfig:
"""Pruning config."""
checkpoint: str = MISSING
normalizer: str = 'max'
results_dir: Optional[str] = None
equalization_criterion: str = 'union'
granularity: int = 8
threshold: float = MISSING
min_num_filters: int = 16
excluded_layers: List[str] = field(default_factory=lambda: [])
@dataclass
class DatasetConvertConfig:
"""Dataset Convert config."""
image_dir: str = MISSING
annotations_file: str = MISSING
results_dir: str = MISSING
tag: str = ''
num_shards: int = 256
include_masks: bool = False
@dataclass
class ExperimentConfig:
"""Experiment config."""
train: TrainConfig = TrainConfig()
model: ModelConfig = ModelConfig()
evaluate: EvalConfig = EvalConfig()
dataset: DataConfig = DataConfig()
export: ExportConfig = ExportConfig()
inference: InferenceConfig = InferenceConfig()
prune: PruneConfig = PruneConfig()
dataset_convert: DatasetConvertConfig = DatasetConvertConfig()
gen_trt_engine: GenTrtEngineConfig = GenTrtEngineConfig()
encryption_key: Optional[str] = None
data_format: str = 'channels_last'
results_dir: str = MISSING
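# A spec file matching this schema might look like the following sketch
# (paths and values are illustrative):
#   gen_trt_engine:
#     onnx_file: /workspace/model.onnx
#     trt_engine: /workspace/model.engine
#     tensorrt:
#       data_type: int8
#       calibration:
#         cal_image_dir: /workspace/calib
#         cal_cache_file: /workspace/cal.bin
#         cal_batch_size: 8
#         cal_batches: 10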
| tao_deploy-main | nvidia_tao_deploy/cv/efficientdet_tf2/hydra_config/default_config.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""EfficientDet convert etlt model to TRT engine."""
import logging
import os
import tempfile
from nvidia_tao_deploy.cv.efficientdet_tf2.engine_builder import EfficientDetEngineBuilder
from nvidia_tao_deploy.cv.efficientdet_tf2.hydra_config.default_config import ExperimentConfig
from nvidia_tao_deploy.cv.common.decorators import monitor_status
from nvidia_tao_deploy.cv.common.hydra.hydra_runner import hydra_runner
from nvidia_tao_deploy.cv.common.utils import update_results_dir
from nvidia_tao_deploy.utils.decoding import decode_model
logging.basicConfig(format='%(asctime)s [TAO Toolkit] [%(levelname)s] %(name)s %(lineno)d: %(message)s',
level="INFO")
logger = logging.getLogger(__name__)
spec_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
@hydra_runner(
config_path=os.path.join(spec_root, "specs"),
config_name="experiment_spec", schema=ExperimentConfig
)
def main(cfg: ExperimentConfig) -> None:
"""Wrapper function for TRT engine conversion."""
cfg = update_results_dir(cfg, 'gen_trt_engine')
run_conversion(cfg=cfg)
@monitor_status(name='efficientdet_tf2', mode='gen_trt_engine')
def run_conversion(cfg: ExperimentConfig) -> None:
"""EfficientDet TRT convert."""
# decrypt etlt or use onnx
tmp_onnx_file, file_format = decode_model(cfg.gen_trt_engine.onnx_file, cfg.encryption_key)
if cfg.gen_trt_engine.trt_engine is not None or cfg.gen_trt_engine.tensorrt.data_type == 'int8':
if cfg.gen_trt_engine.trt_engine is None:
engine_handle, temp_engine_path = tempfile.mkstemp()
os.close(engine_handle)
output_engine_path = temp_engine_path
else:
output_engine_path = cfg.gen_trt_engine.trt_engine
builder = EfficientDetEngineBuilder(verbose=True,
workspace=cfg.gen_trt_engine.tensorrt.max_workspace_size,
max_batch_size=cfg.gen_trt_engine.tensorrt.max_batch_size,
is_qat=cfg.train.qat)
builder.create_network(tmp_onnx_file,
dynamic_batch_size=(cfg.gen_trt_engine.tensorrt.min_batch_size,
cfg.gen_trt_engine.tensorrt.opt_batch_size,
cfg.gen_trt_engine.tensorrt.max_batch_size),
file_format=file_format)
builder.create_engine(
output_engine_path,
cfg.gen_trt_engine.tensorrt.data_type,
calib_input=cfg.gen_trt_engine.tensorrt.calibration.cal_image_dir,
calib_cache=cfg.gen_trt_engine.tensorrt.calibration.cal_cache_file,
calib_num_images=cfg.gen_trt_engine.tensorrt.calibration.cal_batch_size * cfg.gen_trt_engine.tensorrt.calibration.cal_batches,
calib_batch_size=cfg.gen_trt_engine.tensorrt.calibration.cal_batch_size
)
logging.info("Export finished successfully.")
if __name__ == '__main__':
main()
| tao_deploy-main | nvidia_tao_deploy/cv/efficientdet_tf2/scripts/gen_trt_engine.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TAO Deploy TF2 EfficientDet scripts module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_deploy-main | nvidia_tao_deploy/cv/efficientdet_tf2/scripts/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Standalone TensorRT inference."""
import os
from PIL import Image
import logging
import numpy as np
from tqdm.auto import tqdm
from nvidia_tao_deploy.cv.efficientdet_tf2.inferencer import EfficientDetInferencer
from nvidia_tao_deploy.cv.efficientdet_tf2.hydra_config.default_config import ExperimentConfig
from nvidia_tao_deploy.cv.efficientdet_tf2.utils import get_label_dict, get_label_map
from nvidia_tao_deploy.cv.common.decorators import monitor_status
from nvidia_tao_deploy.cv.common.hydra.hydra_runner import hydra_runner
from nvidia_tao_deploy.cv.common.utils import update_results_dir
from nvidia_tao_deploy.utils.image_batcher import ImageBatcher
logging.basicConfig(format='%(asctime)s [TAO Toolkit] [%(levelname)s] %(name)s %(lineno)d: %(message)s',
level="INFO")
logger = logging.getLogger(__name__)
spec_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
@hydra_runner(
config_path=os.path.join(spec_root, "specs"),
config_name="experiment_spec", schema=ExperimentConfig
)
def main(cfg: ExperimentConfig) -> None:
"""Wrapper function for TRT engine inference."""
cfg = update_results_dir(cfg, 'inference')
run_inference(cfg=cfg)
@monitor_status(name='efficientdet_tf2', mode='inference')
def run_inference(cfg: ExperimentConfig) -> None:
"""EfficientDet TRT inference."""
trt_infer = EfficientDetInferencer(cfg.inference.trt_engine, batch_size=cfg.inference.batch_size)
# Inference data may not have labels, so use the image batcher.
batcher = ImageBatcher(cfg.inference.image_dir,
tuple(trt_infer._input_shape),
trt_infer.inputs[0]['dtype'],
preprocessor="EfficientDet")
output_annotate_root = os.path.join(cfg.results_dir, "images_annotated")
output_label_root = os.path.join(cfg.results_dir, "labels")
os.makedirs(output_annotate_root, exist_ok=True)
os.makedirs(output_label_root, exist_ok=True)
if cfg.inference.label_map and not os.path.exists(cfg.inference.label_map):
raise FileNotFoundError(f"Class map at {cfg.inference.label_map} does not exist.")
if str(cfg.inference.label_map).endswith('.yaml'):
inv_classes = get_label_map(cfg.inference.label_map)
elif str(cfg.inference.label_map).endswith('.txt'):
inv_classes = get_label_dict(cfg.inference.label_map)
else:
inv_classes = None
logger.debug("label_map was not provided. Hence, class predictions will not be displayed on the visualization.")
for batch, img_paths, scales in tqdm(batcher.get_batch(), total=batcher.num_batches, desc="Producing predictions"):
detections = trt_infer.infer(batch, scales)
y_pred_valid = np.concatenate([detections['detection_classes'][..., None],
detections['detection_scores'][..., None],
detections['detection_boxes']], axis=-1)
for img_path, pred in zip(img_paths, y_pred_valid):
# Load Image
img = Image.open(img_path)
# Convert xywh to xyxy
pred[:, 4:] += pred[:, 2:4]
bbox_img, label_strings = trt_infer.draw_bbox(img, pred, inv_classes, cfg.inference.min_score_thresh)
img_filename = os.path.basename(img_path)
bbox_img.save(os.path.join(output_annotate_root, img_filename))
# Store labels
filename, _ = os.path.splitext(img_filename)
label_file_name = os.path.join(output_label_root, filename + ".txt")
with open(label_file_name, "w", encoding="utf-8") as f:
for l_s in label_strings:
f.write(l_s)
logging.info("Finished inference.")
if __name__ == '__main__':
main()
| tao_deploy-main | nvidia_tao_deploy/cv/efficientdet_tf2/scripts/inference.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Standalone TensorRT inference."""
import os
import operator
import copy
import logging
import json
import six
import numpy as np
from tqdm.auto import tqdm
from nvidia_tao_deploy.cv.efficientdet_tf1.dataloader import EfficientDetCOCOLoader
from nvidia_tao_deploy.cv.efficientdet_tf2.inferencer import EfficientDetInferencer
from nvidia_tao_deploy.cv.efficientdet_tf2.hydra_config.default_config import ExperimentConfig
from nvidia_tao_deploy.cv.common.decorators import monitor_status
from nvidia_tao_deploy.cv.common.hydra.hydra_runner import hydra_runner
from nvidia_tao_deploy.cv.common.utils import update_results_dir
from nvidia_tao_deploy.metrics.coco_metric import EvaluationMetric
logging.basicConfig(format='%(asctime)s [TAO Toolkit] [%(levelname)s] %(name)s %(lineno)d: %(message)s',
level="INFO")
logger = logging.getLogger(__name__)
spec_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
@hydra_runner(
config_path=os.path.join(spec_root, "specs"),
config_name="experiment_spec", schema=ExperimentConfig
)
def main(cfg: ExperimentConfig) -> None:
"""Wrapper function for TRT engine evaluation."""
cfg = update_results_dir(cfg, 'evaluate')
run_evaluation(cfg=cfg)
@monitor_status(name='efficientdet_tf2', mode='evaluation')
def run_evaluation(cfg: ExperimentConfig) -> None:
"""EfficientDet TRT evaluation."""
eval_samples = cfg.evaluate.num_samples
eval_metric = EvaluationMetric(cfg.dataset.val_json_file, include_mask=False)
trt_infer = EfficientDetInferencer(cfg.evaluate.trt_engine)
dl = EfficientDetCOCOLoader(
cfg.dataset.val_json_file,
shape=trt_infer.inputs[0]['shape'],
dtype=trt_infer.inputs[0]['dtype'],
batch_size=cfg.evaluate.batch_size,
image_dir=cfg.dataset.val_dirs[0],
eval_samples=eval_samples)
predictions = {
'detection_scores': [],
'detection_boxes': [],
'detection_classes': [],
'source_id': [],
'image_info': [],
'num_detections': []
}
def evaluation_preds(preds):
# Essential to avoid modifying the source dict
_preds = copy.deepcopy(preds)
for k, _ in six.iteritems(_preds):
_preds[k] = np.concatenate(_preds[k], axis=0)
eval_results = eval_metric.predict_metric_fn(_preds)
return eval_results
for imgs, scale, source_id, labels in tqdm(dl, total=len(dl), desc="Producing predictions"):
image = np.array(imgs)
image_info = []
for i, label in enumerate(labels):
image_info.append([label[-1][0], label[-1][1], scale[i], label[-1][2], label[-1][3]])
image_info = np.array(image_info)
detections = trt_infer.infer(image, scale)
predictions['detection_classes'].append(detections['detection_classes'])
predictions['detection_scores'].append(detections['detection_scores'])
predictions['detection_boxes'].append(detections['detection_boxes'])
predictions['num_detections'].append(detections['num_detections'])
predictions['image_info'].append(image_info)
predictions['source_id'].append(source_id)
eval_results = evaluation_preds(preds=predictions)
for key, value in sorted(eval_results.items(), key=operator.itemgetter(0)):
eval_results[key] = float(value)
logging.info("%s: %.9f", key, value)
with open(os.path.join(cfg.results_dir, "results.json"), "w", encoding="utf-8") as f:
json.dump(eval_results, f)
logging.info("Finished evaluation.")
if __name__ == '__main__':
main()
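# Shapes accumulated into `predictions` above, for a batch of size N with
# D = max detections per image (sketch, inferred from the inferencer output):
#   detection_boxes:   (N, D, 4) in x1, y1, w, h after rescaling
#   detection_scores:  (N, D)
#   detection_classes: (N, D)
#   image_info:        (N, 5)
#   num_detections:    (N,)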
| tao_deploy-main | nvidia_tao_deploy/cv/efficientdet_tf2/scripts/evaluate.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Entrypoint module for efficientdet."""
| tao_deploy-main | nvidia_tao_deploy/cv/efficientdet_tf2/entrypoint/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TAO Deploy command line wrapper to invoke CLI scripts."""
import argparse
from nvidia_tao_deploy.cv.efficientdet_tf2 import scripts
from nvidia_tao_deploy.cv.common.entrypoint.entrypoint_hydra import get_subtasks, launch
def main():
"""Main entrypoint wrapper."""
# Create parser for a given task.
parser = argparse.ArgumentParser(
"efficientdet_tf2",
add_help=True,
description="Train Adapt Optimize Deploy entrypoint for TF2 efficientdet"
)
# Build list of subtasks by inspecting the scripts package.
subtasks = get_subtasks(scripts)
# Parse the arguments and launch the subtask.
launch(parser,
subtasks,
override_results_dir="results_dir",
override_threshold="inference.min_score_thresh",
override_key="key",
network="efficientdet_tf2")
if __name__ == '__main__':
main()
| tao_deploy-main | nvidia_tao_deploy/cv/efficientdet_tf2/entrypoint/efficientdet_tf2.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for ImageNet data preprocessing & prediction decoding."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import numpy as np
logger = logging.getLogger(__name__)
def _preprocess_numpy_input(x, data_format, mode, color_mode, img_mean, img_std, img_depth, **kwargs):
"""Preprocesses a Numpy array encoding a batch of images.
# Arguments
x: Input array, 3D or 4D.
data_format: Data format of the image array.
mode: One of "caffe", "tf" or "torch".
- caffe: will convert the images from RGB to BGR,
then will zero-center each color channel with
respect to the ImageNet dataset,
without scaling.
- tf: will scale pixels between -1 and 1,
sample-wise.
- torch: will scale pixels between 0 and 1 and then
will normalize each channel with respect to the
ImageNet dataset.
# Returns
Preprocessed Numpy array.
"""
assert img_depth in [8, 16], f"Unsupported image depth: {img_depth}, should be 8 or 16."
if not issubclass(x.dtype.type, np.floating):
x = x.astype(np.float32, copy=False)
if mode == 'tf':
if img_mean and len(img_mean) > 0:
logger.debug("image_mean is ignored in tf mode.")
if img_depth == 8:
x /= 127.5
else:
x /= 32767.5
x -= 1.
return x
if mode == 'torch':
override_mean = False
if (isinstance(img_mean, list) and (np.array(img_mean) > 1).any()) or (img_mean is None):
override_mean = True
logger.debug("image_mean is ignored if larger than 1 in torch mode and overwritten with [0.485, 0.456, 0.406].")
if img_depth == 8:
x /= 255.
else:
x /= 65535.
if color_mode == "rgb":
assert img_depth == 8, f"RGB images only support 8-bit depth, got {img_depth}."
if override_mean:
mean = [0.485, 0.456, 0.406]
std = [0.224, 0.224, 0.224]
else:
mean = img_mean
std = img_std
elif color_mode == "grayscale":
if not img_mean:
mean = [0.449]
std = [0.224]
else:
assert len(img_mean) == 1, "image_mean must be a list of a single value \
for gray image input."
mean = img_mean
if img_std is not None:
assert len(img_std) == 1, "img_std must be a list of a single value \
for gray image input."
std = img_std
else:
std = None
else:
raise NotImplementedError(f"Invalid color mode: {color_mode}")
else:
if color_mode == "rgb":
assert img_depth == 8, f"RGB images only support 8-bit depth, got {img_depth}."
if data_format == 'channels_first':
# 'RGB'->'BGR'
if x.ndim == 3:
x = x[::-1, ...]
else:
x = x[:, ::-1, ...]
else:
# 'RGB'->'BGR'
x = x[..., ::-1]
if not img_mean:
mean = [103.939, 116.779, 123.68]
else:
assert len(img_mean) == 3, "image_mean must be a list of 3 values \
for RGB input."
mean = img_mean
std = None
else:
if not img_mean:
if img_depth == 8:
mean = [117.3786]
else:
# 117.3786 * 256
mean = [30048.9216]
else:
assert len(img_mean) == 1, "image_mean must be a list of a single value \
for gray image input."
mean = img_mean
std = None
# Zero-center by mean pixel
if data_format == 'channels_first':
for idx in range(len(mean)):
if x.ndim == 3:
x[idx, :, :] -= mean[idx]
if std is not None:
x[idx, :, :] /= std[idx]
else:
x[:, idx, :, :] -= mean[idx]
if std is not None:
x[:, idx, :, :] /= std[idx]
else:
for idx in range(len(mean)):
x[..., idx] -= mean[idx]
if std is not None:
x[..., idx] /= std[idx]
return x
def preprocess_input(x, data_format="channels_first", mode='caffe', color_mode="rgb", img_mean=None, img_std=None, img_depth=8, **kwargs):
"""Preprocesses a tensor or Numpy array encoding a batch of images.
# Arguments
x: Input Numpy or symbolic tensor, 3D or 4D.
The preprocessed data is written over the input data
if the data types are compatible. To avoid this
behaviour, `numpy.copy(x)` can be used.
data_format: Data format of the image tensor/array.
mode: One of "caffe", "tf" or "torch".
- caffe: will convert the images from RGB to BGR,
then will zero-center each color channel with
respect to the ImageNet dataset,
without scaling.
- tf: will scale pixels between -1 and 1,
sample-wise.
- torch: will scale pixels between 0 and 1 and then
will normalize each channel with respect to the
ImageNet dataset.
# Returns
Preprocessed tensor or Numpy array.
# Raises
ValueError: In case of unknown `data_format` argument.
"""
return _preprocess_numpy_input(x, data_format=data_format,
mode=mode, color_mode=color_mode,
img_mean=img_mean,
img_std=img_std, img_depth=img_depth,
**kwargs)
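# --- Usage sketch (not part of the upstream file) ---
# 'caffe' mode on a CHW RGB image flips the channels to BGR and subtracts the
# ImageNet channel means without scaling.
def _example_preprocess():
    img = np.random.randint(0, 256, (3, 224, 224)).astype(np.float32)
    return preprocess_input(img, data_format="channels_first",
                            mode="caffe", color_mode="rgb")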
| tao_deploy-main | nvidia_tao_deploy/inferencer/preprocess_input.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TAO Deploy Inferencer"""
| tao_deploy-main | nvidia_tao_deploy/inferencer/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base utility functions for TensorRT inferencer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import pycuda.autoinit # noqa pylint: disable=unused-import
import pycuda.driver as cuda
import tensorrt as trt
class HostDeviceMem(object):
"""Clean data structure to handle host/device memory."""
def __init__(self, host_mem, device_mem, npshape, name: str = None):
"""Initialize a HostDeviceMem data structure.
Args:
host_mem (cuda.pagelocked_empty): A cuda.pagelocked_empty memory buffer.
device_mem (cuda.mem_alloc): Allocated memory pointer to the buffer in the GPU.
npshape (tuple): Shape of the input dimensions.
name (str): Optional name of the binding associated with this buffer.
Returns:
HostDeviceMem instance.
"""
self.host = host_mem
self.device = device_mem
self.numpy_shape = npshape
self.name = name
def __str__(self):
"""String containing pointers to the TRT Memory."""
return "Name: " + self.name + "\nHost:\n" + str(self.host) + "\nDevice:\n" + str(self.device)
def __repr__(self):
"""Return the canonical string representation of the object."""
return self.__str__()
def do_inference(context, bindings, inputs,
outputs, stream, batch_size=1,
execute_v2=False, return_raw=False):
"""Generalization for multiple inputs/outputs.
inputs and outputs are expected to be lists of HostDeviceMem objects.
"""
# Transfer input data to the GPU.
for inp in inputs:
cuda.memcpy_htod_async(inp.device, inp.host, stream)
# Run inference.
if execute_v2:
context.execute_async_v2(bindings=bindings, stream_handle=stream.handle)
else:
context.execute_async(batch_size=batch_size, bindings=bindings, stream_handle=stream.handle)
# Transfer predictions back from the GPU.
for out in outputs:
cuda.memcpy_dtoh_async(out.host, out.device, stream)
# Synchronize the stream
stream.synchronize()
if return_raw:
return outputs
# Return only the host outputs.
return [out.host for out in outputs]
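# End-to-end sketch of the typical call pattern (engine loading omitted;
# "batch" stands for a hypothetical preprocessed float32 array, and the
# buffers come from allocate_buffers defined below):
#
#   context = engine.create_execution_context()
#   inputs, outputs, bindings, stream = allocate_buffers(engine)
#   np.copyto(inputs[0].host, batch.ravel())
#   results = do_inference(context, bindings, inputs, outputs, stream,
#                          execute_v2=True)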
def allocate_buffers(engine, context=None, reshape=False):
"""Allocates host and device buffer for TRT engine inference.
This function is similar to the one in common.py, but
converts network outputs (which are np.float32) appropriately
before writing them to the Python buffer. This is needed because
TensorRT plugins don't support output type description, and
in our particular case, we use the NMS plugin as the network output.
Args:
engine (trt.ICudaEngine): TensorRT engine
context (trt.IExecutionContext): Context for dynamic shape engine
reshape (bool): To reshape host memory or not (FRCNN)
Returns:
inputs [HostDeviceMem]: engine input memory
outputs [HostDeviceMem]: engine output memory
bindings [int]: buffer to device bindings
stream (cuda.Stream): cuda stream for engine inference synchronization
"""
inputs = []
outputs = []
bindings = []
stream = cuda.Stream()
# The current NMS implementation in TRT only supports DataType.FLOAT, but
# that may change in the future, which could break this code when using
# lower precision [e.g. the NMS output would no longer be np.float32,
# even though binding_to_type assumes it is].
binding_to_type = {"Input": np.float32, "NMS": np.float32, "NMS_1": np.int32,
"BatchedNMS": np.int32, "BatchedNMS_1": np.float32,
"BatchedNMS_2": np.float32, "BatchedNMS_3": np.float32,
"generate_detections": np.float32,
"mask_head/mask_fcn_logits/BiasAdd": np.float32,
"softmax_1": np.float32,
"input_1": np.float32,
# D-DETR
"inputs": np.float32,
"pred_boxes": np.float32,
"pred_logits": np.float32}
for binding in engine:
binding_id = engine.get_binding_index(str(binding))
binding_name = engine.get_binding_name(binding_id)
if context:
size = trt.volume(context.get_binding_shape(binding_id))
dims = context.get_binding_shape(binding_id)
else:
size = trt.volume(engine.get_binding_shape(binding)) * engine.max_batch_size
dims = engine.get_binding_shape(binding)
# avoid errors when the binding is a scalar (e.g. YOLO BatchedNMS outputs)
size = engine.max_batch_size if size == 0 else size
if str(binding) in binding_to_type:
dtype = binding_to_type[str(binding)]
else:
dtype = trt.nptype(engine.get_binding_dtype(binding))
# Allocate host and device buffers
host_mem = cuda.pagelocked_empty(size, dtype)
# FRCNN requires host memory to be reshaped into target shape
if reshape and not engine.binding_is_input(binding):
if engine.has_implicit_batch_dimension:
target_shape = (engine.max_batch_size, dims[0], dims[1], dims[2])
else:
target_shape = dims
host_mem = host_mem.reshape(*target_shape)
device_mem = cuda.mem_alloc(host_mem.nbytes)
# Append the device buffer to device bindings.
bindings.append(int(device_mem))
# Append to the appropriate list.
if engine.binding_is_input(binding):
inputs.append(HostDeviceMem(host_mem, device_mem, dims, name=binding_name))
else:
outputs.append(HostDeviceMem(host_mem, device_mem, dims, name=binding_name))
return inputs, outputs, bindings, stream
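# For dynamic-shape engines, the execution context must carry concrete
# binding shapes before buffer sizes can be computed; a sketch assuming
# binding index 0 is the input and a hypothetical input shape:
#
#   context = engine.create_execution_context()
#   context.set_binding_shape(0, (1, 3, 544, 960))
#   inputs, outputs, bindings, stream = allocate_buffers(engine, context)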
| tao_deploy-main | nvidia_tao_deploy/inferencer/utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base TensorRT inferencer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from abc import ABC, abstractmethod
import tensorrt as trt
from PIL import ImageDraw
def load_engine(trt_runtime, engine_path):
"""Helper funtion to load an exported engine."""
with open(engine_path, 'rb') as f:
engine_data = f.read()
engine = trt_runtime.deserialize_cuda_engine(engine_data)
return engine
class TRTInferencer(ABC):
"""Base TRT Inferencer."""
def __init__(self, engine_path):
"""Init.
Args:
engine_path (str): The path to the serialized engine to load from disk.
"""
# Load TRT engine
self.logger = trt.Logger(trt.Logger.WARNING)
trt.init_libnvinfer_plugins(self.logger, namespace="")
# Initialize runtime needed for loading TensorRT engine from file
self.trt_runtime = trt.Runtime(self.logger)
self.engine = load_engine(self.trt_runtime, engine_path)
self.context = self.engine.create_execution_context()
assert self.engine
assert self.context
@abstractmethod
def infer(self, imgs, scales=None):
"""Execute inference on a batch of images.
The images should already be batched and preprocessed.
Memory copying to and from the GPU device will be performed here.
Args:
imgs (np.ndarray): A numpy array holding the image batch.
scales: The image resize scales for each image in this batch.
Default: No scale postprocessing applied.
Returns:
A nested list for each image in the batch and each detection in the list.
"""
detections = {}
return detections
@abstractmethod
def __del__(self):
"""Simple function to destroy tensorrt handlers."""
def draw_bbox(self, img, prediction, class_mapping, threshold=0.3):
"""Draws bbox on image and dump prediction in KITTI format
Args:
img (numpy.ndarray): Preprocessed image
prediction (numpy.ndarray): (N x 6) predictions
class_mapping (dict): key is the class index and value is the class string.
If set to None, no class predictions are displayed
threshold (float): value to filter predictions
"""
draw = ImageDraw.Draw(img)
color_list = ['Black', 'Red', 'Blue', 'Gold', 'Purple']
label_strings = []
for i in prediction:
if class_mapping and int(i[0]) not in class_mapping:
continue
if float(i[1]) < threshold:
continue
if isinstance(class_mapping, dict):
cls_name = class_mapping[int(i[0])]
else:
cls_name = str(int(i[0]))
# Default format is xyxy
x1, y1, x2, y2 = float(i[2]), float(i[3]), float(i[4]), float(i[5])
draw.rectangle(((x1, y1), (x2, y2)),
outline=color_list[int(i[0]) % len(color_list)])
# Background pad for the label text
draw.rectangle(((x1, y1), (x1 + 75, y1 + 10)),
fill=color_list[int(i[0]) % len(color_list)])
if isinstance(class_mapping, dict):
draw.text((x1, y1), f"{cls_name}: {i[1]:.2f}")
else:
# If label_map is not provided, do not show class prediction
draw.text((x1, y1), f"{i[1]:.2f}")
# Dump predictions
label_head = cls_name + " 0.00 0 0.00 "
bbox_string = f"{x1:.3f} {y1:.3f} {x2:.3f} {y2:.3f}"
label_tail = f" 0.00 0.00 0.00 0.00 0.00 0.00 0.00 {float(i[1]):.3f}\n"
label_string = label_head + bbox_string + label_tail
label_strings.append(label_string)
return img, label_strings
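# Each emitted label line follows the KITTI layout with a trailing confidence:
#   class truncated occluded alpha x1 y1 x2 y2 h w l X Y Z rotation_y score
# The truncation/occlusion/alpha and 3D fields are zero-filled here because
# only 2D detections are available.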
| tao_deploy-main | nvidia_tao_deploy/inferencer/trt_inferencer.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TAO Deploy Engine"""
| tao_deploy-main | nvidia_tao_deploy/engine/__init__.py |