# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dependency module."""
# pylint: disable=R1705,R0911
import torch
import torch.nn as nn
import typing
from enum import IntEnum
from numbers import Number
import warnings
from . import helpers, functional
__all__ = ["PruningPlan", "Dependency", "DependencyGraph"]
# Standard Modules
TORCH_CONV = nn.modules.conv._ConvNd
TORCH_BATCHNORM = nn.modules.batchnorm._BatchNorm
TORCH_LAYERNORM = nn.modules.normalization.LayerNorm
TORCH_PRELU = nn.PReLU
TORCH_LINEAR = nn.Linear
TORCH_EMBED = nn.Embedding
try:
from nvidia_tao_pytorch.core.modules.activation.activation import MultiheadAttention
TORCH_MHA = MultiheadAttention
except Exception:
TORCH_MHA = helpers.DummyMHA # for pytorch w/o MultiHeadAttention
class OPTYPE(IntEnum):
CONV = 0
BN = 1
LINEAR = 2
PRELU = 3
GROUP_CONV = 4
CONCAT = 5 # torch.cat
SPLIT = 6 # torch.split
CUSTOMIZED = 7 # customized module
ELEMENTWISE = 8 # element-wise add, sub, etc.
LN = 9 # nn.LayerNorm
EMBED = 10 # nn.Embedding
PARAMETER = 11 # nn.Parameter
MHA = 12
def _module2type(module):
if isinstance(module, TORCH_CONV):
if module.groups > 1:
return OPTYPE.GROUP_CONV
else:
return OPTYPE.CONV
elif isinstance(module, TORCH_BATCHNORM):
return OPTYPE.BN
elif isinstance(module, TORCH_PRELU):
return OPTYPE.PRELU
elif isinstance(module, TORCH_LINEAR):
return OPTYPE.LINEAR
elif isinstance(module, helpers._ConcatOp):
return OPTYPE.CONCAT
elif isinstance(module, helpers._SplitOp):
return OPTYPE.SPLIT
elif isinstance(module, TORCH_LAYERNORM):
return OPTYPE.LN
elif isinstance(module, TORCH_EMBED):
return OPTYPE.EMBED
elif isinstance(module, helpers._CustomizedOp):
return OPTYPE.CUSTOMIZED
elif isinstance(module, torch.nn.Parameter):
return OPTYPE.PARAMETER
elif isinstance(module, TORCH_MHA):
return OPTYPE.MHA
else:
return OPTYPE.ELEMENTWISE
def _infer_out_dim_from_node(node):
if node.type == OPTYPE.CONV or node.type == OPTYPE.GROUP_CONV:
return node.module.out_channels
elif node.type == OPTYPE.BN:
return node.module.num_features
elif node.type == OPTYPE.LN:
return node.module.normalized_shape[functional.prune_layernorm.pruning_dim]
elif node.type == OPTYPE.LINEAR:
return node.module.out_features
elif node.type == OPTYPE.PRELU:
if node.module.num_parameters == 1:
return None  # return None if oc cannot be inferred
else:
return node.module.num_parameters
elif node.type == OPTYPE.PARAMETER:
return node.module.shape[functional.prune_parameter.dim]
elif node.type == OPTYPE.CUSTOMIZED:
return node.customized_pruning_fn["get_out_ch_fn"](node.module)
elif node.type == OPTYPE.MHA:
return node.module.embed_dim
else:
return None  # return None if oc cannot be inferred
def _infer_in_dim_from_node(node):
if node.type == OPTYPE.CONV or node.type == OPTYPE.GROUP_CONV:
return node.module.in_channels
elif node.type == OPTYPE.BN:
return node.module.num_features
elif node.type == OPTYPE.LN:
return node.module.normalized_shape[functional.prune_layernorm.pruning_dim]
elif node.type == OPTYPE.LINEAR:
return node.module.in_features
elif node.type == OPTYPE.PRELU:
if node.module.num_parameters == 1:
return None  # return None if ic cannot be inferred
else:
return node.module.num_parameters
elif node.type == OPTYPE.PARAMETER:
return node.module.shape[functional.prune_parameter.dim]
elif node.type == OPTYPE.CUSTOMIZED:
return node.customized_pruning_fn["get_in_ch_fn"](node.module)
elif node.type == OPTYPE.MHA:
return node.module.embed_dim
else:
return None  # return None if ic cannot be inferred
######################################################
# Dependency & DependencyGraph
class Node(object):
def __init__(self, module, grad_fn, name=None):
self.module = module
self.grad_fn = grad_fn
self.inputs = []
self.outputs = []
self.dependencies = []
self._name = name
self.type = _module2type(module)
self.enable_index_transform = True
@property
def name(self):
if self._name is None:
return str(self.module)
else:
fmt = self._name
if self.type != OPTYPE.PARAMETER:
fmt += " ({})".format(str(self.module))
return fmt
def add_input(self, node):
if node not in self.inputs:
self.inputs.append(node)
def add_output(self, node):
if node not in self.outputs:
self.outputs.append(node)
def __repr__(self):
return "<Node: ({})>".format(self.name)
def __str__(self):
return "<Node: ({})>".format(self.name)
def details(self):
fmt = "<Node: ({})>\n".format(self.name)
fmt += " " * 4 + "IN:\n"
for in_node in self.inputs:
fmt += " " * 8 + "{}\n".format(in_node)
fmt += " " * 4 + "OUT:\n"
for out_node in self.outputs:
fmt += " " * 8 + "{}\n".format(out_node)
fmt += " " * 4 + "DEP:\n"
for dep in self.dependencies:
fmt += " " * 8 + "{}\n".format(dep)
fmt += "\tEnable_index_transform={}\n".format(self.enable_index_transform)
return fmt
class Dependency(object):
"""Dependency class."""
def __init__(
self,
trigger,
handler,
source: Node,
target: Node,
index_transform: typing.Callable = None,
):
"""Layer dependency in structured neural network pruning.
Args:
trigger (Callable or None): a pruning function that breaks the dependency
handler (Callable): a pruning function that fixes the broken dependency
source (Node): the node whose pruning triggers this dependency
target (Node): the coupled node that must be fixed by the handler
index_transform (Callable): a function to transform the pruning indices
"""
self.trigger = trigger
self.handler = handler
self.source = source
self.target = target
self.index_transform = index_transform
def __call__(self, idxs: list, dry_run: bool = False):
"""call function."""
result = self.handler(
self.target.module,
idxs,
dry_run=dry_run,
)
return result
def __repr__(self):
"""repr function."""
return str(self)
def __str__(self):
"""str function."""
return "[DEP] {} on {} => {} on {}".format(
"None" if self.trigger is None else self.trigger.__class__.__name__,
self.source.name,
self.handler.__class__.__name__,
self.target.name,
)
def is_triggered_by(self, pruning_fn):
"""Check whether this dependency is triggered by the given pruning function."""
return pruning_fn == self.trigger
def __eq__(self, other):
"""eq function."""
return (
(self.trigger == other.trigger) and
self.handler == other.handler and
self.target == other.target
)
class PruningPlan(object):
"""Pruning plan.
A pruning plan collects the user's pruning operation together with all the
coupled operations required by the dependency graph, stored as
(Dependency, indices) pairs that can be inspected or executed via exec().
"""
def __init__(self):
"""Initialize."""
self._plans = list()
self._metrics_scalar_sum = helpers.ScalarSum()
self._metrics_vector_sum = helpers.VectorSum()
def add_plan(self, dep, idxs):
"""Add plan."""
self._plans.append((dep, idxs))
def __getitem__(self, k):
"""getitem function."""
return self._plans[k]
@property
def plan(self):
"""return plan."""
return self._plans
def exec(self, dry_run=False):
"""exec."""
per_layer_metrics = []
for dep, idxs in self._plans:
_, metric_dict = dep(idxs, dry_run=dry_run)
per_layer_metrics.append(metric_dict)
return per_layer_metrics
def has_dep(self, dep):
"""Check if has dep."""
for _dep, _ in self._plans:
if dep == _dep:
return True
return False
def has_pruning_op(self, dep, idxs):
"""Check if has pruning op."""
for _dep, _idxs in self._plans:
if (
_dep.target == dep.target and
_dep.handler == dep.handler and
_idxs == idxs
):
return True
return False
def __len__(self):
"""len function."""
return len(self._plans)
def add_plan_and_merge(self, dep, idxs):
"""Add plan and merge."""
for i, (_dep, _idxs) in enumerate(self._plans):
if _dep.target == dep.target and _dep.handler == dep.handler:
self._plans[i] = (_dep, list(set(_idxs + idxs)))
return
self.add_plan(dep, idxs)
def __str__(self):
"""str function."""
fmt = ""
fmt += "\n" + "-" * 32 + "\n"
fmt += " " * 10 + "Pruning Plan"
fmt += "\n" + "-" * 32 + "\n"
self._metrics_scalar_sum.reset()
self._metrics_vector_sum.reset()
for i, (dep, idxs) in enumerate(self._plans):
_, metric_dict = dep(idxs, dry_run=True)
for k, v in metric_dict.items():
if helpers.is_scalar(v):
self._metrics_scalar_sum.update(k, v)
else:
self._metrics_vector_sum.update(k, v)
if i == 0:
fmt += "User pruning:\n"
fmt += "[ {}, Index={}, metric={}]\n".format(dep, idxs, metric_dict)
if i == 0:
fmt += "\nCoupled pruning:\n"
scalar_metric = self._metrics_scalar_sum.results()
vector_metric = self._metrics_vector_sum.results()
scalar_metric.update(vector_metric)
fmt += "\nMetric Sum: {}\n".format(scalar_metric)
fmt += "-" * 32 + "\n"
return fmt
class DependencyGraph(object):
"""DependencyGraph class."""
# can be updated by users
PRUNABLE_MODULES = [
TORCH_CONV,
TORCH_BATCHNORM,
TORCH_LINEAR,
TORCH_PRELU,
TORCH_LAYERNORM,
TORCH_EMBED,
TORCH_MHA,
]
PRUNING_FN = (
{ # functions that prune (1. input channels, 2. output channels)
OPTYPE.CONV: (
functional.prune_conv_in_channel,
functional.prune_conv_out_channel,
),
OPTYPE.BN: (functional.prune_batchnorm, functional.prune_batchnorm),
OPTYPE.PRELU: (functional.prune_prelu, functional.prune_prelu),
OPTYPE.LINEAR: (
functional.prune_linear_in_channel,
functional.prune_linear_out_channel,
),
OPTYPE.GROUP_CONV: (
functional.prune_group_conv,
functional.prune_group_conv,
),
OPTYPE.CONCAT: (helpers._prune_concat, helpers._prune_concat),
OPTYPE.SPLIT: (helpers._prune_split, helpers._prune_split),
OPTYPE.ELEMENTWISE: (
helpers._prune_elementwise_op,
helpers._prune_elementwise_op,
),
OPTYPE.LN: (functional.prune_layernorm, functional.prune_layernorm),
OPTYPE.EMBED: (functional.prune_embedding, functional.prune_embedding),
OPTYPE.PARAMETER: (functional.prune_parameter, functional.prune_parameter),
OPTYPE.MHA: (
functional.prune_multihead_attention,
functional.prune_multihead_attention,
),
OPTYPE.CUSTOMIZED: (None, None), # placeholder
}
)
RULES_FOR_SUCCEEDING_LAYERS = {}
RULES_FOR_PRECEDING_LAYERS = {}
for t1 in PRUNING_FN.keys():
for t2 in PRUNING_FN.keys():
RULES_FOR_SUCCEEDING_LAYERS[(t1, t2)] = (
PRUNING_FN[t1][1], # trigger
PRUNING_FN[t2][0], # handler
) # change in_channels of succeeding layers
RULES_FOR_PRECEDING_LAYERS[(t1, t2)] = (
PRUNING_FN[t1][0], # trigger
PRUNING_FN[t2][1], # handler
) # change out_channels of preceding layers
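# For example, the generated rule for a Conv layer followed by a BatchNorm is
# RULES_FOR_SUCCEEDING_LAYERS[(OPTYPE.CONV, OPTYPE.BN)] ==
# (functional.prune_conv_out_channel, functional.prune_batchnorm), i.e.
# pruning the conv's output channels triggers pruning of the BN's features.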
CUSTOMIZED_PRUNING_FN = {}
@property
def out_channel_pruners(self):
"""Out channel pruners."""
return [pruners[1] for pruners in self.PRUNING_FN.values() if pruners[1] is not None]
@property
def in_channel_pruners(self):
"""In channel pruners."""
return [pruners[0] for pruners in self.PRUNING_FN.values() if pruners[0] is not None]
def build_dependency(
self,
model: torch.nn.Module,
example_inputs: typing.Union[torch.Tensor, typing.Sequence],
output_transform: typing.Callable = None,
verbose: bool = True,
user_defined_parameters=None,
):
"""Build a dependency graph by tracing.
Args:
model (nn.Module): the model to be pruned.
example_inputs (torch.Tensor or List): dummy inputs for tracing.
output_transform (Callable): a function to transform network outputs.
verbose (bool): verbose mode.
user_defined_parameters (list): user-defined nn.Parameter objects (e.g. a learnable positional embedding in ViT) to include in the graph.
"""
self.verbose = verbose
self._module2name = {module: name for (name, module) in model.named_modules()}
# user-defined nn.Parameters like the learnable pos_emb in ViT
if user_defined_parameters is None:
user_defined_parameters = []
self.user_defined_parameters = user_defined_parameters
# build dependency graph by tracing
self.module2node = self._trace(
model, example_inputs, output_transform=output_transform
)
self._build_dependency(self.module2node)
self.update_index()
return self
def register_customized_layer(
self,
layer_type,
in_ch_pruning_fn,
out_ch_pruning_fn,
get_in_ch_fn,
get_out_ch_fn,
):
"""Register a customized layer for pruning.
Args:
layer_type (class): the class of the customized layer
in_ch_pruning_fn (Callable): a function that prunes channels/dimensions of the input tensor
out_ch_pruning_fn (Callable): a function that prunes channels/dimensions of the output tensor
get_in_ch_fn (Callable): estimates the number of input channels. Return None if the layer does not change the tensor shape.
get_out_ch_fn (Callable): estimates the number of output channels. Return None if the layer does not change the tensor shape.
"""
self.CUSTOMIZED_PRUNING_FN[layer_type] = {
"in_ch_pruning_fn": in_ch_pruning_fn,
"out_ch_pruning_fn": out_ch_pruning_fn,
"get_in_ch_fn": get_in_ch_fn,
"get_out_ch_fn": get_out_ch_fn,
}
self.PRUNABLE_MODULES.append(layer_type)
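# Illustrative registration of a customized layer (a hedged sketch: MyGate and
# prune_my_gate below are hypothetical, not part of this package). Customized
# pruning functions must follow the same protocol as the built-in ones, i.e.
# fn(layer, idxs, dry_run=False) -> (layer, metric_dict):
#
#   def prune_my_gate(layer, idxs, inplace=True, dry_run=False):
#       if dry_run:
#           return layer, {"#params": len(idxs)}
#       keep = [i for i in range(layer.scale.shape[0]) if i not in idxs]
#       layer.scale = torch.nn.Parameter(layer.scale.data[keep])
#       return layer, {"#params": len(idxs)}
#
#   DG.register_customized_layer(
#       MyGate,
#       in_ch_pruning_fn=prune_my_gate,
#       out_ch_pruning_fn=prune_my_gate,
#       get_in_ch_fn=lambda m: m.scale.shape[0],
#       get_out_ch_fn=lambda m: m.scale.shape[0],
#   )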
def check_pruning_plan(self, plan):
"""Check pruning plan."""
for dep, idxs in plan.plan:
if dep.handler in (
functional.prune_conv_out_channel,
functional.prune_batchnorm,
functional.prune_linear_out_channel,
functional.prune_group_conv,
):
prunable_chs = count_prunable_out_channels(dep.target.module)
if prunable_chs <= len(idxs):
return False
if dep.handler in (
functional.prune_conv_in_channel,
functional.prune_linear_in_channel,
):
prunable_in_chs = count_prunable_in_channels(dep.target.module)
if prunable_in_chs <= len(idxs):
return False
return True
def get_pruning_plan(
self,
module: nn.Module,
pruning_fn: typing.Callable,
idxs: typing.Union[list, tuple],
):
"""Get a pruning plan from the dependency graph, according to user's pruning operations.
Args:
module (nn.Module): the module to be pruned.
pruning_fn (Callable): the pruning function.
idxs (list or tuple): the indices of parameters to be pruned.
"""
if isinstance(module, TORCH_CONV) and module.groups > 1:
pruning_fn = functional.prune_group_conv
if isinstance(idxs, Number):
idxs = [idxs]
self.update_index()
plan = PruningPlan()
# the user pruning operation
root_node = self.module2node[module]
plan.add_plan(
Dependency(pruning_fn, pruning_fn, source=root_node, target=root_node), idxs
)
visited = set()
def _fix_dependency_graph_non_recursive(node, fn, indices):
processing_stack = [(node, fn, indices)]
while len(processing_stack) > 0:
node, fn, indices = processing_stack.pop(-1)
# print(node in visited)
visited.add(node)
for dep in node.dependencies:
if dep.is_triggered_by(fn):
new_indices = (
dep.index_transform(indices)
if dep.index_transform is not None
else indices
)
if len(new_indices) == 0:
continue
if dep.target in visited and plan.has_pruning_op(
dep, new_indices
):
continue
else:
plan.add_plan(dep, new_indices)
processing_stack.append(
(dep.target, dep.handler, new_indices)
)
_fix_dependency_graph_non_recursive(root_node, pruning_fn, idxs)
# merge pruning ops
merged_plan = PruningPlan()
for dep, idxss in plan.plan:
merged_plan.add_plan_and_merge(dep, idxss)
return merged_plan
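# Typical end-to-end usage of the two public entry points above (a hedged
# sketch; resnet18 is only a stand-in for any traceable model):
#
#   import torchvision
#   model = torchvision.models.resnet18()
#   DG = DependencyGraph()
#   DG.build_dependency(model, example_inputs=torch.randn(1, 3, 224, 224))
#   plan = DG.get_pruning_plan(model.conv1, functional.prune_conv_out_channel, idxs=[0, 2, 6])
#   if DG.check_pruning_plan(plan):
#       plan.exec()  # prunes conv1 together with every coupled layer (bn1, downstream convs, ...)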
def _build_dependency(self, module2node):
for _, node in module2node.items():
for in_node in node.inputs:
preceding_rule = self.RULES_FOR_PRECEDING_LAYERS.get(
(node.type, in_node.type), None
)
if preceding_rule is not None:
trigger = preceding_rule[0]
handler = preceding_rule[1]
if trigger is None:
trigger = self.CUSTOMIZED_PRUNING_FN[type(node.module)][
"in_ch_pruning_fn"
]
if handler is None:
handler = self.CUSTOMIZED_PRUNING_FN[type(in_node.module)][
"out_ch_pruning_fn"
]
dep = Dependency(
trigger=trigger, handler=handler, source=node, target=in_node
)
node.dependencies.append(dep)
for out_node in node.outputs:
succeeding_rule = self.RULES_FOR_SUCCEEDING_LAYERS.get(
(node.type, out_node.type), None
)
if succeeding_rule is not None:
trigger = succeeding_rule[0]
handler = succeeding_rule[1]
if trigger is None:
trigger = self.CUSTOMIZED_PRUNING_FN[type(node.module)][
"out_ch_pruning_fn"
]
if handler is None:
handler = self.CUSTOMIZED_PRUNING_FN[type(out_node.module)][
"in_ch_pruning_fn"
]
dep = Dependency(
trigger=trigger, handler=handler, source=node, target=out_node
)
node.dependencies.append(dep)
def _trace(self, model, example_inputs, output_transform):
model.eval()
gradfn2module = {}
visited = {}
def _record_grad_fn(module, inputs, outputs):
if module not in visited:
visited[module] = 1
else:
visited[module] += 1
if isinstance(outputs, tuple):
outputs = outputs[0]
gradfn2module[outputs.grad_fn] = module
hooks = [
m.register_forward_hook(_record_grad_fn)
for m in model.modules()
if isinstance(m, tuple(self.PRUNABLE_MODULES))
]
# Feed forward and record gradient functions of prunable modules
if isinstance(example_inputs, (tuple, list)):
out = model(*example_inputs)
elif isinstance(example_inputs, dict):
out = model(**example_inputs)
elif isinstance(example_inputs, torch.Tensor):
out = model(example_inputs)
for hook in hooks:
hook.remove()
# for recursive models or layers
reused = [m for (m, count) in visited.items() if count > 1]
# build graph
if output_transform is not None:
out = output_transform(out)
module2node = {}
for o in flatten_as_list(out):
self._build_graph(module2node, o.grad_fn, gradfn2module, reused)
# BUG: Special case for torch.cat in ViT,
# where concatenation is not applied to the feature dim.
# Notably, this is a bad practice and will be fixed in a future version.
# Some problems may occur if your vision transformer has many complicated torch.cat ops.
if len(self.user_defined_parameters) > 0:
for node in module2node.values():
if node.type in (OPTYPE.CONCAT, OPTYPE.SPLIT):
stack = [node]
while len(stack) > 0:
n = stack.pop(-1)
if n.type == OPTYPE.PARAMETER and len(n.module.shape) == 3:
node.enable_index_transform = False
break
else:
stack.extend(n.inputs)
return module2node
def _build_graph(self, module2node, grad_fn_root, gradfn2module, reused):
def create_node_if_not_exists(grad_fn):
module = gradfn2module.get(grad_fn, None)
if module is not None and module in module2node and module not in reused:
return module2node[module]
if module is None: # unseen modules
if not hasattr(grad_fn, "name"):
# we treat unknown modules as element-wise modules
module = helpers._ElementWiseOp("Unknown")
if self.verbose:
warnings.warn(
"[Warning] Unrecognized operation {} will be treated as an element-wise op".format(str(grad_fn))
)
elif "catbackward" in grad_fn.name().lower(): # concat op
module = helpers._ConcatOp()
elif "split" in grad_fn.name().lower():
module = helpers._SplitOp()
else:
# treat other ops as element-wise ones
module = helpers._ElementWiseOp(grad_fn.name())
gradfn2module[grad_fn] = module
if module not in module2node: # create nodes
node = Node(
module=module,
grad_fn=grad_fn,
name=self._module2name.get(module, None),
)
if (
type(module) in self.CUSTOMIZED_PRUNING_FN.keys()
): # mark it as a customized OP
node.type = OPTYPE.CUSTOMIZED
node.customized_pruning_fn = self.CUSTOMIZED_PRUNING_FN[
type(module)
]
module2node[module] = node
else:
node = module2node[module]
return node
# non-recursive graph construction
processing_stack = [grad_fn_root]
visited = set()
while len(processing_stack) > 0:
grad_fn = processing_stack.pop(-1)
if grad_fn in visited:
continue
node = create_node_if_not_exists(grad_fn=grad_fn)
if hasattr(grad_fn, "next_functions"):
for f in grad_fn.next_functions:
if f[0] is not None:
if (
hasattr(f[0], "name") and
"accumulategrad" in f[0].name().lower()
):
is_user_defined_param = False
for (j, p) in enumerate(self.user_defined_parameters):
if f[0].variable is p:
is_user_defined_param = True
gradfn2module[f[0]] = p
self._module2name[p] = "UserParameter_{}".format(j)
if not is_user_defined_param:
continue
input_node = create_node_if_not_exists(f[0])
node.add_input(input_node)
input_node.add_output(node)
processing_stack.append(f[0])
visited.add(grad_fn)
return module2node
def update_index(self):
"""Update index."""
for _, node in self.module2node.items():
if node.type == OPTYPE.LINEAR:
self._set_fc_index_transform(node)
if node.type == OPTYPE.CONCAT:
self._set_concat_index_transform(node)
if node.type == OPTYPE.SPLIT:
self._set_split_index_transform(node)
def _set_fc_index_transform(self, fc_node: Node):
if fc_node.type != OPTYPE.LINEAR:
return
fc_in_features = fc_node.module.in_features
feature_channels = 0
for n in fc_node.inputs:
feature_channels = _infer_out_dim_from_node_by_recursion(n)
if feature_channels > 0:
break
if (
feature_channels <= 0
): # the first layer: https://github.com/VainF/Torch-Pruning/issues/21
return
stride = fc_in_features // feature_channels
if stride > 1 and fc_in_features % feature_channels == 0:
for in_node in fc_node.inputs:
for dep in fc_node.dependencies:
if dep.target == in_node:
dep.index_transform = helpers._FlattenIndexTransform(
stride=stride, reverse=True
)
for dep in in_node.dependencies:
if dep.target == fc_node:
dep.index_transform = helpers._FlattenIndexTransform(
stride=stride, reverse=False
)
def _set_concat_index_transform(self, cat_node: Node):
if cat_node.type != OPTYPE.CONCAT:
return
chs = []
for n in cat_node.inputs:
chs.append(_infer_out_dim_from_node_by_recursion(n))
offsets = [0]
for ch in chs:
offsets.append(offsets[-1] + ch)
cat_node.module.offsets = offsets
# no transform if the concat dim is different from the feature dim
for i, in_node in enumerate(cat_node.inputs):
for dep in cat_node.dependencies:
if dep.target == in_node:
if cat_node.enable_index_transform:
dep.index_transform = helpers._ConcatIndexTransform(
offset=offsets[i: i + 2], reverse=True
)
for dep in in_node.dependencies:
if dep.target == cat_node:
if cat_node.enable_index_transform:
dep.index_transform = helpers._ConcatIndexTransform(
offset=offsets[i: i + 2], reverse=False
)
def _set_split_index_transform(self, split_node: Node):
if split_node.type != OPTYPE.SPLIT:
return
chs = []
for n in split_node.outputs:
chs.append(_infer_in_dim_from_node_by_recursion(n))
offsets = [0]
for ch in chs:
offsets.append(offsets[-1] + ch)
split_node.module.offsets = offsets
for i, out_node in enumerate(split_node.outputs):
for dep in split_node.dependencies:
if dep.target == out_node:
if split_node.enable_index_transform:
dep.index_transform = helpers._SplitIndexTransform(
offset=offsets[i: i + 2], reverse=False
)
for dep in out_node.dependencies:
if dep.target == split_node:
if split_node.enable_index_transform:
dep.index_transform = helpers._SplitIndexTransform(
offset=offsets[i: i + 2], reverse=True
)
def _infer_out_dim_from_node_by_recursion(node):
ch = _infer_out_dim_from_node(node)
if ch is None:
ch = 0
for in_node in node.inputs:
if node.type == OPTYPE.CONCAT:
ch += _infer_out_dim_from_node_by_recursion(in_node)
else:
ch = _infer_out_dim_from_node_by_recursion(in_node)
return ch
def _infer_in_dim_from_node_by_recursion(node):
ch = _infer_in_dim_from_node(node)
if ch is None:
ch = 0
for out_node in node.outputs:
if node.type == OPTYPE.SPLIT:
ch += _infer_in_dim_from_node_by_recursion(out_node)
else:
ch = _infer_in_dim_from_node_by_recursion(out_node)
return ch
def flatten_as_list(obj):
if isinstance(obj, torch.Tensor):
return [obj]
elif isinstance(obj, (list, tuple)):
flattened_list = []
for sub_obj in obj:
flattened_list.extend(flatten_as_list(sub_obj))
return flattened_list
elif isinstance(obj, dict):
flattened_list = []
for sub_obj in obj.values():
flattened_list.extend(flatten_as_list(sub_obj))
return flattened_list
else:
return obj
def count_prunable_out_channels(module):
if isinstance(module, TORCH_CONV):
return module.weight.shape[0]
elif isinstance(module, TORCH_LINEAR):
return module.out_features
elif isinstance(module, TORCH_BATCHNORM):
return module.num_features
elif isinstance(module, TORCH_PRELU):
if len(module.weight) == 1:
return 0
else:
return len(module.weight)
else:
return 0
def count_prunable_in_channels(module):
if isinstance(module, TORCH_CONV):
return module.weight.shape[1]
elif isinstance(module, TORCH_LINEAR):
return module.in_features
elif isinstance(module, TORCH_BATCHNORM):
return module.num_features
elif isinstance(module, TORCH_PRELU):
if len(module.weight) == 1:
return 0
else:
return len(module.weight)
else:
return 0
# ---- end of file: nvidia_tao_pytorch/cv/ocdnet/pruning/torch_pruning/dependency.py (repo: tao_pytorch_backend-main) ----
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper module."""
# pylint: disable=W0612,W0235
import torch
import torch.nn as nn
from . import functional
import numpy as np
from operator import add
from numbers import Number
def is_scalar(x):
"""Check if is scalar."""
if isinstance(x, torch.Tensor):
return len(x.shape) == 0
if isinstance(x, Number):
return True
if isinstance(x, (list, tuple)):
return False
return False
class _CustomizedOp(nn.Module):
def __init__(self, op_class):
super(_CustomizedOp, self).__init__()
self.op_cls = op_class
def __repr__(self):
return "CustomizedOp({})".format(str(self.op_cls))
######################################################
# Dummy module
class _ConcatOp(nn.Module):
def __init__(self):
super(_ConcatOp, self).__init__()
self.offsets = None
def __repr__(self):
return "_ConcatOp({})".format(self.offsets)
class DummyMHA(nn.Module):
"""DummyMHA class."""
def __init__(self):
"""Initialize."""
super(DummyMHA, self).__init__()
class _SplitOp(nn.Module):
def __init__(self):
super(_SplitOp, self).__init__()
self.offsets = None
def __repr__(self):
return "_SplitOp({})".format(self.offsets)
class _ElementWiseOp(nn.Module):
def __init__(self, grad_fn):
super(_ElementWiseOp, self).__init__()
self._grad_fn = grad_fn
def __repr__(self):
return "_ElementWiseOp({})".format(self._grad_fn)
######################################################
# Dummy Pruning fn
class DummyPruner(functional.BasePruner):
"""Dummy pruning class."""
def __call__(self, layer, *args, **kargs):
"""Call function."""
return layer, {}
def calc_nparams_to_prune(self, layer, idxs):
"""Calculate nparams to prune."""
return 0
def prune(self, layer, idxs):
"""Pruning."""
return layer
class ConcatPruner(DummyPruner):
"""ConcatPruner class."""
pass
class SplitPruner(DummyPruner):
"""SplitPruner class."""
pass
class ElementWiseOpPruner(DummyPruner):
"""ElementWiseOp Pruner class."""
pass
_prune_concat = ConcatPruner()
_prune_split = SplitPruner()
_prune_elementwise_op = ElementWiseOpPruner()
######################################################
# Index transform
class _FlattenIndexTransform(object):
def __init__(self, stride=1, reverse=False):
self._stride = stride
self.reverse = reverse
def __call__(self, idxs):
new_idxs = []
if self.reverse is True:
for i in idxs:
new_idxs.append(i // self._stride)
new_idxs = list(set(new_idxs))
else:
for i in idxs:
new_idxs.extend(list(range(i * self._stride, (i + 1) * self._stride)))
return new_idxs
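# A quick self-check of the flatten transform above (illustrative only, not
# used elsewhere): pruning channel 2 of a feature map that is later flattened
# with stride 4 corresponds to flattened indices 8..11, and the reverse
# transform maps them back to channel 2.
def _flatten_index_transform_example():
    forward = _FlattenIndexTransform(stride=4, reverse=False)
    backward = _FlattenIndexTransform(stride=4, reverse=True)
    assert forward([2]) == [8, 9, 10, 11]
    assert sorted(backward([8, 9, 10, 11])) == [2]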
class _ConcatIndexTransform(object):
def __init__(self, offset, reverse=False):
self.offset = offset
self.reverse = reverse
def __call__(self, idxs):
if self.reverse is True:
new_idxs = [
i - self.offset[0]
for i in idxs
if (self.offset[0] <= i < self.offset[1])
]
else:
new_idxs = [i + self.offset[0] for i in idxs]
return new_idxs
class _SplitIndexTransform(object):
def __init__(self, offset, reverse=False):
self.offset = offset
self.reverse = reverse
def __call__(self, idxs):
if self.reverse is True:
new_idxs = [i + self.offset[0] for i in idxs]
else:
new_idxs = [
i - self.offset[0]
for i in idxs
if (self.offset[0] <= i < self.offset[1])
]
return new_idxs
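# Worked example for the concat transform above (illustrative only; the split
# transform is simply its mirror image): suppose the second input of a
# torch.cat occupies channels 4..9 of the concatenated tensor, i.e.
# offset == [4, 10].
def _concat_index_transform_example():
    to_local = _ConcatIndexTransform(offset=[4, 10], reverse=True)
    to_global = _ConcatIndexTransform(offset=[4, 10], reverse=False)
    assert to_local([5, 9, 12]) == [1, 5]   # index 12 belongs to another input
    assert to_global([1, 5]) == [5, 9]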
class _GroupConvIndexTransform(object):
def __init__(self, in_channels, out_channels, groups, reverse=False):
self.in_channels = in_channels
self.out_channels = out_channels
self.groups = groups
self.reverse = reverse
def __call__(self, idxs):
# NOTE: this transform is unused and appears incomplete: `self.offset` is
# never defined and the forward branch only builds a histogram. Fall back
# to returning the indices unchanged so that a call does not crash.
if self.reverse is True:
new_idxs = [i + self.offset[0] for i in idxs]
else:
group_histgram = np.histogram(  # noqa: F841
idxs, bins=self.groups, range=(0, self.out_channels)
)
new_idxs = idxs
return new_idxs
class GConv(nn.Module):
"""GConv class."""
def __init__(self, gconv):
"""Initialize."""
super(GConv, self).__init__()
self.groups = gconv.groups
self.convs = nn.ModuleList()
oc_size = gconv.out_channels // self.groups
ic_size = gconv.in_channels // self.groups
for _ in range(self.groups):
self.convs.append(
nn.Conv2d(
in_channels=ic_size,  # each per-group conv maps ic_size input channels
out_channels=oc_size,  # to oc_size output channels
kernel_size=gconv.kernel_size,
stride=gconv.stride,
padding=gconv.padding,
dilation=gconv.dilation,
groups=1,
bias=gconv.bias is not None,
padding_mode=gconv.padding_mode,
)
)
# copy parameters
gconv_weight = gconv.weight
for (i, conv) in enumerate(self.convs):
conv.weight.data = gconv_weight.data[oc_size * i: oc_size * (i + 1)]
if gconv.bias is not None:
conv.bias.data = gconv.bias.data[oc_size * i: oc_size * (i + 1)]
def forward(self, x):
"""Forward."""
split_sizes = [conv.in_channels for conv in self.convs]
xs = torch.split(x, split_sizes, dim=1)
out = torch.cat([conv(xi) for (conv, xi) in zip(self.convs, xs)], dim=1)
return out
def gconv2convs(module):
"""GConv to convs."""
new_module = module
if (
isinstance(module, nn.Conv2d) and
module.groups > 1 and
module.groups != module.in_channels
):
new_module = GConv(module)
for name, child in module.named_children():
new_module.add_module(name, gconv2convs(child))
return new_module
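# Minimal usage sketch of gconv2convs (illustrative only): grouped convolutions
# are replaced by per-group nn.Conv2d layers inside a GConv wrapper so that
# standard channel pruning can be applied to each group independently.
def _gconv2convs_example():
    net = nn.Sequential(nn.Conv2d(8, 16, 3, padding=1, groups=4), nn.ReLU())
    net = gconv2convs(net)  # net[0] is now a GConv holding 4 independent convs
    out = net(torch.randn(1, 8, 14, 14))
    return out.shape  # still (1, 16, 14, 14)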
class ScalarSum:
"""ScalarSum class."""
def __init__(self):
"""Initialize."""
self._results = {}
def update(self, metric_name, metric_value):
"""Update."""
if metric_name not in self._results:
self._results[metric_name] = 0
self._results[metric_name] += metric_value
def results(self):
"""Return results."""
return self._results
def reset(self):
"""Reset."""
self._results = {}
class VectorSum:
"""VectorSum class."""
def __init__(self):
"""Initialize."""
self._results = {}
def update(self, metric_name, metric_value):
"""Update."""
if metric_name not in self._results:
# First observation: record it and return to avoid double counting below.
self._results[metric_name] = metric_value
return
if isinstance(metric_value, torch.Tensor):
self._results[metric_name] += metric_value
elif isinstance(metric_value, list):
self._results[metric_name] = list(
map(add, self._results[metric_name], metric_value)
)
def results(self):
"""Return results."""
return self._results
def reset(self):
"""Reset."""
self._results = {}
# ---- end of file: nvidia_tao_pytorch/cv/ocdnet/pruning/torch_pruning/helpers.py (repo: tao_pytorch_backend-main) ----
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""magnitude based pruner module."""
from .basepruner import LocalPruner, GlobalPruner
class LocalMagnitudePruner(LocalPruner):
"""Local Magnitude Pruner class."""
pass
class GlobalMagnitudePruner(GlobalPruner):
"""Global Magnitude Pruner class."""
pass
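# A minimal, self-contained sketch of driving these pruners (illustrative
# only). The `l1_importance` callable is a hypothetical stand-in for a real
# importance criterion: it scores each output channel of the plan's root
# layer by the L1 norm of its filter.
def _local_magnitude_pruning_example():
    import torch
    import torch.nn as nn

    model = nn.Sequential(
        nn.Conv2d(3, 16, 3, padding=1),
        nn.BatchNorm2d(16),
        nn.ReLU(),
        nn.Conv2d(16, 8, 3, padding=1),
    )

    def l1_importance(plan):
        root = plan[0][0].target.module  # the layer the user asked to prune
        w = root.weight.detach()
        return w.abs().sum(dim=tuple(range(1, w.dim())))  # one score per output channel

    pruner = LocalMagnitudePruner(
        model,
        example_inputs=torch.randn(1, 3, 32, 32),
        importance=l1_importance,
        ch_sparsity=0.5,               # remove ~50% of the prunable channels
        ignored_layers=[model[3]],     # keep the final conv's outputs intact
        total_steps=1,
    )
    pruner.step()
    return model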
# ---- end of file: nvidia_tao_pytorch/cv/ocdnet/pruning/torch_pruning/pruner/magnitude_based_pruner.py (repo: tao_pytorch_backend-main) ----
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""basepruner module."""
from .. import dependency, functional, utils
import abc
import torch
def linear_scheduler(layer_ch_sparsity, steps):
"""linear scheduler."""
return [((i + 1) / float(steps)) * layer_ch_sparsity for i in range(steps)]
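# For example, linear_scheduler(0.5, 5) returns [0.1, 0.2, 0.3, 0.4, 0.5]:
# the target channel sparsity is reached linearly over five pruning steps.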
class MetaPruner(abc.ABC):
"""Meta pruner class."""
def __init__(
self,
model,
example_inputs,
importance,
total_steps=1,
pruning_rate_scheduler=linear_scheduler,
ch_sparsity=0.5,
layer_ch_sparsity=None,
ignored_layers=None,
user_defined_parameters=None,
output_transform=None,
round_to=None,
):
"""Initialize."""
self.model = model
self.importance = importance
self.ch_sparsity = ch_sparsity
self.round_to = round_to
self.layer_ch_sparsity = layer_ch_sparsity if layer_ch_sparsity is not None else {}
self.DG = dependency.DependencyGraph().build_dependency(
model,
example_inputs=example_inputs,
output_transform=output_transform,
user_defined_parameters=user_defined_parameters,
)
if ignored_layers is None:
ignored_layers = []
self.ignored_layers = ignored_layers
self.total_steps = total_steps
self.current_step = 0
self.pruning_rate_scheduler = pruning_rate_scheduler
self.layer_init_out_ch = {}
self.layer_init_in_ch = {}
self.per_step_ch_sparsity = {}
for m in self.model.modules():
if isinstance(m, (dependency.TORCH_CONV, dependency.TORCH_LINEAR)):
self.layer_init_out_ch[m] = utils.count_prunable_out_channels(m)
self.layer_init_in_ch[m] = utils.count_prunable_in_channels(m)
self.per_step_ch_sparsity[m] = self.pruning_rate_scheduler(
self.ch_sparsity, self.total_steps
)
# override
for m in self.model.modules():
if m in self.layer_ch_sparsity:
sublayer_ch_sparsity = self.layer_ch_sparsity[m]
for mi in m.modules():
if isinstance(mi, (dependency.TORCH_CONV, dependency.TORCH_LINEAR)):
self.per_step_ch_sparsity[mi] = self.pruning_rate_scheduler(
sublayer_ch_sparsity, self.total_steps
)
def reset(self):
"""reset."""
self.current_step = 0
def regularize(self, model):
"""regularize."""
pass
def get_all_plans(self):
"""get all plans."""
visited_layers = []
for m in self.model.modules():
if m in self.ignored_layers:
continue
if isinstance(m, dependency.TORCH_CONV):
pruning_fn = functional.prune_conv_out_channel
elif isinstance(m, dependency.TORCH_LINEAR):
pruning_fn = functional.prune_linear_out_channel
else:
continue
if m in visited_layers and pruning_fn in self.DG.out_channel_pruners:
continue
layer_channels = utils.count_prunable_out_channels(m)
plan = self.DG.get_pruning_plan(
m, pruning_fn, list(range(layer_channels))
)
active_plan = True
for dep, _ in plan:
module = dep.target.module
pruning_fn = dep.handler
if pruning_fn in self.DG.out_channel_pruners:
visited_layers.append(module)
if module in self.ignored_layers:
active_plan = False
if active_plan:
yield plan
@abc.abstractmethod
def step(self):
"""step."""
pass
def estimate_importance(self, plan):
"""estimate importance."""
return self.importance(plan)
class LocalPruner(MetaPruner):
"""Local pruner class."""
def step(self):
"""step."""
if self.current_step == self.total_steps:
return
for plan in self.get_all_plans():
# check pruning rate
if self._is_valid(plan):
module = plan[0][0].target.module
pruning_fn = plan[0][0].handler
imp = self.estimate_importance(plan)
current_channels = utils.count_prunable_out_channels(module)
layer_step_ch_sparsity = self.per_step_ch_sparsity[module][self.current_step]
n_pruned = current_channels - int(
self.layer_init_out_ch[module] * (1 - layer_step_ch_sparsity)
)
if self.round_to:
n_pruned = n_pruned - (n_pruned % self.round_to)  # round down to a multiple of round_to
imp_argsort = torch.argsort(imp)
pruning_idxs = imp_argsort[:n_pruned].tolist()
plan = self.DG.get_pruning_plan(module, pruning_fn, pruning_idxs)
if self.DG.check_pruning_plan(plan):
plan.exec()
self.current_step += 1
def _is_valid(self, plan):
for dep, _ in plan:
if dep.target.module in self.per_step_ch_sparsity:
if dep.handler in [
functional.prune_conv_out_channel,
functional.prune_linear_out_channel,
]:
layer_step_ch_sparsity = self.per_step_ch_sparsity[dep.target.module][self.current_step]
layer_channels = utils.count_prunable_out_channels(dep.target.module)
if layer_channels <= self.layer_init_out_ch[dep.target.module] * (1 - layer_step_ch_sparsity):
return False
elif dep.handler in [
functional.prune_conv_in_channel,
functional.prune_linear_in_channel,
]:
layer_step_ch_sparsity = self.per_step_ch_sparsity[dep.target.module][self.current_step]
layer_channels = utils.count_prunable_in_channels(dep.target.module)
if layer_channels <= self.layer_init_in_ch[dep.target.module] * (1 - layer_step_ch_sparsity):
return False
return True
class GlobalPruner(MetaPruner):
"""Global pruner class."""
def __init__(
self,
model,
example_inputs,
importance,
total_steps=1,
pruning_rate_scheduler=linear_scheduler,
ch_sparsity=0.5,
max_ch_sparsity=1.0,
layer_ch_sparsity=None,
round_to=None,
ignored_layers=None,
user_defined_parameters=None,
output_transform=None,
):
"""Initialize."""
super(GlobalPruner, self).__init__(
model=model,
example_inputs=example_inputs,
importance=importance,
total_steps=total_steps,
pruning_rate_scheduler=pruning_rate_scheduler,
ch_sparsity=ch_sparsity,
layer_ch_sparsity=layer_ch_sparsity,
round_to=round_to,
ignored_layers=ignored_layers,
user_defined_parameters=user_defined_parameters,
output_transform=output_transform,
)
self.max_ch_sparsity = max_ch_sparsity
# global channel sparsity shared by all layers
self.per_step_ch_sparsity = self.pruning_rate_scheduler(
self.ch_sparsity, self.total_steps
)
self.current_step = 0
def reset(self):
"""reset."""
self.current_step = 0
def regularize(self, model):
"""regularize."""
pass
def step(self):
"""step."""
if self.current_step == self.total_steps:
return
global_importance = []
for plan in self.get_all_plans():
imp = self.estimate_importance(plan)
global_importance.append((plan, imp))
# get pruning threshold by ranking
imp = torch.cat([local_imp[-1] for local_imp in global_importance], dim=0)
topk_imp, _ = torch.topk(imp, k=int(len(imp) * self.ch_sparsity))
thres = topk_imp[-1]
for plan, imp in global_importance:
module = plan[0][0].target.module
pruning_fn = plan[0][0].handler
pruning_indices = (imp < thres).nonzero().view(-1).tolist()
plan = self.DG.get_pruning_plan(module, pruning_fn, pruning_indices)
n_prune = self._adjust_sparsity(plan)
if n_prune < len(pruning_indices):
pruning_indices = pruning_indices[:n_prune]
plan = self.DG.get_pruning_plan(module, pruning_fn, pruning_indices)
if self.DG.check_pruning_plan(plan):
plan.exec()
self.current_step += 1
def _adjust_sparsity(self, plan):
new_idxs = plan[0][1]
n_prune = len(new_idxs)
for _, (dep, idxs) in enumerate(plan):
module = dep.target.module
if dep.handler in [
functional.prune_conv_out_channel,
functional.prune_linear_out_channel,
]:
max_ch_sparsity = self.layer_ch_sparsity.get(module, self.max_ch_sparsity)
min_layer_channels = self.layer_init_out_ch[module] * (1 - max_ch_sparsity)
layer_channels = utils.count_prunable_out_channels(module)
if len(idxs) <= int(layer_channels - min_layer_channels):
continue
else:
n_prune = int(layer_channels - min_layer_channels)
elif dep.handler in [
functional.prune_conv_in_channel,
functional.prune_linear_in_channel,
]:
max_ch_sparsity = self.layer_ch_sparsity.get(module, self.max_ch_sparsity)
min_layer_channels = self.layer_init_in_ch[module] * (1 - max_ch_sparsity)
layer_channels = utils.count_prunable_in_channels(module)
if len(idxs) <= int(layer_channels - min_layer_channels):
continue
else:
n_prune = int(layer_channels - min_layer_channels)
return n_prune
# ---- end of file: nvidia_tao_pytorch/cv/ocdnet/pruning/torch_pruning/pruner/basepruner.py (repo: tao_pytorch_backend-main) ----
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Initial module."""
# flake8: noqa: F401, F403
from .basepruner import LocalPruner, GlobalPruner
from .magnitude_based_pruner import LocalMagnitudePruner, GlobalMagnitudePruner
from .batchnorm_scale_pruner import LocalBNScalePruner, GlobalBNScalePruner
from .structural_reg_pruner import LocalStructrualRegularizedPruner
from .structural_dropout_pruner import StructrualDropoutPruner
# ---- end of file: nvidia_tao_pytorch/cv/ocdnet/pruning/torch_pruning/pruner/__init__.py (repo: tao_pytorch_backend-main) ----
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Batchnorm scale pruner module."""
from typing import Callable
from .basepruner import LocalPruner, GlobalPruner
import torch
import torch.nn as nn
class LocalBNScalePruner(LocalPruner):
"""Local BN Scale Pruner class."""
def __init__(
self,
model,
example_inputs,
importance,
beta=1e-5,
total_steps=1,
pruning_rate_scheduler: Callable = None,
ch_sparsity=0.5,
layer_ch_sparsity=None,
round_to=None,
ignored_layers=None,
user_defined_parameters=None,
output_transform=None,
):
"""Initialize."""
super(LocalBNScalePruner, self).__init__(
model=model,
example_inputs=example_inputs,
importance=importance,
total_steps=total_steps,
pruning_rate_scheduler=pruning_rate_scheduler,
ch_sparsity=ch_sparsity,
layer_ch_sparsity=layer_ch_sparsity,
round_to=round_to,
ignored_layers=ignored_layers,
user_defined_parameters=user_defined_parameters,
output_transform=output_transform,
)
self.beta = beta
def regularize(self, model):
"""regularize."""
for m in model.modules():
if isinstance(m, nn.BatchNorm2d):
m.weight.grad.data.add_(self.beta * torch.sign(m.weight.data))
class GlobalBNScalePruner(GlobalPruner):
"""Global BN Scale Pruner class."""
def __init__(
self,
model,
example_inputs,
importance,
beta=1e-5,
total_steps=1,
pruning_rate_scheduler: Callable = None,
ch_sparsity=0.5,
max_ch_sparsity=1.0,
layer_ch_sparsity=None,
round_to=None,
ignored_layers=None,
user_defined_parameters=None,
output_transform=None,
):
"""Initialize."""
super(GlobalBNScalePruner, self).__init__(
model=model,
example_inputs=example_inputs,
importance=importance,
total_steps=total_steps,
pruning_rate_scheduler=pruning_rate_scheduler,
ch_sparsity=ch_sparsity,
max_ch_sparsity=max_ch_sparsity,
layer_ch_sparsity=layer_ch_sparsity,
round_to=round_to,
ignored_layers=ignored_layers,
user_defined_parameters=user_defined_parameters,
output_transform=output_transform,
)
self.beta = beta
def regularize(self, model):
"""regularize."""
for m in model.modules():
if isinstance(m, nn.BatchNorm2d):
m.weight.grad.data.add_(self.beta * torch.sign(m.weight.data))
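# Typical placement of regularize() in a training loop (a hedged sketch;
# model, loader, criterion, optimizer and the pruner construction are assumed
# to exist elsewhere):
#
#   for images, labels in loader:
#       optimizer.zero_grad()
#       loss = criterion(model(images), labels)
#       loss.backward()            # populate gradients first
#       pruner.regularize(model)   # then add the L1 penalty on BN scaling factors to the grads
#       optimizer.step()
#   pruner.step()                  # finally remove low-importance channels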
# ---- end of file: nvidia_tao_pytorch/cv/ocdnet/pruning/torch_pruning/pruner/batchnorm_scale_pruner.py (repo: tao_pytorch_backend-main) ----
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Structural dropout pruner."""
# pylint: disable=W0622
from typing import Callable
from .basepruner import LocalPruner
import torch
import torch.nn as nn
def imp_to_prob(x, scale=1.0):
"""Importance to prob."""
return torch.nn.functional.sigmoid((x - x.mean()) / (x.std() + 1e-8) * scale)
class StructrualDropout(nn.Module):
"""Structural Dropout class."""
def __init__(self, p):
"""Initialize."""
super(StructrualDropout, self).__init__()
self.p = p
self.mask = None
def forward(self, x):
"""Forward."""
C = x.shape[1]
if self.mask is None:
self.mask = (torch.cuda.FloatTensor(C, device=x.device).uniform_() > self.p).view(1, -1, 1, 1)
res = x * self.mask
return res
def reset(self, p):
"""Reset."""
self.p = p
self.mask = None
class StructrualDropoutPruner(LocalPruner):
"""Structural Dropout Pruner class."""
def __init__(
self,
model,
example_inputs,
importance,
total_steps=1,
p=0.1,
pruning_rate_scheduler: Callable = None,
ch_sparsity=0.5,
layer_ch_sparsity=None,
round_to=None,
ignored_layers=None,
user_defined_parameters=None,
output_transform=None,
):
"""Initialize."""
super(StructrualDropoutPruner, self).__init__(
model=model,
example_inputs=example_inputs,
total_steps=total_steps,
pruning_rate_scheduler=pruning_rate_scheduler,
ch_sparsity=ch_sparsity,
layer_ch_sparsity=layer_ch_sparsity,
round_to=round_to,
ignored_layers=ignored_layers,
user_defined_parameters=user_defined_parameters,
output_transform=output_transform,
)
self.importance = importance
self.module2dropout = {}
self.p = p
self.plans = self.get_all_plans()
def estimate_importance(self, plan):
"""Estimate importance."""
return self.importance(plan)
def structrual_dropout(self, module, input, output):
"""Structural dropout forward hook."""
return self.module2dropout[module][0](output)
def regularize(self, model):
"""Regularize."""
pass
def register_structural_dropout(self, module):
"""Register Structural Dropout."""
for plan in self.plans:
dropout_layer = StructrualDropout(p=self.p)
for dep, _ in plan:
module = dep.target.module
if self.ignored_layers is not None and module in self.ignored_layers:
continue
if module in self.module2dropout:
continue
if dep.handler not in self.DG.out_channel_pruners:
continue
hook = module.register_forward_hook(self.structrual_dropout)
self.module2dropout[module] = (dropout_layer, hook)
def remove_structural_dropout(self):
"""Remove Structural Dropout."""
for __, (_, hook) in self.module2dropout.items():
hook.remove()
# ---- end of file: nvidia_tao_pytorch/cv/ocdnet/pruning/torch_pruning/pruner/structural_dropout_pruner.py (repo: tao_pytorch_backend-main) ----
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""structural regularized pruner module."""
# pylint: disable=W0622
from .. import functional
from typing import Callable
from .basepruner import LocalPruner
import torch
class LocalStructrualRegularizedPruner(LocalPruner):
"""Local Structural Regularized Pruner class."""
def __init__(
self,
model,
example_inputs,
importance,
total_steps=1,
beta=1e-4,
pruning_rate_scheduler: Callable = None,
ch_sparsity=0.5,
layer_ch_sparsity=None,
round_to=None,
ignored_layers=None,
user_defined_parameters=None,
output_transform=None,
):
"""Initialize."""
super(LocalStructrualRegularizedPruner, self).__init__(
model=model,
example_inputs=example_inputs,
total_steps=total_steps,
pruning_rate_scheduler=pruning_rate_scheduler,
ch_sparsity=ch_sparsity,
layer_ch_sparsity=layer_ch_sparsity,
round_to=round_to,
ignored_layers=ignored_layers,
user_defined_parameters=user_defined_parameters,
output_transform=output_transform,
)
self.importance = importance
self.dropout_groups = {}
self.beta = beta
self.plans = self.get_all_plans()
def estimate_importance(self, plan):
"""estimate importance."""
return self.importance(plan)
def structrual_dropout(self, module, input, output):
"""Structural dropout forward hook."""
return self.dropout_groups[module][0](output)
def regularize(self, model):
"""regularize."""
for plan in self.plans:
for dep, _ in plan:
layer = dep.target.module
prune_fn = dep.handler
if prune_fn in [
functional.prune_conv_out_channel,
functional.prune_linear_out_channel,
]:
# regularize output channels
layer.weight.grad.data.add_(self.beta * torch.sign(layer.weight.data))
elif prune_fn in [
functional.prune_conv_in_channel,
functional.prune_linear_in_channel,
]:
# regularize input channels
layer.weight.grad.data.add_(self.beta * torch.sign(layer.weight.data))
elif prune_fn == functional.prune_batchnorm:
# regularize BN
if layer.affine is not None:
layer.weight.grad.data.add_(self.beta * torch.sign(layer.weight.data))
# ---- end of file: nvidia_tao_pytorch/cv/ocdnet/pruning/torch_pruning/pruner/structural_reg_pruner.py (repo: tao_pytorch_backend-main) ----
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unstructured (mask-based) pruning module."""
# pylint: disable=W0622
import torch
from copy import deepcopy
__all__ = ['mask_weight', 'mask_bias']
def _mask_weight_hook(module, input):
if hasattr(module, 'weight_mask'):
module.weight.data *= module.weight_mask
def _mask_bias_hook(module, input):
if module.bias is not None and hasattr(module, 'bias_mask'):
module.bias.data *= module.bias_mask
def mask_weight(layer, mask, inplace=True):
"""Unstructured pruning of a layer's weight.
Args:
layer: a layer with a weight parameter (e.g. a convolution or linear layer).
mask: a 0-1 mask with the same shape as the weight.
inplace: if False, operate on a deep copy of the layer.
"""
if not inplace:
layer = deepcopy(layer)
if mask.shape != layer.weight.shape:
return layer
mask = torch.tensor(mask, dtype=layer.weight.dtype, device=layer.weight.device, requires_grad=False)
if hasattr(layer, 'weight_mask'):
mask = mask + layer.weight_mask
mask[mask > 0] = 1
layer.weight_mask = mask
else:
layer.register_buffer('weight_mask', mask)
layer.register_forward_pre_hook(_mask_weight_hook)
return layer
def mask_bias(layer, mask, inplace=True):
"""Unstructured pruning of a layer's bias.
Args:
layer: a layer with a bias parameter (e.g. a convolution or linear layer).
mask: a 0-1 mask with the same shape as the bias.
inplace: if False, operate on a deep copy of the layer.
"""
if not inplace:
layer = deepcopy(layer)
if layer.bias is None or mask.shape != layer.bias.shape:
return layer
mask = torch.tensor(mask, dtype=layer.weight.dtype, device=layer.weight.device, requires_grad=False)
if hasattr(layer, 'bias_mask'):
mask = mask + layer.bias_mask
mask[mask > 0] = 1
layer.bias_mask = mask
else:
layer.register_buffer('bias_mask', mask)
layer.register_forward_pre_hook(_mask_bias_hook)
return layer
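# Minimal usage sketch (illustrative only): zero out one filter of a conv
# layer with a weight mask; the mask is re-applied by a forward pre-hook on
# every call, so the masked entries stay zero even if the weights are updated.
def _mask_weight_example():
    conv = torch.nn.Conv2d(3, 8, 3, bias=True)
    mask = torch.ones(conv.weight.shape)
    mask[0] = 0  # zero the first filter (weights are zeroed, not removed)
    mask_weight(conv, mask)
    out = conv(torch.randn(1, 3, 16, 16))
    return out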
# ---- end of file: nvidia_tao_pytorch/cv/ocdnet/pruning/torch_pruning/functional/unstructured.py (repo: tao_pytorch_backend-main) ----
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Initial module."""
# flake8: noqa: F401, F403
from .structured import *
from .unstructured import *
# ---- end of file: nvidia_tao_pytorch/cv/ocdnet/pruning/torch_pruning/functional/__init__.py (repo: tao_pytorch_backend-main) ----
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""structured module."""
import torch
import torch.nn as nn
from copy import deepcopy
from functools import reduce
from operator import mul
from abc import ABC, abstractmethod, abstractstaticmethod
from typing import Callable, Sequence, Tuple, Dict
class BasePruner(ABC):
"""BasePruner class."""
def __init__(self, metrics_dict: Dict[str, Callable] = None, dim=1):
"""Initialize."""
self.metrics = None
self.set_metrics(metrics_dict)
self.dim = dim
def add_metric(self, name, metric_fn):
"""add metric."""
self.metrics[name] = metric_fn
def set_metrics(self, metric_dict=None):
"""set metrics."""
if metric_dict is None:
metric_dict = {"#params": self.calc_nparams_to_prune}
self.metrics = metric_dict
def check(self, layer, idxs):
"""check."""
pass
def __call__(self, layer: nn.Module, idxs: Sequence[int], inplace: bool = True, dry_run: bool = False) -> Tuple[nn.Module, int]:
"""Call function."""
self.check(layer, idxs)
metrics = {name: metric_fn(layer, idxs) for (name, metric_fn) in self.metrics.items()}
if dry_run:
return layer, metrics
if not inplace:
layer = deepcopy(layer)
layer = self.prune(layer, idxs)
return layer, metrics
@abstractmethod
def prune(self, layer: nn.Module, idxs: Sequence[int]) -> nn.Module:
"""prune."""
pass
@abstractstaticmethod
def calc_nparams_to_prune(layer, idxs):
"""calc nparams to prune."""
return 0
class ConvOutChannelPruner(BasePruner):
"""Conv out channel pruner class."""
def prune(self, layer: nn.Module, idxs: Sequence[int]) -> nn.Module:
"""prune."""
keep_idxs = list(set(range(layer.out_channels)) - set(idxs))
keep_idxs.sort()
layer.out_channels = layer.out_channels - len(idxs)
if not hasattr(layer, "transposed") or not layer.transposed:
layer.weight = torch.nn.Parameter(layer.weight.data.clone()[keep_idxs])
else:
layer.weight = torch.nn.Parameter(layer.weight.data.clone()[:, keep_idxs])
if layer.bias is not None:
layer.bias = torch.nn.Parameter(layer.bias.data.clone()[keep_idxs])
return layer
@staticmethod
def calc_nparams_to_prune(layer: nn.Module, idxs: Sequence[int]) -> int:
"""calc nparams to prune."""
nparams_to_prune = len(idxs) * reduce(mul, layer.weight.shape[1:]) + (len(idxs) if layer.bias is not None else 0)
return nparams_to_prune
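# Direct usage sketch of the pruner above (illustrative only): pruning three
# output channels of a conv layer shrinks its weight (and bias) in place and
# reports the number of removed parameters through the metric dict.
def _conv_out_channel_pruner_example():
    conv = nn.Conv2d(16, 32, 3)
    conv, metrics = ConvOutChannelPruner()(conv, idxs=[0, 5, 9])
    assert conv.out_channels == 29 and conv.weight.shape[0] == 29
    return metrics  # e.g. {"#params": 3 * 16 * 3 * 3 + 3}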
class ConvInChannelPruner(BasePruner):
"""Conv in channel pruner class."""
def prune(self, layer: nn.Module, idxs: Sequence[int]) -> nn.Module:
"""prune."""
keep_idxs = list(set(range(layer.in_channels)) - set(idxs))
layer.in_channels = layer.in_channels - len(idxs)
if not layer.transposed:
layer.weight = torch.nn.Parameter(layer.weight.data.clone()[:, keep_idxs])
else:
layer.weight = torch.nn.Parameter(layer.weight.data.clone()[keep_idxs])
# no bias pruning because it does not change the output channels
return layer
@staticmethod
def calc_nparams_to_prune(layer: nn.Module, idxs: Sequence[int]) -> int:
"""calc nparams to prune."""
nparams_to_prune = len(idxs) * layer.weight.shape[0] * reduce(mul, layer.weight.shape[2:])
return nparams_to_prune
class GroupConvPruner(ConvOutChannelPruner):
"""GroupConv pruner class."""
def check(self, layer, idxs) -> nn.Module:
"""check."""
pass
def prune(self, layer: nn.Module, idxs: Sequence[int]) -> nn.Module:
"""prune."""
keep_idxs = list(set(range(layer.out_channels)) - set(idxs))
keep_idxs.sort()
layer.out_channels = layer.out_channels - len(idxs)
layer.in_channels = layer.in_channels - len(idxs)
layer.groups = layer.groups - len(idxs)
layer.weight = torch.nn.Parameter(layer.weight.data.clone()[keep_idxs])
if layer.bias is not None:
layer.bias = torch.nn.Parameter(layer.bias.data.clone()[keep_idxs])
return layer
class LinearOutChannelPruner(BasePruner):
"""Linear out channel pruner class."""
def prune(self, layer: nn.Module, idxs: Sequence[int]) -> nn.Module:
"""prune."""
keep_idxs = list(set(range(layer.out_features)) - set(idxs))
keep_idxs.sort()
layer.out_features = layer.out_features - len(idxs)
layer.weight = torch.nn.Parameter(layer.weight.data.clone()[keep_idxs])
if layer.bias is not None:
layer.bias = torch.nn.Parameter(layer.bias.data.clone()[keep_idxs])
return layer
@staticmethod
def calc_nparams_to_prune(layer: nn.Module, idxs: Sequence[int]) -> int:
"""calc nparams to prune."""
nparams_to_prune = len(idxs) * layer.weight.shape[1] + (len(idxs) if layer.bias is not None else 0)
return nparams_to_prune
class LinearInChannelPruner(BasePruner):
"""Linear in channle pruner class."""
def prune(self, layer: nn.Module, idxs: Sequence[int]) -> nn.Module:
"""prune."""
keep_idxs = list(set(range(layer.in_features)) - set(idxs))
keep_idxs.sort()
layer.in_features = layer.in_features - len(idxs)
layer.weight = torch.nn.Parameter(layer.weight.data.clone()[:, keep_idxs])
return layer
@staticmethod
def calc_nparams_to_prune(layer: nn.Module, idxs: Sequence[int]) -> int:
"""calc nparams to prune."""
nparams_to_prune = len(idxs) * layer.weight.shape[0]
return nparams_to_prune
class BatchnormPruner(BasePruner):
"""Batchnorm pruner class."""
def prune(self, layer: nn.Module, idxs: Sequence[int]) -> nn.Module:
"""prune."""
keep_idxs = list(set(range(layer.num_features)) - set(idxs))
keep_idxs.sort()
layer.num_features = layer.num_features - len(idxs)
layer.running_mean = layer.running_mean.data.clone()[keep_idxs]
layer.running_var = layer.running_var.data.clone()[keep_idxs]
if layer.affine:
layer.weight = torch.nn.Parameter(layer.weight.data.clone()[keep_idxs])
layer.bias = torch.nn.Parameter(layer.bias.data.clone()[keep_idxs])
return layer
@staticmethod
def calc_nparams_to_prune(layer: nn.Module, idxs: Sequence[int]) -> int:
"""calc nparams to prune."""
nparams_to_prune = len(idxs) * (2 if layer.affine else 1)
return nparams_to_prune
class LayernormPruner(BasePruner):
"""Layernorm pruner class."""
def __init__(self, metrics=None, pruning_dim=-1):
"""Initialize."""
super().__init__(metrics)
self.pruning_dim = pruning_dim
def check(self, layer, idxs):
"""check."""
layer.pruning_dim = self.pruning_dim
def prune(self, layer: nn.Module, idxs: Sequence[int]) -> nn.Module:
"""prune."""
pruning_dim = self.pruning_dim
if len(layer.normalized_shape) < -pruning_dim:
return layer
num_features = layer.normalized_shape[pruning_dim]
keep_idxs = torch.tensor(sorted(set(range(num_features)) - set(idxs)), dtype=torch.long)
if layer.elementwise_affine:
layer.weight = torch.nn.Parameter(layer.weight.data.clone().index_select(pruning_dim, keep_idxs))
layer.bias = torch.nn.Parameter(layer.bias.data.clone().index_select(pruning_dim, keep_idxs))
if pruning_dim != -1:
layer.normalized_shape = layer.normalized_shape[:pruning_dim] + (keep_idxs.size(0), ) + layer.normalized_shape[pruning_dim + 1:]
else:
layer.normalized_shape = layer.normalized_shape[:pruning_dim] + (keep_idxs.size(0), )
return layer
@staticmethod
def calc_nparams_to_prune(layer: nn.Module, idxs: Sequence[int]) -> int:
"""calc nparams to prune."""
nparams_to_prune = len(idxs) * 2 if layer.elementwise_affine and len(layer.normalized_shape) >= -layer.pruning_dim else 0
return nparams_to_prune
class PReLUPruner(BasePruner):
"""PReLU pruner class."""
def prune(self, layer: nn.PReLU, idxs: list) -> nn.Module:
"""prune."""
if layer.num_parameters == 1:
return layer
keep_idxs = list(set(range(layer.num_parameters)) - set(idxs))
keep_idxs.sort()
layer.num_parameters = layer.num_parameters - len(idxs)
layer.weight = torch.nn.Parameter(layer.weight.data.clone()[keep_idxs])
return layer
@staticmethod
def calc_nparams_to_prune(layer: nn.PReLU, idxs: Sequence[int]) -> int:
"""calc nparams to prune."""
nparams_to_prune = 0 if layer.num_parameters == 1 else len(idxs)
return nparams_to_prune
class EmbeddingPruner(BasePruner):
"""Embedding pruner class."""
def prune(self, layer: nn.Embedding, idxs: list) -> nn.Module:
"""prune."""
num_features = layer.embedding_dim
keep_idxs = list(set(range(num_features)) - set(idxs))
keep_idxs.sort()
layer.weight = torch.nn.Parameter(layer.weight.data.clone()[:, keep_idxs])
layer.embedding_dim = len(keep_idxs)
return layer
@staticmethod
def calc_nparams_to_prune(layer: nn.Embedding, idxs: Sequence[int]) -> int:
"""calc nparams to prune."""
nparams_to_prune = layer.num_embeddings * len(idxs)
return nparams_to_prune
class ParameterPruner(BasePruner):
"""Parameter pruner class."""
def prune(self, tensor, idxs: list) -> nn.Module:
"""prune."""
keep_idxs = list(set(range(tensor.data.shape[self.dim])) - set(idxs))
keep_idxs.sort()
tensor.data = torch.index_select(tensor.data, self.dim, torch.LongTensor(keep_idxs))
return tensor
@staticmethod
def calc_nparams_to_prune(tensor: nn.Parameter, idxs: Sequence[int]) -> int:
"""calc nparams to prune."""
return 0
class MultiheadAttentionPruner(BasePruner):
"""MultiheadAttention pruner class."""
def check(self, layer, idxs):
"""check."""
assert (layer.embed_dim - len(idxs)) % layer.num_heads == 0, "embed_dim (%d) of MultiheadAttention after pruning must be divisible by `num_heads` (%d)" % (layer.embed_dim, layer.num_heads)
def prune(self, layer, idxs: list) -> nn.Module:
"""prune."""
keep_idxs = list(set(range(layer.embed_dim)) - set(idxs))
keep_idxs.sort()
if layer.q_proj_weight is not None:
layer.q_proj_weight.data = torch.index_select(layer.q_proj_weight.data, 0, torch.LongTensor(keep_idxs))
if layer.k_proj_weight is not None:
layer.k_proj_weight.data = torch.index_select(layer.k_proj_weight.data, 0, torch.LongTensor(keep_idxs))
if layer.v_proj_weight is not None:
layer.v_proj_weight.data = torch.index_select(layer.v_proj_weight.data, 0, torch.LongTensor(keep_idxs))
pruning_idxs_3x = idxs + [i + layer.embed_dim for i in idxs] + [i + 2 * layer.embed_dim for i in idxs]
keep_idxs_3x = list(set(range(3 * layer.embed_dim)) - set(pruning_idxs_3x))
keep_idxs_3x.sort()
if layer.in_proj_weight is not None:
layer.in_proj_weight.data = torch.index_select(layer.in_proj_weight.data, 0, torch.LongTensor(keep_idxs_3x))
layer.in_proj_weight.data = torch.index_select(layer.in_proj_weight.data, 1, torch.LongTensor(keep_idxs))
if layer.in_proj_bias is not None:
layer.in_proj_bias.data = torch.index_select(layer.in_proj_bias.data, 0, torch.LongTensor(keep_idxs_3x))
if layer.bias_k is not None:
layer.bias_k.data = torch.index_select(layer.bias_k.data, 2, torch.LongTensor(keep_idxs))
if layer.bias_v is not None:
layer.bias_v.data = torch.index_select(layer.bias_v.data, 2, torch.LongTensor(keep_idxs))
linear = layer.out_proj
keep_idxs = list(set(range(linear.out_features)) - set(idxs))
keep_idxs.sort()
linear.out_features = linear.out_features - len(idxs)
linear.weight = torch.nn.Parameter(linear.weight.data.clone()[keep_idxs])
if linear.bias is not None:
linear.bias = torch.nn.Parameter(linear.bias.data.clone()[keep_idxs])
keep_idxs = list(set(range(linear.in_features)) - set(idxs))
keep_idxs.sort()
linear.in_features = linear.in_features - len(idxs)
linear.weight = torch.nn.Parameter(linear.weight.data.clone()[:, keep_idxs])
layer.embed_dim = layer.embed_dim - len(idxs)
return layer
@staticmethod
def calc_nparams_to_prune(layer: nn.Module, idxs: Sequence[int]) -> int:
"""calc nparams to prune."""
linear = layer.out_proj
nparams_to_prune = len(idxs) * linear.weight.shape[1] + len(idxs) * (layer.embed_dim - len(idxs)) + (len(idxs) if linear.bias is not None else 0)
return nparams_to_prune
prune_conv_in_channel = ConvInChannelPruner()
prune_conv_out_channel = ConvOutChannelPruner()
prune_group_conv = GroupConvPruner()
prune_batchnorm = BatchnormPruner()
prune_linear_in_channel = LinearInChannelPruner()
prune_linear_out_channel = LinearOutChannelPruner()
prune_prelu = PReLUPruner()
prune_layernorm = LayernormPruner()
prune_embedding = EmbeddingPruner()
prune_parameter = ParameterPruner(dim=2) # default=2 for transformers
prune_multihead_attention = MultiheadAttentionPruner()
def set_global_metrics(metric_dict):
"""set global metrics"""
prune_conv_in_channel.set_metrics(metric_dict)
prune_conv_out_channel.set_metrics(metric_dict)
prune_group_conv.set_metrics(metric_dict)
prune_batchnorm.set_metrics(metric_dict)
prune_linear_in_channel.set_metrics(metric_dict)
prune_linear_out_channel.set_metrics(metric_dict)
prune_prelu.set_metrics(metric_dict)
prune_layernorm.set_metrics(metric_dict)
prune_embedding.set_metrics(metric_dict)
prune_parameter.set_metrics(metric_dict)
prune_multihead_attention.set_metrics(metric_dict)
def add_global_metrics(name, metric_fn):
"""add global metrics"""
prune_conv_in_channel.add_metric(name, metric_fn)
prune_conv_out_channel.add_metric(name, metric_fn)
prune_group_conv.add_metric(name, metric_fn)
prune_batchnorm.add_metric(name, metric_fn)
prune_linear_in_channel.add_metric(name, metric_fn)
prune_linear_out_channel.add_metric(name, metric_fn)
prune_prelu.add_metric(name, metric_fn)
prune_layernorm.add_metric(name, metric_fn)
prune_embedding.add_metric(name, metric_fn)
prune_parameter.add_metric(name, metric_fn)
prune_multihead_attention.add_metric(name, metric_fn)
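# Usage sketch (illustrative only; the layer and index choices below are hypothetical):
#
#   conv = nn.Conv2d(16, 32, kernel_size=3)
#   # dry_run=True only reports the metrics (by default "#params") without modifying the layer.
#   _, metrics = prune_conv_out_channel(conv, idxs=[0, 1], dry_run=True)
#   # Calling without dry_run prunes output channels 0 and 1 in place and returns the layer.
#   conv, metrics = prune_conv_out_channel(conv, idxs=[0, 1])
#
# set_global_metrics / add_global_metrics update every functor above in one call, e.g.
# add_global_metrics("#custom", my_metric_fn), where my_metric_fn is a user-supplied
# callable taking (layer, idxs); "my_metric_fn" is a placeholder name.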
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/ocdnet/pruning/torch_pruning/functional/structured.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""OCDnet scripts module."""
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/ocdnet/scripts/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# **************************************************************************
# Modified from github (https://github.com/WenmuZhou/DBNet.pytorch)
# Copyright (c) WenmuZhou
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# https://github.com/WenmuZhou/DBNet.pytorch/blob/master/LICENSE.md
# **************************************************************************
"""Export module."""
import os
import torch
from torch.onnx import register_custom_op_symbolic
import copy
import onnx
import onnx_graphsurgeon as onnx_gs
from torchvision.ops import DeformConv2d
import tempfile
import nvidia_tao_pytorch.core.loggers.api_logging as status_logging
from nvidia_tao_pytorch.core.hydra.hydra_runner import hydra_runner
from nvidia_tao_pytorch.core.tlt_logging import obfuscate_logs
from nvidia_tao_pytorch.cv.ocdnet.config.default_config import ExperimentConfig
from nvidia_tao_pytorch.cv.ocdnet.model.pl_ocd_model import OCDnetModel
from omegaconf import OmegaConf
def symbolic_dcnv2_forward(g, *inputs):
"""symbolic_dcnv2_forward"""
# weights as last input to align with TRT plugin
return g.op("ModulatedDeformConv2d", inputs[0], inputs[2], inputs[3], inputs[1])
# Register custom symbolic function
register_custom_op_symbolic("torchvision::deform_conv2d", symbolic_dcnv2_forward, 11)
class Export():
"""Export OCDNet model."""
def __init__(
self, model_path, config_file,
width, height, opset_version,
gpu_id=0
):
"""Initialize."""
self.model_path = model_path
self.config_file = config_file
self.opset_version = opset_version
self.width = width
self.height = height
self.gpu_id = gpu_id
if self.gpu_id is not None and isinstance(self.gpu_id, int) and torch.cuda.is_available():
self.device = torch.device("cuda:%s" % self.gpu_id)
torch.backends.cudnn.benchmark = True
else:
self.device = torch.device("cpu")
checkpoint = torch.load(model_path, map_location=torch.device('cpu'))
if "state_dict" in checkpoint.keys():
checkpoint = checkpoint["state_dict"]
checkpoint = {key.replace("model.", ""): value for key, value in checkpoint.items()}
config = OmegaConf.to_container(config_file)
config['model']['pretrained'] = False
config["dataset"]["train_dataset"] = config["dataset"]["validate_dataset"]
self.model = OCDnetModel(config)
layers = checkpoint.keys()
ckpt = dict()
# Support loading official pretrained weights for eval
for layer in layers:
new_layer = layer
if new_layer.startswith("model.module."):
new_layer = new_layer[13:]
if new_layer == "decoder.in5.weight":
new_layer = "neck.in5.weight"
elif new_layer == "decoder.in4.weight":
new_layer = "neck.in4.weight"
elif new_layer == "decoder.in3.weight":
new_layer = "neck.in3.weight"
elif new_layer == "decoder.in2.weight":
new_layer = "neck.in2.weight"
elif new_layer == "decoder.out5.0.weight":
new_layer = "neck.out5.0.weight"
elif new_layer == "decoder.out4.0.weight":
new_layer = "neck.out4.0.weight"
elif new_layer == "decoder.out3.0.weight":
new_layer = "neck.out3.0.weight"
elif new_layer == "decoder.out2.weight":
new_layer = "neck.out2.weight"
elif new_layer == "decoder.binarize.0.weight":
new_layer = "head.binarize.0.weight"
elif new_layer == "decoder.binarize.1.weight":
new_layer = "head.binarize.1.weight"
elif new_layer == "decoder.binarize.1.bias":
new_layer = "head.binarize.1.bias"
elif new_layer == "decoder.binarize.1.running_mean":
new_layer = "head.binarize.1.running_mean"
elif new_layer == "decoder.binarize.1.running_var":
new_layer = "head.binarize.1.running_var"
elif new_layer == "decoder.binarize.3.weight":
new_layer = "head.binarize.3.weight"
elif new_layer == "decoder.binarize.3.bias":
new_layer = "head.binarize.3.bias"
elif new_layer == "decoder.binarize.4.weight":
new_layer = "head.binarize.4.weight"
elif new_layer == "decoder.binarize.4.bias":
new_layer = "head.binarize.4.bias"
elif new_layer == "decoder.binarize.4.running_mean":
new_layer = "head.binarize.4.running_mean"
elif new_layer == "decoder.binarize.4.running_var":
new_layer = "head.binarize.4.running_var"
elif new_layer == "decoder.binarize.6.weight":
new_layer = "head.binarize.6.weight"
elif new_layer == "decoder.binarize.6.bias":
new_layer = "head.binarize.6.bias"
elif new_layer == "decoder.thresh.0.weight":
new_layer = "head.thresh.0.weight"
elif new_layer == "decoder.thresh.1.weight":
new_layer = "head.thresh.1.weight"
elif new_layer == "decoder.thresh.1.bias":
new_layer = "head.thresh.1.bias"
elif new_layer == "decoder.thresh.1.running_mean":
new_layer = "head.thresh.1.running_mean"
elif new_layer == "decoder.thresh.1.running_var":
new_layer = "head.thresh.1.running_var"
elif new_layer == "decoder.thresh.3.weight":
new_layer = "head.thresh.3.weight"
elif new_layer == "decoder.thresh.3.bias":
new_layer = "head.thresh.3.bias"
elif new_layer == "decoder.thresh.4.weight":
new_layer = "head.thresh.4.weight"
elif new_layer == "decoder.thresh.4.bias":
new_layer = "head.thresh.4.bias"
elif new_layer == "decoder.thresh.4.running_mean":
new_layer = "head.thresh.4.running_mean"
elif new_layer == "decoder.thresh.4.running_var":
new_layer = "head.thresh.4.running_var"
elif new_layer == "decoder.thresh.6.weight":
new_layer = "head.thresh.6.weight"
elif new_layer == "decoder.thresh.6.bias":
new_layer = "head.thresh.6.bias"
elif "num_batches_tracked" in new_layer:
continue
elif "backbone.fc" in new_layer:
continue
elif "backbone.smooth" in new_layer:
continue
ckpt[new_layer] = checkpoint[layer]
self.model.model.load_state_dict(ckpt)
self.model.to(self.device)
def export(self):
"""Export."""
self.model.eval()
# Use the requested export height/width when provided; the ONNX graph keeps dynamic H/W axes either way.
dummy_image = torch.zeros(
(1, 3, self.height or 544, self.width or 960),
dtype=torch.float32,
device=self.device
)
if self.config_file.export.results_dir is not None:
results_dir = self.config_file.export.results_dir
else:
results_dir = os.path.join(self.config_file.results_dir, "export")
self.config_file.export.results_dir = results_dir
os.makedirs(results_dir, exist_ok=True)
# Set status logging
status_file = os.path.join(results_dir, "status.json")
status_logging.set_status_logger(
status_logging.StatusLogger(
filename=status_file,
append=True
)
)
status_logging.get_status_logger().write(
status_level=status_logging.Status.STARTED,
message="Starting OCDNet export"
)
# Set default output filename if the filename isn't provided over the command line.
if self.config_file.export.onnx_file is None:
split_name = os.path.splitext(self.model_path)[0]
self.config_file.export.onnx_file = "{}.onnx".format(split_name)
# Raise an error if the output file already exists.
if os.path.exists(self.config_file.export.onnx_file):
raise FileExistsError(f"Output file already exists at {self.config_file.export.onnx_file}")
self.output_model = self.config_file.export.onnx_file
handle, temp_onnx = tempfile.mkstemp()
os.close(handle)
torch.onnx.export(
self.model,
(dummy_image,),
temp_onnx,
export_params=True,
opset_version=self.opset_version,
do_constant_folding=True,
operator_export_type=torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK,
keep_initializers_as_inputs=True,
input_names=['input'],
output_names=['pred'],
dynamic_axes={
"input": {0: "batch", 2: "height", 3: "width"},
}
)
# Import and add DCNv2 attributes
onnx_model = onnx.load(temp_onnx)
gs_graph = onnx_gs.import_onnx(onnx_model)
layer_dict = {}
attrs_dict = {}
for name, layer in self.model.named_modules():
if isinstance(layer, DeformConv2d):
attrs_dict["stride"] = list(layer.stride)
attrs_dict["padding"] = list(layer.padding)
attrs_dict["dilation"] = list(layer.dilation)
attrs_dict["group"] = 1
attrs_dict["deformable_group"] = 1
name = name.replace("model.backbone.", "") + ".ModulatedDeformConv2d"
layer_dict[name] = copy.deepcopy(attrs_dict)
for node in gs_graph.nodes:
if node.op == "ModulatedDeformConv2d":
key = (".".join(node.name.split("/")[-3:]))
node.attrs = layer_dict[key]
gs_graph.fold_constants()
gs_graph.cleanup()
new_onnx_model = onnx_gs.export_onnx(gs_graph)
onnx.save(new_onnx_model, self.output_model)
print("Model exported to {}".format(self.output_model))
os.remove(temp_onnx)
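# Note: the exported graph contains the custom "ModulatedDeformConv2d" op emitted by
# symbolic_dcnv2_forward above, so it will not load in a stock ONNX Runtime session;
# the intended consumer is a TensorRT engine built with the matching deformable-conv plugin.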
spec_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
@hydra_runner(
config_path=os.path.join(spec_root, "experiment_specs"), config_name="export", schema=ExperimentConfig
)
def main(cfg: ExperimentConfig) -> None:
"""Run the export process."""
# Obfuscate logs.
obfuscate_logs(cfg)
try:
exporter = Export(
config_file=cfg,
model_path=cfg.export.checkpoint,
width=cfg.export.width,
height=cfg.export.height,
opset_version=cfg.export.opset_version
)
exporter.export()
status_logging.get_status_logger().write(
status_level=status_logging.Status.SUCCESS,
message="Export finished successfully."
)
except (KeyboardInterrupt, SystemExit):
status_logging.get_status_logger().write(
message="Export was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
except Exception as e:
status_logging.get_status_logger().write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
raise e
if __name__ == "__main__":
main()
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/ocdnet/scripts/export.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# **************************************************************************
# Modified from github (https://github.com/WenmuZhou/DBNet.pytorch)
# Copyright (c) WenmuZhou
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# https://github.com/WenmuZhou/DBNet.pytorch/blob/master/LICENSE.md
# **************************************************************************
"""Train OCDnet model."""
import os
import re
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import ModelCheckpoint
from pytorch_lightning.strategies import DDPStrategy
from nvidia_tao_pytorch.core.callbacks.loggers import TAOStatusLogger
import nvidia_tao_pytorch.core.loggers.api_logging as status_logging
from nvidia_tao_pytorch.core.hydra.hydra_runner import hydra_runner
from nvidia_tao_pytorch.cv.ocdnet.config.default_config import ExperimentConfig
from nvidia_tao_pytorch.cv.ocdnet.model.pl_ocd_model import OCDnetModel
from omegaconf import OmegaConf
def run_experiment(tmp_experiment_config,
results_dir):
"""Start the training."""
if tmp_experiment_config.train.results_dir is not None:
results_dir = tmp_experiment_config.train.results_dir
else:
results_dir = os.path.join(tmp_experiment_config.results_dir, "train")
tmp_experiment_config.train.results_dir = results_dir
os.makedirs(results_dir, exist_ok=True)
experiment_config = OmegaConf.to_container(tmp_experiment_config)
ocd_model = OCDnetModel(experiment_config)
total_epochs = experiment_config['train']['num_epochs']
assert total_epochs != experiment_config['train']['lr_scheduler']['args']['warmup_epoch'], "num_epochs should not be the same as warmup_epoch."
val_inter = experiment_config['train']['validation_interval']
clip_grad = experiment_config['train']['trainer']['clip_grad_norm']
num_gpus = experiment_config["num_gpus"]
status_logger_callback = TAOStatusLogger(
results_dir,
append=True,
num_epochs=total_epochs
)
status_logging.set_status_logger(status_logger_callback.logger)
strategy = None
if num_gpus > 1:
strategy = DDPStrategy(find_unused_parameters=False)
trainer = Trainer(devices=num_gpus,
max_epochs=total_epochs,
check_val_every_n_epoch=val_inter,
default_root_dir=results_dir,
enable_checkpointing=False,
accelerator="gpu",
strategy=strategy,
gradient_clip_val=clip_grad,
num_sanity_val_steps=0,
callbacks=None
)
ckpt_inter = experiment_config['train']['checkpoint_interval']
ModelCheckpoint.FILE_EXTENSION = ".pth"
checkpoint_callback = ModelCheckpoint(every_n_epochs=ckpt_inter,
dirpath=results_dir,
save_on_train_epoch_end=True,
monitor=None,
save_top_k=-1,
filename='ocd_model_{epoch:03d}')
resume_ckpt = experiment_config['train']['resume_training_checkpoint_path']
if resume_ckpt:
status_logging.get_status_logger().write(
message=f"Resuming training from checkpoint: {resume_ckpt}",
status_level=status_logging.Status.STARTED
)
resumed_epoch = re.search('epoch=(\\d+)', resume_ckpt)
if resumed_epoch:
resumed_epoch = int(resumed_epoch.group(1))
else:
resumed_epoch = 0
status_logger_callback.epoch_counter = resumed_epoch + 1 # make sure callback epoch matches resumed epoch
trainer.callbacks.append(status_logger_callback)
trainer.callbacks.append(checkpoint_callback)
if resume_ckpt and resume_ckpt.endswith(".pth"):
print(f'Resume training model from {resume_ckpt}')
trainer.fit(ocd_model, ckpt_path=resume_ckpt)
else:
trainer.fit(ocd_model)
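# Summary of the spec fields consumed above (not an exhaustive schema): train.num_epochs,
# train.validation_interval, train.checkpoint_interval, train.trainer.clip_grad_norm,
# train.lr_scheduler.args.warmup_epoch, train.resume_training_checkpoint_path,
# train.results_dir and the top-level num_gpus.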
spec_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
@hydra_runner(
config_path=os.path.join(spec_root, "experiment_specs"), config_name="train", schema=ExperimentConfig
)
def main(cfg: ExperimentConfig) -> None:
"""Run the training process."""
try:
run_experiment(tmp_experiment_config=cfg,
results_dir=cfg.train.results_dir)
status_logging.get_status_logger().write(
status_level=status_logging.Status.SUCCESS,
message="Training finished successfully"
)
except (KeyboardInterrupt, SystemExit):
status_logging.get_status_logger().write(
message="Training was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
except Exception as e:
status_logging.get_status_logger().write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
raise e
if __name__ == "__main__":
main()
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/ocdnet/scripts/train.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Prune module."""
import os
import torch
from torch import nn
from typing import Sequence
from functools import reduce
from operator import mul
from omegaconf import OmegaConf
from torchvision.ops import DeformConv2d
from nvidia_tao_pytorch.cv.ocdnet.config.default_config import ExperimentConfig
from nvidia_tao_pytorch.cv.ocdnet.model.model import Model
from nvidia_tao_pytorch.cv.ocdnet.data_loader.build_dataloader import get_dataloader
from nvidia_tao_pytorch.cv.ocdnet.utils import mkdir
import nvidia_tao_pytorch.core.loggers.api_logging as status_logging
from nvidia_tao_pytorch.core.hydra.hydra_runner import hydra_runner
from nvidia_tao_pytorch.core.tlt_logging import obfuscate_logs
import nvidia_tao_pytorch.cv.ocdnet.pruning.torch_pruning as tp
# force pycuda on primary context before using TensorRT
import pycuda
import pycuda.autoinit
pyc_dev = pycuda.autoinit.device
pyc_ctx = pyc_dev.retain_primary_context()
class DCNv2OutputPruning(tp.functional.structured.BasePruner):
"""DCNv2 Pruning."""
def prune(self, layer: nn.Module, idxs: Sequence[int]) -> nn.Module:
"""Prune parameters."""
keep_idxs = list(set(range(layer.out_channels)) - set(idxs))
layer.out_channels = layer.out_channels - len(idxs)
layer.weight = torch.nn.Parameter(layer.weight.data.clone()[keep_idxs])
if layer.bias is not None:
layer.bias = torch.nn.Parameter(layer.bias.data.clone()[keep_idxs])
return layer
@staticmethod
def calc_nparams_to_prune(layer: nn.Module, idxs: Sequence[int]) -> int:
"""Compute number of parameters to prune."""
nparams_to_prune = len(idxs) * reduce(mul, layer.weight.shape[1:]) + (len(idxs) if layer.bias is not None else 0)
return nparams_to_prune
class DCNv2InputPruning(tp.functional.structured.BasePruner):
"""DCNv2 Pruning."""
def prune(self, layer: nn.Module, idxs: Sequence[int]) -> nn.Module:
"""Prune parameters."""
keep_idxs = list(set(range(layer.in_channels)) - set(idxs))
layer.in_channels = layer.in_channels - len(idxs)
layer.weight = torch.nn.Parameter(layer.weight.data.clone()[:, keep_idxs])
return layer
@staticmethod
def calc_nparams_to_prune(layer: nn.Module, idxs: Sequence[int]) -> int:
"""Compute number of parameters to prune."""
nparams_to_prune = len(idxs) * layer.weight.shape[0] * reduce(mul, layer.weight.shape[2:])
return nparams_to_prune
class Prune():
"""Prune."""
def __init__(
self,
model_path,
config,
pruning_thresh,
output_dir,
gpu_id=0
):
"""Initialize."""
config['model']['pretrained'] = False
self.validate_loader = get_dataloader(config['dataset']['validate_dataset'], False)
self.model = None
self.pruning_thresh = pruning_thresh
self.output_dir = output_dir
self.gpu_id = gpu_id
if self.gpu_id is not None and isinstance(self.gpu_id, int) and torch.cuda.is_available():
self.device = torch.device("cuda:%s" % self.gpu_id)
torch.backends.cudnn.benchmark = True
else:
self.device = torch.device("cpu")
checkpoint = torch.load(model_path, map_location=torch.device('cpu'))
if "state_dict" in checkpoint.keys():
checkpoint = checkpoint["state_dict"]
checkpoint = {key.replace("model.", ""): value for key, value in checkpoint.items()}
self.model = Model(config['model'])
layers = checkpoint.keys()
ckpt = dict()
# Support loading official pretrained weights for eval
for layer in layers:
new_layer = layer
if new_layer.startswith("model.module."):
new_layer = new_layer[13:]
if new_layer == "decoder.in5.weight":
new_layer = "neck.in5.weight"
elif new_layer == "decoder.in4.weight":
new_layer = "neck.in4.weight"
elif new_layer == "decoder.in3.weight":
new_layer = "neck.in3.weight"
elif new_layer == "decoder.in2.weight":
new_layer = "neck.in2.weight"
elif new_layer == "decoder.out5.0.weight":
new_layer = "neck.out5.0.weight"
elif new_layer == "decoder.out4.0.weight":
new_layer = "neck.out4.0.weight"
elif new_layer == "decoder.out3.0.weight":
new_layer = "neck.out3.0.weight"
elif new_layer == "decoder.out2.weight":
new_layer = "neck.out2.weight"
elif new_layer == "decoder.binarize.0.weight":
new_layer = "head.binarize.0.weight"
elif new_layer == "decoder.binarize.1.weight":
new_layer = "head.binarize.1.weight"
elif new_layer == "decoder.binarize.1.bias":
new_layer = "head.binarize.1.bias"
elif new_layer == "decoder.binarize.1.running_mean":
new_layer = "head.binarize.1.running_mean"
elif new_layer == "decoder.binarize.1.running_var":
new_layer = "head.binarize.1.running_var"
elif new_layer == "decoder.binarize.3.weight":
new_layer = "head.binarize.3.weight"
elif new_layer == "decoder.binarize.3.bias":
new_layer = "head.binarize.3.bias"
elif new_layer == "decoder.binarize.4.weight":
new_layer = "head.binarize.4.weight"
elif new_layer == "decoder.binarize.4.bias":
new_layer = "head.binarize.4.bias"
elif new_layer == "decoder.binarize.4.running_mean":
new_layer = "head.binarize.4.running_mean"
elif new_layer == "decoder.binarize.4.running_var":
new_layer = "head.binarize.4.running_var"
elif new_layer == "decoder.binarize.6.weight":
new_layer = "head.binarize.6.weight"
elif new_layer == "decoder.binarize.6.bias":
new_layer = "head.binarize.6.bias"
elif new_layer == "decoder.thresh.0.weight":
new_layer = "head.thresh.0.weight"
elif new_layer == "decoder.thresh.1.weight":
new_layer = "head.thresh.1.weight"
elif new_layer == "decoder.thresh.1.bias":
new_layer = "head.thresh.1.bias"
elif new_layer == "decoder.thresh.1.running_mean":
new_layer = "head.thresh.1.running_mean"
elif new_layer == "decoder.thresh.1.running_var":
new_layer = "head.thresh.1.running_var"
elif new_layer == "decoder.thresh.3.weight":
new_layer = "head.thresh.3.weight"
elif new_layer == "decoder.thresh.3.bias":
new_layer = "head.thresh.3.bias"
elif new_layer == "decoder.thresh.4.weight":
new_layer = "head.thresh.4.weight"
elif new_layer == "decoder.thresh.4.bias":
new_layer = "head.thresh.4.bias"
elif new_layer == "decoder.thresh.4.running_mean":
new_layer = "head.thresh.4.running_mean"
elif new_layer == "decoder.thresh.4.running_var":
new_layer = "head.thresh.4.running_var"
elif new_layer == "decoder.thresh.6.weight":
new_layer = "head.thresh.6.weight"
elif new_layer == "decoder.thresh.6.bias":
new_layer = "head.thresh.6.bias"
elif "num_batches_tracked" in new_layer:
continue
elif "backbone.fc" in new_layer:
continue
elif "backbone.smooth" in new_layer:
continue
ckpt[new_layer] = checkpoint[layer]
self.model.load_state_dict(ckpt)
self.model.to(self.device)
def prune(self):
"""Prune function."""
input_dict = next(iter(self.validate_loader))
if self.model is not None:
self.model.eval()
print(self.model)
with torch.no_grad():
if self.model is not None:
for key, value in input_dict.items():
if value is not None:
if isinstance(value, torch.Tensor):
input_dict[key] = value.to(self.device)
unpruned_total_params = sum(p.numel() for p in self.model.parameters())
strategy = tp.strategy.L1Strategy() # or tp.strategy.RandomStrategy()
DG = tp.DependencyGraph()
DG.register_customized_layer(
DeformConv2d,
in_ch_pruning_fn=DCNv2InputPruning(), # A function to prune channels/dimensions of input tensor
out_ch_pruning_fn=DCNv2OutputPruning(), # A function to prune channels/dimensions of output tensor
get_in_ch_fn=lambda n: n.in_channels, # estimate the n_channel of layer input. Return None if the layer does not change tensor shape.
get_out_ch_fn=lambda n: n.out_channels) # estimate the n_channel of layer output. Return None if the layer does not change tensor shape.
DG.build_dependency(self.model, example_inputs=input_dict["img"])
for m in DG.module2node:
_inputs = DG.module2node[m].inputs
_deps = DG.module2node[m].dependencies
if isinstance(m, DeformConv2d):
DG.module2node[m].inputs = [_inputs[0]]
DG.module2node[m].dependencies = [_deps[0], _deps[3]]
# Prune Conv2d, DeformConv2d will be pruned indirectly by coupled pruning
layers = [module for module in self.model.modules() if isinstance(module, torch.nn.Conv2d)]
# Exclude DCNv2 conv2_offset layer
black_list = []
for layer in layers:
if layer.out_channels == 27:
black_list.append(layer)
count = 0
for layer in layers:
# skip black list layers
if layer in black_list:
continue
# Skip the thresh module (not used in eval mode)
if layer not in DG.module2node:
continue
threshold_run = self.pruning_thresh
pruning_idxs = strategy(layer.weight, amount=threshold_run, round_to=64)
pruning_plan = DG.get_pruning_plan(layer, tp.prune_conv_out_channel, idxs=pruning_idxs)
if pruning_plan is not None:
pruning_plan.exec()
else:
continue
count += 1
pruned_total_params = sum(p.numel() for p in self.model.parameters())
print("Pruning ratio: {}".format(
pruned_total_params / unpruned_total_params)
)
# Do inference to sanity check the pruned model
self.model(input_dict["img"])
# Save pruned model
if not os.path.exists(self.output_dir):
mkdir(self.output_dir)
assert os.path.exists(self.output_dir) and os.path.isdir(self.output_dir), "The output_folder should exist."
save_path = os.path.join(self.output_dir, f"pruned_{self.pruning_thresh}.pth")
torch.save(self.model, save_path)
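# Note: the full model object (architecture plus weights) is serialized here rather than a
# state_dict, so the pruned network can be restored directly with torch.load(save_path);
# evaluate.py relies on this when the loaded checkpoint is not a plain dict.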
def run_experiment(experiment_config, model_path, pruning_thresh):
"""Run experiment."""
gpu_id = experiment_config.prune.gpu_id
torch.cuda.set_device(gpu_id)
if experiment_config.prune.results_dir is not None:
results_dir = experiment_config.prune.results_dir
else:
results_dir = os.path.join(experiment_config.results_dir, "prune")
experiment_config.prune.results_dir = results_dir
os.makedirs(results_dir, exist_ok=True)
experiment_config = OmegaConf.to_container(experiment_config)
# Set status logging
status_file = os.path.join(results_dir, "status.json")
status_logging.set_status_logger(
status_logging.StatusLogger(
filename=status_file,
append=True
)
)
status_logging.get_status_logger().write(
status_level=status_logging.Status.STARTED,
message="Starting OCDNet pruning"
)
pruner = Prune(
model_path,
experiment_config,
pruning_thresh,
output_dir=results_dir
)
pruner.prune()
pyc_ctx.pop()
spec_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
@hydra_runner(
config_path=os.path.join(spec_root, "experiment_specs"), config_name="prune", schema=ExperimentConfig
)
def main(cfg: ExperimentConfig) -> None:
"""Run the pruning process."""
# Obfuscate logs.
obfuscate_logs(cfg)
pyc_ctx.push()
try:
run_experiment(experiment_config=cfg,
model_path=cfg.prune.checkpoint,
pruning_thresh=cfg.prune.pruning_thresh
)
status_logging.get_status_logger().write(
status_level=status_logging.Status.SUCCESS,
message="Pruning finished successfully."
)
except (KeyboardInterrupt, SystemExit):
status_logging.get_status_logger().write(
message="Pruning was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
except Exception as e:
status_logging.get_status_logger().write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
raise e
if __name__ == "__main__":
main()
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/ocdnet/scripts/prune.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# **************************************************************************
# Modified from github (https://github.com/WenmuZhou/DBNet.pytorch)
# Copyright (c) WenmuZhou
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# https://github.com/WenmuZhou/DBNet.pytorch/blob/master/LICENSE.md
# **************************************************************************
"""Inference module."""
import os
import pathlib
import time
import cv2
import numpy as np
import torch
from omegaconf import OmegaConf
from nvidia_tao_pytorch.cv.ocdnet.config.default_config import ExperimentConfig
from nvidia_tao_pytorch.cv.ocdnet.post_processing.seg_detector_representer import get_post_processing
from nvidia_tao_pytorch.cv.ocdnet.model.pl_ocd_model import OCDnetModel
from nvidia_tao_pytorch.cv.ocdnet.utils.util import show_img, draw_bbox, save_result, get_file_list
import nvidia_tao_pytorch.core.loggers.api_logging as status_logging
from nvidia_tao_pytorch.core.hydra.hydra_runner import hydra_runner
from nvidia_tao_pytorch.core.tlt_logging import obfuscate_logs
from tqdm import tqdm
import matplotlib.pyplot as plt
import pycuda
import pycuda.autoinit
pyc_dev = pycuda.autoinit.device
pyc_ctx = pyc_dev.retain_primary_context()
def resize_image(img, image_size):
"""Resize image"""
resized_img = cv2.resize(img, image_size)
return resized_img
class Inference:
"""Infer class."""
def __init__(self, model_path, config, post_p_thre=0.7, gpu_id=None):
"""Init model."""
self.gpu_id = gpu_id
if self.gpu_id is not None and isinstance(self.gpu_id, int) and torch.cuda.is_available():
self.device = torch.device("cuda:%s" % self.gpu_id)
else:
self.device = torch.device("cpu")
self.post_process = get_post_processing(config['inference']['post_processing'])
self.post_process.box_thresh = post_p_thre
self.img_mode = config['inference']['img_mode']
config["dataset"]["train_dataset"]["data_path"] = [os.path.dirname(config["inference"]["input_folder"])]
config["dataset"]["validate_dataset"]["data_path"] = [os.path.dirname(config["inference"]["input_folder"])]
if model_path.split(".")[-1] in ["trt", "engine"]:
raise Exception("Please use tao_deploy to run inference against tensorrt engine.")
else:
checkpoint = torch.load(model_path, map_location=self.device)
self.model = OCDnetModel(config)
checkpoint['state_dict'] = {key.replace("model.", ""): value for key, value in checkpoint['state_dict'].items()}
self.model.model.load_state_dict(checkpoint['state_dict'])
self.model.to(self.device)
self.model.eval()
self.is_trt = False
def predict(self, img_path: str, image_size, is_output_polygon=False):
"""Run prediction."""
assert os.path.exists(img_path), 'file does not exist'
ori_img = cv2.imread(img_path, 1 if self.img_mode != 'GRAY' else 0).astype(np.float32)
if self.img_mode == 'RGB':
ori_img = cv2.cvtColor(ori_img, cv2.COLOR_BGR2RGB)
h, w = ori_img.shape[:2]
ori_img = resize_image(ori_img, image_size)
rgb_mean = np.array([122.67891434, 116.66876762, 104.00698793])
image = ori_img
image -= rgb_mean
image /= 255.
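# The mean values above are on the 0-255 scale, so mean subtraction is applied before the
# division by 255.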
tensor = torch.from_numpy(image).permute(2, 0, 1).float()
# add batch dimension: (img_channel, h, w) -> (1, img_channel, h, w)
tensor = tensor.unsqueeze_(0)
tensor = tensor.to(self.device)
batch = {'img': torch.Tensor(1, 3, h, w)}
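# Placeholder batch: only the original (pre-resize) height/width are meaningful here; the
# post-processor presumably uses that shape to rescale detected boxes back to the source image.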
with torch.no_grad():
if str(self.device).__contains__('cuda'):
torch.cuda.synchronize(self.device)
start = time.time()
if self.is_trt:
tensor_np = tensor.detach().cpu().numpy()
preds = torch.from_numpy(
self.model.predict({"input": tensor_np})["pred"]
).cuda()
else:
preds = self.model(tensor)
if str(self.device).__contains__('cuda'):
torch.cuda.synchronize(self.device)
box_list, score_list = self.post_process(batch, preds, is_output_polygon=is_output_polygon)
box_list, score_list = box_list[0], score_list[0]
if len(box_list) > 0:
if is_output_polygon:
idx = [x.sum() > 0 for x in box_list]
box_list = [box_list[i] for i, v in enumerate(idx) if v]
score_list = [score_list[i] for i, v in enumerate(idx) if v]
else:
idx = box_list.reshape(box_list.shape[0], -1).sum(axis=1) > 0 # filter out boxes whose coordinates are all 0
box_list, score_list = box_list[idx], score_list[idx]
else:
box_list, score_list = [], []
t = time.time() - start
return preds[0, 0, :, :].detach().cpu().numpy(), box_list, score_list, t
def run_experiment(experiment_config, model_path, post_p_thre, input_folder,
width, height, polygon, show):
"""Run experiment."""
gpu_id = experiment_config.inference.gpu_id
torch.cuda.set_device(gpu_id)
if experiment_config.inference.results_dir is not None:
results_dir = experiment_config.inference.results_dir
else:
results_dir = os.path.join(experiment_config.results_dir, "inference")
experiment_config.inference.results_dir = results_dir
os.makedirs(results_dir, exist_ok=True)
experiment_config = OmegaConf.to_container(experiment_config)
experiment_config['model']['pretrained'] = False
# Set status logging
status_file = os.path.join(results_dir, "status.json")
status_logging.set_status_logger(
status_logging.StatusLogger(
filename=status_file,
append=True
)
)
status_logging.get_status_logger().write(
status_level=status_logging.Status.STARTED,
message="Starting OCDNet inference"
)
# Init the network
infer_model = Inference(
model_path,
experiment_config,
post_p_thre,
gpu_id=0
)
for img_path in tqdm(get_file_list(input_folder, p_postfix=['.jpg', '.png', '.jpeg', '.JPG', '.PNG', '.JPEG', '.bmp'])):
preds, boxes_list, score_list, _ = infer_model.predict(
img_path,
(width, height),
is_output_polygon=polygon
)
im = cv2.imread(img_path)
img = draw_bbox(im[:, :, ::-1], boxes_list)
if show:
show_img(preds)
show_img(img, title=os.path.basename(img_path))
plt.show()
# save result
img_path = pathlib.Path(img_path)
output_path = os.path.join(results_dir, img_path.stem + '_result.jpg')
pred_path = os.path.join(results_dir, img_path.stem + '_pred.jpg')
cv2.imwrite(output_path, img[:, :, ::-1])
cv2.imwrite(pred_path, preds * 255)
save_result(output_path.replace('_result.jpg', '.txt'), boxes_list, score_list, polygon)
pyc_ctx.pop()
spec_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
@hydra_runner(
config_path=os.path.join(spec_root, "experiment_specs"), config_name="inference", schema=ExperimentConfig
)
def main(cfg: ExperimentConfig) -> None:
"""Run the inference process."""
# Obfuscate logs.
obfuscate_logs(cfg)
pyc_ctx.push()
try:
run_experiment(experiment_config=cfg,
model_path=cfg.inference.checkpoint,
post_p_thre=cfg.inference.post_processing.args.box_thresh,
input_folder=cfg.inference.input_folder,
width=cfg.inference.width,
height=cfg.inference.height,
polygon=cfg.inference.polygon,
show=cfg.inference.show
)
status_logging.get_status_logger().write(
status_level=status_logging.Status.SUCCESS,
message="Inference finished successfully."
)
except (KeyboardInterrupt, SystemExit):
status_logging.get_status_logger().write(
message="Inference was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
except Exception as e:
status_logging.get_status_logger().write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
raise e
if __name__ == "__main__":
main()
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/ocdnet/scripts/inference.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# **************************************************************************
# Modified from github (https://github.com/WenmuZhou/DBNet.pytorch)
# Copyright (c) WenmuZhou
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# https://github.com/WenmuZhou/DBNet.pytorch/blob/master/LICENSE.md
# **************************************************************************
"""Evaluate a trained ocdnet model."""
import os
import time
import torch
from tqdm import tqdm
from omegaconf import OmegaConf
import nvidia_tao_pytorch.core.loggers.api_logging as status_logging
from nvidia_tao_pytorch.cv.ocdnet.config.default_config import ExperimentConfig
from nvidia_tao_pytorch.cv.ocdnet.data_loader.build_dataloader import get_dataloader
from nvidia_tao_pytorch.cv.ocdnet.post_processing.seg_detector_representer import get_post_processing
from nvidia_tao_pytorch.cv.ocdnet.utils.ocr_metric.icdar2015.quad_metric import get_metric
from nvidia_tao_pytorch.core.hydra.hydra_runner import hydra_runner
from nvidia_tao_pytorch.core.tlt_logging import obfuscate_logs
from nvidia_tao_pytorch.cv.ocdnet.model.pl_ocd_model import OCDnetModel
import pycuda
import pycuda.autoinit
pyc_dev = pycuda.autoinit.device
pyc_ctx = pyc_dev.retain_primary_context()
class Evaluate():
"""Eval class."""
def __init__(self, model_path, config_file, gpu_id=0):
"""Initialize."""
config = config_file
config['model']['pretrained'] = False
config["dataset"]["train_dataset"] = config["dataset"]["validate_dataset"]
self.validate_loader = get_dataloader(config['dataset']['validate_dataset'], False)
self.post_process = get_post_processing(config['evaluate']['post_processing'])
self.metric_cls = get_metric(config['evaluate']['metric'])
self.box_thresh = config['evaluate']['post_processing']["args"]["box_thresh"]
self.model = None
self.trt_model = None
if model_path.split(".")[-1] in ["trt", "engine"]:
raise Exception("Please use tao_deploy to run evaluation against tensorrt engine.")
else:
self.gpu_id = gpu_id
if self.gpu_id is not None and isinstance(self.gpu_id, int) and torch.cuda.is_available():
self.device = torch.device("cuda:%s" % self.gpu_id)
torch.backends.cudnn.benchmark = True
else:
self.device = torch.device("cpu")
checkpoint = torch.load(model_path, map_location=torch.device('cpu'))
if not isinstance(checkpoint, dict):
self.model = checkpoint
self.model.to(self.device)
else:
if "state_dict" in checkpoint.keys():
checkpoint = checkpoint["state_dict"]
checkpoint = {key.replace("model.", ""): value for key, value in checkpoint.items()}
self.model = OCDnetModel(config)
layers = checkpoint.keys()
ckpt = dict()
# Support loading official pretrained weights for eval
for layer in layers:
new_layer = layer
if new_layer.startswith("model.module."):
new_layer = new_layer[13:]
if new_layer == "decoder.in5.weight":
new_layer = "neck.in5.weight"
elif new_layer == "decoder.in4.weight":
new_layer = "neck.in4.weight"
elif new_layer == "decoder.in3.weight":
new_layer = "neck.in3.weight"
elif new_layer == "decoder.in2.weight":
new_layer = "neck.in2.weight"
elif new_layer == "decoder.out5.0.weight":
new_layer = "neck.out5.0.weight"
elif new_layer == "decoder.out4.0.weight":
new_layer = "neck.out4.0.weight"
elif new_layer == "decoder.out3.0.weight":
new_layer = "neck.out3.0.weight"
elif new_layer == "decoder.out2.weight":
new_layer = "neck.out2.weight"
elif new_layer == "decoder.binarize.0.weight":
new_layer = "head.binarize.0.weight"
elif new_layer == "decoder.binarize.1.weight":
new_layer = "head.binarize.1.weight"
elif new_layer == "decoder.binarize.1.bias":
new_layer = "head.binarize.1.bias"
elif new_layer == "decoder.binarize.1.running_mean":
new_layer = "head.binarize.1.running_mean"
elif new_layer == "decoder.binarize.1.running_var":
new_layer = "head.binarize.1.running_var"
elif new_layer == "decoder.binarize.3.weight":
new_layer = "head.binarize.3.weight"
elif new_layer == "decoder.binarize.3.bias":
new_layer = "head.binarize.3.bias"
elif new_layer == "decoder.binarize.4.weight":
new_layer = "head.binarize.4.weight"
elif new_layer == "decoder.binarize.4.bias":
new_layer = "head.binarize.4.bias"
elif new_layer == "decoder.binarize.4.running_mean":
new_layer = "head.binarize.4.running_mean"
elif new_layer == "decoder.binarize.4.running_var":
new_layer = "head.binarize.4.running_var"
elif new_layer == "decoder.binarize.6.weight":
new_layer = "head.binarize.6.weight"
elif new_layer == "decoder.binarize.6.bias":
new_layer = "head.binarize.6.bias"
elif new_layer == "decoder.thresh.0.weight":
new_layer = "head.thresh.0.weight"
elif new_layer == "decoder.thresh.1.weight":
new_layer = "head.thresh.1.weight"
elif new_layer == "decoder.thresh.1.bias":
new_layer = "head.thresh.1.bias"
elif new_layer == "decoder.thresh.1.running_mean":
new_layer = "head.thresh.1.running_mean"
elif new_layer == "decoder.thresh.1.running_var":
new_layer = "head.thresh.1.running_var"
elif new_layer == "decoder.thresh.3.weight":
new_layer = "head.thresh.3.weight"
elif new_layer == "decoder.thresh.3.bias":
new_layer = "head.thresh.3.bias"
elif new_layer == "decoder.thresh.4.weight":
new_layer = "head.thresh.4.weight"
elif new_layer == "decoder.thresh.4.bias":
new_layer = "head.thresh.4.bias"
elif new_layer == "decoder.thresh.4.running_mean":
new_layer = "head.thresh.4.running_mean"
elif new_layer == "decoder.thresh.4.running_var":
new_layer = "head.thresh.4.running_var"
elif new_layer == "decoder.thresh.6.weight":
new_layer = "head.thresh.6.weight"
elif new_layer == "decoder.thresh.6.bias":
new_layer = "head.thresh.6.bias"
elif "num_batches_tracked" in new_layer:
continue
elif "backbone.fc" in new_layer:
continue
elif "backbone.smooth" in new_layer:
continue
ckpt[new_layer] = checkpoint[layer]
self.model.model.load_state_dict(ckpt)
self.model.to(self.device)
def eval(self):
"""eval function."""
if self.model is not None:
self.model.eval()
# torch.cuda.empty_cache() # speed up evaluating after training finished
raw_metrics = []
total_frame = 0.0
total_time = 0.0
for _, batch in tqdm(enumerate(self.validate_loader), total=len(self.validate_loader), desc='test model'):
with torch.no_grad():
if self.model is not None:
for key, value in batch.items():
if value is not None:
if isinstance(value, torch.Tensor):
batch[key] = value.to(self.device)
start = time.time()
if self.model is not None:
preds = self.model(batch['img'])
else:
img = batch["img"].detach().cpu().numpy()
start = time.time()
preds = torch.from_numpy(
self.trt_model.predict({"input": img})["pred"]
).cuda()
boxes, scores = self.post_process(batch, preds, is_output_polygon=self.metric_cls.is_output_polygon)
total_frame += batch['img'].size()[0]
total_time += time.time() - start
raw_metric = self.metric_cls.validate_measure(batch, (boxes, scores), box_thresh=self.box_thresh)
raw_metrics.append(raw_metric)
metrics = self.metric_cls.gather_measure(raw_metrics)
print('FPS:{}'.format(total_frame / total_time))
return metrics['recall'].avg, metrics['precision'].avg, metrics['hmean'].avg
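# eval() returns (recall, precision, hmean); run_experiment below prints and logs them under
# the corresponding names.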
def run_experiment(experiment_config, model_path):
"""Run experiment."""
gpu_id = experiment_config.evaluate.gpu_id
torch.cuda.set_device(gpu_id)
if experiment_config.evaluate.results_dir is not None:
results_dir = experiment_config.evaluate.results_dir
else:
results_dir = os.path.join(experiment_config.results_dir, "evaluate")
experiment_config.evaluate.results_dir = results_dir
os.makedirs(results_dir, exist_ok=True)
experiment_config = OmegaConf.to_container(experiment_config)
# Set status logging
status_file = os.path.join(results_dir, "status.json")
status_logging.set_status_logger(
status_logging.StatusLogger(
filename=status_file,
append=True
)
)
status_logging.get_status_logger().write(
status_level=status_logging.Status.STARTED,
message="Starting OCDNet evaluation"
)
evaluation = Evaluate(model_path, experiment_config)
result = evaluation.eval()
print("Precision: ", result[1])
print("Recall: ", result[0])
print("Hmean: ", result[2])
status_logging_dict = {}
status_logging_dict["Recall"] = str(result[0])
status_logging_dict["Precision"] = str(result[1])
status_logging_dict["Hmean"] = str(result[2])
status_logging.get_status_logger().kpi = status_logging_dict
pyc_ctx.pop()
spec_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
@hydra_runner(
config_path=os.path.join(spec_root, "experiment_specs"), config_name="evaluate", schema=ExperimentConfig
)
def main(cfg: ExperimentConfig) -> None:
"""Run the evaluation process."""
# Obfuscate logs.
obfuscate_logs(cfg)
pyc_ctx.push()
try:
run_experiment(experiment_config=cfg,
model_path=cfg.evaluate.checkpoint)
status_logging.get_status_logger().write(
status_level=status_logging.Status.SUCCESS,
message="Evaluation finished successfully."
)
except (KeyboardInterrupt, SystemExit):
status_logging.get_status_logger().write(
message="Evaluation was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
except Exception as e:
status_logging.get_status_logger().write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
raise e
if __name__ == "__main__":
main()
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/ocdnet/scripts/evaluate.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""'Entry point' script running subtasks related to ocdnet."""
import os
import argparse
import subprocess # nosec B404
import sys
import nvidia_tao_pytorch.cv.ocdnet.scripts as scripts
from nvidia_tao_pytorch.core.entrypoint import get_subtasks
def launch(parser, subtasks):
"""CLI function that executes subtasks.
Args:
parser: Created parser object for a given task.
subtasks: list of subtasks for a given task.
"""
# Subtasks for a given model.
parser.add_argument(
'subtask', default='train', choices=subtasks.keys(), help="Subtask for a given task/model.",
)
# Add standard TLT arguments.
parser.add_argument(
"-r",
"--results_dir",
help="Path to a folder where the experiment outputs should be written.",
default=None,
required=False,
)
parser.add_argument("-e", "--experiment_spec_file", help="Path to the experiment spec file.", default=None)
parser.add_argument("--gpus", "-g", type=int, default=1, help="Number of GPUs")
# Parse the arguments.
args, unknown_args = parser.parse_known_args()
script_args = ""
# Process spec file for all commands except the one for getting spec files ;)
# Make sure the user provides spec file.
if args.experiment_spec_file is None:
print("ERROR: The subtask `{}` requires the following argument: -e/--experiment_spec_file".format(args.subtask))
exit(1)
# Make sure the file exists!
if not os.path.exists(args.experiment_spec_file):
print("ERROR: The indicated experiment spec file `{}` doesn't exist!".format(args.experiment_spec_file))
exit(1)
# Split spec file_path into config path and config name.
path, name = os.path.split(args.experiment_spec_file)
if path != '':
script_args += " --config-path " + os.path.realpath(path)
script_args += " --config-name " + name
# And add other params AFTERWARDS!
if args.subtask in ["train"]:
if args.results_dir:
script_args += " train.results_dir=" + args.results_dir
if args.gpus > 1:
if args.subtask != "train":
raise ValueError("Only train task support multi-gpu")
# Find relevant module and pass args.
script = subtasks[args.subtask]["runner_path"]
# Pass unknown args to call
unknown_args_as_str = " ".join(unknown_args)
# Create a system call.
call = "python " + script + script_args + " " + unknown_args_as_str
try:
# Run the script.
subprocess.check_call(call, shell=True, stdout=sys.stdout, stderr=sys.stdout) # nosec B602
except subprocess.CalledProcessError as e:
if e.output is not None:
print(e.output)
exit(1)
def main():
"""Main entrypoint wrapper."""
# Create parser for a given task.
parser = argparse.ArgumentParser(
"ocdnet", add_help=True, description="TAO Toolkit OCD"
)
# Build list of subtasks by inspecting the package.
subtasks = get_subtasks(scripts)
# Parse the arguments and launch the subtask.
launch(parser, subtasks)
if __name__ == '__main__':
main()
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/ocdnet/entrypoint/ocdnet.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Entrypoint script for the ocdnet task."""
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/ocdnet/entrypoint/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Init."""
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/ocdnet/model/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# **************************************************************************
# Modified from github (https://github.com/WenmuZhou/DBNet.pytorch)
# Copyright (c) WenmuZhou
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# https://github.com/WenmuZhou/DBNet.pytorch/blob/master/LICENSE.md
# **************************************************************************
"""model init."""
import copy
from torch import nn
# pylint: disable=W0401,W0611,W0614
# flake8: noqa: F401, F403
from nvidia_tao_pytorch.cv.ocdnet.model.head.conv_head import ConvHead
from nvidia_tao_pytorch.cv.ocdnet.model.head.db_head import DBHead
from nvidia_tao_pytorch.cv.ocdnet.model.losses.DB_loss import DBLoss
from nvidia_tao_pytorch.cv.ocdnet.model.neck.FPN import FPN
from nvidia_tao_pytorch.cv.ocdnet.model.backbone.resnet import *
from nvidia_tao_pytorch.cv.ocdnet.model.backbone.resnest import *
from nvidia_tao_pytorch.cv.ocdnet.model.backbone.shufflenetv2 import *
from nvidia_tao_pytorch.cv.ocdnet.model.backbone.mobilenet_v3 import MobileNetV3
__all__ = ['build_head', 'build_loss', 'build_neck', 'build_backbone']
support_head = ['ConvHead', 'DBHead']
support_loss = ['DBLoss']
support_neck = ['FPN']
support_backbone = ['resnet18', 'deformable_resnet18', 'deformable_resnet50',
'resnet50', 'resnet34', 'resnet101', 'resnet152',
'resnest50', 'resnest101', 'resnest200', 'resnest269',
'shufflenet_v2_x0_5', 'shufflenet_v2_x1_0', 'shufflenet_v2_x1_5', 'shufflenet_v2_x2_0',
'MobileNetV3']
def build_head(head_name, **kwargs):
"""Build head."""
    assert head_name in support_head, f'Supported heads are {support_head}'
head = globals()[head_name](**kwargs)
return head
def build_loss(config):
"""Build loss."""
copy_config = copy.deepcopy(config)
loss_type = copy_config.pop('type')
    assert loss_type in support_loss, f'Supported losses are {support_loss}'
criterion = globals()[loss_type](**copy_config)
return criterion
def build_neck(neck_name, **kwargs):
"""Build neck."""
    assert neck_name in support_neck, f'Supported necks are {support_neck}'
neck = globals()[neck_name](**kwargs)
return neck
def build_backbone(backbone_name, **kwargs):
"""Build backbone."""
    assert backbone_name in support_backbone, f'Supported backbones are {support_backbone}'
backbone = globals()[backbone_name](**kwargs)
return backbone
class Model(nn.Module):
"""Model class."""
def __init__(self, model_config: dict):
"""Construct Model."""
super().__init__()
backbone_type = model_config["backbone"]
neck_type = model_config['neck']
head_type = model_config['head']
dict_backbone = {"pretrained": model_config['pretrained'], "in_channels": model_config['in_channels']}
dict_neck = {"inner_channels": model_config['inner_channels']}
dict_head = {"out_channels": model_config['out_channels'], "k": model_config['k']}
self.backbone = build_backbone(backbone_type, **dict_backbone)
self.neck = build_neck(neck_type, in_channels=self.backbone.out_channels, **dict_neck)
self.head = build_head(head_type, in_channels=self.neck.out_channels, **dict_head)
self.name = f'{backbone_type}_{neck_type}_{head_type}'
def forward(self, x):
"""Forward."""
backbone_out = self.backbone(x)
neck_out = self.neck(backbone_out)
y = self.head(neck_out)
return y
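# A minimal usage sketch (not part of the original file). It shows how a Model can be
# assembled from a config dict containing the keys read in Model.__init__. The concrete
# values below are illustrative assumptions, not defaults shipped with the toolkit.
def _example_build_model():
    """Build a small DB-style model from an illustrative config dict and run a dummy image."""
    import torch
    example_config = {
        'backbone': 'resnet18',   # any name listed in support_backbone
        'neck': 'FPN',            # any name listed in support_neck
        'head': 'DBHead',         # any name listed in support_head
        'pretrained': False,
        'in_channels': 3,
        'inner_channels': 256,
        'out_channels': 2,
        'k': 50,
    }
    model = Model(example_config)
    dummy = torch.randn(1, 3, 640, 640)  # the 640x640 input size is only an assumption
    model.eval()
    with torch.no_grad():
        out = model(dummy)
    # In eval mode DBHead returns only the shrink (probability) map at input resolution.
    return out.shape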
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/ocdnet/model/model.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# **************************************************************************
# Modified from github (https://github.com/WenmuZhou/DBNet.pytorch)
# Copyright (c) WenmuZhou
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# https://github.com/WenmuZhou/DBNet.pytorch/blob/master/LICENSE.md
# **************************************************************************
"""Basic module."""
from torch import nn
class ConvBnRelu(nn.Module):
"""ConvBnRelu class."""
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True, padding_mode='zeros', inplace=True):
"""Initialize."""
super().__init__()
self.conv = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation,
groups=groups, bias=bias, padding_mode=padding_mode)
self.bn = nn.BatchNorm2d(out_channels)
self.relu = nn.ReLU(inplace=inplace)
def forward(self, x):
"""Forward."""
x = self.conv(x)
x = self.bn(x)
x = self.relu(x)
return x
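# A small usage sketch (not part of the original file): ConvBnRelu fuses
# Conv2d -> BatchNorm2d -> ReLU. The shapes below are illustrative assumptions.
def _example_conv_bn_relu():
    """Run a ConvBnRelu block on a dummy tensor and return the output shape."""
    import torch
    block = ConvBnRelu(in_channels=3, out_channels=16, kernel_size=3, stride=2, padding=1)
    x = torch.randn(2, 3, 64, 64)
    y = block(x)  # stride 2 halves the spatial size -> (2, 16, 32, 32)
    return y.shape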
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/ocdnet/model/basic.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The top model builder interface."""
from nvidia_tao_pytorch.cv.ocdnet.utils.util import load_checkpoint
from nvidia_tao_pytorch.cv.ocdnet.model.model import Model
def build_ocd_model(experiment_config,
export=False):
"""Build ocdnet model according to config
Args:
experiment_config (dict): Configuration File.
export (bool): Whether to build the model that can be exported to ONNX format. Defaults to False.
"""
model_config = experiment_config["model"]
load_pruned_graph = model_config['load_pruned_graph']
if load_pruned_graph:
assert model_config['pruned_graph_path'], (
"The load_pruned_graph is set to True. But the pruned_graph_path is not available. "
"Please set the pruned_graph_path in the spec file."
"If you are resuming training, please set resume_training_checkpoint_path as well.")
pruned_graph_path = model_config['pruned_graph_path']
model = load_checkpoint(pruned_graph_path)
else:
model_config['pruned_graph_path'] = None
model = Model(model_config)
# Load pretrained weights or resume model
if experiment_config['train']['resume_training_checkpoint_path']:
assert (experiment_config['train']['resume_training_checkpoint_path']).endswith(".pth"), (
"Will resume training. Please set the file path in 'resume_training_checkpoint_path' for resuming training."
" If not resume training, please set resume_training_checkpoint_path:None")
finetune = False
elif model_config['pretrained_model_path']:
model_path = model_config['pretrained_model_path']
print(f'loading pretrained model from {model_path}')
finetune = True
else:
finetune = False
if finetune:
ckpt = load_checkpoint(model_path)
if not isinstance(ckpt, Model):
ckpt["state_dict"] = {key.replace("model.", ""): value for key, value in ckpt["state_dict"].items()}
state_dict = ckpt["state_dict"]
model.load_state_dict(state_dict, strict=False)
else:
state_dict = ckpt.state_dict()
model.load_state_dict(state_dict, strict=False)
return model
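# A minimal configuration sketch (not part of the original file). It lists the subset of
# the experiment config that build_ocd_model reads; every value here is an illustrative
# assumption rather than a shipped default.
def _example_experiment_config():
    """Return an illustrative experiment config dict accepted by build_ocd_model."""
    return {
        'model': {
            'load_pruned_graph': False,      # build the full graph instead of a pruned one
            'pruned_graph_path': None,
            'pretrained_model_path': None,   # set a .pth path here to fine-tune
            'backbone': 'resnet18',
            'neck': 'FPN',
            'head': 'DBHead',
            'pretrained': False,
            'in_channels': 3,
            'inner_channels': 256,
            'out_channels': 2,
            'k': 50,
        },
        'train': {
            'resume_training_checkpoint_path': None,  # set a .pth path to resume training
        },
    }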
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/ocdnet/model/build_nn_model.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Main PTL model file for OCDnet."""
import torch
import os
import shutil
import pytorch_lightning as pl
from nvidia_tao_pytorch.cv.deformable_detr.utils.misc import all_gather
from nvidia_tao_pytorch.cv.ocdnet.model.build_nn_model import build_ocd_model
from nvidia_tao_pytorch.cv.ocdnet.data_loader.build_dataloader import get_dataloader
from nvidia_tao_pytorch.cv.ocdnet.lr_schedulers.schedulers import WarmupPolyLR
from nvidia_tao_pytorch.cv.ocdnet.model.model import build_loss
from nvidia_tao_pytorch.cv.ocdnet.post_processing.seg_detector_representer import get_post_processing
from nvidia_tao_pytorch.cv.ocdnet.utils.ocr_metric.icdar2015.quad_metric import get_metric
import nvidia_tao_pytorch.core.loggers.api_logging as status_logging
from nvidia_tao_pytorch.cv.ocdnet.utils.util import create_logger
# pylint:disable=too-many-ancestors
class OCDnetModel(pl.LightningModule):
"""PTL module for OCDnet model."""
def __init__(self, experiment_spec, export=False):
"""Init training for OCDnet model.
Args:
experiment_spec (dict): The experiment specification.
export (bool, optional): Whether to build the model that can be exported to ONNX format. Defaults to False
"""
super().__init__()
self.experiment_config = experiment_spec
self.train_dataset_config = experiment_spec["dataset"]["train_dataset"]
self.validate_dataset_config = experiment_spec["dataset"]["validate_dataset"]
self.model_config = experiment_spec["model"]
self.train_config = experiment_spec["train"]
self.epochs = self.train_config["num_epochs"]
self.post_process = get_post_processing(self.train_config['post_processing'])
self.box_thresh = self.train_config['post_processing']["args"]["box_thresh"]
self.checkpoint_dir = self.experiment_config["train"]["results_dir"]
self.metrics = {'recall': 0, 'precision': 0, 'hmean': 0, 'train_loss': float('inf'), 'best_model_epoch': 0}
self.train_loss = 0.0
# init the model
self._build_model(experiment_spec, export)
self.name = self.model.name
if torch.cuda.device_count() > 1:
self.experiment_config['distributed'] = True
else:
self.experiment_config['distributed'] = False
self.train_loader = get_dataloader(self.experiment_config["dataset"]['train_dataset'], self.experiment_config['distributed'])
assert self.train_loader is not None, "Train loader does not exist."
if 'validate_dataset' in self.experiment_config["dataset"]:
self.validate_loader = get_dataloader(self.experiment_config["dataset"]['validate_dataset'], False)
else:
self.validate_loader = None
self.train_loader_len = len(self.train_loader)
self.console_logger = create_logger()
self.status_logging_dict = {}
def _build_model(self, experiment_spec, export):
"""Internal function to build the model.
        This method constructs the model from the experiment specification and the export flag and stores it in self.model.
Args:
experiment_spec (dict): The experiment specification.
export (bool): Whether to build the model that can be exported to ONNX format.
"""
self.model = build_ocd_model(experiment_config=experiment_spec,
export=export)
def forward(self, x):
"""Forward of the ocdnet model."""
output = self.model(x)
return output
def training_step(self, batch, batch_idx):
"""Training step.
Args:
batch (Tensor): Batch of data.
batch_idx (int): Index of batch.
Returns:
loss (float): Loss value for each step in training.
"""
self.train_loss = 0.
preds = self.model(batch['img'])
self.criterion = build_loss(self.experiment_config['train']['loss']).cuda()
loss_dict = self.criterion(preds, batch)
loss = loss_dict['loss']
self.train_loss += loss
return loss
def train_dataloader(self):
"""Build the dataloader for training.
Returns:
            train_loader (Dataloader): Training data.
"""
return self.train_loader
def val_dataloader(self):
"""Build the dataloader for validation.
Returns:
val_loader (Dataloader): Validation Data.
"""
return self.validate_loader
def configure_optimizers(self):
"""Configure optimizers for training"""
optim_dict = {}
self.warmup_epochs = self.experiment_config['train']['lr_scheduler']['args']['warmup_epoch']
self.warmup_iters = self.warmup_epochs * self.train_loader_len
self.optimizer = self._initialize('optimizer', torch.optim, self.model.parameters())
self.scheduler = WarmupPolyLR(self.optimizer, max_iters=self.epochs * self.train_loader_len,
warmup_iters=self.warmup_iters, warmup_epochs=self.warmup_epochs, epochs=self.epochs,
**self.experiment_config['train']['lr_scheduler']['args'])
optim_dict["optimizer"] = self.optimizer
optim_dict["lr_scheduler"] = self.scheduler
return optim_dict
def on_train_epoch_start(self):
"""Perform on start of every epoch."""
print('\n')
def on_validation_epoch_start(self):
"""Perform on validation."""
self.raw_metrics = []
def validation_step(self, batch, batch_idx):
"""Validation step."""
preds = self.model(batch['img'])
self.metric_cls = get_metric(self.experiment_config['train']['metric'])
boxes, scores = self.post_process(batch, preds, is_output_polygon=self.metric_cls.is_output_polygon)
raw_metric = self.metric_cls.validate_measure(batch, (boxes, scores), box_thresh=self.box_thresh)
return raw_metric
def validation_epoch_end(self, raw_metric):
"""Validation step end."""
print('\n')
self.raw_metrics = []
for p in all_gather(raw_metric):
self.raw_metrics.extend(p)
metrics = self.metric_cls.gather_measure(self.raw_metrics)
self.log("recall", metrics['recall'].avg, on_step=False, on_epoch=True, prog_bar=True, rank_zero_only=True)
self.log("precision", metrics['precision'].avg, on_step=False, on_epoch=True, prog_bar=True, rank_zero_only=True)
self.log("hmean", metrics['hmean'].avg, on_step=False, on_epoch=True, prog_bar=True, rank_zero_only=True)
try:
os.makedirs(self.checkpoint_dir)
except OSError:
pass
net_save_path = '{}/model_latest.pth'.format(self.checkpoint_dir)
net_save_path_best = '{}/model_best.pth'.format(self.checkpoint_dir)
self._save_checkpoint(self.current_epoch, net_save_path)
save_best = False
if self.validate_loader is not None and self.metric_cls is not None:
recall = metrics['recall'].avg
precision = metrics['precision'].avg
hmean = metrics['hmean'].avg
if hmean >= self.metrics['hmean']:
save_best = True
self.metrics['train_loss'] = self.train_loss / self.train_loader_len
self.metrics['hmean'] = hmean
self.metrics['precision'] = precision
self.metrics['recall'] = recall
self.metrics['best_model_epoch'] = self.current_epoch
else:
if (self.train_loss / self.train_loader_len) <= self.metrics['train_loss']:
save_best = True
self.metrics['train_loss'] = self.train_loss / self.train_loader_len
self.metrics['best_model_epoch'] = self.current_epoch
best_str = 'current best, '
for k, v in self.metrics.items():
best_str += '{}: {:.6f}, '.format(k, v)
self.print(best_str)
if save_best:
shutil.copy(net_save_path, net_save_path_best)
self.print("Saving current best: {}".format(net_save_path_best))
else:
self.print("Saving checkpoint: {}".format(net_save_path))
if self.trainer.is_global_zero:
self.console_logger.info('**********************Start logging Evaluation Results **********************')
self.console_logger.info('current_epoch : {}'.format(self.current_epoch))
self.console_logger.info('lr : {:.9f}'.format(*self.scheduler.get_lr()))
self.console_logger.info('recall : {:2.5f}'.format(recall))
self.console_logger.info('precision : {:2.5f}'.format(precision))
self.console_logger.info('hmean : {:2.5f}'.format(hmean))
self.status_logging_dict["recall"] = str(recall)
self.status_logging_dict["precision"] = str(precision)
self.status_logging_dict["hmean"] = str(hmean)
status_logging.get_status_logger().kpi = self.status_logging_dict
status_logging.get_status_logger().write(
message="Evaluation metrics generated.",
status_level=status_logging.Status.RUNNING
)
def _initialize(self, name, module, *args, **kwargs):
module_name = self.experiment_config['train'][name]['type']
module_args = self.experiment_config['train'][name]['args']
assert all([k not in module_args for k in kwargs]), 'Overwriting kwargs given in config file is not allowed'
module_args.update(kwargs)
return getattr(module, module_name)(*args, **module_args)
def _save_checkpoint(self, epoch, file_name):
"""Saving checkpoints
Args:
epoch: Current epoch number
log: The logging information of the epoch
save_best: If True, rename the saved checkpoint with 'model_best' prefix
"""
state_dict = self.model.state_dict()
state = {
'epoch': epoch,
'global_step': self.global_step,
'state_dict': state_dict,
'optimizer': self.optimizer.state_dict(),
'scheduler': self.scheduler.state_dict(),
'config': self.experiment_config,
'metrics': self.metrics
}
torch.save(state, file_name)
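# A minimal sketch (not part of the original file) of how a checkpoint written by
# _save_checkpoint could be restored into a bare Model. The checkpoint path is an
# illustrative assumption, and the sketch assumes the stored config is a plain dict.
def _example_restore_from_checkpoint(checkpoint_path="/results/model_latest.pth"):
    """Load the 'state_dict' entry of a saved checkpoint into a freshly built Model."""
    import torch
    from nvidia_tao_pytorch.cv.ocdnet.model.model import Model
    state = torch.load(checkpoint_path, map_location="cpu")
    model = Model(state["config"]["model"])   # the experiment config is stored in the checkpoint
    model.load_state_dict(state["state_dict"])
    return model, state["epoch"], state["metrics"]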
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/ocdnet/model/pl_ocd_model.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# **************************************************************************
# Modified from github (https://github.com/WenmuZhou/DBNet.pytorch)
# Copyright (c) WenmuZhou
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# https://github.com/WenmuZhou/DBNet.pytorch/blob/master/LICENSE.md
# **************************************************************************
"""Basic loss module."""
import torch
import torch.nn as nn
class BalanceCrossEntropyLoss(nn.Module):
"""Balanced cross entropy loss.
This loss measures the Binary Cross Entropy between the target and the input probabilities.
"""
def __init__(self, negative_ratio=3.0, eps=1e-6):
"""Initialize."""
super(BalanceCrossEntropyLoss, self).__init__()
self.negative_ratio = negative_ratio
self.eps = eps
def forward(self,
pred: torch.Tensor,
gt: torch.Tensor,
mask: torch.Tensor,
return_origin=False):
"""Forward.
Args:
pred: Prediction of network. The shape is (N, 1, H, W).
            gt: Ground truth. The shape is (N, 1, H, W).
mask: The mask indicates positive regions. The shape is (N, H, W).
"""
positive = (gt * mask).byte()
negative = ((1 - gt) * mask).byte()
positive_count = int(positive.float().sum())
negative_count = min(int(negative.float().sum()), int(positive_count * self.negative_ratio))
loss = nn.functional.binary_cross_entropy(pred, gt, reduction='none')
positive_loss = loss * positive.float()
negative_loss = loss * negative.float()
negative_loss, _ = negative_loss.view(-1).topk(negative_count)
balance_loss = (positive_loss.sum() + negative_loss.sum()) / (positive_count + negative_count + self.eps)
if return_origin:
return balance_loss, loss
return balance_loss
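# A minimal sketch (not part of the original file) of the balanced BCE loss on dummy
# tensors. Shapes mirror the (N, H, W) maps that DBLoss passes in; the 3:1 negative
# ratio and map sizes are illustrative assumptions.
def _example_balance_bce():
    """Compute BalanceCrossEntropyLoss for a random prediction/target pair."""
    import torch
    loss_fn = BalanceCrossEntropyLoss(negative_ratio=3.0)
    pred = torch.rand(2, 32, 32)                     # probabilities in [0, 1]
    gt = (torch.rand(2, 32, 32) > 0.7).float()       # sparse positive regions
    mask = torch.ones(2, 32, 32)                     # every pixel contributes
    return loss_fn(pred, gt, mask)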
class DiceLoss(nn.Module):
"""DiceLoss.
This Loss function is from https://arxiv.org/abs/1707.03237.
It is used to calculate the similarity between two samples.
"""
def __init__(self, eps=1e-6):
"""Initialize."""
super(DiceLoss, self).__init__()
self.eps = eps
def forward(self, pred: torch.Tensor, gt, mask, weights=None):
"""Forward.
Args:
            pred: One or two heatmaps of shape (N, 1, H, W);
                if two heatmaps are given, their losses are added together.
gt: (N, 1, H, W)
mask: (N, H, W)
"""
return self._compute(pred, gt, mask, weights)
def _compute(self, pred, gt, mask, weights):
if pred.dim() == 4:
pred = pred[:, 0, :, :]
gt = gt[:, 0, :, :]
assert pred.shape == gt.shape
assert pred.shape == mask.shape
if weights is not None:
assert weights.shape == mask.shape
mask = weights * mask
intersection = (pred * gt * mask).sum()
union = (pred * mask).sum() + (gt * mask).sum() + self.eps
loss = 1 - 2.0 * intersection / union
assert loss <= 1
return loss
class MaskL1Loss(nn.Module):
"""Mask L1 Loss."""
def __init__(self, eps=1e-6):
"""Initialize."""
super(MaskL1Loss, self).__init__()
self.eps = eps
def forward(self, pred: torch.Tensor, gt, mask):
"""Forward."""
loss = (torch.abs(pred - gt) * mask).sum() / (mask.sum() + self.eps)
return loss
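# A minimal sketch (not part of the original file) of the Dice and masked-L1 losses on
# dummy maps. Shapes mirror how DBLoss calls them and are illustrative assumptions.
def _example_dice_and_mask_l1():
    """Return (dice_loss, mask_l1_loss) for random binary/threshold maps."""
    import torch
    dice = DiceLoss()
    mask_l1 = MaskL1Loss()
    pred = torch.rand(2, 32, 32)
    gt = (torch.rand(2, 32, 32) > 0.5).float()
    mask = torch.ones(2, 32, 32)
    return dice(pred, gt, mask), mask_l1(pred, gt, mask)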
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/ocdnet/model/losses/basic_loss.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Init."""
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/ocdnet/model/losses/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# **************************************************************************
# Modified from github (https://github.com/WenmuZhou/DBNet.pytorch)
# Copyright (c) WenmuZhou
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# https://github.com/WenmuZhou/DBNet.pytorch/blob/master/LICENSE.md
# **************************************************************************
"""DB loss module."""
from torch import nn
from nvidia_tao_pytorch.cv.ocdnet.model.losses.basic_loss import BalanceCrossEntropyLoss, MaskL1Loss, DiceLoss
class DBLoss(nn.Module):
"""Differentiable Binarization (DB) Loss."""
def __init__(self, alpha=1.0, beta=10, ohem_ratio=3, reduction='mean', eps=1e-6):
"""Implement loss based on PSE(Progressive Scale Expansion) network.
The total loss contains three parts:
- The loss of approximate binary map. It is calculated by Dice Loss.
- The loss of probability(shrink) map. It is calculated by Balance Cross Entropy Loss.
- The loss of threshold map. It is calculated by MaskL1 Loss.
Args:
alpha: coefficient for shrink_map loss
beta: coefficient for threshold_map loss
ohem_ratio: the ratio of negative samples to positive samples
reduction: calculate loss via 'mean' or 'sum'
"""
super().__init__()
        assert reduction in ['mean', 'sum'], "reduction must be in ['mean', 'sum']"
self.alpha = alpha
self.beta = beta
self.bce_loss = BalanceCrossEntropyLoss(negative_ratio=ohem_ratio)
self.dice_loss = DiceLoss(eps=eps)
self.l1_loss = MaskL1Loss(eps=eps)
self.ohem_ratio = ohem_ratio
self.reduction = reduction
def forward(self, pred, batch):
"""Forward."""
shrink_maps = pred[:, 0, :, :]
threshold_maps = pred[:, 1, :, :]
binary_maps = pred[:, 2, :, :]
loss_shrink_maps = self.bce_loss(shrink_maps, batch['shrink_map'], batch['shrink_mask'])
loss_threshold_maps = self.l1_loss(threshold_maps, batch['threshold_map'], batch['threshold_mask'])
metrics = dict(loss_shrink_maps=loss_shrink_maps, loss_threshold_maps=loss_threshold_maps)
if pred.size()[1] > 2:
loss_binary_maps = self.dice_loss(binary_maps, batch['shrink_map'], batch['shrink_mask'])
metrics['loss_binary_maps'] = loss_binary_maps
loss_all = self.alpha * loss_shrink_maps + self.beta * loss_threshold_maps + loss_binary_maps
metrics['loss'] = loss_all
else:
metrics['loss'] = loss_shrink_maps
return metrics
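# A minimal sketch (not part of the original file) showing how DBLoss combines its three
# terms for a 3-channel prediction (shrink, threshold, binary maps). The map sizes and
# the random batch are illustrative assumptions.
def _example_db_loss():
    """Compute the DB loss metrics dict for a random prediction and batch."""
    import torch
    criterion = DBLoss(alpha=1.0, beta=10)
    pred = torch.rand(2, 3, 32, 32)                  # channels: shrink, threshold, binary
    batch = {
        'shrink_map': (torch.rand(2, 32, 32) > 0.7).float(),
        'shrink_mask': torch.ones(2, 32, 32),
        'threshold_map': torch.rand(2, 32, 32),
        'threshold_mask': torch.ones(2, 32, 32),
    }
    metrics = criterion(pred, batch)
    # metrics contains 'loss', 'loss_shrink_maps', 'loss_threshold_maps', 'loss_binary_maps'.
    return metrics['loss']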
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/ocdnet/model/losses/DB_loss.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Init."""
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/ocdnet/model/head/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# **************************************************************************
# Modified from github (https://github.com/WenmuZhou/DBNet.pytorch)
# Copyright (c) WenmuZhou
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# https://github.com/WenmuZhou/DBNet.pytorch/blob/master/LICENSE.md
# **************************************************************************
"""ConvHead module."""
from torch import nn
class ConvHead(nn.Module):
"""ConvHead class."""
def __init__(self, in_channels, out_channels, **kwargs):
"""Initialize."""
super().__init__()
self.conv = nn.Sequential(
nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=1),
nn.Sigmoid()
)
def forward(self, x):
"""Forward."""
return self.conv(x)
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/ocdnet/model/head/conv_head.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# **************************************************************************
# Modified from github (https://github.com/WenmuZhou/DBNet.pytorch)
# Copyright (c) WenmuZhou
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# https://github.com/WenmuZhou/DBNet.pytorch/blob/master/LICENSE.md
# **************************************************************************
"""DBHead module."""
import torch
from torch import nn
class DBHead(nn.Module):
"""DBHead class."""
def __init__(self, in_channels, out_channels, k=50):
"""Initialize."""
super().__init__()
self.k = k
self.binarize = nn.Sequential(
nn.Conv2d(in_channels, in_channels // 4, 3, padding=1, bias=False),
nn.BatchNorm2d(in_channels // 4),
nn.ReLU(inplace=True),
nn.ConvTranspose2d(in_channels // 4, in_channels // 4, 2, 2),
nn.BatchNorm2d(in_channels // 4),
nn.ReLU(inplace=True),
nn.ConvTranspose2d(in_channels // 4, 1, 2, 2),
nn.Sigmoid())
self.binarize.apply(self.weights_init)
self.thresh = self._init_thresh(in_channels)
self.thresh.apply(self.weights_init)
def forward(self, x):
"""Forward."""
# Compute the probability(shrink) map
shrink_maps = self.binarize(x)
# If not during training, return shrink map directly
if not self.training:
return shrink_maps
# Compute threshold map
threshold_maps = self.thresh(x)
# Compute approximate binary map
binary_maps = self.step_function(shrink_maps, threshold_maps)
y = torch.cat((shrink_maps, threshold_maps, binary_maps), dim=1)
return y
def weights_init(self, m):
"""Weights init."""
classname = m.__class__.__name__
if classname.find('Conv') != -1:
nn.init.kaiming_normal_(m.weight.data)
elif classname.find('BatchNorm') != -1:
m.weight.data.fill_(1.)
m.bias.data.fill_(1e-4)
def _init_thresh(self, inner_channels, serial=False, smooth=False, bias=False):
in_channels = inner_channels
if serial:
in_channels += 1
self.thresh = nn.Sequential(
nn.Conv2d(in_channels, inner_channels // 4, 3, padding=1, bias=bias),
nn.BatchNorm2d(inner_channels // 4),
nn.ReLU(inplace=True),
self._init_upsample(inner_channels // 4, inner_channels // 4, smooth=smooth, bias=bias),
nn.BatchNorm2d(inner_channels // 4),
nn.ReLU(inplace=True),
self._init_upsample(inner_channels // 4, 1, smooth=smooth, bias=bias),
nn.Sigmoid())
return self.thresh
def _init_upsample(self, in_channels, out_channels, smooth=False, bias=False):
if smooth:
inter_out_channels = out_channels
if out_channels == 1:
inter_out_channels = in_channels
module_list = [
nn.Upsample(scale_factor=2, mode='nearest'),
nn.Conv2d(in_channels, inter_out_channels, 3, 1, 1, bias=bias)]
if out_channels == 1:
module_list.append(nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, padding=1, bias=True))
            return nn.Sequential(*module_list)
return nn.ConvTranspose2d(in_channels, out_channels, 2, 2)
def step_function(self, x, y):
"""Differentiable binarization function."""
return torch.reciprocal(1 + torch.exp(-self.k * (x - y)))
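# A minimal sketch (not part of the original file) of the differentiable binarization
# step in isolation: B = 1 / (1 + exp(-k * (P - T))). The map sizes and k are
# illustrative assumptions.
def _example_step_function():
    """Apply the DB step function to random shrink/threshold maps."""
    import torch
    k = 50
    shrink_map = torch.rand(1, 1, 8, 8)       # probability map P
    threshold_map = torch.rand(1, 1, 8, 8)    # threshold map T
    binary_map = torch.reciprocal(1 + torch.exp(-k * (shrink_map - threshold_map)))
    # Values are close to 1 where P > T and close to 0 where P < T.
    return binary_map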
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/ocdnet/model/head/db_head.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# **************************************************************************
# Modified from github (https://github.com/WenmuZhou/DBNet.pytorch)
# Copyright (c) WenmuZhou
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# https://github.com/WenmuZhou/DBNet.pytorch/blob/master/LICENSE.md
# **************************************************************************
"""FPN module."""
import torch
from torch import nn
class FPN(nn.Module):
"""FPN class."""
def __init__(self, in_channels, inner_channels=256, **kwargs):
"""Initialize Feature Pyramid Network.
https://arxiv.org/abs/1612.03144
Args:
in_channels: The number of input channels.
inner_channels: The number of inner channels.
"""
super().__init__()
self.in5 = nn.Conv2d(in_channels[-1], inner_channels, 1, bias=False)
self.in4 = nn.Conv2d(in_channels[-2], inner_channels, 1, bias=False)
self.in3 = nn.Conv2d(in_channels[-3], inner_channels, 1, bias=False)
self.in2 = nn.Conv2d(in_channels[-4], inner_channels, 1, bias=False)
self.up5 = nn.Upsample(scale_factor=2, mode='nearest')
self.up4 = nn.Upsample(scale_factor=2, mode='nearest')
self.up3 = nn.Upsample(scale_factor=2, mode='nearest')
self.out5 = nn.Sequential(
nn.Conv2d(inner_channels, inner_channels //
4, 3, padding=1, bias=False),
nn.Upsample(scale_factor=8, mode='nearest'))
self.out4 = nn.Sequential(
nn.Conv2d(inner_channels, inner_channels //
4, 3, padding=1, bias=False),
nn.Upsample(scale_factor=4, mode='nearest'))
self.out3 = nn.Sequential(
nn.Conv2d(inner_channels, inner_channels //
4, 3, padding=1, bias=False),
nn.Upsample(scale_factor=2, mode='nearest'))
self.out2 = nn.Conv2d(
inner_channels, inner_channels // 4, 3, padding=1, bias=False)
self.in5.apply(self.weights_init)
self.in4.apply(self.weights_init)
self.in3.apply(self.weights_init)
self.in2.apply(self.weights_init)
self.out5.apply(self.weights_init)
self.out4.apply(self.weights_init)
self.out3.apply(self.weights_init)
self.out2.apply(self.weights_init)
self.out_channels = inner_channels
def weights_init(self, m):
"""Weights init."""
classname = m.__class__.__name__
if classname.find('Conv') != -1:
nn.init.kaiming_normal_(m.weight.data)
elif classname.find('BatchNorm') != -1:
m.weight.data.fill_(1.)
m.bias.data.fill_(1e-4)
def forward(self, x):
"""Forward."""
c2, c3, c4, c5 = x
in5 = self.in5(c5)
in4 = self.in4(c4)
in3 = self.in3(c3)
in2 = self.in2(c2)
out4 = self.up5(in5) + in4 # 1/16
out3 = self.up4(out4) + in3 # 1/8
out2 = self.up3(out3) + in2 # 1/4
p5 = self.out5(in5)
p4 = self.out4(out4)
p3 = self.out3(out3)
p2 = self.out2(out2)
x = torch.cat((p5, p4, p3, p2), 1)
return x
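# A minimal sketch (not part of the original file) of the FPN interface: four feature
# maps at strides 4/8/16/32 go in, one stride-4 map with `inner_channels` channels comes
# out. Channel counts match a ResNet-18 backbone; all sizes are illustrative assumptions.
def _example_fpn_forward():
    """Run the FPN on dummy ResNet-18-like features and return the output shape."""
    import torch
    fpn = FPN(in_channels=[64, 128, 256, 512], inner_channels=256)
    c2 = torch.randn(1, 64, 160, 160)    # stride 4
    c3 = torch.randn(1, 128, 80, 80)     # stride 8
    c4 = torch.randn(1, 256, 40, 40)     # stride 16
    c5 = torch.randn(1, 512, 20, 20)     # stride 32
    out = fpn((c2, c3, c4, c5))          # concatenated p5..p2 -> (1, 256, 160, 160)
    return out.shape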
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/ocdnet/model/neck/FPN.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Init."""
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/ocdnet/model/neck/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# **************************************************************************
# Modified from github (https://github.com/WenmuZhou/DBNet.pytorch)
# Copyright (c) WenmuZhou
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# https://github.com/WenmuZhou/DBNet.pytorch/blob/master/LICENSE.md
# **************************************************************************
"""Shufflenetv2 module."""
import torch
import torch.nn as nn
__all__ = [
'ShuffleNetV2', 'shufflenet_v2_x0_5', 'shufflenet_v2_x1_0',
'shufflenet_v2_x1_5', 'shufflenet_v2_x2_0'
]
def channel_shuffle(x, groups):
    """Shuffle channels across groups so the branches can exchange information."""
    batchsize, num_channels, height, width = x.data.size()
channels_per_group = num_channels // groups
# reshape
x = x.view(batchsize, groups,
channels_per_group, height, width)
x = torch.transpose(x, 1, 2).contiguous()
# flatten
x = x.view(batchsize, -1, height, width)
return x
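# A minimal sketch (not part of the original file) of what channel_shuffle does: with
# 2 groups, channels [0, 1, 2, 3] are reordered to [0, 2, 1, 3], interleaving the two
# branches. The tensor sizes are illustrative.
def _example_channel_shuffle():
    """Shuffle a 4-channel tensor with 2 groups and return the new channel order."""
    import torch
    x = torch.arange(4, dtype=torch.float32).view(1, 4, 1, 1)  # channels 0..3
    shuffled = channel_shuffle(x, groups=2)
    return shuffled.view(-1).tolist()   # -> [0.0, 2.0, 1.0, 3.0]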
class InvertedResidual(nn.Module):
    """Inverted residual block used by ShuffleNetV2."""
    def __init__(self, inp, oup, stride):
        """Initialize."""
        super(InvertedResidual, self).__init__()
if not (1 <= stride <= 3):
raise ValueError('illegal stride value')
self.stride = stride
branch_features = oup // 2
assert (self.stride != 1) or (inp == branch_features << 1)
if self.stride > 1:
self.branch1 = nn.Sequential(
self.depthwise_conv(inp, inp, kernel_size=3, stride=self.stride, padding=1),
nn.BatchNorm2d(inp),
nn.Conv2d(inp, branch_features, kernel_size=1, stride=1, padding=0, bias=False),
nn.BatchNorm2d(branch_features),
nn.ReLU(inplace=True),
)
self.branch2 = nn.Sequential(
nn.Conv2d(inp if (self.stride > 1) else branch_features,
branch_features, kernel_size=1, stride=1, padding=0, bias=False),
nn.BatchNorm2d(branch_features),
nn.ReLU(inplace=True),
self.depthwise_conv(branch_features, branch_features, kernel_size=3, stride=self.stride, padding=1),
nn.BatchNorm2d(branch_features),
nn.Conv2d(branch_features, branch_features, kernel_size=1, stride=1, padding=0, bias=False),
nn.BatchNorm2d(branch_features),
nn.ReLU(inplace=True),
)
@staticmethod
def depthwise_conv(i, o, kernel_size, stride=1, padding=0, bias=False):
return nn.Conv2d(i, o, kernel_size, stride, padding, bias=bias, groups=i)
def forward(self, x):
if self.stride == 1:
x1, x2 = x.chunk(2, dim=1)
out = torch.cat((x1, self.branch2(x2)), dim=1)
else:
out = torch.cat((self.branch1(x), self.branch2(x)), dim=1)
out = channel_shuffle(out, 2)
return out
class ShuffleNetV2(nn.Module):
"""ShuffleNetV2 class."""
def __init__(self, stages_repeats, stages_out_channels, in_channels=3, **kwargs):
"""Initialize."""
super(ShuffleNetV2, self).__init__()
self.out_channels = []
if len(stages_repeats) != 3:
raise ValueError('expected stages_repeats as list of 3 positive ints')
if len(stages_out_channels) != 5:
raise ValueError('expected stages_out_channels as list of 5 positive ints')
self._stage_out_channels = stages_out_channels
output_channels = self._stage_out_channels[0]
self.conv1 = nn.Sequential(
nn.Conv2d(in_channels, output_channels, 3, 2, 1, bias=False),
nn.BatchNorm2d(output_channels),
nn.ReLU(inplace=True),
)
input_channels = output_channels
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.out_channels.append(input_channels)
stage_names = ['stage{}'.format(i) for i in [2, 3, 4]]
for name, repeats, output_channels in zip(
stage_names, stages_repeats, self._stage_out_channels[1:]):
seq = [InvertedResidual(input_channels, output_channels, 2)]
for _ in range(repeats - 1):
seq.append(InvertedResidual(output_channels, output_channels, 1))
setattr(self, name, nn.Sequential(*seq))
input_channels = output_channels
self.out_channels.append(input_channels)
output_channels = self._stage_out_channels[-1]
self.conv5 = nn.Sequential(
nn.Conv2d(input_channels, output_channels, 1, 1, 0, bias=False),
nn.BatchNorm2d(output_channels),
nn.ReLU(inplace=True),
)
def forward(self, x):
"""Forward."""
x = self.conv1(x)
c2 = self.maxpool(x)
c3 = self.stage2(c2)
c4 = self.stage3(c3)
c5 = self.stage4(c4)
# c5 = self.conv5(c5)
return c2, c3, c4, c5
def _shufflenetv2(arch, pretrained, progress, *args, **kwargs):
    """Build a ShuffleNetV2 variant; pre-trained weights are not loaded."""
    model = ShuffleNetV2(*args, **kwargs)
    return model
def shufflenet_v2_x0_5(pretrained=False, progress=True, **kwargs):
"""Constructs a ShuffleNetV2 with 0.5x output channels
As described in `"ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design"
<https://arxiv.org/abs/1807.11164>`.
Args:
        pretrained (bool): Unused; ImageNet pre-trained weights are not loaded. Defaults to False.
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _shufflenetv2('shufflenetv2_x0.5', pretrained, progress,
[4, 8, 4], [24, 48, 96, 192, 1024], **kwargs)
def shufflenet_v2_x1_0(pretrained=False, progress=True, **kwargs):
"""Constructs a ShuffleNetV2 with 1.0x output channels.
As described in `"ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design"
<https://arxiv.org/abs/1807.11164>`.
Args:
        pretrained (bool): Unused; ImageNet pre-trained weights are not loaded. Defaults to False.
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _shufflenetv2('shufflenetv2_x1.0', pretrained, progress,
[4, 8, 4], [24, 116, 232, 464, 1024], **kwargs)
def shufflenet_v2_x1_5(pretrained=False, progress=True, **kwargs):
"""Constructs a ShuffleNetV2 with 1.5x output channels.
As described in `"ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design"
<https://arxiv.org/abs/1807.11164>`.
Args:
        pretrained (bool): Unused; ImageNet pre-trained weights are not loaded. Defaults to False.
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _shufflenetv2('shufflenetv2_x1.5', pretrained, progress,
[4, 8, 4], [24, 176, 352, 704, 1024], **kwargs)
def shufflenet_v2_x2_0(pretrained=False, progress=True, **kwargs):
"""Constructs a ShuffleNetV2 with 2.0x output channels.
As described in `"ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design"
<https://arxiv.org/abs/1807.11164>`.
Args:
        pretrained (bool): Unused; ImageNet pre-trained weights are not loaded. Defaults to False.
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _shufflenetv2('shufflenetv2_x2.0', pretrained, progress,
[4, 8, 4], [24, 244, 488, 976, 2048], **kwargs)
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/ocdnet/model/backbone/shufflenetv2.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Init."""
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/ocdnet/model/backbone/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# **************************************************************************
# Modified from github (https://github.com/WenmuZhou/DBNet.pytorch)
# Copyright (c) WenmuZhou
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# https://github.com/WenmuZhou/DBNet.pytorch/blob/master/LICENSE.md
# **************************************************************************
"""Mobilenet_v3 module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from torch import nn
import torch.nn.functional as F
class HSwish(nn.Module):
"""HSwish class."""
def forward(self, x):
"""Forward."""
out = x * F.relu6(x + 3, inplace=True) / 6
return out
class HardSigmoid(nn.Module):
"""HardSigmoid class."""
def __init__(self, slope=.2, offset=.5):
"""Initialize."""
super().__init__()
self.slope = slope
self.offset = offset
def forward(self, x):
"""Forward."""
x = (self.slope * x) + self.offset
x = F.threshold(-x, -1, -1)
x = F.threshold(-x, 0, 0)
return x
class ConvBNACT(nn.Module):
"""ConvBN activation class"""
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, groups=1, act=None):
"""Initialize."""
super().__init__()
self.conv = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size,
stride=stride, padding=padding, groups=groups,
bias=False)
self.bn = nn.BatchNorm2d(out_channels)
if act == 'relu':
self.act = nn.ReLU()
elif act == 'hard_swish':
self.act = HSwish()
        elif act is None:
            self.act = None
        else:
            raise NotImplementedError("activation layer [{}] is not supported".format(act))
def forward(self, x):
"""Forward."""
x = self.conv(x)
x = self.bn(x)
if self.act is not None:
x = self.act(x)
return x
class SEBlock(nn.Module):
"""SEBlock class."""
def __init__(self, in_channels, out_channels, ratio=4):
"""Initialize."""
super().__init__()
num_mid_filter = out_channels // ratio
self.pool = nn.AdaptiveAvgPool2d(1)
self.conv1 = nn.Conv2d(in_channels=in_channels, out_channels=num_mid_filter, kernel_size=1, bias=True)
self.relu1 = nn.ReLU()
self.conv2 = nn.Conv2d(in_channels=num_mid_filter, kernel_size=1, out_channels=out_channels, bias=True)
self.relu2 = HardSigmoid()
def forward(self, x):
"""Forward."""
attn = self.pool(x)
attn = self.conv1(attn)
attn = self.relu1(attn)
attn = self.conv2(attn)
attn = self.relu2(attn)
return x * attn
class ResidualUnit(nn.Module):
"""Construct Residual unit."""
def __init__(self, num_in_filter, num_mid_filter, num_out_filter, stride, kernel_size, act=None, use_se=False):
"""Initialize."""
super().__init__()
self.conv0 = ConvBNACT(in_channels=num_in_filter, out_channels=num_mid_filter, kernel_size=1, stride=1,
padding=0, act=act)
self.conv1 = ConvBNACT(in_channels=num_mid_filter, out_channels=num_mid_filter, kernel_size=kernel_size,
stride=stride,
padding=int((kernel_size - 1) // 2), act=act, groups=num_mid_filter)
if use_se:
self.se = SEBlock(in_channels=num_mid_filter, out_channels=num_mid_filter)
else:
self.se = None
self.conv2 = ConvBNACT(in_channels=num_mid_filter, out_channels=num_out_filter, kernel_size=1, stride=1,
padding=0)
self.not_add = num_in_filter != num_out_filter or stride != 1
def forward(self, x):
"""Forward."""
y = self.conv0(x)
y = self.conv1(y)
if self.se is not None:
y = self.se(y)
y = self.conv2(y)
if not self.not_add:
y = x + y
return y
class MobileNetV3(nn.Module):
"""MobileNetV3 class."""
def __init__(self, in_channels=3, **kwargs):
"""The Mobilenet_v3 backbone network for detection module.
Args:
params(dict): The super parameters for build network.
"""
super().__init__()
self.scale = kwargs.get('scale', 0.5)
model_name = kwargs.get('model_name', 'large')
self.inplanes = 16
if model_name == "large":
self.cfg = [
# k, exp, c, se, nl, s,
[3, 16, 16, False, 'relu', 1],
[3, 64, 24, False, 'relu', 2],
[3, 72, 24, False, 'relu', 1],
[5, 72, 40, True, 'relu', 2],
[5, 120, 40, True, 'relu', 1],
[5, 120, 40, True, 'relu', 1],
[3, 240, 80, False, 'hard_swish', 2],
[3, 200, 80, False, 'hard_swish', 1],
[3, 184, 80, False, 'hard_swish', 1],
[3, 184, 80, False, 'hard_swish', 1],
[3, 480, 112, True, 'hard_swish', 1],
[3, 672, 112, True, 'hard_swish', 1],
[5, 672, 160, True, 'hard_swish', 2],
[5, 960, 160, True, 'hard_swish', 1],
[5, 960, 160, True, 'hard_swish', 1],
]
self.cls_ch_squeeze = 960
self.cls_ch_expand = 1280
elif model_name == "small":
self.cfg = [
# k, exp, c, se, nl, s,
[3, 16, 16, True, 'relu', 2],
[3, 72, 24, False, 'relu', 2],
[3, 88, 24, False, 'relu', 1],
[5, 96, 40, True, 'hard_swish', 2],
[5, 240, 40, True, 'hard_swish', 1],
[5, 240, 40, True, 'hard_swish', 1],
[5, 120, 48, True, 'hard_swish', 1],
[5, 144, 48, True, 'hard_swish', 1],
[5, 288, 96, True, 'hard_swish', 2],
[5, 576, 96, True, 'hard_swish', 1],
[5, 576, 96, True, 'hard_swish', 1],
]
self.cls_ch_squeeze = 576
self.cls_ch_expand = 1280
else:
raise NotImplementedError("mode[" + model_name +
"_model] is not implemented!")
supported_scale = [0.35, 0.5, 0.75, 1.0, 1.25]
assert self.scale in supported_scale, \
"Supported scale are {} but input scale is {}".format(supported_scale, self.scale)
scale = self.scale
inplanes = self.inplanes
cfg = self.cfg
cls_ch_squeeze = self.cls_ch_squeeze
# conv1
self.conv1 = ConvBNACT(in_channels=in_channels,
out_channels=self.make_divisible(inplanes * scale),
kernel_size=3,
stride=2,
padding=1,
groups=1,
act='hard_swish')
i = 0
inplanes = self.make_divisible(inplanes * scale)
self.stages = nn.ModuleList()
block_list = []
self.out_channels = []
for layer_cfg in cfg:
if layer_cfg[5] == 2 and i > 2:
self.out_channels.append(inplanes)
self.stages.append(nn.Sequential(*block_list))
block_list = []
block = ResidualUnit(num_in_filter=inplanes,
num_mid_filter=self.make_divisible(scale * layer_cfg[1]),
num_out_filter=self.make_divisible(scale * layer_cfg[2]),
act=layer_cfg[4],
stride=layer_cfg[5],
kernel_size=layer_cfg[0],
use_se=layer_cfg[3])
block_list.append(block)
inplanes = self.make_divisible(scale * layer_cfg[2])
i += 1
self.stages.append(nn.Sequential(*block_list))
self.conv2 = ConvBNACT(
in_channels=inplanes,
out_channels=self.make_divisible(scale * cls_ch_squeeze),
kernel_size=1,
stride=1,
padding=0,
groups=1,
act='hard_swish')
self.out_channels.append(self.make_divisible(scale * cls_ch_squeeze))
def make_divisible(self, v, divisor=8, min_value=None):
"""Calculate the output dimension for each layer."""
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
if new_v < 0.9 * v:
new_v += divisor
return new_v
def forward(self, x):
"""Forward."""
x = self.conv1(x)
out = []
for stage in self.stages:
x = stage(x)
out.append(x)
out[-1] = self.conv2(out[-1])
return out
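# A minimal sketch (not part of the original file) of make_divisible, which rounds
# scaled channel counts to a multiple of 8. The example values are illustrative.
def _example_make_divisible():
    """Show how channel counts scaled by 0.5 are rounded to multiples of 8."""
    net = MobileNetV3(in_channels=3, scale=0.5, model_name='large')
    # 16 * 0.5 = 8 stays 8; 24 * 0.5 = 12 rounds up to 16; 40 * 0.5 = 20 rounds up to 24.
    return [net.make_divisible(16 * 0.5), net.make_divisible(24 * 0.5), net.make_divisible(40 * 0.5)]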
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/ocdnet/model/backbone/mobilenet_v3.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# **************************************************************************
# Modified from github (https://github.com/WenmuZhou/DBNet.pytorch)
# Copyright (c) WenmuZhou
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# https://github.com/WenmuZhou/DBNet.pytorch/blob/master/LICENSE.md
# **************************************************************************
"""Resnet Module."""
import torch
import torch.nn as nn
import math
BatchNorm2d = nn.BatchNorm2d
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101', 'deformable_resnet18', 'deformable_resnet50',
'resnet152']
def constant_init(module, constant, bias=0):
    """Initialize a module's weight to a constant value and its bias to `bias`."""
    nn.init.constant_(module.weight, constant)
if hasattr(module, 'bias'):
nn.init.constant_(module.bias, bias)
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding."""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class BasicBlock(nn.Module):
    """Basic residual block with two 3x3 convolutions; the second conv can be deformable."""
    expansion = 1
    def __init__(self, inplanes, planes, stride=1, downsample=None, dcn=None):
        """Initialize."""
        super(BasicBlock, self).__init__()
self.with_dcn = dcn is not None
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.with_modulated_dcn = True
if not self.with_dcn:
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, padding=1, bias=False)
else:
from torchvision.ops import DeformConv2d
deformable_groups = dcn.get('deformable_groups', 1)
if self.with_modulated_dcn:
offset_channels = 27
else:
offset_channels = 18
self.conv2_offset = nn.Conv2d(
planes,
deformable_groups * offset_channels,
kernel_size=3,
padding=1)
self.conv2 = DeformConv2d(
planes,
planes,
kernel_size=3,
padding=1,
bias=False)
self.bn2 = BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
# out = self.conv2(out)
if not self.with_dcn:
out = self.conv2(out)
elif self.with_modulated_dcn:
offset_mask = self.conv2_offset(out)
mask_0, mask_1 = torch.split(offset_mask, [18, 9], 1)
offset = mask_0
mask = mask_1.sigmoid()
out = self.conv2(out, offset, mask)
else:
offset = self.conv2_offset(out)
out = self.conv2(out, offset)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
    """Bottleneck residual block (1x1 -> 3x3 -> 1x1); the 3x3 conv can be deformable."""
    expansion = 4
    def __init__(self, inplanes, planes, stride=1, downsample=None, dcn=None):
        """Initialize."""
        super(Bottleneck, self).__init__()
self.with_dcn = dcn is not None
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = BatchNorm2d(planes)
self.with_modulated_dcn = True
if not self.with_dcn:
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
else:
deformable_groups = dcn.get('deformable_groups', 1)
from torchvision.ops import DeformConv2d
if self.with_modulated_dcn:
offset_channels = 27
else:
offset_channels = 18
self.conv2_offset = nn.Conv2d(
planes, deformable_groups * offset_channels,
stride=stride, kernel_size=3, padding=1)
self.conv2 = DeformConv2d(
planes, planes, kernel_size=3, padding=1, stride=stride,
bias=False)
self.bn2 = BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = BatchNorm2d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
self.dcn = dcn
self.with_dcn = dcn is not None
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
# out = self.conv2(out)
if not self.with_dcn:
out = self.conv2(out)
elif self.with_modulated_dcn:
offset_mask = self.conv2_offset(out)
mask_0, mask_1 = torch.split(offset_mask, [18, 9], 1)
offset = mask_0
mask = mask_1.sigmoid()
out = self.conv2(out, offset, mask)
else:
offset = self.conv2_offset(out)
out = self.conv2(out, offset)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNet(nn.Module):
"""ResNet class."""
def __init__(self, block, layers, in_channels=3, dcn=None):
"""Initialize."""
self.dcn = dcn
self.inplanes = 64
super(ResNet, self).__init__()
self.out_channels = []
self.conv1 = nn.Conv2d(in_channels, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2, dcn=dcn)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2, dcn=dcn)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2, dcn=dcn)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
if self.dcn is not None:
for m in self.modules():
if isinstance(m, (BasicBlock, Bottleneck)):
if hasattr(m, 'conv2_offset'):
constant_init(m.conv2_offset, 0)
def _make_layer(self, block, planes, blocks, stride=1, dcn=None):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, dcn=dcn))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes, dcn=dcn))
self.out_channels.append(planes * block.expansion)
return nn.Sequential(*layers)
def forward(self, x):
"""Forward."""
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x2 = self.layer1(x)
x3 = self.layer2(x2)
x4 = self.layer3(x3)
x5 = self.layer4(x4)
return x2, x3, x4, x5
def resnet18(pretrained=False, **kwargs):
"""Constructs a ResNet-18 model.
Args:
pretrained (bool): Unused; ImageNet pre-trained weights are not loaded. Defaults to False.
"""
model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
return model
def deformable_resnet18(pretrained=False, **kwargs):
"""Constructs a ResNet-18 model.
Args:
pretrained (bool): Defaults to False. Not use a model pre-trained on ImageNet.
"""
model = ResNet(BasicBlock, [2, 2, 2, 2], dcn=dict(deformable_groups=1), **kwargs)
return model
def resnet34(pretrained=False, **kwargs):
"""Constructs a ResNet-34 model.
Args:
pretrained (bool): Unused; ImageNet pre-trained weights are not loaded. Defaults to False.
"""
model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
return model
def resnet50(pretrained=False, **kwargs):
"""Constructs a ResNet-50 model.
Args:
pretrained (bool): Unused; ImageNet pre-trained weights are not loaded. Defaults to False.
"""
model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
return model
def deformable_resnet50(pretrained=False, **kwargs):
"""Constructs a ResNet-50 model with deformable conv.
Args:
pretrained (bool): Unused; ImageNet pre-trained weights are not loaded. Defaults to False.
"""
model = ResNet(Bottleneck, [3, 4, 6, 3], dcn=dict(deformable_groups=1), **kwargs)
return model
def resnet101(pretrained=False, **kwargs):
"""Constructs a ResNet-101 model.
Args:
pretrained (bool): Unused; ImageNet pre-trained weights are not loaded. Defaults to False.
"""
model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
return model
def resnet152(pretrained=False, **kwargs):
"""Constructs a ResNet-152 model.
Args:
pretrained (bool): Unused; ImageNet pre-trained weights are not loaded. Defaults to False.
"""
model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
return model
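if __name__ == '__main__':
    # Minimal smoke test (an added sketch, not part of the original module). It uses
    # the plain ResNet-18 backbone; the deformable variants additionally need a
    # torchvision build whose DeformConv2d accepts an offset (and mask) at runtime.
    import torch
    backbone = resnet18(in_channels=3)
    feats = backbone(torch.randn(1, 3, 224, 224))
    # Four feature maps are returned, with out_channels [64, 128, 256, 512] at
    # strides 4, 8, 16 and 32 relative to the input.
    print(backbone.out_channels, [f.shape for f in feats])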
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/ocdnet/model/backbone/resnet.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# **************************************************************************
# Modified from github (https://github.com/WenmuZhou/DBNet.pytorch)
# Copyright (c) WenmuZhou
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# https://github.com/WenmuZhou/DBNet.pytorch/blob/master/LICENSE.md
# **************************************************************************
"""Split-Attention Module"""
import torch
from torch import nn
import torch.nn.functional as F
from torch.nn import Conv2d, Module, ReLU
from torch.nn.modules.utils import _pair
__all__ = ['SplitAttentionConv2D']
class SplitAttentionConv2D(Module):
"""Split-Attention Conv2d."""
def __init__(self, in_channels, channels, kernel_size, stride=(1, 1), padding=(0, 0),
dilation=(1, 1), groups=1, bias=True,
radix=2, reduction_factor=4,
rectify=False, rectify_avg=False, norm_layer=None,
dropblock_prob=0.0, **kwargs):
"""Initialize."""
super(SplitAttentionConv2D, self).__init__()
padding = _pair(padding)
self.rectify = rectify and (padding[0] > 0 or padding[1] > 0)
self.rectify_avg = rectify_avg
inter_channels = max(in_channels * radix // reduction_factor, 32)
self.radix = radix
self.cardinality = groups
self.channels = channels
self.dropblock_prob = dropblock_prob
if self.rectify:
from rfconv import RFConv2d
self.conv = RFConv2d(in_channels, channels * radix, kernel_size, stride, padding, dilation,
groups=groups * radix, bias=bias, average_mode=rectify_avg, **kwargs)
else:
self.conv = Conv2d(in_channels, channels * radix, kernel_size, stride, padding, dilation,
groups=groups * radix, bias=bias, **kwargs)
self.use_bn = norm_layer is not None
if self.use_bn:
self.bn0 = norm_layer(channels * radix)
self.relu = ReLU(inplace=True)
self.fc1 = Conv2d(channels, inter_channels, 1, groups=self.cardinality)
if self.use_bn:
self.bn1 = norm_layer(inter_channels)
self.fc2 = Conv2d(inter_channels, channels * radix, 1, groups=self.cardinality)
if dropblock_prob > 0.0:
self.dropblock = DropBlock2D(dropblock_prob, 3) # pylint: disable=E0602
self.rsoftmax = rSoftMax(radix, groups)
def forward(self, x):
"""Forward."""
x = self.conv(x)
if self.use_bn:
x = self.bn0(x)
if self.dropblock_prob > 0.0:
x = self.dropblock(x)
x = self.relu(x)
batch, rchannel = x.shape[:2]
if self.radix > 1:
if torch.__version__ < '1.5':
splited = torch.split(x, int(rchannel // self.radix), dim=1)
else:
splited = torch.split(x, rchannel // self.radix, dim=1)
gap = sum(splited)
else:
gap = x
gap = F.adaptive_avg_pool2d(gap, 1)
gap = self.fc1(gap)
if self.use_bn:
gap = self.bn1(gap)
gap = self.relu(gap)
atten = self.fc2(gap)
atten = self.rsoftmax(atten).view(batch, -1, 1, 1)
if self.radix > 1:
if torch.__version__ < '1.5':
attens = torch.split(atten, int(rchannel // self.radix), dim=1)
else:
attens = torch.split(atten, rchannel // self.radix, dim=1)
out = sum([att * split for (att, split) in zip(attens, splited)])
else:
out = atten * x
return out.contiguous()
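# Shape sketch for the forward pass above (added commentary with assumed sizes, not
# original code): with in_channels=64, channels=64, radix=2, groups=1 and an input of
# shape (N, 64, H, W), self.conv yields (N, 128, H, W); the radix split gives two
# (N, 64, H, W) tensors whose sum is pooled to (N, 64, 1, 1), squeezed by fc1 to
# inter_channels=32, expanded by fc2 back to 128, normalized across the radix groups
# by rSoftMax, and the attention-weighted sum restores a (N, 64, H, W) output.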
class rSoftMax(nn.Module):
def __init__(self, radix, cardinality):
super().__init__()
self.radix = radix
self.cardinality = cardinality
def forward(self, x):
batch = x.size(0)
if self.radix > 1:
x = x.view(batch, self.cardinality, self.radix, -1).transpose(1, 2)
x = F.softmax(x, dim=1)
x = x.reshape(batch, -1)
else:
x = torch.sigmoid(x)
return x
class DropBlock2D(object):
def __init__(self, *args, **kwargs):
raise NotImplementedError("Randomly dropping block is not implemented.")
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/ocdnet/model/backbone/resnest/splat.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# **************************************************************************
# Modified from github (https://github.com/WenmuZhou/DBNet.pytorch)
# Copyright (c) WenmuZhou
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# https://github.com/WenmuZhou/DBNet.pytorch/blob/master/LICENSE.md
# **************************************************************************
"""ResNeSt ablation study models"""
from .resnet import ResNet, Bottleneck
__all__ = ['resnest50_fast_1s1x64d', 'resnest50_fast_2s1x64d', 'resnest50_fast_4s1x64d',
'resnest50_fast_1s2x40d', 'resnest50_fast_2s2x40d', 'resnest50_fast_4s2x40d',
'resnest50_fast_1s4x24d']
def resnest50_fast_1s1x64d(pretrained=False, root='~/.encoding/models', **kwargs):
"""Resnest50_fast_1s1x64d model."""
model = ResNet(Bottleneck, [3, 4, 6, 3],
radix=1, groups=1, bottleneck_width=64,
deep_stem=True, stem_width=32, avg_down=True,
avd=True, avd_first=True, **kwargs)
return model
def resnest50_fast_2s1x64d(pretrained=False, root='~/.encoding/models', **kwargs):
"""Resnest50_fast_2s1x64d model."""
model = ResNet(Bottleneck, [3, 4, 6, 3],
radix=2, groups=1, bottleneck_width=64,
deep_stem=True, stem_width=32, avg_down=True,
avd=True, avd_first=True, **kwargs)
return model
def resnest50_fast_4s1x64d(pretrained=False, root='~/.encoding/models', **kwargs):
"""Resnest50_fast_4s1x64d model."""
model = ResNet(Bottleneck, [3, 4, 6, 3],
radix=4, groups=1, bottleneck_width=64,
deep_stem=True, stem_width=32, avg_down=True,
avd=True, avd_first=True, **kwargs)
return model
def resnest50_fast_1s2x40d(pretrained=False, root='~/.encoding/models', **kwargs):
"""Resnest50_fast_1s2x40d model."""
model = ResNet(Bottleneck, [3, 4, 6, 3],
radix=1, groups=2, bottleneck_width=40,
deep_stem=True, stem_width=32, avg_down=True,
avd=True, avd_first=True, **kwargs)
return model
def resnest50_fast_2s2x40d(pretrained=False, root='~/.encoding/models', **kwargs):
"""Resnest50_fast_2s2x40d model."""
model = ResNet(Bottleneck, [3, 4, 6, 3],
radix=2, groups=2, bottleneck_width=40,
deep_stem=True, stem_width=32, avg_down=True,
avd=True, avd_first=True, **kwargs)
return model
def resnest50_fast_4s2x40d(pretrained=False, root='~/.encoding/models', **kwargs):
"""Resnest50_fast_4s2x40d model."""
model = ResNet(Bottleneck, [3, 4, 6, 3],
radix=4, groups=2, bottleneck_width=40,
deep_stem=True, stem_width=32, avg_down=True,
avd=True, avd_first=True, **kwargs)
return model
def resnest50_fast_1s4x24d(pretrained=False, root='~/.encoding/models', **kwargs):
"""Resnest50_fast_1s4x24d model."""
model = ResNet(Bottleneck, [3, 4, 6, 3],
radix=1, groups=4, bottleneck_width=24,
deep_stem=True, stem_width=32, avg_down=True,
avd=True, avd_first=True, **kwargs)
return model
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/ocdnet/model/backbone/resnest/ablation.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# **************************************************************************
# Modified from github (https://github.com/WenmuZhou/DBNet.pytorch)
# Copyright (c) WenmuZhou
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# https://github.com/WenmuZhou/DBNet.pytorch/blob/master/LICENSE.md
# **************************************************************************
"""ResNeSt models"""
from nvidia_tao_pytorch.cv.ocdnet.model.backbone.resnest.resnet import ResNet, Bottleneck
__all__ = ['resnest50', 'resnest101', 'resnest200', 'resnest269']
def resnest50(pretrained=False, root='~/.encoding/models', **kwargs):
"""Resnest50 model."""
model = ResNet(Bottleneck, [3, 4, 6, 3],
radix=2, groups=1, bottleneck_width=64,
deep_stem=True, stem_width=32, avg_down=True,
avd=True, avd_first=False, **kwargs)
return model
def resnest101(pretrained=False, root='~/.encoding/models', **kwargs):
"""Resnest101 model."""
model = ResNet(Bottleneck, [3, 4, 23, 3],
radix=2, groups=1, bottleneck_width=64,
deep_stem=True, stem_width=64, avg_down=True,
avd=True, avd_first=False, **kwargs)
return model
def resnest200(pretrained=False, root='~/.encoding/models', **kwargs):
"""Resnest200 model."""
model = ResNet(Bottleneck, [3, 24, 36, 3],
radix=2, groups=1, bottleneck_width=64,
deep_stem=True, stem_width=64, avg_down=True,
avd=True, avd_first=False, **kwargs)
return model
def resnest269(pretrained=False, root='~/.encoding/models', **kwargs):
"""Resnest269 model."""
model = ResNet(Bottleneck, [3, 30, 48, 8],
radix=2, groups=1, bottleneck_width=64,
deep_stem=True, stem_width=64, avg_down=True,
avd=True, avd_first=False, **kwargs)
return model
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/ocdnet/model/backbone/resnest/resnest.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Init."""
# flake8: noqa: F401, F403
from .resnest import *
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/ocdnet/model/backbone/resnest/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# **************************************************************************
# Modified from github (https://github.com/WenmuZhou/DBNet.pytorch)
# Copyright (c) WenmuZhou
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# https://github.com/WenmuZhou/DBNet.pytorch/blob/master/LICENSE.md
# **************************************************************************
"""ResNet variants"""
import math
import torch.nn as nn
from .splat import SplitAttentionConv2D
__all__ = ['ResNet', 'Bottleneck']
class DropBlock2D(object):
def __init__(self, *args, **kwargs):
raise NotImplementedError("Randomly dropping block is not implemented.")
class GlobalAvgPool2d(nn.Module):
def __init__(self): # pylint: disable=W0235
"""Global average pooling over the input's spatial dimensions."""
super(GlobalAvgPool2d, self).__init__()
def forward(self, inputs):
return nn.functional.adaptive_avg_pool2d(inputs, 1).view(inputs.size(0), -1)
class Bottleneck(nn.Module):
"""ResNet Bottleneck."""
# pylint: disable=unused-argument
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None,
radix=1, cardinality=1, bottleneck_width=64,
avd=False, avd_first=False, dilation=1, is_first=False,
rectified_conv=False, rectify_avg=False,
norm_layer=None, dropblock_prob=0.0, last_gamma=False):
"""Initialize."""
super(Bottleneck, self).__init__()
group_width = int(planes * (bottleneck_width / 64.)) * cardinality
self.conv1 = nn.Conv2d(inplanes, group_width, kernel_size=1, bias=False)
self.bn1 = norm_layer(group_width)
self.dropblock_prob = dropblock_prob
self.radix = radix
self.avd = avd and (stride > 1 or is_first)
self.avd_first = avd_first
if self.avd:
self.avd_layer = nn.AvgPool2d(3, stride, padding=1)
stride = 1
if dropblock_prob > 0.0:
self.dropblock1 = DropBlock2D(dropblock_prob, 3)
if radix == 1:
self.dropblock2 = DropBlock2D(dropblock_prob, 3)
self.dropblock3 = DropBlock2D(dropblock_prob, 3)
if radix >= 1:
self.conv2 = SplitAttentionConv2D(
group_width, group_width, kernel_size=3,
stride=stride, padding=dilation,
dilation=dilation, groups=cardinality, bias=False,
radix=radix, rectify=rectified_conv,
rectify_avg=rectify_avg,
norm_layer=norm_layer,
dropblock_prob=dropblock_prob)
elif rectified_conv:
from rfconv import RFConv2d
self.conv2 = RFConv2d(
group_width, group_width, kernel_size=3, stride=stride,
padding=dilation, dilation=dilation,
groups=cardinality, bias=False,
average_mode=rectify_avg)
self.bn2 = norm_layer(group_width)
else:
self.conv2 = nn.Conv2d(
group_width, group_width, kernel_size=3, stride=stride,
padding=dilation, dilation=dilation,
groups=cardinality, bias=False)
self.bn2 = norm_layer(group_width)
self.conv3 = nn.Conv2d(
group_width, planes * 4, kernel_size=1, bias=False)
self.bn3 = norm_layer(planes * 4)
if last_gamma:
from torch.nn.init import zeros_
zeros_(self.bn3.weight)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.dilation = dilation
self.stride = stride
def forward(self, x):
"""Forward."""
residual = x
out = self.conv1(x)
out = self.bn1(out)
if self.dropblock_prob > 0.0:
out = self.dropblock1(out)
out = self.relu(out)
if self.avd and self.avd_first:
out = self.avd_layer(out)
out = self.conv2(out)
if self.radix == 0:
out = self.bn2(out)
if self.dropblock_prob > 0.0:
out = self.dropblock2(out)
out = self.relu(out)
if self.avd and not self.avd_first:
out = self.avd_layer(out)
out = self.conv3(out)
out = self.bn3(out)
if self.dropblock_prob > 0.0:
out = self.dropblock3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNet(nn.Module):
"""ResNet Variants.
Args:
block : Block class used for the residual blocks (Bottleneck in this module).
layers (list of int): Number of residual blocks in each of the four stages.
num_classes (int): Number of classification classes. Defaults to 1000.
dilated (bool): Apply a dilation strategy to the pretrained ResNet, yielding a stride-8 model
typically used in semantic segmentation. Defaults to False.
norm_layer (object): Normalization layer used in the backbone network. Defaults to
:class:`torch.nn.BatchNorm2d`; a synchronized cross-GPU BatchNorm may be passed instead.
Reference:
- He, Kaiming, et al. "Deep residual learning for image recognition." Proceedings of the IEEE conference on computer vision and pattern recognition. 2016.
- Yu, Fisher, and Vladlen Koltun. "Multi-scale context aggregation by dilated convolutions."
"""
# pylint: disable=unused-variable
def __init__(self, block, layers, radix=1, groups=1, bottleneck_width=64,
num_classes=1000, dilated=False, dilation=1,
deep_stem=False, stem_width=64, avg_down=False,
rectified_conv=False, rectify_avg=False,
avd=False, avd_first=False,
final_drop=0.0, dropblock_prob=0,
last_gamma=False, norm_layer=nn.BatchNorm2d, in_channels=3):
"""Initialize."""
self.cardinality = groups
self.bottleneck_width = bottleneck_width
# ResNet-D params
self.inplanes = stem_width * 2 if deep_stem else 64
self.avg_down = avg_down
self.last_gamma = last_gamma
# ResNeSt params
self.radix = radix
self.avd = avd
self.avd_first = avd_first
super(ResNet, self).__init__()
self.out_channels = []
self.rectified_conv = rectified_conv
self.rectify_avg = rectify_avg
if rectified_conv:
from rfconv import RFConv2d
conv_layer = RFConv2d
else:
conv_layer = nn.Conv2d
conv_kwargs = {'average_mode': rectify_avg} if rectified_conv else {}
if deep_stem:
self.conv1 = nn.Sequential(
conv_layer(in_channels, stem_width, kernel_size=3, stride=2, padding=1, bias=False, **conv_kwargs),
norm_layer(stem_width),
nn.ReLU(inplace=True),
conv_layer(stem_width, stem_width, kernel_size=3, stride=1, padding=1, bias=False, **conv_kwargs),
norm_layer(stem_width),
nn.ReLU(inplace=True),
conv_layer(stem_width, stem_width * 2, kernel_size=3, stride=1, padding=1, bias=False, **conv_kwargs),
)
else:
self.conv1 = conv_layer(in_channels, 64, kernel_size=7, stride=2, padding=3,
bias=False, **conv_kwargs)
self.bn1 = norm_layer(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0], norm_layer=norm_layer, is_first=False)
self.layer2 = self._make_layer(block, 128, layers[1], stride=2, norm_layer=norm_layer)
if dilated or dilation == 4:
self.layer3 = self._make_layer(block, 256, layers[2], stride=1,
dilation=2, norm_layer=norm_layer,
dropblock_prob=dropblock_prob)
self.layer4 = self._make_layer(block, 512, layers[3], stride=1,
dilation=4, norm_layer=norm_layer,
dropblock_prob=dropblock_prob)
elif dilation == 2:
self.layer3 = self._make_layer(block, 256, layers[2], stride=2,
dilation=1, norm_layer=norm_layer,
dropblock_prob=dropblock_prob)
self.layer4 = self._make_layer(block, 512, layers[3], stride=1,
dilation=2, norm_layer=norm_layer,
dropblock_prob=dropblock_prob)
else:
self.layer3 = self._make_layer(block, 256, layers[2], stride=2,
norm_layer=norm_layer,
dropblock_prob=dropblock_prob)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2,
norm_layer=norm_layer,
dropblock_prob=dropblock_prob)
self.avgpool = GlobalAvgPool2d()
self.drop = nn.Dropout(final_drop) if final_drop > 0.0 else None
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, norm_layer):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1, dilation=1, norm_layer=None,
dropblock_prob=0.0, is_first=True):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
down_layers = []
if self.avg_down:
if dilation == 1:
down_layers.append(nn.AvgPool2d(kernel_size=stride, stride=stride,
ceil_mode=True, count_include_pad=False))
else:
down_layers.append(nn.AvgPool2d(kernel_size=1, stride=1,
ceil_mode=True, count_include_pad=False))
down_layers.append(nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=1, bias=False))
else:
down_layers.append(nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False))
down_layers.append(norm_layer(planes * block.expansion))
downsample = nn.Sequential(*down_layers)
layers = []
if dilation in (1, 2):
layers.append(block(self.inplanes, planes, stride, downsample=downsample,
radix=self.radix, cardinality=self.cardinality,
bottleneck_width=self.bottleneck_width,
avd=self.avd, avd_first=self.avd_first,
dilation=1, is_first=is_first, rectified_conv=self.rectified_conv,
rectify_avg=self.rectify_avg,
norm_layer=norm_layer, dropblock_prob=dropblock_prob,
last_gamma=self.last_gamma))
elif dilation == 4:
layers.append(block(self.inplanes, planes, stride, downsample=downsample,
radix=self.radix, cardinality=self.cardinality,
bottleneck_width=self.bottleneck_width,
avd=self.avd, avd_first=self.avd_first,
dilation=2, is_first=is_first, rectified_conv=self.rectified_conv,
rectify_avg=self.rectify_avg,
norm_layer=norm_layer, dropblock_prob=dropblock_prob,
last_gamma=self.last_gamma))
else:
raise RuntimeError("=> unknown dilation size: {}".format(dilation))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes,
radix=self.radix, cardinality=self.cardinality,
bottleneck_width=self.bottleneck_width,
avd=self.avd, avd_first=self.avd_first,
dilation=dilation, rectified_conv=self.rectified_conv,
rectify_avg=self.rectify_avg,
norm_layer=norm_layer, dropblock_prob=dropblock_prob,
last_gamma=self.last_gamma))
self.out_channels.append(planes * block.expansion)
return nn.Sequential(*layers)
def forward(self, x):
"""Forward."""
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x2 = self.layer1(x)
x3 = self.layer2(x2)
x4 = self.layer3(x3)
x5 = self.layer4(x4)
return x2, x3, x4, x5
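if __name__ == '__main__':
    # Minimal smoke test (an added sketch, not part of the original module), using the
    # ResNeSt-50 style configuration from resnest.py.
    import torch
    backbone = ResNet(Bottleneck, [3, 4, 6, 3],
                      radix=2, groups=1, bottleneck_width=64,
                      deep_stem=True, stem_width=32, avg_down=True,
                      avd=True, avd_first=False)
    feats = backbone(torch.randn(1, 3, 224, 224))
    # Expect out_channels [256, 512, 1024, 2048] at strides 4, 8, 16 and 32.
    print(backbone.out_channels, [f.shape for f in feats])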
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/ocdnet/model/backbone/resnest/resnet.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# **************************************************************************
# Modified from github (https://github.com/WenmuZhou/DBNet.pytorch)
# Copyright (c) WenmuZhou
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# https://github.com/WenmuZhou/DBNet.pytorch/blob/master/LICENSE.md
# **************************************************************************
"""Dataloader Init."""
import copy
import PIL
import numpy as np
import torch
from torch.utils.data import DataLoader
from torchvision import transforms
def get_dataset(data_path, module_name, transform, dataset_args):
"""Get dataset.
Args:
data_path (list): List of dataset files.
module_name (str): Name of the dataset class defined in the dataset module,
e.g. UberDataset or ICDAR2015Dataset.
transform: Transform applied to the images.
dataset_args (dict): Extra keyword arguments forwarded to the dataset class.
Returns:
The instantiated dataset object.
"""
from . import dataset
s_dataset = getattr(dataset, module_name)(transform=transform, data_path=data_path,
**dataset_args)
return s_dataset
def get_transforms(transforms_config):
"""Get transforms."""
tr_list = []
for item in transforms_config:
if 'args' not in item:
args = {}
else:
args = item['args']
cls = getattr(transforms, item['type'])(**args)
tr_list.append(cls)
tr_list = transforms.Compose(tr_list)
return tr_list
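# Hedged example of a transforms_config accepted by get_transforms (the concrete keys
# come from the experiment spec; this shape is only an illustrative assumption):
#     transforms_config = [
#         {'type': 'ToTensor', 'args': {}},
#         {'type': 'Normalize', 'args': {'mean': [0.485, 0.456, 0.406],
#                                        'std': [0.229, 0.224, 0.225]}},
#     ]
#     transform = get_transforms(transforms_config)
# Each 'type' must name a class in torchvision.transforms, and 'args' may be omitted.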
class ICDARCollateFN:
"""ICDAR Collate."""
def __init__(self, *args, **kwargs):
"""Initialize."""
pass
def __call__(self, batch):
"""Generate data dict."""
data_dict = {}
tensor_keys = []
for sample in batch:
for k, v in sample.items():
if k not in data_dict:
data_dict[k] = []
if isinstance(v, (np.ndarray, torch.Tensor, PIL.Image.Image)):
if k not in tensor_keys:
tensor_keys.append(k)
data_dict[k].append(v)
for k in tensor_keys:
data_dict[k] = torch.stack(data_dict[k], 0)
return data_dict
def get_dataloader(module_config, distributed=False):
"""Generate dataloader based on the config file."""
if module_config is None:
return None
config = copy.deepcopy(module_config)
dataset_args = config['args']
if 'transforms' in dataset_args:
img_transforms = get_transforms(dataset_args.pop('transforms'))
else:
img_transforms = None
dataset_name = config['data_name']
data_path = config['data_path']
if data_path is None:
return None
data_path = [x for x in data_path if x is not None]
if len(data_path) == 0:
return None
if 'collate_fn' not in config['loader'] or config['loader']['collate_fn'] is None or len(config['loader']['collate_fn']) == 0:
config['loader']['collate_fn'] = None
else:
config['loader']['collate_fn'] = globals()[config['loader']['collate_fn']]()
_dataset = get_dataset(data_path=data_path, module_name=dataset_name, transform=img_transforms, dataset_args=dataset_args)
sampler = None
if distributed:
config['loader']['shuffle'] = False
config['loader']['pin_memory'] = True
loader = DataLoader(dataset=_dataset, sampler=sampler, **config['loader'])
return loader
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/ocdnet/data_loader/build_dataloader.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dataloader Init."""
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/ocdnet/data_loader/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# **************************************************************************
# Modified from github (https://github.com/WenmuZhou/DBNet.pytorch)
# Copyright (c) WenmuZhou
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# https://github.com/WenmuZhou/DBNet.pytorch/blob/master/LICENSE.md
# **************************************************************************
"""Dataset module."""
import pathlib
import cv2
import numpy as np
from nvidia_tao_pytorch.cv.ocdnet.base.base_dataset import BaseDataSet
from nvidia_tao_pytorch.cv.ocdnet.utils import order_points_clockwise, get_datalist, get_datalist_uber
import multiprocessing
class UberDataset(BaseDataSet):
"""Uber Dataset class."""
def __init__(self, data_path: str, img_mode, pre_processes, filter_keys, ignore_tags, transform=None, **kwargs):
"""Initialize."""
super().__init__(data_path, img_mode, pre_processes, filter_keys, ignore_tags, transform)
def load_data(self, data_path: str) -> list:
"""Load data."""
pool = multiprocessing.Pool(processes=4)
data_list = pool.apply_async(get_datalist_uber, args=(data_path,)).get()
pool.close()
pool.join()
t_datalist = []
pool = multiprocessing.Pool(processes=4)
for img_path, label_path in data_list:
tmp = pool.apply_async(self._get_annotation, args=(label_path,))
data = tmp.get()
if len(data['text_polys']) > 0:
item = {'img_path': img_path, 'img_name': pathlib.Path(img_path).stem}
item.update(data)
t_datalist.append(item)
else:
print('there is no suitable bbox in {}'.format(label_path))
pool.close()
pool.join()
return t_datalist
def _get_annotation(self, label_path: str) -> dict:
polys = []
texts = []
ignores = []
with open(label_path, encoding='utf-8', mode='r') as f:
for line in f:
content = line.strip().strip('\ufeff').strip('\xef\xbb\xbf').split('\t')
params = content[0].split(" ")[:-2]
try:
poly = np.array(list(map(float, params))).reshape(-1, 2).astype(np.float32)
if cv2.contourArea(poly) > 0:
polys.append(poly)
label = content[1]
if len(label.split(" ")) > 1:
label = "###"
texts.append(label)
ignores.append(label in self.ignore_tags)
except Exception:
print('load label failed on {}'.format(label_path))
data = {
'text_polys': np.array(polys),
'texts': texts,
'ignore_tags': ignores,
}
return data
class ICDAR2015Dataset(BaseDataSet):
"""ICDAR2015 Dataset."""
def __init__(self, data_path: str, img_mode, pre_processes, filter_keys, ignore_tags, transform=None, **kwargs):
"""Initialize."""
super().__init__(data_path, img_mode, pre_processes, filter_keys, ignore_tags, transform)
def load_data(self, data_path: str) -> list:
"""Load data."""
data_list = get_datalist(data_path)
t_datalist = []
for img_path, label_path in data_list:
data = self._get_annotation(label_path)
if len(data['text_polys']) > 0:
item = {'img_path': img_path, 'img_name': pathlib.Path(img_path).stem}
item.update(data)
t_datalist.append(item)
else:
print(f'there is no suitable bbox in {label_path}')
return t_datalist
def _get_annotation(self, label_path: str) -> dict:
boxes = []
texts = []
ignores = []
with open(label_path, encoding='utf-8', mode='r') as f:
for line in f.readlines():
params = line.strip().strip('\ufeff').strip('\xef\xbb\xbf').split(',')
try:
box = order_points_clockwise(np.array(list(map(float, params[:8]))).reshape(-1, 2))
if cv2.contourArea(box) > 0:
boxes.append(box)
label = params[8]
texts.append(label)
ignores.append(label in self.ignore_tags)
except Exception:
print(f'load label failed on {label_path}')
data = {
'text_polys': np.array(boxes),
'texts': texts,
'ignore_tags': ignores,
}
return data
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/ocdnet/data_loader/dataset.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# **************************************************************************
# Modified from github (https://github.com/WenmuZhou/DBNet.pytorch)
# Copyright (c) WenmuZhou
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# https://github.com/WenmuZhou/DBNet.pytorch/blob/master/LICENSE.md
# **************************************************************************
"""Make shrink map."""
import numpy as np
import cv2
def shrink_polygon_py(polygon, shrink_ratio):
"""Shrink the polygon to 1/shrink_ratio.
Args:
polygon (list): The original polygon.
shrink_ratio (float): The shrink_raio.
Returns:
shrinked (list): The shrinked polygon.
"""
cx = polygon[:, 0].mean()
cy = polygon[:, 1].mean()
polygon[:, 0] = cx + (polygon[:, 0] - cx) * shrink_ratio
polygon[:, 1] = cy + (polygon[:, 1] - cy) * shrink_ratio
return polygon
def shrink_polygon_pyclipper(polygon, shrink_ratio):
"""Shrink polygon pyclipper.
Args:
polygon (list): The original polygon.
shrink_ratio (float): The shrink_raio.
Returns:
shrinked (list): The shrinked polygon.
"""
from shapely.geometry import Polygon
import pyclipper
# Generate polygon object
polygon_shape = Polygon(polygon)
# The distance during shrinking
distance = polygon_shape.area * (1 - np.power(shrink_ratio, 2)) / polygon_shape.length
subject = [tuple(p) for p in polygon]
padding = pyclipper.PyclipperOffset() # pylint: disable=I1101
padding.AddPath(subject, pyclipper.JT_ROUND, pyclipper.ET_CLOSEDPOLYGON) # pylint: disable=I1101
shrinked = padding.Execute(-distance)
if shrinked == []:
shrinked = np.array(shrinked)
else:
shrinked = np.array(shrinked[0]).reshape(-1, 2)
return shrinked
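# Worked example for the pyclipper-based shrink above (added commentary, values are
# illustrative only):
#     square = np.array([[0, 0], [100, 0], [100, 100], [0, 100]], dtype=np.float32)
#     shrink_polygon_pyclipper(square, shrink_ratio=0.4)
# area = 10000 and perimeter = 400, so distance = 10000 * (1 - 0.4 ** 2) / 400 = 21,
# i.e. every edge is offset inwards by 21 pixels, leaving roughly a 58 x 58 square
# (pyclipper may return the vertices in a different order).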
class MakeShrinkMap():
"""Generate probability map."""
def __init__(self, min_text_size=8, shrink_ratio=0.4, shrink_type='pyclipper'):
"""Initialize.
Args:
min_text_size (int): The minimum text size.
shrink_ratio (float): The shrink ratio of polygon.
"""
shrink_func_dict = {'py': shrink_polygon_py, 'pyclipper': shrink_polygon_pyclipper}
self.shrink_func = shrink_func_dict[shrink_type]
self.min_text_size = min_text_size
self.shrink_ratio = shrink_ratio
def __call__(self, data: dict) -> dict:
"""Generate shrinked polygon."""
image = data['img']
text_polys = data['text_polys']
ignore_tags = data['ignore_tags']
h, w = image.shape[:2]
text_polys, ignore_tags = self.validate_polygons(text_polys, ignore_tags, h, w)
gt = np.zeros((h, w), dtype=np.float32)
mask = np.ones((h, w), dtype=np.float32)
for i in range(len(text_polys)):
polygon = text_polys[i]
height = max(polygon[:, 1]) - min(polygon[:, 1])
width = max(polygon[:, 0]) - min(polygon[:, 0])
if ignore_tags[i] or min(height, width) < self.min_text_size:
cv2.fillPoly(mask, polygon.astype(np.int32)[np.newaxis, :, :], 0)
ignore_tags[i] = True
else:
shrinked = self.shrink_func(polygon, self.shrink_ratio)
if shrinked.size == 0:
cv2.fillPoly(mask, polygon.astype(np.int32)[np.newaxis, :, :], 0)
ignore_tags[i] = True
continue
cv2.fillPoly(gt, [shrinked.astype(np.int32)], 1)
data['shrink_map'] = gt
data['shrink_mask'] = mask
return data
def validate_polygons(self, polygons, ignore_tags, h, w):
"""Align the coordinate order of the polygons, ignore the polygon whose area is zero.
Args:
polygons (list): The polygons in text data.
ignore_tags: (list): The tags which are marked ignored.
h (int): The height of image.
w (int): The width of image.
Returns:
polygons: The new polygons.
ignore_tags: The new ignore_tags.
"""
if len(polygons) == 0:
return polygons, ignore_tags
assert len(polygons) == len(ignore_tags)
# Clip the polygon coordinates inside the image
for polygon in polygons:
polygon[:, 0] = np.clip(polygon[:, 0], 0, w - 1)
polygon[:, 1] = np.clip(polygon[:, 1], 0, h - 1)
for i in range(len(polygons)):
# Calculate the area of polygon
area = self.polygon_area(polygons[i])
# Ignore the polygon whose area is small
if abs(area) < 1:
ignore_tags[i] = True
if area > 0:
polygons[i] = polygons[i][::-1, :]
return polygons, ignore_tags
def polygon_area(self, polygon):
"""Calculate the area of polygon."""
return cv2.contourArea(polygon)
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/ocdnet/data_loader/modules/make_shrink_map.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# **************************************************************************
# Modified from github (https://github.com/WenmuZhou/DBNet.pytorch)
# Copyright (c) WenmuZhou
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# https://github.com/WenmuZhou/DBNet.pytorch/blob/master/LICENSE.md
# **************************************************************************
"""Make border map."""
import cv2
import pyclipper
from shapely.geometry import Polygon
import numpy as np
np.seterr(divide='ignore', invalid='ignore')
class MakeBorderMap():
"""Generate threshold map.
Calculate the distance between the expanded polygon and the original text polygon.
Normalize the distance and clip it to the range 0 to 1, keeping for each point only
the minimum distance to its closest edge of the original polygon.
Compute the shrunk polygon and the normalized distances for the points that lie
between the shrunk and expanded polygons.
Finally, generate the threshold map in the range thresh_min to thresh_max.
"""
def __init__(self, shrink_ratio=0.4, thresh_min=0.3, thresh_max=0.7):
"""Initialize."""
self.shrink_ratio = shrink_ratio
self.thresh_min = thresh_min
self.thresh_max = thresh_max
def __call__(self, data: dict) -> dict:
"""Generate threshod map"""
im = data['img']
text_polys = data['text_polys']
ignore_tags = data['ignore_tags']
canvas = np.zeros(im.shape[:2], dtype=np.float32)
mask = np.zeros(im.shape[:2], dtype=np.float32)
for i in range(len(text_polys)):
if ignore_tags[i]:
continue
# get the threshold map
self.draw_border_map(text_polys[i], canvas, mask=mask)
# shrink to final threshold map
canvas = canvas * (self.thresh_max - self.thresh_min) + self.thresh_min
data['threshold_map'] = canvas
data['threshold_mask'] = mask
return data
def draw_border_map(self, polygon, canvas, mask):
"""Calculate border map."""
polygon = np.array(polygon)
assert polygon.ndim == 2
assert polygon.shape[1] == 2
polygon_shape = Polygon(polygon)
if polygon_shape.area <= 0:
return
distance = polygon_shape.area * (1 - np.power(self.shrink_ratio, 2)) / polygon_shape.length
subject = [tuple(p) for p in polygon]
padding = pyclipper.PyclipperOffset() # pylint: disable=I1101
padding.AddPath(subject, pyclipper.JT_ROUND, pyclipper.ET_CLOSEDPOLYGON) # pylint: disable=I1101
padded_polygon = np.array(padding.Execute(distance)[0])
cv2.fillPoly(mask, [padded_polygon.astype(np.int32)], 1.0)
xmin = padded_polygon[:, 0].min()
xmax = padded_polygon[:, 0].max()
ymin = padded_polygon[:, 1].min()
ymax = padded_polygon[:, 1].max()
width = xmax - xmin + 1
height = ymax - ymin + 1
polygon[:, 0] = polygon[:, 0] - xmin
polygon[:, 1] = polygon[:, 1] - ymin
xs = np.broadcast_to(
np.linspace(0, width - 1, num=width).reshape(1, width), (height, width))
ys = np.broadcast_to(
np.linspace(0, height - 1, num=height).reshape(height, 1), (height, width))
distance_map = np.zeros(
(polygon.shape[0], height, width), dtype=np.float32)
# calculate the distance between expansion polygon and original text polygon.
for i in range(polygon.shape[0]):
j = (i + 1) % polygon.shape[0]
absolute_distance = self.distance(xs, ys, polygon[i], polygon[j])
distance_map[i] = np.clip(absolute_distance / distance, 0, 1)
# only keep the minimum distance from each point to its closest polygon edge
distance_map = distance_map.min(axis=0)
xmin_valid = min(max(0, xmin), canvas.shape[1] - 1)
xmax_valid = min(max(0, xmax), canvas.shape[1] - 1)
ymin_valid = min(max(0, ymin), canvas.shape[0] - 1)
ymax_valid = min(max(0, ymax), canvas.shape[0] - 1)
canvas[ymin_valid:ymax_valid + 1, xmin_valid:xmax_valid + 1] = np.fmax(
1 - distance_map[
ymin_valid - ymin:ymax_valid - ymax + height,
xmin_valid - xmin:xmax_valid - xmax + width],
canvas[ymin_valid:ymax_valid + 1, xmin_valid:xmax_valid + 1])
def distance(self, xs, ys, point_1, point_2):
"""Compute the distance from expansion points to a line of original polygon
Args:
ys: coordinates in the first axis
xs: coordinates in the second axis
point_1, point_2: the last coordinate of the line
Returns:
result: the normalized distance between expansion polygon points and a line
of original text polygon
"""
_, __ = xs.shape[:2]
square_distance_1 = np.square(xs - point_1[0]) + np.square(ys - point_1[1])
square_distance_2 = np.square(xs - point_2[0]) + np.square(ys - point_2[1])
square_distance = np.square(point_1[0] - point_2[0]) + np.square(point_1[1] - point_2[1])
cosin = (square_distance - square_distance_1 - square_distance_2) / (2 * np.sqrt(square_distance_1 * square_distance_2))
square_sin = 1 - np.square(cosin)
square_sin = np.nan_to_num(square_sin)
result = np.sqrt(square_distance_1 * square_distance_2 * square_sin / square_distance)
result[cosin < 0] = np.sqrt(np.fmin(square_distance_1, square_distance_2))[cosin < 0]
return result
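# Worked note for distance() above (added commentary, not original code): for a grid
# point P and edge endpoints A and B, the code evaluates
#     cosin = (|AB|^2 - |PA|^2 - |PB|^2) / (2 * |PA| * |PB|)  # equals -cos(angle APB)
# and then |PA| * |PB| * sin(angle APB) / |AB|, which is twice the area of triangle
# APB divided by its base, i.e. the perpendicular distance from P to the line AB.
# Wherever cosin < 0 the code instead uses the distance to the nearer endpoint.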
def extend_line(self, point_1, point_2, result):
"""Extend line."""
ex_point_1 = (int(round(point_1[0] + (point_1[0] - point_2[0]) * (1 + self.shrink_ratio))),
int(round(point_1[1] + (point_1[1] - point_2[1]) * (1 + self.shrink_ratio))))
cv2.line(result, tuple(ex_point_1), tuple(point_1), 4096.0, 1, lineType=cv2.LINE_AA, shift=0)
ex_point_2 = (int(round(point_2[0] + (point_2[0] - point_1[0]) * (1 + self.shrink_ratio))),
int(round(point_2[1] + (point_2[1] - point_1[1]) * (1 + self.shrink_ratio))))
cv2.line(result, tuple(ex_point_2), tuple(point_2), 4096.0, 1, lineType=cv2.LINE_AA, shift=0)
return ex_point_1, ex_point_2
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/ocdnet/data_loader/modules/make_border_map.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# **************************************************************************
# Modified from github (https://github.com/WenmuZhou/DBNet.pytorch)
# Copyright (c) WenmuZhou
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# https://github.com/WenmuZhou/DBNet.pytorch/blob/master/LICENSE.md
# **************************************************************************
"""Imgaug augment module."""
import numpy as np
import imgaug
import imgaug.augmenters as iaa
class AugmenterBuilder(object):
"""Augmenter Builder."""
def __init__(self):
"""Initialize."""
pass
def build(self, args, root=True):
"""Build augmenter."""
if args is None or len(args) == 0:
return None
if isinstance(args, list):
if root:
sequence = [self.build(value, root=False) for value in args]
return iaa.Sequential(sequence)
return getattr(iaa, args[0])(*[self.list_to_tuple(a) for a in args[1:]])
if isinstance(args, dict):
cls = getattr(iaa, args['type'])
return cls(**{k: self.list_to_tuple(v) for k, v in args['args'].items()})
raise RuntimeError('unknown augmenter arg: ' + str(args))
def list_to_tuple(self, obj):
"""Return tuple for list."""
if isinstance(obj, list):
return tuple(obj)
return obj
class IaaAugment():
"""Imgaug augment class."""
def __init__(self, augmenter_args):
"""Initialize."""
self.augmenter_args = augmenter_args
self.augmenter = AugmenterBuilder().build(self.augmenter_args)
def __call__(self, data):
"""Imgaug augmentation."""
image = data['img']
shape = image.shape
if self.augmenter:
aug = self.augmenter.to_deterministic()
data['img'] = aug.augment_image(image)
data = self.may_augment_annotation(aug, data, shape)
return data
def may_augment_annotation(self, aug, data, shape):
"""Augment annotation."""
if aug is None:
return data
line_polys = []
for poly in data['text_polys']:
new_poly = self.may_augment_poly(aug, shape, poly)
line_polys.append(new_poly)
data['text_polys'] = np.array(line_polys)
return data
def may_augment_poly(self, aug, img_shape, poly):
"""Augment poly."""
keypoints = [imgaug.Keypoint(p[0], p[1]) for p in poly]
keypoints = aug.augment_keypoints(
[imgaug.KeypointsOnImage(keypoints, shape=img_shape)])[0].keypoints
poly = [(p.x, p.y) for p in keypoints]
return poly
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/ocdnet/data_loader/modules/iaa_augment.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# **************************************************************************
# Modified from github (https://github.com/WenmuZhou/DBNet.pytorch)
# Copyright (c) WenmuZhou
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# https://github.com/WenmuZhou/DBNet.pytorch/blob/master/LICENSE.md
# **************************************************************************
"""Modules for the dataloader."""
# flake8: noqa: F401, F403
from .iaa_augment import IaaAugment
from .augment import *
from .random_crop_data import EastRandomCropData
from .make_border_map import MakeBorderMap
from .make_shrink_map import MakeShrinkMap
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/ocdnet/data_loader/modules/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# **************************************************************************
# Modified from github (https://github.com/WenmuZhou/DBNet.pytorch)
# Copyright (c) WenmuZhou
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# https://github.com/WenmuZhou/DBNet.pytorch/blob/master/LICENSE.md
# **************************************************************************
"""Augment module."""
import math
import numbers
import random
import cv2
import numpy as np
from skimage.util import random_noise
class RandomNoise:
"""Random Noise class."""
def __init__(self, random_rate):
"""Initialize."""
self.random_rate = random_rate
def __call__(self, data: dict):
"""Add random noise."""
if random.random() > self.random_rate:
return data
im = data['img']
data['img'] = (random_noise(data['img'], mode='gaussian', clip=True) * 255).astype(im.dtype)
return data
class RandomScale:
"""Random Scale class."""
def __init__(self, scales, random_rate):
"""Initialize."""
self.random_rate = random_rate
self.scales = scales
def __call__(self, data: dict) -> dict:
"""Add random scale."""
if random.random() > self.random_rate:
return data
im = data['img']
text_polys = data['text_polys']
tmp_text_polys = text_polys.copy()
rd_scale = float(np.random.choice(self.scales))
im = cv2.resize(im, dsize=None, fx=rd_scale, fy=rd_scale)
tmp_text_polys *= rd_scale
data['img'] = im
data['text_polys'] = tmp_text_polys
return data
class RandomRotateImgBox:
"""Random Rotate Image Box."""
def __init__(self, degrees, random_rate, same_size=False):
"""Initialize."""
if isinstance(degrees, numbers.Number):
if degrees < 0:
raise ValueError("If degrees is a single number, it must be positive.")
degrees = (-degrees, degrees)
elif isinstance(degrees, (list, np.ndarray, tuple)):
if len(degrees) != 2:
raise ValueError("If degrees is a sequence, it must be of len 2.")
degrees = degrees
else:
raise TypeError('degrees must in Number or list or tuple or np.ndarray')
self.degrees = degrees
self.same_size = same_size
self.random_rate = random_rate
def __call__(self, data: dict) -> dict:
"""Add random rotate image box."""
if random.random() > self.random_rate:
return data
im = data['img']
text_polys = data['text_polys']
# rotate
w = im.shape[1]
h = im.shape[0]
angle = np.random.uniform(self.degrees[0], self.degrees[1])
if self.same_size:
nw = w
nh = h
else:
rangle = np.deg2rad(angle)
# calculate w, h
nw = (abs(np.sin(rangle) * h) + abs(np.cos(rangle) * w))
nh = (abs(np.cos(rangle) * h) + abs(np.sin(rangle) * w))
# getRotationMatrix2D
rot_mat = cv2.getRotationMatrix2D((nw * 0.5, nh * 0.5), angle, 1)
# offset between original center point and new center point
rot_move = np.dot(rot_mat, np.array([(nw - w) * 0.5, (nh - h) * 0.5, 0]))
# update mat
rot_mat[0, 2] += rot_move[0]
rot_mat[1, 2] += rot_move[1]
# warpaffine
rot_img = cv2.warpAffine(im, rot_mat, (int(math.ceil(nw)), int(math.ceil(nh))), flags=cv2.INTER_LANCZOS4)
rot_text_polys = list()
for bbox in text_polys:
point1 = np.dot(rot_mat, np.array([bbox[0, 0], bbox[0, 1], 1]))
point2 = np.dot(rot_mat, np.array([bbox[1, 0], bbox[1, 1], 1]))
point3 = np.dot(rot_mat, np.array([bbox[2, 0], bbox[2, 1], 1]))
point4 = np.dot(rot_mat, np.array([bbox[3, 0], bbox[3, 1], 1]))
rot_text_polys.append([point1, point2, point3, point4])
data['img'] = rot_img
data['text_polys'] = np.array(rot_text_polys)
return data
class RandomResize:
"""Random Resize."""
def __init__(self, size, random_rate, keep_ratio=False):
"""Initialize."""
if isinstance(size, numbers.Number):
if size < 0:
raise ValueError("If input_size is a single number, it must be positive.")
size = (size, size)
elif isinstance(size, (list, np.ndarray, tuple)):
if len(size) != 2:
raise ValueError("If input_size is a sequence, it must be of len 2.")
size = (size[0], size[1])
else:
raise TypeError('input_size must in Number or list or tuple or np.ndarray')
self.size = size
self.keep_ratio = keep_ratio
self.random_rate = random_rate
def __call__(self, data: dict) -> dict:
"""Add random resize."""
if random.random() > self.random_rate:
return data
im = data['img']
text_polys = data['text_polys']
if self.keep_ratio:
h, w, c = im.shape
max_h = max(h, self.size[0])
max_w = max(w, self.size[1])
im_padded = np.zeros((max_h, max_w, c), dtype=np.uint8)
im_padded[:h, :w] = im.copy()
im = im_padded
text_polys = text_polys.astype(np.float32)
h, w, _ = im.shape
im = cv2.resize(im, self.size)
w_scale = self.size[0] / float(w)
h_scale = self.size[1] / float(h)
text_polys[:, :, 0] *= w_scale
text_polys[:, :, 1] *= h_scale
data['img'] = im
data['text_polys'] = text_polys
return data
class Resize2D:
"""Resize 2D."""
def __init__(self, short_size, resize_text_polys=True):
"""Initialize."""
self.short_size = short_size
self.resize_text_polys = resize_text_polys
def __call__(self, data: dict) -> dict:
"""Resize images and texts"""
im = data['img']
text_polys = data['text_polys']
h, w, _ = im.shape
if isinstance(self.short_size, (list, tuple)):
target_width = self.short_size[0]
target_height = self.short_size[1]
scale = (target_width / w, target_height / h)
im = cv2.resize(im, dsize=None, fx=scale[0], fy=scale[1])
if self.resize_text_polys:
text_polys[:, :, 0] *= scale[0]
text_polys[:, :, 1] *= scale[1]
else:
short_edge = min(h, w)
if short_edge < self.short_size:
scale = self.short_size / short_edge
im = cv2.resize(im, dsize=None, fx=scale, fy=scale)
scale = (scale, scale)
if self.resize_text_polys:
text_polys[:, :, 0] *= scale[0]
text_polys[:, :, 1] *= scale[1]
data['img'] = im
data['text_polys'] = text_polys
return data
class HorizontalFlip:
"""Horizontal Flip class."""
def __init__(self, random_rate):
"""Initialize."""
self.random_rate = random_rate
def __call__(self, data: dict) -> dict:
"""Add horizontal flip."""
if random.random() > self.random_rate:
return data
im = data['img']
text_polys = data['text_polys']
flip_text_polys = text_polys.copy()
flip_im = cv2.flip(im, 1)
__, w, _ = flip_im.shape
flip_text_polys[:, :, 0] = w - flip_text_polys[:, :, 0]
data['img'] = flip_im
data['text_polys'] = flip_text_polys
return data
class VerticalFlip:
"""Vertical Flip class."""
def __init__(self, random_rate):
"""Initialize."""
self.random_rate = random_rate
def __call__(self, data: dict) -> dict:
"""Add Vertical flip."""
if random.random() > self.random_rate:
return data
im = data['img']
text_polys = data['text_polys']
flip_text_polys = text_polys.copy()
flip_im = cv2.flip(im, 0)
h, __, _ = flip_im.shape
flip_text_polys[:, :, 1] = h - flip_text_polys[:, :, 1]
data['img'] = flip_im
data['text_polys'] = flip_text_polys
return data
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/ocdnet/data_loader/modules/augment.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# **************************************************************************
# Modified from github (https://github.com/WenmuZhou/DBNet.pytorch)
# Copyright (c) WenmuZhou
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# https://github.com/WenmuZhou/DBNet.pytorch/blob/master/LICENSE.md
# **************************************************************************
"""Random crop data."""
import cv2
import numpy as np
class EastRandomCropData():
"""Random crop images."""
def __init__(self, size=(640, 640), max_tries=50, min_crop_side_ratio=0.1, require_original_image=False, keep_ratio=True):
"""Initialize.
Args:
size (set): The cropped target size.
max_tries (int): The maximum number of crop attempts, used when the cropped area
is too small or cropping fails.
min_crop_side_ratio (float): The minimum crop size as a ratio of each image side.
require_original_image: Whether require original image.
keep_ratio (bool) : Whether to keep ratio.
"""
self.size = size
self.max_tries = max_tries
self.min_crop_side_ratio = min_crop_side_ratio
self.require_original_image = require_original_image
self.keep_ratio = keep_ratio
def __call__(self, data: dict) -> dict:
"""Random crop images and ensure the text is not cropped"""
im = data['img']
text_polys = data['text_polys']
ignore_tags = data['ignore_tags']
texts = data['texts']
# Get the bbox/polygon whose tag are not ignored
all_care_polys = [text_polys[i] for i, tag in enumerate(ignore_tags) if not tag]
# Calculate the crop area
crop_x, crop_y, crop_w, crop_h = self.crop_area(im, all_care_polys)
# Crop image, keep aspect ratio
scale_w = self.size[0] / crop_w
scale_h = self.size[1] / crop_h
scale = min(scale_w, scale_h)
h = int(crop_h * scale)
w = int(crop_w * scale)
if self.keep_ratio:
if len(im.shape) == 3:
padimg = np.zeros((self.size[1], self.size[0], im.shape[2]), im.dtype)
else:
padimg = np.zeros((self.size[1], self.size[0]), im.dtype)
padimg[:h, :w] = cv2.resize(im[crop_y:crop_y + crop_h, crop_x:crop_x + crop_w], (w, h))
img = padimg
else:
img = cv2.resize(im[crop_y:crop_y + crop_h, crop_x:crop_x + crop_w], tuple(self.size))
# Crop texts
text_polys_crop = []
ignore_tags_crop = []
texts_crop = []
# Calculate the new coordinates
for poly, text, tag in zip(text_polys, texts, ignore_tags):
poly = ((poly - (crop_x, crop_y)) * scale).tolist()
if not self.is_poly_outside_rect(poly, 0, 0, w, h):
text_polys_crop.append(poly)
ignore_tags_crop.append(tag)
texts_crop.append(text)
data['img'] = img
data['text_polys'] = np.float32(text_polys_crop)
data['ignore_tags'] = ignore_tags_crop
data['texts'] = texts_crop
return data
def is_poly_outside_rect(self, poly, x, y, w, h):
"""Check if the text is outside the cropped area."""
poly = np.array(poly)
if poly[:, 0].max() < x or poly[:, 0].min() > x + w:
return True
if poly[:, 1].max() < y or poly[:, 1].min() > y + h:
return True
return False
def split_regions(self, axis):
"""Generate the regions which can be split."""
regions = []
min_axis = 0
for i in range(1, axis.shape[0]):
if axis[i] != axis[i - 1] + 1:
region = axis[min_axis:i]
min_axis = i
regions.append(region)
return regions
def random_select(self, axis, max_size):
"""Select two split lines during one region."""
xx = np.random.choice(axis, size=2)
xmin = np.min(xx)
xmax = np.max(xx)
xmin = np.clip(xmin, 0, max_size - 1)
xmax = np.clip(xmax, 0, max_size - 1)
return xmin, xmax
def region_wise_random_select(self, regions, max_size):
"""Select two split lines during two regions."""
selected_index = list(np.random.choice(len(regions), 2))
selected_values = []
for index in selected_index:
axis = regions[index]
xx = int(np.random.choice(axis, size=1))
selected_values.append(xx)
xmin = min(selected_values)
xmax = max(selected_values)
return xmin, xmax
def crop_area(self, im, text_polys):
"""Crop area."""
h, w = im.shape[:2]
h_array = np.zeros(h, dtype=np.int32)
w_array = np.zeros(w, dtype=np.int32)
for points in text_polys:
points = np.round(points, decimals=0).astype(np.int32)
minx = np.min(points[:, 0])
maxx = np.max(points[:, 0])
w_array[minx:maxx] = 1
miny = np.min(points[:, 1])
maxy = np.max(points[:, 1])
h_array[miny:maxy] = 1
        # ensure the cropped area does not cut across a text region
h_axis = np.where(h_array == 0)[0]
w_axis = np.where(w_array == 0)[0]
if len(h_axis) == 0 or len(w_axis) == 0:
return 0, 0, w, h
h_regions = self.split_regions(h_axis)
w_regions = self.split_regions(w_axis)
for _ in range(self.max_tries):
if len(w_regions) > 1:
xmin, xmax = self.region_wise_random_select(w_regions, w)
else:
xmin, xmax = self.random_select(w_axis, w)
if len(h_regions) > 1:
ymin, ymax = self.region_wise_random_select(h_regions, h)
else:
ymin, ymax = self.random_select(h_axis, h)
if xmax - xmin < self.min_crop_side_ratio * w or ymax - ymin < self.min_crop_side_ratio * h:
# area too small
continue
num_poly_in_rect = 0
for poly in text_polys:
if not self.is_poly_outside_rect(poly, xmin, ymin, xmax - xmin, ymax - ymin):
num_poly_in_rect += 1
break
if num_poly_in_rect > 0:
return xmin, ymin, xmax - xmin, ymax - ymin
return 0, 0, w, h
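# Minimal self-contained sketch of how this transform is typically applied.
# The synthetic image and polygon values below are assumptions for illustration only.
if __name__ == "__main__":
    sample = {
        'img': np.full((100, 200, 3), 255, dtype=np.uint8),
        'text_polys': np.array([[[30, 30], [80, 30], [80, 60], [30, 60]]], dtype=np.float32),
        'ignore_tags': [False],
        'texts': ['demo'],
    }
    cropper = EastRandomCropData(size=(64, 64), max_tries=10, min_crop_side_ratio=0.1, keep_ratio=True)
    out = cropper(sample)
    # The cropped image is resized/padded to `size`; polygons are shifted and scaled accordingly.
    print(out['img'].shape, out['text_polys'].shape)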
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/ocdnet/data_loader/modules/random_crop_data.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# **************************************************************************
# Modified from github (https://github.com/WenmuZhou/DBNet.pytorch)
# Copyright (c) WenmuZhou
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# https://github.com/WenmuZhou/DBNet.pytorch/blob/master/LICENSE.md
# **************************************************************************
"""Base_dataset module."""
import copy
import torch
from torch.utils.data import Dataset
from nvidia_tao_pytorch.cv.ocdnet.data_loader.modules import * # pylint: disable=W0401,W0611,W0614
import cv2
import numpy as np
# flake8: noqa: F401, F403
class BaseDataSet(Dataset):
"""The base class for dataloader."""
def __init__(self, data_path: str, img_mode, pre_processes, filter_keys, ignore_tags, transform=None,
target_transform=None):
"""Initialize the dataset object with the given parameters.
Args:
data_path (str): The path to the dataset
img_mode (str): The image mode of the images
            pre_processes (dict): The preprocessing parameters to be used
            filter_keys (list): The keys to be removed from the data dict
            ignore_tags (list): In the label file, lines that contain any of the ignore_tags will be ignored during training
"""
if img_mode not in ['RGB', 'BGR', 'GRAY']:
raise NotImplementedError(
f"Unsupported image mode {img_mode} requested. Please set to any one of 'RGB', 'BGR', 'GRAY'."
)
self.ignore_tags = ignore_tags
self.data_list = self.load_data(data_path)
item_keys = ['img_path', 'img_name', 'text_polys', 'texts', 'ignore_tags']
for item in item_keys:
            assert item in self.data_list[0], 'data_list from load_data must contain {}'.format(item)
self.img_mode = img_mode
self.filter_keys = filter_keys
self.transform = transform
self.target_transform = target_transform
self._init_pre_processes(pre_processes)
def _init_pre_processes(self, pre_processes):
"""Initialize the preprocessing parameters.
Args:
IaaAugment (dict): Uses imgaug to perform augmentation. "Fliplr", "Affine", and "Resize" are used by default.
The "Fliplr" defines the probability of each image to be flipped.
The "rotate" defines the degree range when rotating images by a random value.
The "size" defines the range when resizing each image compared to its original size.
More methods can be implemented by using API in https://imgaug.readthedocs.io/en/latest/source/api.html
            EastRandomCropData (dict): The random crop after augmentation. The "size" defines the cropped target size (width, height).
The width and height should be multiples of 32. The "max_tries" defines the maximum times to try
to crop since the cropped area may be too small or cropping may have failed.
The "keep_ratio" specifies whether to keep the aspect ratio.
MakeBorderMap (dict): Defines the parameter when generating a threshold map. The "shrink_ratio" is used to calculate the distance
between expanding/shrinking polygons and the original text polygon. The "thresh_min" and "thresh_max" will
set the threshold range when generating the threshold map.
MakeShrinkMap (dict): Defines the parameter when generating a probability map. The "shrink_ratio" is used to generate shrunken
polygons. The "min_text_size" specifies that the text will be ignored if its height or width is lower than this parameter.
"""
self.aug = []
if pre_processes is not None:
for aug in pre_processes:
if 'args' not in aug:
args = {}
else:
args = aug['args']
if isinstance(args, dict):
cls = globals()[aug['type']](**args)
else:
cls = globals()[aug['type']](args)
self.aug.append(cls)
def load_data(self, data_path: str) -> list:
"""Load data into a dict
Args:
data_path (str): file or folder
Returns:
            A list (list): each element is a dict containing 'img_path', 'img_name', 'text_polys', 'texts', 'ignore_tags'
"""
raise NotImplementedError
def apply_pre_processes(self, data):
"""Implement preprocessing for dataset."""
for aug in self.aug:
data = aug(data)
return data
def __getitem__(self, index):
"""Generate data dict per item."""
try:
data = copy.deepcopy(self.data_list[index])
im = cv2.imread(data['img_path'], 1 if self.img_mode != 'GRAY' else 0).astype("float32")
if self.img_mode == 'RGB':
im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
data['img'] = im
data['shape'] = [im.shape[0], im.shape[1]]
data = self.apply_pre_processes(data)
rgb_mean = np.array([122.67891434, 116.66876762, 104.00698793])
image = data['img']
image -= rgb_mean
image /= 255.
image = torch.from_numpy(image).permute(2, 0, 1).float()
data['img'] = image
data['text_polys'] = data['text_polys'].tolist()
if len(self.filter_keys):
data_dict = {}
for k, v in data.items():
if k not in self.filter_keys:
data_dict[k] = v
return data_dict
return data
except Exception:
return self.__getitem__(np.random.randint(self.__len__()))
def __len__(self):
"""The length of data list."""
return len(self.data_list)
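# A hedged sketch of the `pre_processes` structure consumed by _init_pre_processes.
# The module names match the docstring above, but the concrete argument values are
# illustrative assumptions, not defaults:
#
#   pre_processes = [
#       {'type': 'IaaAugment', 'args': [{'type': 'Fliplr', 'args': {'p': 0.5}},
#                                       {'type': 'Affine', 'args': {'rotate': [-10, 10]}},
#                                       {'type': 'Resize', 'args': {'size': [0.5, 3]}}]},
#       {'type': 'EastRandomCropData', 'args': {'size': [640, 640], 'max_tries': 50, 'keep_ratio': True}},
#       {'type': 'MakeBorderMap', 'args': {'shrink_ratio': 0.4, 'thresh_min': 0.3, 'thresh_max': 0.7}},
#       {'type': 'MakeShrinkMap', 'args': {'shrink_ratio': 0.4, 'min_text_size': 8}},
#   ]
#
# Each entry's 'type' must be a class name importable from
# nvidia_tao_pytorch.cv.ocdnet.data_loader.modules; its 'args' are passed to the constructor.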
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/ocdnet/base/base_dataset.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dataset module for OCDNet."""
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/ocdnet/base/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MAL module."""
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/mal/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MAL config module."""
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/mal/config/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Default config file."""
from typing import List, Optional
from dataclasses import dataclass, field
from omegaconf import MISSING
@dataclass
class InferConfig:
"""Inference configuration template."""
ann_path: str = MISSING
img_dir: str = MISSING
label_dump_path: str = MISSING
batch_size: int = 3
load_mask: bool = False
results_dir: Optional[str] = None
@dataclass
class EvalConfig:
"""Evaluation configuration template."""
batch_size: int = 3
use_mixed_model_test: bool = False
use_teacher_test: bool = False
comp_clustering: bool = False
use_flip_test: bool = False
results_dir: Optional[str] = None
@dataclass
class DataConfig:
"""Data configuration template."""
type: str = 'coco'
train_ann_path: str = ''
train_img_dir: str = ''
val_ann_path: str = ''
val_img_dir: str = ''
min_obj_size: float = 2048
max_obj_size: float = 1e10
num_workers_per_gpu: int = 2
load_mask: bool = True
crop_size: int = 512
@dataclass
class ModelConfig:
"""Model configuration template."""
arch: str = 'vit-mae-base/16'
frozen_stages: List[int] = field(default_factory=lambda: [-1])
mask_head_num_convs: int = 4
mask_head_hidden_channel: int = 256
mask_head_out_channel: int = 256
teacher_momentum: float = 0.996
not_adjust_scale: bool = False
mask_scale_ratio_pre: int = 1
mask_scale_ratio: float = 2.0
vit_dpr: float = 0
@dataclass
class TrainConfig:
"""Train configuration template."""
seed: int = 1
num_epochs: int = 10
save_every_k_epoch: int = 1
val_interval: int = 1
batch_size: int = 3
accum_grad_batches: int = 1
use_amp: bool = True
# optim
optim_type: str = 'adamw'
optim_momentum: float = 0.9
lr: float = 0.000001
min_lr: float = 0
min_lr_rate: float = 0.2
num_wave: float = 1
wd: float = 0.0005
optim_eps: float = 1e-8
optim_betas: List[float] = field(default_factory=lambda: [0.9, 0.9])
warmup_epochs: int = 1
margin_rate: List[float] = field(default_factory=lambda: [0, 1.2])
test_margin_rate: List[float] = field(default_factory=lambda: [0.6, 0.6])
mask_thres: List[float] = field(default_factory=lambda: [0.1])
# loss
loss_mil_weight: float = 4
loss_crf_weight: float = 0.5
# crf
crf_zeta: float = 0.1
crf_kernel_size: int = 3
crf_num_iter: int = 100
loss_crf_step: int = 4000
loss_mil_step: int = 1000
crf_size_ratio: int = 1
crf_value_high_thres: float = 0.9
crf_value_low_thres: float = 0.1
results_dir: Optional[str] = None
@dataclass
class ExperimentConfig:
"""Experiment configuration template."""
gpu_ids: List[int] = field(default_factory=lambda: [])
strategy: str = 'ddp'
num_nodes: int = 1
checkpoint: Optional[str] = None
dataset: DataConfig = DataConfig()
train: TrainConfig = TrainConfig()
model: ModelConfig = ModelConfig()
inference: InferConfig = InferConfig()
evaluate: EvalConfig = EvalConfig()
results_dir: str = MISSING
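# Minimal sketch of materializing this schema with OmegaConf (values are illustrative
# assumptions; in TAO the config is normally composed by Hydra from an experiment spec):
if __name__ == "__main__":
    from omegaconf import OmegaConf

    cfg = OmegaConf.structured(ExperimentConfig)
    cfg.results_dir = "/tmp/mal_results"
    cfg.dataset.train_ann_path = "/data/annotations/train.json"
    # Fields left as MISSING are rendered as '???' until the user provides them.
    print(OmegaConf.to_yaml(cfg))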
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/mal/config/default_config.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""LR scheduler utils."""
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/mal/lr_schedulers/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# This work is made available under the Nvidia Source Code License-NC.
# To view a copy of this license, visit
# https://github.com/NVlabs/MAL/blob/main/LICENSE
#
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""Cosine learning rate scheduler."""
import math
def adjust_learning_rate(optimizer, epoch, cfg):
"""Decay the learning rate with half-cycle cosine after warmup.
Args:
optimizer (torch.optim): PyTorch optimizer
epoch (int): current epoch
cfg (OmegaConfig): Hydra config
Return:
lr (float): current learning rate
"""
if epoch < cfg.train.warmup_epochs:
lr = cfg.train.lr * (epoch / cfg.train.warmup_epochs)
else:
lr = cfg.train.min_lr + (cfg.train.lr - cfg.train.min_lr) * 0.5 * \
(1. + math.cos(
math.pi * (epoch - cfg.train.warmup_epochs) /
(cfg.train.num_epochs - cfg.train.warmup_epochs) * cfg.train.num_wave))
for param_group in optimizer.param_groups:
if "lr_scale" in param_group:
param_group["lr"] = lr * param_group["lr_scale"]
else:
param_group["lr"] = lr
return lr
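# Minimal sketch of the warmup + half-cycle cosine schedule (the optimizer and the
# config values below are illustrative assumptions, not TAO defaults):
if __name__ == "__main__":
    import torch
    from omegaconf import OmegaConf

    cfg = OmegaConf.create(
        {"train": {"lr": 1e-4, "min_lr": 0.0, "warmup_epochs": 1, "num_epochs": 10, "num_wave": 1}}
    )
    optimizer = torch.optim.SGD([torch.nn.Parameter(torch.zeros(1))], lr=cfg.train.lr)
    for epoch in range(cfg.train.num_epochs):
        # Linear warmup for the first `warmup_epochs`, cosine decay afterwards.
        print(epoch, adjust_learning_rate(optimizer, epoch, cfg))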
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/mal/lr_schedulers/cosine_lr.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# This work is made available under the Nvidia Source Code License-NC.
# To view a copy of this license, visit
# https://github.com/NVlabs/MAL/blob/main/LICENSE
"""COCO dataset."""
from nvidia_tao_pytorch.cv.mal.datasets.voc import InstSegVOC, BoxLabelVOC, InstSegVOCwithBoxInput
class BoxLabelCOCO(BoxLabelVOC):
"""Dataset to load COCO box labels."""
def get_category_mapping(self):
"""Category mapping."""
categories = self.coco.dataset['categories']
self.cat_mapping = {cat['id']: idx + 1 for idx, cat in enumerate(categories)}
class InstSegCOCO(InstSegVOC):
"""Dataset to load COCO instance segmentation labels."""
def get_category_mapping(self):
"""Category mapping."""
categories = self.coco.dataset['categories']
self.cat_mapping = {cat['id']: idx + 1 for idx, cat in enumerate(categories)}
class InstSegCOCOwithBoxInput(InstSegVOCwithBoxInput):
"""Dataset to load COCO labels with only box input."""
def get_category_mapping(self):
"""Category mapping."""
categories = self.coco.dataset['categories']
self.cat_mapping = {cat['id']: idx + 1 for idx, cat in enumerate(categories)}
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/mal/datasets/coco.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MAL data module."""
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/mal/datasets/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# This work is made available under the Nvidia Source Code License-NC.
# To view a copy of this license, visit
# https://github.com/NVlabs/MAL/blob/main/LICENSE
"""Data augmentation."""
import collections
from copy import deepcopy
from PIL import ImageFilter, ImageOps, Image
import numpy as np
import random
import torch
from torch.utils.data._utils.collate import default_collate
from torchvision import transforms
from nvidia_tao_pytorch.cv.mal.datasets.voc import DataWrapper
def custom_crop_image(img, box):
"""This function aims at getting `no padding` cropped image.
Implementation Details:
If the target box goes beyond one of the borderlines,
the function will crop the content from the opposite
side of the image
Examples:
An image of HxW, if we crop the image using box
[W-10, H-10, W+10, H+10]
Top-left corner: (W-10, H-10);
Bottom-right corner: (W+10, H+10).
Motivation:
Since the CRF algorithm uses the original pixels
for generating pseudo-labels, each pixels matters a lot here.
A fact that synthetic padding pixels (mean color of ImageNet)
do sereve damage to the refined image
"""
# box [x0, y0, x1 y1] [top left x, top left y, bottom right x, bottom right y]
ret_shape = list(img.shape)
ret_shape[:2] = box[3] - box[1], box[2] - box[0]
h, w = img.shape[:2]
ret_img = np.zeros(ret_shape)
# top left
if box[0] < 0 and box[1] < 0:
ret_img[:-box[1], :-box[0]] = img[box[1]:, box[0]:]
# middle top
if (box[0] < w and box[2] > 0) and box[1] < 0:
ret_img[:-box[1], max(-box[0], 0): min(w, box[2]) - box[0]] = img[box[1]:, max(0, box[0]):min(w, box[2])]
# top right
if box[2] > w and box[1] < 0:
ret_img[:-box[1], -(box[2] - w):] = img[box[1]:, :box[2] - w]
# middle left
if box[0] < 0 and (box[1] < h and box[3] > 0):
ret_img[max(0, -box[1]): min(h, box[3]) - box[1], :-box[0]] = img[max(0, box[1]):min(h, box[3]), box[0]:]
# middle right
if box[2] > w and (box[1] < h and box[3] > 0):
ret_img[max(0, -box[1]): min(h, box[3]) - box[1], -(box[2] - w):] = img[max(0, box[1]):min(h, box[3]), :(box[2] - w)]
# bottom left
if box[0] < 0 and box[3] > h:
ret_img[-(box[3] - h):, :-box[0]] = img[:box[3] - h, box[0]:]
# middle bottom
if (box[0] < w and box[2] > 0) and box[3] > h:
ret_img[-(box[3] - h):, max(-box[0], 0): min(w, box[2]) - box[0]] = img[:box[3] - h, max(0, box[0]):min(w, box[2])]
# bottom right
if box[2] > w and box[3] > h:
ret_img[-(box[3] - h):, -(box[2] - w):] = img[:(box[3] - h), :(box[2] - w)]
# middle
ret_img[max(0, -box[1]): min(h, box[3]) - box[1], max(0, -box[0]): min(w, box[2]) - box[0]] = \
img[max(box[1], 0): min(h, box[3]), max(box[0], 0): min(w, box[2])]
return ret_img
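# Illustrative sketch of the wrap-around behavior (values are assumptions for clarity):
# for an image `img` of shape (H, W, 3), a box that extends 10 px past the right and
# bottom borders, e.g. box = [W - 50, H - 50, W + 10, H + 10], yields a 60x60 crop whose
# out-of-bound strip is filled with pixels taken from the opposite (left/top) side of
# `img` rather than with constant padding:
#
#   crop = custom_crop_image(img, [img.shape[1] - 50, img.shape[0] - 50,
#                                  img.shape[1] + 10, img.shape[0] + 10])
#   assert crop.shape[:2] == (60, 60)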
def custom_collate_fn(batch):
"""Puts each data field into a tensor with outer dimension batch size."""
elem = batch[0]
elem_type = type(elem)
if isinstance(elem, collections.abc.Mapping):
try:
return elem_type({key: custom_collate_fn([d[key] for d in batch]) for key in elem})
except TypeError:
# The mapping type may not support `__init__(iterable)`.
return {key: custom_collate_fn([d[key] for d in batch]) for key in elem}
if isinstance(elem, DataWrapper):
return batch
return default_collate(batch)
class RandomCropV2:
"""RandomCropV2."""
def __init__(self, max_size=512, margin_rate=[0.05, 0.15],
mean=(0.485, 0.456, 0.406), random=True,
crop_fields=['image', 'mask']):
"""Initialize RandomCrop V2 augmentation.
Args:
max_size (int): Crop image size
margin_rate (list): Range of bbox expansion rate
mean (list): Normalized image mean in RGB order
random (bool): Whether to random pick a value within the margin_rate range
crop_fields (list): list of keys indicating the crop type
"""
self._max_size = max_size
self._margin_rate = np.array(margin_rate)
self._mean = np.array(mean) * 255
self._random = random
self._crop_fields = crop_fields
def _expand_box(self, box, margins):
"""Expand bounding box by margin.
Args:
box (np.array): bounding box coordinates
margins (np.array): margin rates for each coordinate
Return:
box (np.array): expanded bounding box coordinates
"""
ctr = (box[2] + box[0]) / 2, (box[3] + box[1]) / 2
box = ctr[0] - (ctr[0] - box[0]) * (1 + margins[0]), \
ctr[1] - (ctr[1] - box[1]) * (1 + margins[1]), \
ctr[0] + (box[2] - ctr[0]) * (1 + margins[2]) + 1, \
ctr[1] + (box[3] - ctr[1]) * (1 + margins[3]) + 1
return box
def __call__(self, data):
"""Call."""
# obtain more info
img = np.array(data['image'])
box = np.array(data['bbox'])
h, w = img.shape[0], img.shape[1]
if self._random:
margins = np.random.rand(4) * (self._margin_rate[1] - self._margin_rate[0]) + self._margin_rate[0]
gates = np.random.rand(2)
gates = np.array([gates[0], gates[1], 1 - gates[0], 1 - gates[1]])
margins = margins * gates
extbox = self._expand_box(box, margins)
extbox = np.array([np.floor(extbox[0]), np.floor(extbox[1]), np.ceil(extbox[2]), np.ceil(extbox[3])]).astype(np.int32)
ext_h, ext_w = extbox[3] - extbox[1], extbox[2] - extbox[0]
else:
margins = np.ones(4) * self._margin_rate[0] * 0.5
extbox = self._expand_box(box, margins)
extbox = np.array([np.floor(extbox[0]), np.floor(extbox[1]), np.ceil(extbox[2]), np.ceil(extbox[3])]).astype(np.int32)
ext_h, ext_w = extbox[3] - extbox[1], extbox[2] - extbox[0]
# extended box size
data['ext_h'], data['ext_w'] = ext_h, ext_w
# crop image
if 'image' in self._crop_fields:
ret_img = custom_crop_image(img, extbox)
ret_img = Image.fromarray(ret_img.astype(np.uint8)).resize((self._max_size, self._max_size))
data['image'] = ret_img
# crop mask
if 'mask' in self._crop_fields and 'mask' in data.keys():
mask = np.array(data['mask'])
ret_mask = custom_crop_image(mask, extbox)
ret_mask = Image.fromarray(ret_mask.astype(np.uint8)).resize((self._max_size, self._max_size))
ret_mask = np.array(ret_mask)
data['mask'] = ret_mask
# crop box mask (during test)
if 'boxmask' in self._crop_fields:
boxmask = data['boxmask']
ret_boxmask = np.zeros((ext_h, ext_w))
ret_boxmask[max(0 - extbox[1], 0):ext_h + min(0, h - extbox[3]),
max(0 - extbox[0], 0):ext_w + min(0, w - extbox[2])] = \
boxmask[max(extbox[1], 0):min(extbox[3], h),
max(extbox[0], 0):min(extbox[2], w)]
ret_boxmask = np.array(Image.fromarray(ret_boxmask.astype(np.uint8)).resize((self._max_size, self._max_size)))
data['boxmask'] = ret_boxmask
data['ext_boxes'] = extbox
data['margins'] = margins
return data
class RandomCropV3(RandomCropV2):
"""RandomCropV3."""
def __call__(self, data):
"""Call."""
# obtain more info
img = np.array(data['image'])
box = np.array(data['bbox'])
h, w = img.shape[0], img.shape[1]
if self._random:
margins = np.random.rand(4) * (self._margin_rate[1] - self._margin_rate[0]) + self._margin_rate[0]
gates = np.random.rand(2)
gates = np.array([gates[0], gates[1], 1 - gates[0], 1 - gates[1]])
margins = margins * gates
extbox = self._expand_box(box, margins)
extbox = np.array([np.floor(extbox[0]), np.floor(extbox[1]), np.ceil(extbox[2]), np.ceil(extbox[3])]).astype(np.int32)
ext_h, ext_w = extbox[3] - extbox[1], extbox[2] - extbox[0]
else:
margins = np.ones(4) * self._margin_rate[0] * 0.5
extbox = self._expand_box(box, margins)
extbox = np.array([np.floor(extbox[0]), np.floor(extbox[1]), np.ceil(extbox[2]), np.ceil(extbox[3])]).astype(np.int32)
ext_h, ext_w = extbox[3] - extbox[1], extbox[2] - extbox[0]
# extended box size
data['ext_h'], data['ext_w'] = ext_h, ext_w
# crop image
if 'image' in self._crop_fields:
ret_img = custom_crop_image(img, extbox)
ret_img = Image.fromarray(ret_img.astype(np.uint8)).resize((self._max_size, self._max_size))
data['image'] = ret_img
# crop mask
if 'mask' in self._crop_fields:
mask = np.array(data['mask'])
ret_mask = custom_crop_image(mask, extbox)
ret_mask = Image.fromarray(ret_mask.astype(np.uint8)).resize((self._max_size, self._max_size))
ret_mask = np.array(ret_mask)
data['mask'] = ret_mask
# crop box mask (during test)
if 'boxmask' in self._crop_fields:
boxmask = data['boxmask']
ret_boxmask = np.zeros((ext_h, ext_w))
ret_boxmask[max(0 - extbox[1], 0):ext_h + min(0, h - extbox[3]),
max(0 - extbox[0], 0):ext_w + min(0, w - extbox[2])] = \
boxmask[max(extbox[1], 0):min(extbox[3], h),
max(extbox[0], 0):min(extbox[2], w)]
ret_boxmask = np.array(Image.fromarray(ret_boxmask.astype(np.uint8)).resize((self._max_size, self._max_size)))
data['boxmask'] = ret_boxmask
data['ext_boxes'] = extbox
data['margins'] = margins
return data
class RandomFlip:
"""Random Flip."""
def __init__(self, p=0.5):
"""Initialize RandomFlip augmentation.
Args:
p (float): probability of horizontal flip
"""
self.p = p
def __call__(self, x):
"""Call."""
if 'aug_images' in x.keys():
x['flip_records'] = []
for idx in range(len(x['aug_images'])):
x['flip_records'].append([])
for jdx in range(len(x['aug_images'][idx])):
if float(torch.rand(1)) > self.p:
x['aug_images'][idx][jdx] = ImageOps.mirror(x['aug_images'][idx][jdx])
x['flip_records'][idx].append(1)
else:
x['flip_records'][idx].append(0)
elif 'image' in x.keys():
if float(torch.rand(1)) > self.p:
x['flip_records'] = 1
x['image'] = ImageOps.mirror(x['image'])
x['mask'] = x['mask'][:, ::-1]
else:
x['flip_records'] = 0
else:
raise NotImplementedError
return x
class Normalize(transforms.Normalize):
"""Normalize image in dictionary."""
def forward(self, data):
"""Forward."""
if 'image' in data.keys():
data['image'] = super().forward(data['image'])
if 'timage' in data.keys():
data['timage'] = super().forward(data['timage'])
elif 'aug_images' in data.keys():
for idx in range(len(data['aug_images'])):
for jdx in range(len(data['aug_images'][idx])):
data['aug_images'][idx][jdx] = super().forward(data['aug_images'][idx][jdx])
else:
raise NotImplementedError
return data
class Denormalize:
"""Denormalize image."""
def __init__(self, mean, std):
"""Initialize image denorm.
Args:
mean (np.array): image mean
std (np.array): image standard deviation
"""
self._mean = mean
self._std = std
def __call__(self, img):
"""Call."""
img = (img * self._std + self._mean) * 255
return img
class ToTensor(transforms.ToTensor):
"""Dictioinary data to Tensor."""
def __call__(self, data):
"""Call."""
if 'image' in data.keys():
if isinstance(data['image'], (list, tuple)):
img_list = []
for img in data['image']:
img_list.append(super().__call__(img))
data['image'] = torch.cat(img_list)
else:
data['image'] = super().__call__(data['image'])
if 'flip_records' in data.keys():
data['flip_records'] = torch.tensor([data['flip_records']])
elif 'aug_images' in data.keys():
for idx in range(len(data['aug_images'])):
for jdx in range(len(data['aug_images'][idx])):
data['aug_images'][idx][jdx] = super().__call__(data['aug_images'][idx][jdx])
data['aug_ranges'][idx][jdx] = torch.tensor(data['aug_ranges'][idx][jdx])
if 'flip_records' in data.keys():
data['flip_records'][idx] = torch.tensor(data['flip_records'][idx])
else:
raise NotImplementedError
if 'timage' in data.keys():
if isinstance(data['timage'], (list, tuple)):
img_list = []
for img in data['timage']:
img_list.append(super().__call__(img))
data['timage'] = torch.cat(img_list)
else:
data['timage'] = super().__call__(data['timage'])
if 'mask' in data.keys():
if isinstance(data['mask'], (list, tuple)):
mask_list = []
for mask in data['mask']:
mask_list.append(torch.tensor(mask, dtype=torch.float)[None, ...])
data['mask'] = torch.cat(mask_list)
else:
data['mask'] = torch.tensor(data['mask'], dtype=torch.float)[None, ...]
if 'boxmask' in data.keys():
if isinstance(data['boxmask'], (list, tuple)):
mask_list = []
for mask in data['boxmask']:
mask_list.append(torch.tensor(mask, dtype=torch.float)[None, ...])
data['boxmask'] = torch.cat(mask_list)
else:
data['boxmask'] = torch.tensor(data['boxmask'], dtype=torch.float)[None, ...]
if 'ann' in data.keys():
data['ann'] = torch.tensor(data['ann'])
return data
class ColorJitter(transforms.ColorJitter):
"""Color Jitter."""
def single_forward(self, img):
"""Single forward."""
if isinstance(img, list):
return [self.single_forward(_img) for _img in img]
return super().forward(img)
def forward(self, data):
"""Forward."""
if 'image' in data.keys():
data['image'] = self.single_forward(data['image'])
elif 'aug_images' in data.keys():
for idx in range(len(data['aug_images'])):
for jdx in range(len(data['aug_images'][idx])):
data['aug_images'][idx][jdx] = super().forward(data['aug_images'][idx][jdx])
else:
raise NotImplementedError
return data
class RandomGrayscale(transforms.RandomGrayscale):
"""Random Grayscale."""
def single_forward(self, img):
"""Single forward."""
if isinstance(img, list):
return [self.single_forward(_img) for _img in img]
return super().forward(img)
def forward(self, data):
"""Forward."""
if 'image' in data.keys():
data['image'] = self.single_forward(data['image'])
elif 'aug_images' in data.keys():
for idx in range(len(data['aug_images'])):
for jdx in range(len(data['aug_images'][idx])):
data['aug_images'][idx][jdx] = super().forward(data['aug_images'][idx][jdx])
else:
raise NotImplementedError
return data
class GaussianBlur(object):
"""Apply Gaussian Blur to the PIL image."""
def __init__(self, p=0.5, radius_min=0.1, radius_max=2.):
"""Initialze GaussianBlur augmentation.
Args:
            p (float): probability of applying Gaussian blur
radius_min (float): minimum of the radius of the blur kernel
radius_max (float): maximum of the radius of the blur kernel
"""
self.prob = p
self.radius_min = radius_min
self.radius_max = radius_max
def single_forward(self, img):
"""Single forward."""
if isinstance(img, list):
return [self.single_forward(img_) for img_ in img]
do_it = random.random() <= self.prob
if not do_it:
return img
return img.filter(
ImageFilter.GaussianBlur(
radius=random.uniform(self.radius_min, self.radius_max)
)
)
def __call__(self, data):
"""Call."""
if 'image' in data.keys():
data['image'] = self.single_forward(data['image'])
elif 'aug_images' in data.keys():
for idx in range(len(data['aug_images'])):
for jdx in range(len(data['aug_images'][idx])):
data['aug_images'][idx][jdx] = self.single_forward(data['aug_images'][idx][jdx])
else:
raise NotImplementedError
return data
class DropAllExcept:
"""Drop all except keys to keep."""
def __init__(self, keep_keys):
"""Initialize key filtering.
Args:
keep_keys (list): list of keys to keep
"""
self.keep_keys = keep_keys
def __call__(self, data):
"""Call."""
data_keys = list(data.keys())
for key in data_keys:
if key not in self.keep_keys:
del data[key]
return data
class ChangeNames:
"""Change names."""
def __init__(self, kv_dic):
"""Initialize key changer.
Args:
kv_dic (dict): key and updated_key pair
"""
self.kv_dic = kv_dic
def __call__(self, data):
"""Call."""
data_keys = list(data.keys())
for key, value in self.kv_dic.items():
if key in data_keys:
data[value] = data[key]
del data[key]
return data
class Solarization:
"""Apply Solarization to the PIL image."""
def __init__(self, p):
"""Init."""
self.p = p
def single_forward(self, img):
"""Single forward."""
if isinstance(img, list):
return [self.single_forward(img_) for img_ in img]
if random.random() < self.p:
return ImageOps.solarize(img)
return img
def __call__(self, data):
"""Call."""
if 'image' in data.keys():
data['image'] = self.single_forward(data['image'])
elif 'aug_images' in data.keys():
for idx in range(len(data['aug_images'])):
for jdx in range(len(data['aug_images'][idx])):
data['aug_images'][idx][jdx] = self.single_forward(data['aug_images'][idx][jdx])
else:
raise NotImplementedError
return data
class ImageSizeAlignment:
"""Image Size Alignment."""
def __init__(self, max_size, mean, random_offset=False):
"""Init."""
self._max_size = max_size
self._mean = (np.array(mean) * 255).astype(np.uint8)
self._random_offset = random_offset
def __call__(self, data):
"""Call."""
assert 'image' in data.keys()
padded_image = np.ones((self._max_size, self._max_size, 3), dtype=np.uint8) * self._mean
image = np.array(data['image'])
h, w = image.shape[0], image.shape[1]
if self._random_offset:
offy, offx = torch.randint(0, self._max_size - h + 1, (1,)), torch.randint(0, self._max_size - w + 1, (1,))
else:
offy, offx = 0, 0
padded_image[offy: offy + h, offx: offx + w] = image
data['image'] = Image.fromarray(padded_image)
if 'mask' in data.keys():
padded_mask = np.ones((self._max_size, self._max_size))
padded_mask[offy: offy + h, offx: offx + w] = np.array(data['mask'])
data['mask'] = Image.fromarray(padded_mask)
return data
class SplitAndMerge:
"""Split and Merge."""
def __init__(self, branch1, branch2):
"""Initialize SplitAndMerge.
Args:
branch1 (transforms.Compose): data processing branch1
branch2 (transforms.Compose): data processing branch2
"""
self.branch1 = branch1
self.branch2 = branch2
def __call__(self, data):
"""Call."""
data_clone = deepcopy(data)
data1 = self.branch1(data_clone)
data_clone = deepcopy(data)
data2 = self.branch2(data_clone)
data1.update(data2)
return data1
data_aug_pipelines = {
'test': lambda cfg: transforms.Compose([
RandomCropV2(cfg.dataset.crop_size,
margin_rate=cfg.train.test_margin_rate,
random=False,
crop_fields=['image', 'boxmask', 'mask']),
ToTensor(),
Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
]),
'train': lambda cfg: transforms.Compose([
RandomCropV3(cfg.dataset.crop_size, margin_rate=cfg.train.margin_rate),
RandomFlip(0.5),
SplitAndMerge(
transforms.Compose([
transforms.RandomApply(
[ColorJitter(brightness=0.4, contrast=0.4, saturation=0.2, hue=0.1)],
p=0.5
),
RandomGrayscale(p=0.2),
transforms.RandomApply([GaussianBlur(1.0)], p=0.5)
]),
transforms.Compose([
DropAllExcept(['image']),
ChangeNames({'image': 'timage'})
])
),
ToTensor(),
Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
]),
}
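# A minimal sketch of instantiating the pipelines above. The config keys follow the
# MAL ExperimentConfig schema, but the concrete values here are assumptions for
# illustration; applying a pipeline expects the data dict produced by the datasets
# (keys such as 'image', 'bbox', 'mask'/'boxmask').
if __name__ == "__main__":
    from omegaconf import OmegaConf

    cfg = OmegaConf.create({
        "dataset": {"crop_size": 512},
        "train": {"margin_rate": [0, 1.2], "test_margin_rate": [0.6, 0.6]},
    })
    train_pipeline = data_aug_pipelines["train"](cfg)
    test_pipeline = data_aug_pipelines["test"](cfg)
    print(train_pipeline)
    print(test_pipeline)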
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/mal/datasets/data_aug.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# This work is made available under the Nvidia Source Code License-NC.
# To view a copy of this license, visit
# https://github.com/NVlabs/MAL/blob/main/LICENSE
"""Custom LightningDataModule for MAL."""
import logging
import pytorch_lightning as pl
from torch.utils.data import DataLoader
from nvidia_tao_pytorch.cv.mal.datasets.coco import BoxLabelCOCO, InstSegCOCO, InstSegCOCOwithBoxInput
from nvidia_tao_pytorch.cv.mal.datasets.data_aug import data_aug_pipelines, custom_collate_fn
logger = logging.getLogger(__name__)
class WSISDataModule(pl.LightningDataModule):
"""Weakly supervised instance segmentation data module."""
def __init__(self,
num_workers,
load_train=False,
load_val=False,
cfg=None):
"""Initialize train/val dataset and dataloader.
Args:
num_workers (int): Total number of workers
load_train (bool): Whether to load training set
load_val (bool): Whether to load validation set
cfg (OmegaConf): Hydra config
"""
super().__init__()
self.cfg = cfg
self.num_workers = num_workers
self.train_transform = data_aug_pipelines['train'](cfg)
self.test_transform = data_aug_pipelines['test'](cfg)
assert self.cfg.dataset.type == 'coco', 'only COCO format is supported.'
self._train_data_loader = None
self._val_data_loader = None
self.box_inputs = None
if load_train:
logger.info("Loading train set...")
dataset = BoxLabelCOCO(
self.cfg.dataset.train_ann_path,
self.cfg.dataset.train_img_dir,
min_obj_size=self.cfg.dataset.min_obj_size,
max_obj_size=self.cfg.dataset.max_obj_size,
transform=self.train_transform, cfg=cfg)
data_loader = DataLoader(
dataset, batch_size=self.cfg.train.batch_size, shuffle=True,
num_workers=self.num_workers)
self._train_data_loader = data_loader
logger.info("Train set is loaded successfully.")
if load_val:
logger.info("Loading validation set...")
build_dataset = InstSegCOCOwithBoxInput if self.box_inputs else InstSegCOCO
dataset = build_dataset(
self.cfg.dataset.val_ann_path,
self.cfg.dataset.val_img_dir,
min_obj_size=0,
max_obj_size=1e9,
load_mask=self.cfg.dataset.load_mask,
transform=self.test_transform,
box_inputs=self.box_inputs
)
data_loader = DataLoader(
dataset, collate_fn=custom_collate_fn,
batch_size=self.cfg.train.batch_size, num_workers=self.num_workers)
self._val_data_loader = data_loader
logger.info("Validation set is loaded successfully.")
def train_dataloader(self):
"""Set train dataloader."""
return self._train_data_loader
def val_dataloader(self):
"""Set val dataloader."""
return self._val_data_loader
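# Hedged usage sketch (paths and worker count below are placeholders, not defaults;
# a real run needs COCO-format annotation files on disk):
#
#   cfg.dataset.train_ann_path = "/data/coco/annotations/instances_train2017.json"
#   cfg.dataset.train_img_dir = "/data/coco/train2017"
#   datamodule = WSISDataModule(num_workers=4, load_train=True, load_val=False, cfg=cfg)
#   train_loader = datamodule.train_dataloader()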
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/mal/datasets/pl_data_module.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# This work is made available under the Nvidia Source Code License-NC.
# To view a copy of this license, visit
# https://github.com/NVlabs/MAL/blob/main/LICENSE
"""VOC dataset."""
import os
import logging
import json
import numpy as np
from PIL import Image
from pycocotools.coco import COCO
import pycocotools.mask as maskUtils
from torch.utils.data import Dataset
logger = logging.getLogger(__name__)
class DataWrapper:
"""Simple data wrapper."""
def __init__(self, data):
"""Initialize DataWrapper.
Args:
data (np.array): numpy array
"""
self.data = data
class BoxLabelVOC(Dataset):
"""Base class for loading COCO format labels."""
def __init__(self, ann_path, img_data_dir,
min_obj_size=0, max_obj_size=1e10,
transform=None, cfg=None,
**kwargs):
"""Initialize dataset.
Args:
ann_path (str): annotation file in json format
img_data_dir (str): raw image directory
min_obj_size (float): min object size
max_obj_size (float): max object size
transform (transform.Compose): data augmentation methods
cfg (Hydra config): Hydra configurations
"""
self.cfg = cfg
self.ann_path = ann_path
self.img_data_dir = img_data_dir
self.min_obj_size = min_obj_size
self.max_obj_size = max_obj_size
self.transform = transform
self.coco = COCO(ann_path)
self._filter_imgs()
self.get_category_mapping()
def get_category_mapping(self):
"""Map category index in json to 1 based index."""
self.cat_mapping = dict([i, i] for i in range(1, 21))
def _filter_imgs(self):
"""Filter out bboxes based on area and H/W range."""
anns = self.coco.dataset['annotations']
filtered_anns = []
for ann in anns:
# query image info
image_info = self.coco.loadImgs(ann['image_id'])[0]
# check if bbox is out of bound
is_correct_bbox = ann['bbox'][0] >= 0 and ann['bbox'][1] >= 0 and \
(ann['bbox'][0] + ann['bbox'][2]) <= image_info['width'] and \
(ann['bbox'][1] + ann['bbox'][3]) <= image_info['height']
area = ann['bbox'][2] * ann['bbox'][3]
# check if bbox area is within range
is_correct_area = self.max_obj_size > area > self.min_obj_size
# additionally, check bbox w/h > 2
if is_correct_bbox and is_correct_area and ann['bbox'][2] > 2 and ann['bbox'][3] > 2:
filtered_anns.append(ann)
        num_filtered = len(anns) - len(filtered_anns)
        self.coco.dataset['annotations'] = filtered_anns
        if num_filtered > 0:
print("***********************************")
print(f"WARNING: {num_filtered} bboxes were filtered out.")
print("***********************************")
def __len__(self):
"""Total number of bboxes."""
return len(self.coco.getAnnIds())
def __getitem__(self, idx):
"""Per item."""
ann = self.coco.dataset['annotations'][idx]
img_info = self.coco.loadImgs(ann['image_id'])[0]
h, w, file_name = img_info['height'], img_info['width'], img_info['file_name']
img = self.get_image(file_name)
# box mask
mask = np.zeros((h, w))
bbox = ann['bbox']
x0, y0, x1, y1 = int(bbox[0]), int(bbox[1]), int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3])
mask[y0:y1 + 1, x0:x1 + 1] = 1
data = {
'image': img, 'mask': mask,
'height': h, 'width': w,
'category_id': ann['category_id'],
'bbox': np.array([bbox[0], bbox[1], bbox[0] + bbox[2], bbox[1] + bbox[3]], dtype=np.float32),
'compact_category_id': self.cat_mapping[int(ann['category_id'])],
'id': ann['id']
}
if self.transform is not None:
data = self.transform(data)
return data
def get_image(self, file_name):
"""Load image.
Args:
file_name (str): relative path to an image file.
Return:
image (PIL image): loaded image
"""
image = Image.open(os.path.join(self.img_data_dir, file_name)).convert('RGB')
return image
class InstSegVOC(BoxLabelVOC):
"""Class for loading COCO format labels with instance segmentation masks."""
def __init__(self, *args, load_mask=True, **kwargs):
"""Initialize dataset with segmentation groundtruth.
Args:
load_mask (bool): whether to load instance segmentation annotations
"""
super().__init__(*args, **kwargs)
self.load_mask = load_mask
if load_mask:
for ann in self.coco.dataset['annotations']:
if not ann.get('segmentation', None):
raise ValueError(
"Please check your annotation file, "
"as not all annotations contain segmentation info. "
"Or set load_mask to False.")
self.get_category_mapping()
def __getitem__(self, idx):
"""Per item."""
ann = self.coco.dataset['annotations'][idx]
img_info = self.coco.loadImgs(ann['image_id'])[0]
h, w, file_name = img_info['height'], img_info['width'], img_info['file_name']
img = self.get_image(file_name)
# box mask
boxmask = np.zeros((h, w))
bbox = ann['bbox']
x0, y0, x1, y1 = int(bbox[0]), int(bbox[1]), int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3])
boxmask[y0:y1 + 1, x0:x1 + 1] = 1
data = {'image': img, 'boxmask': boxmask,
'height': h, 'width': w,
'category_id': ann['category_id'],
'bbox': np.array(
[bbox[0], bbox[1], bbox[0] + bbox[2], bbox[1] + bbox[3]], dtype=np.float32),
'compact_category_id': self.cat_mapping[int(ann['category_id'])],
'id': ann['id'],
'image_id': ann['image_id']}
if self.load_mask:
# mask = np.ascontiguousarray(
# maskUtils.decode(maskUtils.frPyObjects(ann['segmentation'], h, w)))
# polygons
if isinstance(ann['segmentation'], list):
rles = maskUtils.frPyObjects(ann['segmentation'], h, w)
rle = maskUtils.merge(rles)
elif 'counts' in ann['segmentation']:
# e.g. {'counts': [6, 1, 40, 4, 5, 4, 5, 4, 21], 'size': [9, 10]}
if isinstance(ann['segmentation']['counts'], list):
rle = maskUtils.frPyObjects(ann['segmentation'], h, w)
else:
rle = ann['segmentation']
else:
raise ValueError('Please check the segmentation format.')
mask = np.ascontiguousarray(maskUtils.decode(rle))
if len(mask.shape) > 2:
mask = mask.transpose((2, 0, 1)).sum(0) > 0
mask = mask.astype(np.uint8)
data['gtmask'] = DataWrapper(mask)
data['mask'] = mask
if self.transform is not None:
data = self.transform(data)
return data
class InstSegVOCwithBoxInput(InstSegVOC):
"""Class for loading bbox inputs with instance segmentation masks."""
def __init__(self,
ann_path,
img_data_dir,
min_obj_size=0,
max_obj_size=1e10,
transform=None,
load_mask=True,
box_inputs=None):
"""Init."""
self.load_mask = load_mask
self.ann_path = ann_path
self.img_data_dir = img_data_dir
self.min_obj_size = min_obj_size
self.max_obj_size = max_obj_size
self.transform = transform
self.coco = COCO(ann_path)
self._filter_imgs()
self.get_category_mapping()
with open(box_inputs, "r") as f:
self.val_coco = json.load(f)
def __len__(self):
"""Number of samples."""
return len(self.val_coco)
def __getitem__(self, idx):
"""Per item."""
ann = self.val_coco[idx]
img_info = self.coco.loadImgs(ann['image_id'])[0]
h, w, file_name = img_info['height'], img_info['width'], img_info['file_name']
img = self.get_image(file_name)
# box mask
boxmask = np.zeros((h, w))
bbox = np.array(ann['bbox'])
x0, y0, x1, y1 = int(bbox[0]), int(bbox[1]), int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3])
boxmask[y0:y1 + 1, x0:x1 + 1] = 1
if 'id' not in ann.keys():
_id = hash(str(ann['image_id']) + ' ' + str(x0) + ' ' + str(x1) + ' ' + str(y0) + ' ' + str(y1))
else:
_id = ann['id']
data = {'image': img, 'boxmask': boxmask,
'height': h, 'width': w,
'category_id': ann['category_id'],
'bbox': np.array(
[bbox[0], bbox[1], bbox[0] + bbox[2], bbox[1] + bbox[3]], dtype=np.float32),
'compact_category_id': self.cat_mapping[int(ann['category_id'])],
'id': _id,
'image_id': ann['image_id'],
'score': ann['score']}
if self.load_mask:
mask = np.ascontiguousarray(maskUtils.decode(ann['segmentation']))
if len(mask.shape) > 2:
mask = mask.transpose((2, 0, 1)).sum(0) > 0
mask = mask.astype(np.uint8)
data['gtmask'] = DataWrapper(mask)
data['mask'] = mask
if self.transform is not None:
data = self.transform(data)
return data
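# Hedged sketch of the per-item dict these datasets yield when no transform is set
# (paths are placeholders; the exact keys come from __getitem__ above):
#
#   dataset = BoxLabelVOC("/data/annotations.json", "/data/images")
#   sample = dataset[0]
#   # sample['image']  -> PIL RGB image
#   # sample['mask']   -> (H, W) binary box mask
#   # sample['bbox']   -> np.float32 [x0, y0, x1, y1]
#   # sample['category_id'], sample['compact_category_id'], sample['id']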
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/mal/datasets/voc.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MAL utils module."""
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/mal/utils/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utils for configuration."""
import logging
import os
logger = logging.getLogger(__name__)
def update_config(cfg, task):
"""Update config parameters.
This function should be called at the beginning of a pipeline script.
Global results_dir will be updated based on task.results_dir
Args:
cfg (Hydra config): Config object loaded by Hydra
task (str): TAO pipeline name
Return:
Updated cfg
"""
# mask threshold
if len(cfg.train.mask_thres) == 1:
# this means to repeat the same threshold three times
# all scale objects are sharing the same threshold
cfg.train.mask_thres = [cfg.train.mask_thres[0] for _ in range(3)]
assert len(cfg.train.mask_thres) == 3, "Length of mask thresholds must be 1 or 3."
# frozen_stages
if len(cfg.model.frozen_stages) == 1:
cfg.model.frozen_stages = [0, cfg.model.frozen_stages[0]]
assert len(cfg.model.frozen_stages) == 2, "Length of frozen stages must be 1 or 2."
assert len(cfg.train.margin_rate) == 2, "Length of margin rate must be 2."
if cfg[task]['results_dir']:
cfg.results_dir = cfg[task]['results_dir']
else:
cfg.results_dir = os.path.join(cfg.results_dir, task)
cfg[task]['results_dir'] = cfg.results_dir
logger.info(f"{task.capitalize()} results will be saved at: %s", cfg.results_dir)
return cfg
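# Minimal sketch of the normalization performed above (the config below contains only
# the keys update_config touches; the values are illustrative assumptions):
if __name__ == "__main__":
    from omegaconf import OmegaConf

    cfg = OmegaConf.create({
        "results_dir": "/tmp/mal_experiment",
        "train": {"mask_thres": [0.1], "margin_rate": [0, 1.2], "results_dir": None},
        "model": {"frozen_stages": [-1]},
    })
    cfg = update_config(cfg, "train")
    print(cfg.train.mask_thres)      # -> [0.1, 0.1, 0.1]
    print(cfg.model.frozen_stages)   # -> [0, -1]
    print(cfg.results_dir)           # -> /tmp/mal_experiment/train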
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/mal/utils/config_utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MAL model module."""
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/mal/models/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# This work is made available under the Nvidia Source Code License-NC.
# To view a copy of this license, visit
# https://github.com/NVlabs/FAN/blob/main/LICENSE
"""MAL model."""
from collections import OrderedDict, namedtuple
import itertools
import json
import os
import cv2
import numpy as np
from typing import Any, Mapping, List
from pycocotools.coco import COCO
from pycocotools.mask import encode
from pycocotools.cocoeval import COCOeval
from mmcv.cnn import ConvModule
import torchmetrics
import pytorch_lightning as pl
from fairscale.nn import auto_wrap
import torch
from torch import nn
import torch.nn.functional as F
import torch.distributed as dist
import nvidia_tao_pytorch.core.loggers.api_logging as status_logging
from nvidia_tao_pytorch.cv.mal.datasets.data_aug import Denormalize
from nvidia_tao_pytorch.cv.mal.lr_schedulers.cosine_lr import adjust_learning_rate
from nvidia_tao_pytorch.cv.mal.models import vit_builder
from nvidia_tao_pytorch.cv.mal.optimizers.adamw import AdamWwStep
class _IncompatibleKeys(namedtuple('IncompatibleKeys', ['missing_keys', 'unexpected_keys'])):
def __repr__(self):
if not self.missing_keys and not self.unexpected_keys:
return '<All keys matched successfully>'
return super(_IncompatibleKeys, self).__repr__()
__str__ = __repr__
def load_state_dict(self, state_dict: Mapping[str, Any],
strict: bool = True, prefix: str = ''):
r"""Copies parameters and buffers from :attr:`state_dict` into
this module and its descendants. If :attr:`strict` is ``True``, then
the keys of :attr:`state_dict` must exactly match the keys returned
by this module's :meth:`~torch.nn.Module.state_dict` function.
Args:
state_dict (dict): a dict containing parameters and
persistent buffers.
strict (bool, optional): whether to strictly enforce that the keys
in :attr:`state_dict` match the keys returned by this module's
:meth:`~torch.nn.Module.state_dict` function. Default: ``True``
Returns:
``NamedTuple`` with ``missing_keys`` and ``unexpected_keys`` fields:
* **missing_keys** is a list of str containing the missing keys
* **unexpected_keys** is a list of str containing the unexpected keys
Note:
If a parameter or buffer is registered as ``None`` and its corresponding key
exists in :attr:`state_dict`, :meth:`load_state_dict` will raise a
``RuntimeError``.
"""
if not isinstance(state_dict, Mapping):
raise TypeError("Expected state_dict to be dict-like, got {}.".format(type(state_dict)))
missing_keys: List[str] = []
unexpected_keys: List[str] = []
error_msgs: List[str] = []
# copy state_dict so _load_from_state_dict can modify it
metadata = getattr(state_dict, '_metadata', None)
state_dict = OrderedDict(state_dict)
if metadata is not None:
# mypy isn't aware that "_metadata" exists in state_dict
state_dict._metadata = metadata # type: ignore[attr-defined]
def load(module, local_state_dict, prefix=prefix):
local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
module._load_from_state_dict(
local_state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs)
for name, child in module._modules.items():
if child is not None:
child_prefix = prefix + name + '.'
child_state_dict = {k: v for k, v in local_state_dict.items() if k.startswith(child_prefix)}
load(child, child_state_dict, child_prefix) # noqa F821
# Note that the hook can modify missing_keys and unexpected_keys.
incompatible_keys = _IncompatibleKeys(missing_keys, unexpected_keys)
for hook in module._load_state_dict_post_hooks.values():
out = hook(module, incompatible_keys)
assert out is None, (
"Hooks registered with ``register_load_state_dict_post_hook`` are not"
"expected to return new values, if incompatible_keys need to be modified,"
"it should be done inplace."
)
load(self, state_dict, prefix)
del load
if strict:
if len(unexpected_keys) > 0:
error_msgs.insert(
0, 'Unexpected key(s) in state_dict: {}. '.format(
', '.join('"{}"'.format(k) for k in unexpected_keys)))
if len(missing_keys) > 0:
error_msgs.insert(
0, 'Missing key(s) in state_dict: {}. '.format(
', '.join('"{}"'.format(k) for k in missing_keys)))
if len(error_msgs) > 0:
raise RuntimeError('Error(s) in loading state_dict for {}:\n\t{}'.format(
self.__class__.__name__, "\n\t".join(error_msgs)))
return _IncompatibleKeys(missing_keys, unexpected_keys)
nn.modules.Module.load_state_dict = load_state_dict
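# The patched load_state_dict above accepts an extra `prefix` argument, which lets the
# MAL modules below load weights from checkpoints whose keys are namespaced, e.g.
# 'student.backbone.*'. A hedged sketch (the checkpoint path is a placeholder):
#
#   state_dict = torch.load("checkpoint.pth", map_location="cpu")["state_dict"]
#   backbone.load_state_dict(state_dict, strict=False, prefix="student.backbone.")
#   # Only keys under "student.backbone." are matched against the backbone's parameters.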
class MeanField(nn.Module):
"""Mean Field approximation to refine mask."""
def __init__(self, cfg=None):
"""Initialize MeanField estimation.
Args:
cfg (OmegaConfig): Hydra config
"""
super(MeanField, self).__init__()
self.kernel_size = cfg.train.crf_kernel_size
assert self.kernel_size % 2 == 1
self.zeta = cfg.train.crf_zeta
self.num_iter = cfg.train.crf_num_iter
self.high_thres = cfg.train.crf_value_high_thres
self.low_thres = cfg.train.crf_value_low_thres
self.cfg = cfg
def trunc(self, seg):
"""Clamp mask values by crf_value_(low/high)_thres."""
seg = torch.clamp(seg, min=self.low_thres, max=self.high_thres)
return seg
@torch.no_grad()
def forward(self, feature_map, seg, targets=None):
"""Forward pass with num_iter."""
feature_map = feature_map.float()
kernel_size = self.kernel_size
B, H, W = seg.shape
C = feature_map.shape[1]
self.unfold = torch.nn.Unfold(kernel_size, stride=1, padding=self.kernel_size // 2)
# feature_map [B, C, H, W]
feature_map = feature_map + 10
# unfold_feature_map [B, C, kernel_size ** 2, H*W]
unfold_feature_map = self.unfold(feature_map).reshape(B, C, kernel_size**2, H * W)
# B, kernel_size**2, H*W
kernel = torch.exp(-(((unfold_feature_map - feature_map.reshape(B, C, 1, H * W)) ** 2) / (2 * self.zeta ** 2)).sum(1))
if targets is not None:
t = targets.reshape(B, H, W)
seg = seg * t
else:
t = None
seg = self.trunc(seg)
for it in range(self.num_iter):
seg = self.single_forward(seg, kernel, t, B, H, W, it)
return (seg > 0.5).float()
def single_forward(self, x, kernel, targets, B, H, W, it):
"""Forward pass."""
x = x[:, None]
# x [B 2 H W]
B, _, H, W = x.shape
x = torch.cat([1 - x, x], 1)
kernel_size = self.kernel_size
# unfold_x [B, 2, kernel_size**2, H * W]
# kernel [B, kennel_size**2, H * W]
unfold_x = self.unfold(-torch.log(x)).reshape(B, 2, kernel_size ** 2, H * W)
# aggre, x [B, 2, H * W]
aggre = (unfold_x * kernel[:, None]).sum(2)
aggre = torch.exp(-aggre)
if targets is not None:
aggre[:, 1:] = aggre[:, 1:] * targets.reshape(B, 1, H * W)
out = aggre
out = out / (1e-6 + out.sum(1, keepdim=True))
out = self.trunc(out)
return out[:, 1].reshape(B, H, W)
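# Hedged sketch of how the mean-field refinement is typically invoked (shapes only;
# the tensors below are illustrative, and `cfg` must provide the train.crf_* fields):
#
#   crf = MeanField(cfg=cfg)
#   feature_map = images                              # (B, C, H, W) pixel-level features for the pairwise kernel
#   coarse_seg = mask_logits.sigmoid().squeeze(1)     # (B, H, W) values in [0, 1]
#   box_targets = boxmasks.squeeze(1)                 # (B, H, W) binary box constraint
#   refined = crf(feature_map, coarse_seg, targets=box_targets)   # (B, H, W) binary mask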
class MaskHead(nn.Module):
"""Mask Head."""
def __init__(self, in_channels=2048, cfg=None):
"""Initialize mask head.
Args:
in_channels (int): number of input channels
cfg (OmegaConfig): Hydra config
"""
super().__init__()
self.num_convs = cfg.model.mask_head_num_convs
self.in_channels = in_channels
self.mask_head_hidden_channel = cfg.model.mask_head_hidden_channel
self.mask_head_out_channel = cfg.model.mask_head_out_channel
self.mask_scale_ratio = cfg.model.mask_scale_ratio
self.convs = nn.ModuleList()
for i in range(self.num_convs):
in_channels = self.in_channels if i == 0 else self.mask_head_hidden_channel
out_channels = self.mask_head_hidden_channel if i < self.num_convs - 1 else self.mask_head_out_channel
self.convs.append(ConvModule(in_channels, out_channels, 3, padding=1))
def forward(self, x):
"""Forward pass."""
for idx, conv in enumerate(self.convs):
if idx == 3:
h, w = x.shape[2:]
th, tw = int(h * self.mask_scale_ratio), int(w * self.mask_scale_ratio)
x = F.interpolate(x, (th, tw), mode='bilinear', align_corners=False)
x = conv(x)
return x
class RoIHead(nn.Module):
"""RoI Head."""
def __init__(self, in_channels=2048, cfg=None):
"""Initialize RoI Head.
Args:
in_channels (int): number of input channels
cfg (OmegaConfig): Hydra config
"""
super().__init__()
self.mlp1 = nn.Linear(in_channels, cfg.model.mask_head_out_channel)
self.relu = nn.ReLU()
self.mlp2 = nn.Linear(cfg.model.mask_head_out_channel, cfg.model.mask_head_out_channel)
def forward(self, x, boxmask=None):
"""Forward pass."""
x = x.mean((2, 3))
x = self.mlp2(self.relu(self.mlp1(x)))
return x
class MALStudentNetwork(pl.LightningModule):
"""MAL student model."""
def __init__(self, in_channels=2048, cfg=None):
"""Initialize MAL student model.
Args:
in_channels (int): number of input channels
cfg (OmegaConfig): Hydra config
"""
super().__init__()
self.cfg = cfg
self.backbone = vit_builder.build_model(cfg=cfg)
has_roi = False
has_mask = False
# Load pretrained weights
if cfg.checkpoint:
print('Loading backbone weights...')
state_dict = torch.load(cfg.checkpoint, map_location="cpu")
if 'state_dict' in state_dict.keys():
state_dict = state_dict['state_dict']
if 'model' in state_dict.keys():
state_dict = state_dict['model']
is_pretrained = any('student' in k for k in state_dict.keys())
has_roi = any('roi_head' in k for k in state_dict.keys())
has_mask = any('mask_head' in k for k in state_dict.keys())
prefix = 'backbone.' if 'fan' in cfg.model.arch else ''
self.backbone.load_state_dict(state_dict, strict=False, prefix='student.backbone.' if is_pretrained else prefix)
# K head
self.roi_head = RoIHead(in_channels, cfg=cfg)
if has_roi:
print('Loading ROI head weights...')
self.roi_head.load_state_dict(state_dict, strict=False, prefix='student.roi_head.')
# V head
self.mask_head = MaskHead(in_channels, cfg=cfg)
if has_mask:
print('Loading mask head weights...')
self.mask_head.load_state_dict(state_dict, strict=False, prefix='student.mask_head.')
# make student sharded on multiple gpus
self.configure_sharded_model()
def configure_sharded_model(self):
"""Sharded backbone."""
self.backbone = auto_wrap(self.backbone)
def forward(self, x, boxmask, bboxes):
"""Forward pass."""
if self.cfg.train.use_amp:
x = x.half()
feat = self.backbone.base_forward(x)
spatial_feat_ori = self.backbone.get_spatial_feat(feat)
h, w = spatial_feat_ori.shape[2:]
mask_scale_ratio_pre = int(self.cfg.model.mask_scale_ratio_pre)
if not self.cfg.model.not_adjust_scale:
spatial_feat_list = []
masking_list = []
areas = (bboxes[:, 3] - bboxes[:, 1]) * (bboxes[:, 2] - bboxes[:, 0])
for idx, (scale_low, scale_high) in enumerate([(0, 32**2), (32**2, 96**2), (96**2, 1e5**2)]):
masking = (areas < scale_high) * (areas > scale_low)
if masking.sum() > 0:
spatial_feat = F.interpolate(
spatial_feat_ori[masking],
size=(int(h * 2 ** (idx - 1)), int(w * 2 ** (idx - 1))),
mode='bilinear', align_corners=False)
boxmask = None
else:
spatial_feat = None
boxmask = None
spatial_feat_list.append(spatial_feat)
masking_list.append(masking)
roi_feat = self.roi_head(spatial_feat_ori)
n, maxh, maxw = roi_feat.shape[0], h * 4, w * 4
seg_all = torch.zeros(n, 1, maxh, maxw).to(roi_feat)
for idx, (spatial_feat, masking) in enumerate(zip(spatial_feat_list, masking_list)):
if masking.sum() > 0:
mn = masking.sum()
mh, mw = int(h * mask_scale_ratio_pre * 2 ** (idx - 1)), int(w * mask_scale_ratio_pre * 2 ** (idx - 1))
seg_feat = self.mask_head(spatial_feat)
c = seg_feat.shape[1]
masked_roi_feat = roi_feat[masking]
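# Per-box mask logits: inner product (over channels) between the box's RoI embedding
# and the per-pixel mask-head embeddings (cf. the "K head" / "V head" comments above).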
seg = (masked_roi_feat[:, None, :] @ seg_feat.reshape(mn, c, mh * mw * 4)).reshape(mn, 1, mh * 2, mw * 2)
seg = F.interpolate(seg, size=(maxh, maxw), mode='bilinear', align_corners=False)
seg_all[masking] = seg
ret_vals = {'feat': feat, 'seg': seg_all, 'spatial_feat': spatial_feat_ori, 'masking_list': masking_list}
else:
spatial_feat = F.interpolate(
spatial_feat_ori, size=(int(h * mask_scale_ratio_pre), int(w * mask_scale_ratio_pre)),
mode='bilinear', align_corners=False)
boxmask = F.interpolate(boxmask, size=spatial_feat.shape[2:], mode='bilinear', align_corners=False)
seg_feat = self.mask_head(spatial_feat)
roi_feat = self.roi_head(spatial_feat_ori, boxmask)
n, c, h, w = seg_feat.shape
seg = (roi_feat[:, None, :] @ seg_feat.reshape(n, c, h * w)).reshape(n, 1, h, w)
seg = F.interpolate(seg, (h * 4, w * 4), mode='bilinear', align_corners=False)
ret_vals = {'feat': feat, 'seg': seg, 'spatial_feat': spatial_feat_ori}
return ret_vals
class MALTeacherNetwork(MALStudentNetwork):
"""MAL teacher model."""
def __init__(self, in_channels, cfg=None):
"""Initialize MAL teacher model.
Args:
in_channels (int): number of input channels
cfg (OmegaConfig): Hydra config
"""
super().__init__(in_channels, cfg=cfg)
self.eval()
self.momentum = cfg.model.teacher_momentum
@torch.no_grad()
def update(self, student):
"""Update EMA teacher model."""
for param_student, param_teacher in zip(student.parameters(), self.parameters()):
param_teacher.data = param_teacher.data * self.momentum + param_student.data * (1 - self.momentum)
class MIoUMetrics(torchmetrics.Metric):
"""MIoU Metrics."""
def __init__(self, dist_sync_on_step=True, num_classes=20):
"""Initialize MIoU metrics.
Args:
dist_sync_on_step (bool): If metric state should synchronize on forward()
num_classes (int): Number of classes
"""
super().__init__(dist_sync_on_step=dist_sync_on_step)
self.add_state("cnt", default=torch.zeros(num_classes), dist_reduce_fx="sum")
self.add_state("total", default=torch.zeros(num_classes), dist_reduce_fx="sum")
def update(self, label, iou):
"""Update."""
self.cnt[label - 1] += 1
self.total[label - 1] += iou
def update_with_ious(self, labels, ious):
"""Update with IOUs."""
for iou, label in zip(ious, labels):
self.cnt[label - 1] += 1
self.total[label - 1] += float(iou)
return ious
def cal_intersection(self, seg, gt):
"""Calcuate mask intersection."""
B = seg.shape[0]
inter_cnt = (seg * gt).reshape(B, -1).sum(1)
return inter_cnt
def cal_union(self, seg, gt, inter_cnt=None):
"""Calculate mask union."""
B = seg.shape[0]
if inter_cnt is None:
inter_cnt = self.cal_intersection(seg, gt)
union_cnt = seg.reshape(B, -1).sum(1) + gt.reshape(B, -1).sum(1) - inter_cnt
return union_cnt
def cal_iou(self, seg, gt):
"""Calculate mask IOU."""
inter_cnt = self.cal_intersection(seg, gt)
union_cnt = self.cal_union(seg, gt, inter_cnt)
return 1.0 * inter_cnt / (union_cnt + 1e-6)
def compute(self):
"""Compute mIOU."""
mIoUs = self.total / (1e-6 + self.cnt)
mIoU = mIoUs.sum() / (self.cnt > 0).sum()
return mIoU
def compute_with_ids(self, ids=None):
"""Compute mIOU with IDs."""
if ids is not None:
total = self.total[torch.tensor(np.array(ids)).long()]
cnt = self.cnt[torch.tensor(np.array(ids)).long()]
else:
total = self.total
cnt = self.cnt
mIoUs = total / (1e-6 + cnt)
mIoU = mIoUs.sum() / (cnt > 0).sum()
return mIoU
class MAL(pl.LightningModule):
"""Base MAL model."""
def __init__(self, cfg=None, num_iter_per_epoch=None, categories=None):
"""Initialize MAL model.
Args:
cfg (OmegaConfig): Hydra config
num_iter_per_epoch (int): Number of iterations per epoch
categories (list): categories in the COCO format annotation
"""
super().__init__()
# loss term hyper parameters
self.num_convs = cfg.model.mask_head_num_convs
self.loss_mil_weight = cfg.train.loss_mil_weight
self.loss_crf_weight = cfg.train.loss_crf_weight
self.loss_crf_step = cfg.train.loss_crf_step
self.cfg = cfg
self.mask_thres = cfg.train.mask_thres
self.num_classes = len(categories) + 1
self.mIoUMetric = MIoUMetrics(num_classes=self.num_classes)
self.areaMIoUMetrics = nn.ModuleList([MIoUMetrics(num_classes=self.num_classes) for _ in range(3)])
if self.cfg.evaluate.comp_clustering:
self.clusteringScoreMetrics = torchmetrics.MeanMetric()
backbone_type = cfg.model.arch
self.categories = categories
if backbone_type.lower().startswith('vit'):
if 'tiny' in backbone_type.lower():
in_channel = 192
elif 'small' in backbone_type.lower():
in_channel = 384
elif 'base' in backbone_type.lower():
in_channel = 768
elif 'large' in backbone_type.lower():
in_channel = 1024
else:
raise NotImplementedError
elif backbone_type.lower().startswith('fan'):
if 'tiny' in backbone_type.lower():
in_channel = 192
elif 'small' in backbone_type.lower():
in_channel = 448
elif 'base' in backbone_type.lower():
in_channel = 448
elif 'large' in backbone_type.lower():
in_channel = 480
else:
raise NotImplementedError
else:
raise NotImplementedError("Only `vit` and `fan` are supported.")
self.mean_field = MeanField(cfg=self.cfg)
self.student = MALStudentNetwork(in_channel, cfg=cfg)
self.teacher = MALTeacherNetwork(in_channel, cfg=cfg)
self.denormalize = Denormalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
self._optim_type = cfg.train.optim_type
self._lr = cfg.train.lr
self._wd = cfg.train.wd
self._momentum = cfg.train.optim_momentum
if num_iter_per_epoch is not None:
self._num_iter_per_epoch = num_iter_per_epoch // len(self.cfg.gpu_ids)
self.cfg = cfg
self.vis_cnt = 0
self.local_step = 0
# Enable manual optimization
self.automatic_optimization = False
self.status_logging_dict = {}
def configure_optimizers(self):
"""Configure optimizers."""
optimizer = AdamWwStep(
self.parameters(), eps=self.cfg.train.optim_eps,
betas=self.cfg.train.optim_betas,
lr=self._lr, weight_decay=self._wd)
return optimizer
def crf_loss(self, img, seg, tseg, boxmask):
"""CRF loss."""
refined_mask = self.mean_field(img, tseg, targets=boxmask)
return self.dice_loss(seg, refined_mask).mean(), refined_mask
def dice_loss(self, pred, target):
"""DICE loss.
Replaces the cross-entropy-like loss used in the original paper:
(https://papers.nips.cc/paper/2019/file/e6e713296627dff6475085cc6a224464-Paper.pdf).
Args:
pred (torch.Tensor): [B, embed_dim]
target (torch.Tensor): [B, embed_dim]
Return:
loss (torch.Tensor): [B]
"""
pred = pred.contiguous().view(pred.size()[0], -1).float()
target = target.contiguous().view(target.size()[0], -1).float()
a = torch.sum(pred * target, 1)
b = torch.sum(pred * pred, 1) + 0.001
c = torch.sum(target * target, 1) + 0.001
d = (2 * a) / (b + c)
return 1 - d
def mil_loss(self, pred, target):
"""Multi-instance loss.
Args:
pred (torch.Tensor): size of [batch_size, 128, 128], where 128 is input_size // 4
target (torch.Tensor): size of [batch_size, 128, 128], where 128 is input_size // 4
Return:
loss (torch.Tensor): size of [batch_size]
"""
row_labels = target.max(1)[0]
column_labels = target.max(2)[0]
row_input = pred.max(1)[0]
column_input = pred.max(2)[0]
loss = self.dice_loss(column_input, column_labels)
loss += self.dice_loss(row_input, row_labels)
return loss
def training_step(self, x):
"""training step."""
optimizer = self.optimizers()
loss = {}
image = x['image']
local_step = self.local_step
self.local_step += 1
if 'timage' in x.keys():
timage = x['timage']
else:
timage = image
student_output = self.student(image, x['mask'], x['bbox'])
teacher_output = self.teacher(timage, x['mask'], x['bbox'])
B, oh, ow = student_output['seg'].shape[0], student_output['seg'].shape[2], student_output['seg'].shape[3]
mask = F.interpolate(x['mask'], size=(oh, ow), mode='bilinear', align_corners=False).reshape(-1, oh, ow)
if 'image' in x:
student_seg_sigmoid = torch.sigmoid(student_output['seg'])[:, 0].float()
teacher_seg_sigmoid = torch.sigmoid(teacher_output['seg'])[:, 0].float()
# Multiple instance learning Loss
loss_mil = self.mil_loss(student_seg_sigmoid, mask)
# Warmup loss weight for multiple instance learning loss
if self.current_epoch > 0:
step_mil_loss_weight = 1
else:
step_mil_loss_weight = min(1, 1. * local_step / self.cfg.train.loss_mil_step)
loss_mil *= step_mil_loss_weight
loss_mil = loss_mil.sum() / (loss_mil.numel() + 1e-4) * self.loss_mil_weight
loss.update({'mil': loss_mil})
# Tensorboard logs
self.log("train/loss_mil", loss_mil, on_step=True, on_epoch=False, prog_bar=True, sync_dist=True)
# Conditional Random Fields Loss
th, tw = oh * self.cfg.train.crf_size_ratio, ow * self.cfg.train.crf_size_ratio
# resize image
scaled_img = F.interpolate(image, size=(th, tw), mode='bilinear', align_corners=False).reshape(B, -1, th, tw)
# resize student segmentation
scaled_stu_seg = F.interpolate(student_seg_sigmoid[None, ...], size=(th, tw), mode='bilinear', align_corners=False).reshape(B, th, tw)
# resize teacher segmentation
scaled_tea_seg = F.interpolate(teacher_seg_sigmoid[None, ...], size=(th, tw), mode='bilinear', align_corners=False).reshape(B, th, tw)
# resize mask
scaled_mask = F.interpolate(x['mask'], size=(th, tw), mode='bilinear', align_corners=False).reshape(B, th, tw)
# loss_crf, pseudo_label
loss_crf, _ = self.crf_loss(scaled_img, scaled_stu_seg, (scaled_stu_seg + scaled_tea_seg) / 2, scaled_mask)
if self.current_epoch > 0:
step_crf_loss_weight = 1
else:
step_crf_loss_weight = min(1. * local_step / self.loss_crf_step, 1.)
loss_crf *= self.loss_crf_weight * step_crf_loss_weight
loss.update({'crf': loss_crf})
self.log("train/loss_crf", loss_crf, on_step=True, on_epoch=False, prog_bar=True, sync_dist=True)
else:
raise NotImplementedError
total_loss = sum(loss.values())
self.log("train/loss", total_loss, on_step=True, on_epoch=False, prog_bar=True, sync_dist=True)
self.log("lr", optimizer.param_groups[0]['lr'], on_step=True, on_epoch=False, prog_bar=True, sync_dist=True)
self.log("train/bs", image.shape[0], on_step=False, on_epoch=True, prog_bar=False, sync_dist=True)
optimizer.zero_grad()
self.manual_backward(total_loss)
optimizer.step()
if self._optim_type == 'adamw':
adjust_learning_rate(optimizer, 1. * local_step / self._num_iter_per_epoch + self.current_epoch, self.cfg)
self.teacher.update(self.student)
return total_loss
def training_epoch_end(self, outputs):
"""On training epoch end."""
self.local_step = 0
average_train_loss = 0.0
for out in outputs:
average_train_loss += out['loss'].item()
average_train_loss /= len(outputs)
self.status_logging_dict["train_loss"] = average_train_loss
status_logging.get_status_logger().kpi = self.status_logging_dict
status_logging.get_status_logger().write(
message="Train and Val metrics generated.",
status_level=status_logging.Status.RUNNING
)
def validation_step(self, batch, batch_idx, return_mask=False):
"""Validation step."""
if self.cfg.dataset.load_mask:
imgs, gt_masks, masks, labels, ids, boxmasks, boxes, ext_boxes, ext_hs, ext_ws =\
batch['image'], batch['gtmask'], batch['mask'], batch['compact_category_id'], \
batch['id'], batch['boxmask'], batch['bbox'], batch['ext_boxes'], batch['ext_h'], batch['ext_w']
else:
imgs, gt_masks, masks, labels, ids, boxmasks, boxes, ext_boxes, ext_hs, ext_ws =\
batch['image'], batch['boxmask'], batch['boxmask'], batch['compact_category_id'], \
batch['id'], batch['boxmask'], batch['bbox'], batch['ext_boxes'], batch['ext_h'], batch['ext_w']
_, _, H, W = imgs.shape # B, C, H, W
denormalized_images = self.denormalize(imgs.cpu().numpy().transpose(0, 2, 3, 1)).astype(np.uint8)
labels = labels.cpu().numpy()
if self.cfg.evaluate.use_mixed_model_test:
s_outputs = self.student(imgs, batch['boxmask'], batch['bbox'])
t_outputs = self.teacher(imgs, batch['boxmask'], batch['bbox'])
segs = (s_outputs['seg'] + t_outputs['seg']) / 2
else:
if self.cfg.evaluate.use_teacher_test:
outputs = self.teacher(imgs, batch['boxmask'], batch['bbox'])
else:
outputs = self.student(imgs, batch['boxmask'], batch['bbox'])
segs = outputs['seg']
if self.cfg.evaluate.use_flip_test:
if self.cfg.evaluate.use_mixed_model_test:
s_outputs = self.student(torch.flip(imgs, [3]), batch['boxmask'], batch['bbox'])
t_outputs = self.teacher(torch.flip(imgs, [3]), batch['boxmask'], batch['bbox'])
flipped_segs = torch.flip((s_outputs['seg'] + t_outputs['seg']) / 2, [3])
segs = (flipped_segs + segs) / 2
else:
if self.cfg.evaluate.use_teacher_test:
flip_outputs = self.teacher(torch.flip(imgs, [3]), batch['boxmask'], batch['bbox'])
else:
flip_outputs = self.student(torch.flip(imgs, [3]), batch['boxmask'], batch['bbox'])
segs = (segs + torch.flip(flip_outputs['seg'], [3])) / 2
segs = F.interpolate(segs, (H, W), align_corners=False, mode='bilinear')
segs = segs.sigmoid()
thres_list = [0, 32**2, 96 ** 2, 1e5**2]
segs = segs * boxmasks
areas = (boxes[:, 3] - boxes[:, 1]) * (boxes[:, 2] - boxes[:, 0])
binseg = segs.clone()
for idx, (lth, hth) in enumerate(zip(thres_list[:-1], thres_list[1:])):
obj_ids = ((lth < areas) * (areas <= hth)).cpu().numpy()
if obj_ids.sum() > 0:
binseg[obj_ids] = (binseg[obj_ids] > self.mask_thres[idx]).float()
tb_logger = self.logger.experiment
epoch_count = self.current_epoch
batch_ious = []
img_pred_masks = []
for idx, (img_h, img_w, ext_h, ext_w, ext_box, seg, gt_mask, area, label) in enumerate(zip(batch['height'], batch['width'], ext_hs, ext_ws, ext_boxes, segs, gt_masks, areas, labels)):
roi_pred_mask = F.interpolate(seg[None, ...], (ext_h, ext_w), mode='bilinear', align_corners=False)[0][0]
h, w = int(img_h), int(img_w)
img_pred_mask_shape = h, w
img_pred_mask = np.zeros(img_pred_mask_shape).astype(np.float32)
img_pred_mask[max(ext_box[1], 0):min(ext_box[3], h),
max(ext_box[0], 0):min(ext_box[2], w)] = \
roi_pred_mask[max(0 - ext_box[1], 0):ext_h + min(0, h - ext_box[3]),
max(0 - ext_box[0], 0):ext_w + min(0, w - ext_box[2])].cpu().numpy()
for idx, (lth, hth) in enumerate(zip(thres_list[:-1], thres_list[1:])):
if lth < area <= hth:
img_pred_mask = (img_pred_mask > self.mask_thres[idx]).astype(np.float32)
img_pred_masks.append(img_pred_mask[None, ...])
if self.cfg.dataset.load_mask:
iou = self.mIoUMetric.cal_iou(img_pred_mask[np.newaxis, ...], gt_mask.data[np.newaxis, ...])
# overall mask IoU
self.mIoUMetric.update(int(label), iou[0])
batch_ious.extend(iou)
# Small/Medium/Large IoU
for jdx, (lth, hth) in enumerate(zip(thres_list[:-1], thres_list[1:])):
obj_ids = ((lth < area) * (area <= hth)).cpu().numpy()
if obj_ids.sum() > 0:
self.areaMIoUMetrics[jdx].update_with_ious(labels[obj_ids], iou[obj_ids])
# Tensorboard vis
if self.cfg.dataset.load_mask:
for idx, batch_iou, img, seg, label, gt_mask, mask, _, area in zip(ids, batch_ious, denormalized_images, segs, labels, gt_masks, masks, boxes, areas):
if area > 64**2 and batch_iou < 0.78 and self.vis_cnt <= 100:
seg = seg.cpu().numpy().astype(np.float32)[0]
mask = mask.data
seg = cv2.resize(seg, (W, H), interpolation=cv2.INTER_LINEAR)
# seg = (seg > self.mask_thres).astype(np.uint8)
seg = (seg * 255).astype(np.uint8)
seg = cv2.applyColorMap(seg, cv2.COLORMAP_JET)
tseg = cv2.applyColorMap((mask[0] > 0.5).cpu().numpy().astype(np.uint8) * 255, cv2.COLORMAP_JET)
vis = cv2.addWeighted(img, 0.5, seg, 0.5, 0)
tvis = cv2.addWeighted(img, 0.5, tseg, 0.5, 0)
tb_logger.add_image('val/vis_{}'.format(int(idx)), vis, epoch_count, dataformats="HWC")
tb_logger.add_image('valgt/vis_{}'.format(int(idx)), tvis, epoch_count, dataformats="HWC")
self.vis_cnt += 1
ret_dict = dict()
if return_mask:
ret_dict['img_pred_masks'] = img_pred_masks
if self.cfg.dataset.load_mask:
ret_dict['ious'] = batch_ious
return ret_dict
def get_parameter_groups(self, print_fn=print):
"""Get parameter groups."""
groups = ([], [], [], [])
for name, value in self.named_parameters():
# pretrained weights
if 'backbone' in name:
if 'weight' in name:
# print_fn(f'pretrained weights : {name}')
groups[0].append(value)
else:
# print_fn(f'pretrained bias : {name}')
groups[1].append(value)
# weights trained from scratch
else:
if 'weight' in name:
if print_fn is not None:
print_fn(f'scratched weights : {name}')
groups[2].append(value)
else:
if print_fn is not None:
print_fn(f'scratched bias : {name}')
groups[3].append(value)
return groups
def validation_epoch_end(self, validation_step_outputs):
"""On validation epoch end."""
mIoU = self.mIoUMetric.compute()
self.log("val/mIoU", mIoU, on_epoch=True, prog_bar=True, sync_dist=True)
self.status_logging_dict["mIoU"] = mIoU.item()
if dist.get_rank() == 0:
print("val/mIoU: {}".format(mIoU))
if "coco" in self.cfg.dataset.type:
# cat_kv = dict([(cat["name"], cat["id"]) for cat in self.categories])
if self.cfg.evaluate.comp_clustering:
clustering_score = self.clusteringScoreMetrics.compute()
self.log("val/cluster_score", clustering_score, on_epoch=True, prog_bar=True, sync_dist=True)
self.status_logging_dict["val_cluster_score"] = str(clustering_score)
if dist.get_rank() == 0:
if self.cfg.evaluate.comp_clustering:
print("val/cluster_score", clustering_score)
else:
raise NotImplementedError
self.mIoUMetric.reset()
self.vis_cnt = 0
for i, name in zip(range(len(self.areaMIoUMetrics)), ["small", "medium", "large"]):
area_mIoU = self.areaMIoUMetrics[i].compute()
self.log("val/mIoU_{}".format(name), area_mIoU, on_epoch=True, sync_dist=True)
self.status_logging_dict["mIoU_{}".format(name)] = area_mIoU.item()
if dist.get_rank() == 0:
print("val/mIoU_{}: {}".format(name, area_mIoU))
self.areaMIoUMetrics[i].reset()
if not self.training:
status_logging.get_status_logger().kpi = self.status_logging_dict
status_logging.get_status_logger().write(
message="Evaluation metrics generated.",
status_level=status_logging.Status.RUNNING
)
class MALPseudoLabels(MAL):
"""MAL model for pseudo label generation."""
def __init__(self, *args, **kwargs):
"""Init."""
super().__init__(*args, **kwargs)
self.box_inputs = None
def validation_step(self, batch, batch_idx):
"""Validation step."""
pred_dict = super().validation_step(batch, batch_idx, return_mask=True)
pred_seg = pred_dict['img_pred_masks']
if self.cfg.dataset.load_mask:
ious = pred_dict['ious']
ret = []
cnt = 0
# t = time.time()
for seg, (x0, y0, x1, y1), idx, image_id, category_id in zip(
pred_seg, batch['bbox'], batch['id'],
batch.get('image_id', batch.get('video_id', None)),
batch['category_id']):
# seg, ext_box, idx, image_id
# sh, sw = ey1 - ey0, ex1 - ex0
# oseg = np.array(Image.fromarray(seg[0].cpu().numpy()).resize((sw, sh)))
# seg_label = np.zeros((h, w), dtype=np.uint8)
# seg_label[max(0, ey0): min(h, ey1), max(0, ex0): min(w, ex1)] = \
# oseg[max(0, -ey0): sh - max(ey1 - h, 0), \
# max(0, -ex0): sw - max(ex1 - w, 0)]
encoded_mask = encode(np.asfortranarray(seg[0].astype(np.uint8)))
encoded_mask['counts'] = encoded_mask['counts'].decode('ascii')
labels = {
"bbox": [float(x0), float(y0), float(x1 - x0), float(y1 - y0)],
"id": int(idx),
"category_id": int(category_id),
"segmentation": encoded_mask,
"iscrowd": 0,
"area": float(x1 - x0) * float(y1 - y0),
"image_id": int(image_id)
}
if 'score' in batch.keys():
labels['score'] = float(batch['score'][cnt].cpu().numpy())
if self.cfg.dataset.load_mask:
labels['iou'] = float(ious[cnt])
cnt += 1
ret.append(labels)
if batch.get('ytvis_idx', None) is not None:
for ytvis_idx, labels in zip(batch['ytvis_idx'], ret):
labels['ytvis_idx'] = list(map(int, ytvis_idx))
return ret
def validation_epoch_end(self, validation_step_outputs):
"""On validation epoch end."""
super().validation_epoch_end(validation_step_outputs)
ret = list(itertools.chain.from_iterable(validation_step_outputs))
if self.trainer.strategy.root_device.index > 0:
with open(f"{self.cfg.inference.label_dump_path}.part{self.trainer.strategy.root_device.index}", "w") as f:
json.dump(ret, f)
torch.distributed.barrier()
else:
val_ann_path = self.cfg.inference.ann_path
with open(val_ann_path, "r") as f:
anns = json.load(f)
torch.distributed.barrier()
for i in range(1, len(self.cfg.gpu_ids)):
with open("{}.part{}".format(self.cfg.inference.label_dump_path, i), "r") as f:
obj = json.load(f)
ret.extend(obj)
os.remove("{}.part{}".format(self.cfg.inference.label_dump_path, i))
if ret[0].get('ytvis_idx', None) is None:
# for COCO format
_ret = []
_ret_set = set()
for ann in ret:
if ann['id'] not in _ret_set:
_ret_set.add(ann['id'])
_ret.append(ann)
anns['annotations'] = _ret
else:
# for YouTubeVIS format
for inst_ann in anns['annotations']:
len_video = len(inst_ann['bboxes'])
inst_ann['segmentations'] = [None for _ in range(len_video)]
for seg_ann in ret:
inst_idx, frame_idx = seg_ann['ytvis_idx']
anns['annotations'][inst_idx]['segmentations'][frame_idx] = seg_ann['segmentation']
with open(self.cfg.inference.label_dump_path, "w") as f:
json.dump(anns, f)
if self.box_inputs is not None:
print("Start evaluating the results...")
cocoGt = COCO(self.cfg.val_ann_path)
cocoDt = cocoGt.loadRes(self.cfg.label_dump_path + ".result")
for iou_type in ['bbox', 'segm']:
cocoEval = COCOeval(cocoGt, cocoDt, iou_type)
cocoEval.evaluate()
cocoEval.accumulate()
cocoEval.summarize()
coco_metrics = cocoEval.stats
for i, name in enumerate(['AP', 'AP50', 'AP75', 'APs', 'APm', 'APl', 'ARmax1',
'ARmax10', 'ARmax100', 'ARs', 'ARm', 'ARl']):
self.status_logging_dict[f"{name}_{iou_type}"] = coco_metrics[i]
if not self.training:
status_logging.get_status_logger().kpi = self.status_logging_dict
status_logging.get_status_logger().write(
message="Evaluation metrics generated.",
status_level=status_logging.Status.RUNNING
)
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/mal/models/mal.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# This work is made available under the Nvidia Source Code License-NC.
# To view a copy of this license, visit
# https://github.com/NVlabs/MAL/blob/main/LICENSE
"""Transformer (ViT and FAN) builder."""
from functools import partial
import torch
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
from nvidia_tao_pytorch.cv.backbone.fan import (
fan_tiny_12_p16_224,
fan_small_12_p16_224,
fan_base_18_p16_224,
fan_large_24_p16_224,
fan_tiny_8_p4_hybrid,
fan_small_12_p4_hybrid,
fan_base_16_p4_hybrid,
fan_large_16_p4_hybrid
)
from nvidia_tao_pytorch.cv.backbone.vision_transformer import VisionTransformer
fan_dict = {
"fan_tiny_12_p16_224": fan_tiny_12_p16_224,
"fan_small_12_p16_224": fan_small_12_p16_224,
"fan_base_18_p16_224": fan_base_18_p16_224,
"fan_large_24_p16_224": fan_large_24_p16_224,
"fan_tiny_8_p4_hybrid": fan_tiny_8_p4_hybrid,
"fan_small_12_p4_hybrid": fan_small_12_p4_hybrid,
"fan_base_16_p4_hybrid": fan_base_16_p4_hybrid,
"fan_large_16_p4_hybrid": fan_large_16_p4_hybrid
}
urls_dic = {
"vit-deit-tiny/16": "https://dl.fbaipublicfiles.com/deit/deit_tiny_patch16_224-a1311bcf.pth",
"vit-deit-small/16": "https://dl.fbaipublicfiles.com/deit/deit_small_patch16_224-cd65a155.pth",
"vit-deit-base/16": "https://dl.fbaipublicfiles.com/deit/deit_base_patch16_224-b5f2ef4d.pth",
"vit-deit-base-distilled/16": "https://dl.fbaipublicfiles.com/deit/deit_base_distilled_patch16_224-df68dfff.pth",
"vit-deit-iii-base-224/16": "https://dl.fbaipublicfiles.com/deit/deit_3_base_224_21k.pth",
"vit-mocov3-base/16": "https://dl.fbaipublicfiles.com/moco-v3/vit-b-300ep/vit-b-300ep.pth.tar",
"vit-mae-base/16": "https://dl.fbaipublicfiles.com/mae/pretrain/mae_pretrain_vit_base.pth",
'vit-mae-large/16': "https://dl.fbaipublicfiles.com/mae/pretrain/mae_pretrain_vit_large.pth",
'vit-mae-huge/14': 'https://dl.fbaipublicfiles.com/mae/pretrain/mae_pretrain_vit_huge.pth'
}
def get_vit(cfg=None, load_imagenet_weights=False):
"""Build ViT models.
Args:
cfg (OmegaConfig): Hydra config
load_imagenet_weights (bool): Whether to load imagenet weights
Return:
model: ViT model
"""
arch = cfg.model.arch
if '16' in arch:
patch_size = 16
elif '8' in arch:
patch_size = 8
elif '14' in arch:
patch_size = 14
else:
raise ValueError("Only 8/14/16 are supported.")
if 'tiny' in arch.lower():
embed_dim = 192
num_heads = 3
depth = 12
elif 'small' in arch.lower():
embed_dim = 384
num_heads = 6
depth = 12
elif 'base' in arch.lower():
embed_dim = 768
num_heads = 12
depth = 12
elif 'large' in arch.lower():
embed_dim = 1024
num_heads = 16
depth = 24
elif 'huge' in arch.lower():
embed_dim = 1280
num_heads = 16
depth = 32
else:
raise ValueError("Only tiny/small/base/large/huge are supported.")
model = VisionTransformer(
patch_size=patch_size, embed_dim=embed_dim, depth=depth,
num_heads=num_heads, mlp_ratio=4, qkv_bias=True, drop_path_rate=cfg.model.vit_dpr,
norm_layer=partial(nn.LayerNorm, eps=1e-6), frozen_stages=cfg.model.frozen_stages)
if load_imagenet_weights:
path = urls_dic[arch]
if path.startswith('http'):
state_dict = model_zoo.load_url(path)
else:
state_dict = torch.load(path)
if 'state_dict' in state_dict.keys():
state_dict = state_dict['state_dict']
if 'model' in state_dict.keys():
state_dict = state_dict['model']
model.load_state_dict(state_dict, strict=False)
return model
def get_fan(cfg, load_imagenet_weights=False):
"""Build FAN models.
Args:
cfg (OmegaConfig): Hydra config
load_imagenet_weights (bool): Whether to load imagenet weights
Return:
model: FAN model
"""
arch = cfg.model.arch
if arch in list(fan_dict.keys()):
return fan_dict[arch](pretrained=load_imagenet_weights)
raise ValueError(f"Only {list(fan_dict.keys())} are supported.")
def build_model(cfg):
"""Model builder.
Args:
cfg (OmegaConfig): Hydra config
Return:
backbone: either ViT or FAN model
"""
if 'vit' in cfg.model.arch:
backbone = get_vit(cfg, load_imagenet_weights=False)
elif 'fan' in cfg.model.arch:
backbone = get_fan(cfg, load_imagenet_weights=False)
else:
raise ValueError('Only vit and fan are supported.')
return backbone
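# Illustrative usage sketch with placeholder values. For the ViT path only the config
# fields read by get_vit are needed (model.arch, model.vit_dpr, model.frozen_stages);
# the values below are examples, not recommended settings.
#
#   from omegaconf import OmegaConf
#   cfg = OmegaConf.create({'model': {'arch': 'vit-mae-base/16',
#                                     'vit_dpr': 0.1,
#                                     'frozen_stages': -1}})
#   backbone = build_model(cfg)   # VisionTransformer with patch_size=16, embed_dim=768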
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/mal/models/vit_builder.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Optimizer utils."""
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/mal/optimizers/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# This work is made available under the Nvidia Source Code License-NC.
# To view a copy of this license, visit
# https://github.com/NVlabs/MAL/blob/main/LICENSE
"""AdamW optimizer with step."""
from torch.optim import AdamW
class AdamWwStep(AdamW):
"""AdamW optimizer with step."""
def __init__(self, *args, **kwargs):
"""Init."""
super().__init__(*args, **kwargs)
for param_group in self.param_groups:
param_group['step'] = 0
param_group['epoch'] = 0
def step(self, closure=None):
"""Step."""
super().step(closure)
for param_group in self.param_groups:
param_group['step'] = param_group['step'] + 1
def next_epoch(self):
"""Next epoch."""
for param_group in self.param_groups:
param_group['epoch'] += 1
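# Illustrative usage sketch: the wrapper behaves like torch.optim.AdamW but also keeps
# per-param-group 'step' and 'epoch' counters for learning-rate schedules that adjust
# the LR by iteration.
#
#   optimizer = AdamWwStep(model.parameters(), lr=1e-4, weight_decay=0.05)
#   loss.backward()
#   optimizer.step()        # increments param_group['step'] in every group
#   optimizer.next_epoch()  # increments param_group['epoch'] in every group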
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/mal/optimizers/adamw.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MAL scripts."""
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/mal/scripts/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# This work is made available under the Nvidia Source Code License-NC.
# To view a copy of this license, visit
# https://github.com/NVlabs/MAL/blob/main/LICENSE
"""MAL training script."""
import logging
import os
import glob
import warnings
import torch
from pytorch_lightning import Trainer, seed_everything
from pytorch_lightning.callbacks import ModelCheckpoint
from nvidia_tao_pytorch.core.callbacks.loggers import TAOStatusLogger
import nvidia_tao_pytorch.core.loggers.api_logging as status_logging
from nvidia_tao_pytorch.cv.mal.config.default_config import ExperimentConfig
from nvidia_tao_pytorch.cv.mal.datasets.pl_data_module import WSISDataModule
from nvidia_tao_pytorch.core.hydra.hydra_runner import hydra_runner
from nvidia_tao_pytorch.cv.mal.models.mal import MAL
from nvidia_tao_pytorch.cv.mal.utils.config_utils import update_config
warnings.filterwarnings("ignore")
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level='INFO')
logger = logging.getLogger(__name__)
spec_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
@hydra_runner(
config_path=os.path.join(spec_root, "experiment_specs"),
config_name="train", schema=ExperimentConfig
)
def run_experiment(cfg: ExperimentConfig) -> None:
"""Run training experiment."""
# set random seed
seed_everything(cfg.train.seed)
cfg = update_config(cfg, 'train')
os.makedirs(cfg.results_dir, exist_ok=True)
status_logger_callback = TAOStatusLogger(
cfg.results_dir,
append=True,
num_epochs=cfg.train.num_epochs
)
status_logging.set_status_logger(status_logger_callback.logger)
# gpu indices
if len(cfg.gpu_ids) == 0:
cfg.gpu_ids = list(range(torch.cuda.device_count()))
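# Linear LR scaling: the base learning rate is multiplied by the effective batch size
# (number of GPUs x per-GPU batch size).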
cfg.train.lr = cfg.train.lr * len(cfg.gpu_ids) * cfg.train.batch_size
cfg.train.min_lr = cfg.train.lr * cfg.train.min_lr_rate
num_workers = len(cfg.gpu_ids) * cfg.dataset.num_workers_per_gpu
logger.info("Setting up dataloader...")
data_loader = WSISDataModule(
num_workers=num_workers,
load_train=True,
load_val=True, cfg=cfg)
num_iter_per_epoch = len(data_loader.train_dataloader())
ModelCheckpoint.FILE_EXTENSION = ".pth"
checkpoint_callback = ModelCheckpoint(
dirpath=cfg.results_dir,
filename=f'{cfg.model.arch.replace("/", "-")}' + '{epoch:03d}',
save_top_k=-1,
every_n_epochs=cfg.train.save_every_k_epoch,
save_weights_only=True,
save_last=False)
resume_checkpoint_callback = ModelCheckpoint(
dirpath=cfg.results_dir,
filename=f'{cfg.model.arch.replace("/", "-")}_resume',
save_top_k=1,
every_n_epochs=cfg.train.save_every_k_epoch,
save_last=False)
resume_ckpt = sorted(glob.glob(
os.path.join(cfg.results_dir, f'{cfg.model.arch.replace("/", "-")}_resume*')))
if resume_ckpt:
resume_ckpt = resume_ckpt[-1]
logger.info(f"Training will resume from {resume_ckpt}.")
cfg.checkpoint = None
logger.info("Building MAL models...")
model = MAL(
cfg=cfg, num_iter_per_epoch=num_iter_per_epoch,
categories=data_loader._train_data_loader.dataset.coco.dataset['categories'])
trainer = Trainer(
gpus=cfg.gpu_ids,
num_nodes=cfg.num_nodes,
strategy=cfg.strategy,
devices=None,
callbacks=[status_logger_callback, checkpoint_callback, resume_checkpoint_callback],
accelerator='gpu',
default_root_dir=cfg.results_dir,
max_epochs=cfg.train.num_epochs,
precision=16 if cfg.train.use_amp else 32,
check_val_every_n_epoch=cfg.train.val_interval,
accumulate_grad_batches=cfg.train.accum_grad_batches)
trainer.fit(model, data_loader, ckpt_path=resume_ckpt or None)
if __name__ == '__main__':
try:
run_experiment()
status_logging.get_status_logger().write(
status_level=status_logging.Status.SUCCESS,
message="Training finished successfully"
)
except (KeyboardInterrupt, SystemExit):
status_logging.get_status_logger().write(
message="Training was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
except Exception as e:
status_logging.get_status_logger().write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
raise e
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/mal/scripts/train.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# This work is made available under the Nvidia Source Code License-NC.
# To view a copy of this license, visit
# https://github.com/NVlabs/MAL/blob/main/LICENSE
"""MAL inference script."""
import os
import warnings
import torch
from pytorch_lightning import Trainer
import nvidia_tao_pytorch.core.loggers.api_logging as status_logging
from nvidia_tao_pytorch.cv.mal.config.default_config import ExperimentConfig
from nvidia_tao_pytorch.cv.mal.datasets.pl_data_module import WSISDataModule
from nvidia_tao_pytorch.core.hydra.hydra_runner import hydra_runner
from nvidia_tao_pytorch.cv.mal.models.mal import MALPseudoLabels
from nvidia_tao_pytorch.cv.mal.utils.config_utils import update_config
warnings.filterwarnings("ignore")
spec_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
@hydra_runner(
config_path=os.path.join(spec_root, "experiment_specs"),
config_name="infer", schema=ExperimentConfig
)
def run_inference(cfg: ExperimentConfig) -> None:
"""Run pseudo-label generation."""
cfg = update_config(cfg, 'inference')
os.makedirs(cfg.results_dir, exist_ok=True)
# Set status logging
status_file = os.path.join(cfg.results_dir, "status.json")
status_logging.set_status_logger(
status_logging.StatusLogger(
filename=status_file,
append=True
)
)
status_logging.get_status_logger().write(
status_level=status_logging.Status.STARTED,
message="Starting MAL inference"
)
# gpu indices
if len(cfg.gpu_ids) == 0:
cfg.gpu_ids = list(range(torch.cuda.device_count()))
cfg.train.lr = 0
cfg.train.min_lr = 0
num_workers = len(cfg.gpu_ids) * cfg.dataset.num_workers_per_gpu
# override data path and batch_size
cfg.dataset.val_ann_path = cfg.inference.ann_path
cfg.dataset.val_img_dir = cfg.inference.img_dir
cfg.dataset.load_mask = cfg.inference.load_mask
cfg.train.batch_size = cfg.inference.batch_size
cfg.evaluate.use_mixed_model_test = False
cfg.evaluate.use_teacher_test = False
cfg.evaluate.comp_clustering = False
cfg.evaluate.use_flip_test = False
data_loader = WSISDataModule(
num_workers=num_workers,
load_train=False,
load_val=True, cfg=cfg)
# Phase 2: Generating pseudo-labels
model = MALPseudoLabels(
cfg=cfg,
categories=data_loader._val_data_loader.dataset.coco.dataset['categories'])
trainer = Trainer(
gpus=cfg.gpu_ids,
strategy=cfg.strategy,
devices=1,
accelerator='gpu',
precision=16,
check_val_every_n_epoch=1,
resume_from_checkpoint=cfg.checkpoint
)
trainer.validate(model, ckpt_path=cfg.checkpoint, dataloaders=data_loader.val_dataloader())
if __name__ == '__main__':
try:
run_inference()
status_logging.get_status_logger().write(
status_level=status_logging.Status.SUCCESS,
message="Inference finished successfully."
)
except (KeyboardInterrupt, SystemExit):
status_logging.get_status_logger().write(
message="Inference was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
except Exception as e:
status_logging.get_status_logger().write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
raise e
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/mal/scripts/inference.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# This work is made available under the Nvidia Source Code License-NC.
# To view a copy of this license, visit
# https://github.com/NVlabs/MAL/blob/main/LICENSE
"""MAL evaluation script."""
import os
import warnings
import torch
from pytorch_lightning import Trainer
import nvidia_tao_pytorch.core.loggers.api_logging as status_logging
from nvidia_tao_pytorch.cv.mal.config.default_config import ExperimentConfig
from nvidia_tao_pytorch.cv.mal.datasets.pl_data_module import WSISDataModule
from nvidia_tao_pytorch.core.hydra.hydra_runner import hydra_runner
from nvidia_tao_pytorch.cv.mal.models.mal import MAL
from nvidia_tao_pytorch.cv.mal.utils.config_utils import update_config
warnings.filterwarnings("ignore")
spec_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
@hydra_runner(
config_path=os.path.join(spec_root, "experiment_specs"),
config_name="eval", schema=ExperimentConfig
)
def run_evaluation(cfg: ExperimentConfig) -> None:
"""Run evaluation."""
cfg = update_config(cfg, 'evaluate')
os.makedirs(cfg.results_dir, exist_ok=True)
# Set status logging
status_file = os.path.join(cfg.results_dir, "status.json")
status_logging.set_status_logger(
status_logging.StatusLogger(
filename=status_file,
append=True
)
)
status_logging.get_status_logger().write(
status_level=status_logging.Status.STARTED,
message="Starting MAL evaluation"
)
# gpu indices
if len(cfg.gpu_ids) == 0:
cfg.gpu_ids = list(range(torch.cuda.device_count()))
cfg.train.lr = 0
cfg.train.min_lr = 0
cfg.train.batch_size = cfg.evaluate.batch_size
num_workers = len(cfg.gpu_ids) * cfg.dataset.num_workers_per_gpu
data_loader = WSISDataModule(
num_workers=num_workers,
load_train=False,
load_val=True, cfg=cfg)
model = MAL(
cfg=cfg, num_iter_per_epoch=1,
categories=data_loader._val_data_loader.dataset.coco.dataset['categories'])
trainer = Trainer(
devices=1,
gpus=cfg.gpu_ids,
num_nodes=cfg.num_nodes,
strategy=cfg.strategy,
accelerator='gpu', max_epochs=-1,
precision=16, check_val_every_n_epoch=1,
resume_from_checkpoint=cfg.checkpoint,
accumulate_grad_batches=1)
trainer.validate(model, ckpt_path=cfg.checkpoint, dataloaders=data_loader.val_dataloader())
if __name__ == '__main__':
try:
run_evaluation()
status_logging.get_status_logger().write(
status_level=status_logging.Status.SUCCESS,
message="Evaluation finished successfully"
)
except (KeyboardInterrupt, SystemExit):
status_logging.get_status_logger().write(
message="Evaluation was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
except Exception as e:
status_logging.get_status_logger().write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
raise e
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/mal/scripts/evaluate.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MAL entrypoint."""
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/mal/entrypoint/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Define entrypoint to run tasks for MAL."""
import importlib
import os
import pkgutil
import argparse
import subprocess
import sys
from nvidia_tao_pytorch.cv.mal import scripts
def get_subtasks(package):
"""Get supported subtasks for a given task.
This function lists the tasks in the .scripts folder.
Args:
package (Module): The scripts package to inspect.
Returns:
subtasks (dict): Dictionary of files.
"""
module_path = package.__path__
modules = {}
# Collect modules dynamically.
for _, task, is_package in pkgutil.walk_packages(module_path):
if is_package:
continue
module_name = package.__name__ + '.' + task
module_details = {
"module_name": module_name,
"runner_path": os.path.abspath(importlib.import_module(module_name).__file__),
}
modules[task] = module_details
return modules
def launch(parser, subtasks):
"""CLI function that executes subtasks.
Args:
parser: Created parser object for a given task.
subtasks: list of subtasks for a given task.
"""
# Subtasks for a given model.
parser.add_argument(
'subtask', default='train', choices=subtasks.keys(), help="Subtask for a given task/model.",
)
# Add standard TAO arguments.
parser.add_argument(
"-r",
"--results_dir",
help="Path to a folder where the experiment outputs should be written. (DEFAULT: ./)",
required=False,
)
parser.add_argument(
"-e",
"--experiment_spec_file",
help="Path to the experiment spec file.",
required=True)
parser.add_argument(
"-g",
"--gpus",
help="Number of GPUs or gpu index to use.",
type=str,
default=None
)
parser.add_argument(
"-o",
"--output_specs_dir",
help="Path to a target folder where experiment spec files will be downloaded.",
default=None
)
# Parse the arguments.
args, unknown_args = parser.parse_known_args()
script_args = ""
# Process spec file for all commands except the one for getting spec files ;)
if args.subtask not in ["download_specs", "pitch_stats"]:
# Make sure the user provides spec file.
if args.experiment_spec_file is None:
print("ERROR: The subtask `{}` requires the following argument: -e/--experiment_spec_file".format(args.subtask))
exit(1)
# Make sure the file exists!
if not os.path.exists(args.experiment_spec_file):
print("ERROR: The indicated experiment spec file `{}` doesn't exist!".format(args.experiment_spec_file))
exit(1)
# Split spec file_path into config path and config name.
path, name = os.path.split(args.experiment_spec_file)
if path != '':
script_args += " --config-path " + os.path.realpath(path)
script_args += " --config-name " + name
# Find relevant module and pass args.
if args.subtask in ["train", "evaluate", "inference"]:
if args.results_dir:
script_args += " results_dir=" + args.results_dir
if args.gpus:
try:
script_args += f" gpu_ids=[{','.join([str(i) for i in range(int(args.gpus))])}]"
except ValueError:
script_args += f" gpu_ids={args.gpus}"
script = subtasks[args.subtask]["runner_path"]
# Pass unknown args to call
unknown_args_as_str = " ".join(unknown_args)
# Create a system call.
call = "python " + script + script_args + " " + unknown_args_as_str
try:
# Run the script.
subprocess.check_call(call, shell=True, stdout=sys.stdout, stderr=sys.stdout)
except subprocess.CalledProcessError as e:
if e.output is not None:
print(e.output)
exit(1)
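# Illustrative invocation (paths are placeholders): running the entrypoint as
#   python mal.py train -e /path/to/train.yaml -g 2 -r /path/to/results
# resolves the 'train' subtask to scripts/train.py and forwards the spec file,
# results_dir and gpu_ids as Hydra overrides via the system call built above.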
def main():
"""Main entrypoint wrapper."""
# Create parser for a given task.
parser = argparse.ArgumentParser(
"MAL",
add_help=True,
description="TAO Toolkit entrypoint for MAL"
)
# Build list of subtasks by inspecting the scripts package.
subtasks = get_subtasks(scripts)
# Parse the arguments and launch the subtask.
launch(
parser, subtasks
)
if __name__ == '__main__':
main()
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/mal/entrypoint/mal.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Re-Identification root module."""
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/re_identification/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Re-Identification config module."""
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/re_identification/config/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Default config file."""
from typing import Optional, List
from dataclasses import dataclass, field
@dataclass
class ReIDModelConfig:
"""Re-Identification model configuration for training, testing & validation."""
backbone: str = "resnet_50"
last_stride: int = 1
pretrain_choice: str = "imagenet"
pretrained_model_path: Optional[str] = None
input_channels: int = 3
input_width: int = 128
input_height: int = 256
neck: str = "bnneck"
feat_dim: int = 256
neck_feat: str = "after"
metric_loss_type: str = "triplet"
with_center_loss: bool = False
with_flip_feature: bool = False
label_smooth: bool = True
@dataclass
class OptimConfig:
"""Optimizer configuration for the LR scheduler."""
name: str = "Adam"
lr_monitor: str = "val_loss"
steps: List[int] = field(default_factory=lambda: [40, 70])
gamma: float = 0.1
bias_lr_factor: float = 1
weight_decay: float = 0.0005
weight_decay_bias: float = 0.0005
warmup_factor: float = 0.01
warmup_iters: int = 10
warmup_method: str = 'linear'
base_lr: float = 0.00035
momentum: float = 0.9
center_loss_weight: float = 0.0005
center_lr: float = 0.5
triplet_loss_margin: float = 0.3
@dataclass
class ReIDDatasetConfig:
"""Re-Identification Dataset configuration template."""
train_dataset_dir: Optional[str] = None
test_dataset_dir: Optional[str] = None
query_dataset_dir: Optional[str] = None
num_classes: int = 751
batch_size: int = 64
val_batch_size: int = 128
num_workers: int = 8
pixel_mean: List[float] = field(default_factory=lambda: [0.485, 0.456, 0.406])
pixel_std: List[float] = field(default_factory=lambda: [0.226, 0.226, 0.226])
padding: int = 10
prob: float = 0.5
re_prob: float = 0.5
sampler: str = "softmax_triplet"
num_instances: int = 4
@dataclass
class ReIDReRankingConfig:
"""Re-Ranking configuration template for evaluation."""
re_ranking: bool = False
k1: int = 20
k2: int = 6
lambda_value: float = 0.3
max_rank: int = 10
num_query: int = 10
@dataclass
class ReIDTrainExpConfig:
"""Train experiment configuration template."""
results_dir: Optional[str] = None
gpu_ids: List[int] = field(default_factory=lambda: [0])
resume_training_checkpoint_path: Optional[str] = None
optim: OptimConfig = OptimConfig()
num_epochs: int = 1
checkpoint_interval: int = 5
grad_clip: float = 0.0
@dataclass
class ReIDInferenceExpConfig:
"""Inference experiment configuration template."""
results_dir: Optional[str] = None
checkpoint: Optional[str] = None
output_file: Optional[str] = None
test_dataset: Optional[str] = None
query_dataset: Optional[str] = None
gpu_id: int = 0
@dataclass
class ReIDEvalExpConfig:
"""Evaluation experiment configuration template."""
results_dir: Optional[str] = None
checkpoint: Optional[str] = None
output_sampled_matches_plot: Optional[str] = None
output_cmc_curve_plot: Optional[str] = None
test_dataset: Optional[str] = None
query_dataset: Optional[str] = None
gpu_id: int = 0
@dataclass
class ReIDExportExpConfig:
"""Export experiment configuraiton template."""
results_dir: Optional[str] = None
checkpoint: Optional[str] = None
onnx_file: Optional[str] = None
gpu_id: int = 0
@dataclass
class ExperimentConfig:
"""Experiment config."""
results_dir: Optional[str] = None
encryption_key: Optional[str] = None
model: ReIDModelConfig = ReIDModelConfig()
dataset: ReIDDatasetConfig = ReIDDatasetConfig()
re_ranking: ReIDReRankingConfig = ReIDReRankingConfig()
train: ReIDTrainExpConfig = ReIDTrainExpConfig()
inference: ReIDInferenceExpConfig = ReIDInferenceExpConfig()
evaluate: ReIDEvalExpConfig = ReIDEvalExpConfig()
export: ReIDExportExpConfig = ReIDExportExpConfig()
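# Illustrative spec snippet (paths and values are placeholders) showing how a YAML
# experiment spec maps onto the nested dataclasses above:
#
#   model:
#     backbone: resnet_50
#     feat_dim: 256
#   dataset:
#     train_dataset_dir: /data/market1501/bounding_box_train
#     num_classes: 751
#   train:
#     num_epochs: 120
#     optim:
#       base_lr: 0.00035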
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/re_identification/config/default_config.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Original source taken from https://github.com/michuanhaohao/reid-strong-baseline
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Re-Ranking Module for getting metrics."""
import numpy as np
from typing import List
EPSILON = 1e-10
def calc_euclidean_dist(qf: np.array, gf: np.array) -> np.array:
"""Calculate the Euclidean distance between query features and gallery features.
Args:
qf (np.array): Query features of shape (m x n).
gf (np.array): Gallery features of shape (p x q).
Returns:
np.array: Distance matrix of shape (m x p).
"""
dist_mat = 2 - (2 * np.dot(qf, gf.T))
dist_mat = np.sqrt(np.clip(dist_mat, 0, 4)) / 2
return dist_mat
def calc_batch_euclidean_dist(qf: np.array, gf: np.array, N: int = 6000) -> np.array:
"""Calculate the Euclidean distance between query features and gallery features in batches.
Args:
qf (np.array): Query features of shape (m x n).
gf (np.array): Gallery features of shape (p x q).
N (int, optional): Batch size. Defaults to 6000.
Returns:
np.array: Distance matrix of shape (m x p).
"""
m = qf.shape[0]
n = gf.shape[0]
dist_mat: List[np.array] = list()
for j in range(n // N + 1):
temp_gf = gf[j * N:j * N + N]
temp_qd: List[np.array] = list()
for i in range(m // N + 1):
temp_qf = qf[i * N:i * N + N]
temp_d = calc_euclidean_dist(temp_qf, temp_gf)
temp_qd.append(temp_d)
temp_qd = np.concatenate(temp_qd, axis=0)
temp_qd = temp_qd / (np.max(temp_qd, axis=0) + EPSILON)
dist_mat.append(temp_qd.T)
dist_mat = np.concatenate(dist_mat, axis=0)
return dist_mat
def compute_batch_topk(qf: np.array, gf: np.array, k1: int, N: int = 6000) -> np.array:
"""Compute the top-k nearest neighbors and return (k+1) results.
Args:
qf (np.array): Query features of shape (m x n).
gf (np.array): Gallery features of shape (p x q).
k1 (int): k value for computing k-reciprocal feature.
N (int, optional): Batch size. Defaults to 6000.
Returns:
np.array: Initial rank matrix of shape (m x k1).
"""
m = qf.shape[0]
n = gf.shape[0]
initial_rank: List[np.array] = list()
for j in range(n // N + 1):
temp_gf = gf[j * N:j * N + N]
temp_qd: List[np.array] = list()
for i in range(m // N + 1):
temp_qf = qf[i * N:i * N + N]
temp_d = calc_euclidean_dist(temp_qf, temp_gf)
temp_qd.append(temp_d)
temp_qd = np.concatenate(temp_qd, axis=0)
temp_qd = temp_qd / (np.max(temp_qd, axis=0) + EPSILON)
temp_qd = temp_qd.T
initial_rank.append(np.argsort(temp_qd, axis=1)[:, :k1])
initial_rank = np.concatenate(initial_rank, axis=0)
return initial_rank
def compute_batch_v(feat: np.array, R: List[np.array], all_num: int) -> np.array:
"""Compute the vectors of k-reciprocal nearest neighbors.
Args:
feat (np.array): Feature embeddings.
R (List[np.array]): k-reciprocal expansion indices.
all_num (int): Length of all the features.
Returns:
np.array: k-reciprocal nearest neighbors matrix of shape (all_num x all_num).
"""
V = np.zeros((all_num, all_num), dtype=np.float32)
m = feat.shape[0]
for i in range(m):
temp_gf = feat[i].reshape(1, -1)
temp_qd = calc_euclidean_dist(temp_gf, feat)
temp_qd = temp_qd / (np.max(temp_qd) + EPSILON)
temp_qd = temp_qd.squeeze()
temp_qd = temp_qd[R[i]]
weight = np.exp(-temp_qd)
weight_sum = np.sum(weight)
if weight_sum > 0:
weight = weight / weight_sum
V[i, R[i]] = weight.astype(np.float32)
return V
def get_k_reciprocal_index(initial_rank: np.array, i: int, k1: int) -> np.array:
"""Get the k-reciprocal nearest neighbor index.
Args:
initial_rank (np.array): Initial rank matrix.
i (int): Index in the k-reciprocal neighbor set.
k1 (int): k value for computing k-reciprocal feature.
Returns:
np.array: k-reciprocal nearest neighbor index.
"""
forward_k_neigh_index = initial_rank[i, :k1 + 1]
backward_k_neigh_index = initial_rank[forward_k_neigh_index, :k1 + 1]
fi = np.where(backward_k_neigh_index == i)[0]
return forward_k_neigh_index[fi]
def re_rank(prob_feat: np.array, gal_feat: np.array, k1: int, k2: int, lambda_value: float) -> np.array:
"""Apply re-ranking for distance computation.
Args:
prob_feat (np.array): Probe features.
gal_feat (np.array): Gallery features.
k1 (int): k value for computing k-reciprocal feature.
k2 (int): k value for local value expansion.
lambda_value (float): Lambda for original distance when combining with Jaccard distance.
Returns:
np.array: Final distance matrix.
"""
query_num = prob_feat.shape[0]
all_num = query_num + gal_feat.shape[0]
feat = np.append(prob_feat, gal_feat, axis=0)
initial_rank = compute_batch_topk(feat, feat, k1 + 1, N=6000)
del prob_feat
del gal_feat
R: List[np.array] = list()
for i in range(all_num):
k_reciprocal_index = get_k_reciprocal_index(initial_rank, i, k1)
k_reciprocal_expansion_index = k_reciprocal_index
for j in range(len(k_reciprocal_index)):
candidate = k_reciprocal_index[j]
candidate_k_reciprocal_index = get_k_reciprocal_index(initial_rank, candidate, int(np.around(k1 / 2.)))
if len(np.intersect1d(candidate_k_reciprocal_index, k_reciprocal_index)) > 2. / 3 * len(candidate_k_reciprocal_index):
k_reciprocal_expansion_index = np.append(k_reciprocal_expansion_index, candidate_k_reciprocal_index)
k_reciprocal_expansion_index = np.unique(k_reciprocal_expansion_index)
R.append(k_reciprocal_expansion_index)
V = compute_batch_v(feat, R, all_num)
del R
initial_rank = initial_rank[:, :k2]
if k2 != 1:
V_qe = np.zeros_like(V, dtype=np.float16)
for i in range(all_num):
V_qe[i, :] = np.mean(V[initial_rank[i], :], axis=0)
V = V_qe
del V_qe
del initial_rank
inv_index: List[int] = list()
for i in range(all_num):
inv_index.append(np.where(V[:, i] != 0)[0])
jaccard_dist = np.zeros((query_num, all_num), dtype=np.float32)
for i in range(query_num):
temp_min = np.zeros(shape=[1, all_num], dtype=np.float32)
ind_non_zero = np.where(V[i, :] != 0)[0]
ind_images = [inv_index[ind] for ind in ind_non_zero]
for j in range(len(ind_non_zero)):
temp_min[0, ind_images[j]] = temp_min[0, ind_images[j]] + np.minimum(V[i, ind_non_zero[j]],
V[ind_images[j], ind_non_zero[j]])
jaccard_dist[i] = 1 - temp_min / (2. - temp_min)
del V
original_dist = calc_batch_euclidean_dist(feat, feat[:query_num, :])
del feat
final_dist = jaccard_dist * (1 - lambda_value) + original_dist * lambda_value
del original_dist
del jaccard_dist
final_dist = final_dist[:query_num, query_num:]
final_dist = np.clip(final_dist, 0, 1)
return final_dist
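# A minimal, hedged smoke test of re_rank on random features. The shapes and
# the k1/k2/lambda values are illustrative assumptions, not recommended settings.
if __name__ == "__main__":
    _rng = np.random.default_rng(0)
    _probe = _rng.standard_normal((8, 16)).astype(np.float32)
    _gallery = _rng.standard_normal((20, 16)).astype(np.float32)
    _dist = re_rank(_probe, _gallery, k1=5, k2=3, lambda_value=0.3)
    # One row per probe, one column per gallery image, values clipped to [0, 1].
    print(_dist.shape)  # (8, 20)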
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/re_identification/utils/re_ranking.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Original source taken from https://github.com/michuanhaohao/reid-strong-baseline
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Re-Identification Metrics."""
import numpy as np
import torch
from nvidia_tao_pytorch.cv.re_identification.utils.eval_reid import eval_func
from nvidia_tao_pytorch.cv.re_identification.utils.re_ranking import re_rank
def euclidean_distance(qf, gf):
"""Compute the euclidean distance between two given matrices.
Args:
qf (torch.Tensor): Matrix A of size (m x n)
gf (torch.Tensor): Matrix B of size (p x q)
Returns:
numpy.ndarray: A numpy array of euclidean distance, of size (m x p).
"""
m = qf.shape[0]
n = gf.shape[0]
dist_mat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \
torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t()
dist_mat.addmm_(qf, gf.t(), beta=1, alpha=-2)
return dist_mat.cpu().numpy()
def cosine_similarity(qf, gf):
"""Compute the cosine similarity between two given matrices.
Args:
qf (torch.Tensor): Matrix A of size (m x n)
gf (torch.Tensor): Matrix B of size (p x q)
Returns:
numpy.ndarray: A numpy array of cosine similarity, of size (m x p).
"""
epsilon = 0.00001
dist_mat = qf.mm(gf.t())
qf_norm = torch.norm(qf, p=2, dim=1, keepdim=True) # mx1
gf_norm = torch.norm(gf, p=2, dim=1, keepdim=True) # nx1
qg_normdot = qf_norm.mm(gf_norm.t())
dist_mat = dist_mat.mul(1 / qg_normdot).cpu().numpy()
dist_mat = np.clip(dist_mat, -1 + epsilon, 1 - epsilon)
dist_mat = np.arccos(dist_mat)
return dist_mat
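# A minimal, hedged sketch of the two distance helpers on random embeddings.
# The batch sizes and embedding dimension are illustrative assumptions.
if __name__ == "__main__":
    torch.manual_seed(0)
    _q = torch.randn(4, 8)
    _g = torch.randn(6, 8)
    print(euclidean_distance(_q, _g).shape)  # (4, 6) squared Euclidean distances
    print(cosine_similarity(_q, _g).shape)   # (4, 6) arccos distances in radians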
class R1_mAP():
"""Class to compute the rank-1 mean Average Precision (mAP) for re-identification.
This class provides the functions to compute the rank-1 mean Average Precision,
a common evaluation metric in person re-identification tasks.
"""
def __init__(self, num_query, cfg, prepare_for_training, feat_norm=True):
"""Initialize the R1_mAP class with the given configuration.
Args:
num_query (int): The number of query images.
cfg (dict): Configuration dictionary containing re_ranking parameters.
prepare_for_training (bool): Specify whether the data is prepared for training.
feat_norm (bool, optional): Whether to normalize the feature vectors. Defaults to True.
"""
super(R1_mAP, self).__init__()
self.num_query = num_query
self.max_rank = cfg["re_ranking"]["max_rank"]
self.feat_norm = feat_norm
self.feats = []
self.pids = []
self.camids = []
self.img_paths = []
self.cfg = cfg
self.prepare_for_training = prepare_for_training
def reset(self):
"""Reset the stored feature vectors, person IDs, camera IDs, and image paths."""
self.feats = []
self.pids = []
self.camids = []
self.img_paths = []
def update(self, feat, pid, camid, img_path):
"""Update the stored feature vectors, person IDs, camera IDs, and image paths with new data.
Args:
feat (torch.Tensor): The feature vectors.
pid (list): The person IDs.
camid (list): The camera IDs.
img_path (list): The image paths.
"""
self.feats.append(feat)
self.pids.extend(np.asarray(pid))
self.camids.extend(np.asarray(camid))
self.img_paths.extend(img_path)
def compute(self):
"""Compute the rank-1 mean Average Precision (mAP) and CMC rank list.
Returns:
list: The Cumulative Matching Characteristics (CMC) rank list.
float: The mean Average Precision (mAP) score.
"""
feats = torch.cat(self.feats, dim=0)
if self.feat_norm:
print("The test features are normalized.")
feats = torch.nn.functional.normalize(feats, dim=1, p=2)
# query
qf = feats[:self.num_query]
q_pids = np.asarray(self.pids[:self.num_query])
q_camids = np.asarray(self.camids[:self.num_query])
q_img_paths = self.img_paths[:self.num_query]
# gallery
gf = feats[self.num_query:]
g_pids = np.asarray(self.pids[self.num_query:])
g_camids = np.asarray(self.camids[self.num_query:])
g_img_paths = self.img_paths[self.num_query:]
m, n = qf.shape[0], gf.shape[0]
distmat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \
torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t()
distmat.addmm_(qf, gf.t(), beta=1, alpha=-2)
distmat = distmat.cpu().numpy()
cmc, mAP = eval_func(self.cfg, distmat, q_pids, g_pids, q_camids, g_camids, q_img_paths, g_img_paths, self.prepare_for_training)
return cmc, mAP
class R1_mAP_reranking():
"""Class to compute the rank-1 mean Average Precision (mAP) with re-ranking for re-identification.
This class provides the functions to compute the rank-1 mean Average Precision with re-ranking,
a common evaluation metric in person re-identification tasks.
"""
def __init__(self, num_query, cfg, prepare_for_training, feat_norm=True):
"""Initialize the R1_mAP_reranking class with the given configuration.
Args:
num_query (int): The number of query images.
cfg (dict): Configuration dictionary containing re_ranking parameters.
prepare_for_training (bool): Specify whether the data is prepared for training.
feat_norm (bool, optional): Whether to normalize the feature vectors. Defaults to True.
"""
super(R1_mAP_reranking, self).__init__()
self.num_query = num_query
self.max_rank = cfg["re_ranking"]["max_rank"]
self.feat_norm = feat_norm
self.feats = []
self.pids = []
self.camids = []
self.img_paths = []
self.cfg = cfg
self.prepare_for_training = prepare_for_training
def reset(self):
"""Reset the stored feature vectors, person IDs, camera IDs, and image paths."""
self.feats = []
self.pids = []
self.camids = []
self.img_paths = []
def update(self, feat, pid, camid, img_path):
"""Update the stored feature vectors, person IDs, camera IDs, and image paths with new data.
Args:
feat (torch.Tensor): The feature vectors.
pid (list): The person IDs.
camid (list): The camera IDs.
img_path (list): The image paths.
"""
self.feats.append(feat)
self.pids.extend(np.asarray(pid))
self.camids.extend(np.asarray(camid))
self.img_paths.extend(img_path)
def compute(self):
"""Compute the rank-1 mean Average Precision (mAP) and CMC rank list using re-ranking.
This method first applies re-ranking on the feature vectors, then computes the mAP and CMC rank list.
Returns:
list: The Cumulative Matching Characteristics (CMC) rank list.
float: The mean Average Precision (mAP) score.
"""
feats = torch.cat(self.feats, dim=0)
if self.feat_norm:
print("The test features are normalized.")
feats = torch.nn.functional.normalize(feats, dim=1, p=2)
# query
qf = feats[:self.num_query].cpu().numpy()
q_pids = np.asarray(self.pids[:self.num_query])
q_camids = np.asarray(self.camids[:self.num_query])
q_img_paths = self.img_paths[:self.num_query]
# gallery
gf = feats[self.num_query:].cpu().numpy()
g_pids = np.asarray(self.pids[self.num_query:])
g_camids = np.asarray(self.camids[self.num_query:])
g_img_paths = self.img_paths[self.num_query:]
# m, n = qf.shape[0], gf.shape[0]
# distmat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \
# torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t()
# distmat.addmm_(1, -2, qf, gf.t())
# distmat = distmat.cpu().numpy()
print("The distance matrix is processed by re-ranking.")
distmat = re_rank(qf, gf, k1=self.cfg["re_ranking"]["k1"], k2=self.cfg["re_ranking"]["k2"], lambda_value=self.cfg["re_ranking"]["lambda_value"])
cmc, mAP = eval_func(self.cfg, distmat, q_pids, g_pids, q_camids, g_camids, q_img_paths, g_img_paths, self.prepare_for_training)
return cmc, mAP
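# A minimal, hedged end-to-end sketch of R1_mAP on synthetic features. The cfg
# dictionary is an assumption containing only the keys read by this class and
# eval_func; prepare_for_training=True keeps the run plot-free.
if __name__ == "__main__":
    torch.manual_seed(0)
    _cfg = {"re_ranking": {"max_rank": 5}}
    _metric = R1_mAP(num_query=4, cfg=_cfg, prepare_for_training=True)
    _q_feats = torch.randn(4, 16)
    # Gallery: two noisy copies of each query, taken from a different camera.
    _g_feats = _q_feats.repeat(2, 1) + 0.05 * torch.randn(8, 16)
    _metric.update(_q_feats, [0, 1, 2, 3], [0] * 4, ["q_%d.jpg" % i for i in range(4)])
    _metric.update(_g_feats, [0, 1, 2, 3] * 2, [1] * 8, ["g_%d.jpg" % i for i in range(8)])
    _cmc, _mAP = _metric.compute()
    print("Rank-1: {:.1%}, mAP: {:.1%}".format(_cmc[0], _mAP))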
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/re_identification/utils/reid_metric.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Re-Identification utils module."""
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/re_identification/utils/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Original source taken from https://github.com/michuanhaohao/reid-strong-baseline
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Scheduler Module for Re-Identification."""
from bisect import bisect_right
import torch
class WarmupMultiStepLR(torch.optim.lr_scheduler._LRScheduler):
"""Custom learning rate scheduler with initial warm-up phase.
This scheduler adjusts the learning rate according to the schedule defined by the `milestones`.
It also supports a warm-up phase at the start of training, where the learning rate is initially smaller
and gradually ramps up to its initial value.
Inherits from PyTorch's torch.optim.lr_scheduler._LRScheduler class.
Attributes:
milestones (list): List of epoch indices. The learning rate is decreased at these epochs.
gamma (float): Multiplicative factor of learning rate decay.
warmup_factor (float): Multiplicative factor of learning rate applied during the warm-up phase.
warmup_iters (int): Number of epochs for the warm-up phase.
warmup_method (str): The method for the warm-up phase, either 'constant' or 'linear'.
"""
def __init__(
self,
optimizer,
milestones,
gamma=0.1,
warmup_factor=1.0 / 3,
warmup_iters=500,
warmup_method="linear",
last_epoch=-1,
):
"""Initialize the learning rate scheduler.
Args:
optimizer (torch.optim.Optimizer): Wrapped optimizer.
milestones (list of int): List of epoch indices. Must be increasing.
gamma (float, optional): Factor by which the learning rate is reduced. Defaults to 0.1.
warmup_factor (float, optional): Factor for computing the starting warmup learning rate. Defaults to 1/3.
warmup_iters (int, optional): Number of warmup epochs at the start of training. Defaults to 500.
warmup_method (str, optional): Warmup method to use, either 'constant' or 'linear'. Defaults to 'linear'.
last_epoch (int, optional): The index of the last epoch. Defaults to -1.
Raises:
ValueError: If `milestones` are not in increasing order, or `warmup_method` is not 'constant' or 'linear'.
"""
if not list(milestones) == sorted(milestones):
raise ValueError("Milestones should be a list of" " increasing integers. Got {}".format(milestones))
if warmup_method not in ("constant", "linear"):
raise ValueError(
"Only 'constant' or 'linear' warmup_method accepted"
"got {}".format(warmup_method)
)
self.milestones = milestones
self.gamma = gamma
self.warmup_factor = warmup_factor
self.warmup_iters = warmup_iters
self.warmup_method = warmup_method
super(WarmupMultiStepLR, self).__init__(optimizer, last_epoch)
def get_lr(self):
"""Compute the learning rate at the current epoch.
Returns:
list of float: Learning rates for each parameter group.
"""
warmup_factor = 1
if self.last_epoch < self.warmup_iters:
if self.warmup_method == "constant":
warmup_factor = self.warmup_factor
elif self.warmup_method == "linear":
alpha = self.last_epoch / self.warmup_iters
warmup_factor = self.warmup_factor * (1 - alpha) + alpha
return [
base_lr * warmup_factor * self.gamma ** bisect_right(self.milestones, self.last_epoch)
for base_lr in self.base_lrs
]
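# A minimal, hedged sketch of WarmupMultiStepLR showing the linear warm-up and
# the milestone decays. The model, optimizer settings and milestones below are
# illustrative assumptions.
if __name__ == "__main__":
    _model = torch.nn.Linear(4, 2)
    _optimizer = torch.optim.SGD(_model.parameters(), lr=0.01)
    _scheduler = WarmupMultiStepLR(_optimizer, milestones=[10, 15], gamma=0.1,
                                   warmup_factor=1.0 / 3, warmup_iters=5)
    for _epoch in range(20):
        print(_epoch, _optimizer.param_groups[0]["lr"])
        _optimizer.step()
        _scheduler.step()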
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/re_identification/utils/scheduler.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utils for re-identification."""
import os
import random
from PIL import Image, ImageDraw
import matplotlib.pyplot as plt
def check_and_create(d):
"""
Check if a directory path is valid and create it.
Args:
d (str): The path of the directory to create.
Forward:
If the directory doesn't exist, it will be created.
"""
if not os.path.isdir(d):
os.makedirs(d)
def data_to_device(data):
"""
    Transfer data to the GPU.
    Args:
        data (torch.Tensor or list of torch.Tensor): Input data.
    Returns:
        torch.Tensor or list of torch.Tensor: The same data moved to GPU memory.
Forward:
Transfers the input data to the GPU memory. If the input data is a list,
each item will be individually transferred.
"""
if isinstance(data, list):
cuda_data = []
for item in data:
cuda_item = item.cuda(non_blocking=True)
cuda_data.append(cuda_item)
else:
cuda_data = data.cuda(non_blocking=True)
return cuda_data
def read_image(img_path):
"""
Check if the image path is valid and read the image.
Args:
img_path (str): Image path.
Returns:
img (Pillow): Image data.
Forward:
Reads an image file from the provided path and converts it to RGB format.
If the file does not exist, a FileNotFoundError will be raised.
"""
if not os.path.exists(img_path):
raise FileNotFoundError("{} does not exist".format(img_path))
img = Image.open(img_path).convert('RGB')
return img
def plot_evaluation_results(num_queries, query_maps, max_rank, output_file):
"""
Plot evaluation results from queries.
This method will plot a Mx(N+1) grid for images from query & gallery folders.
Query images will be randomly sampled from their folder. The closest matching
N gallery images will be plotted besides the query images.
M = num_queries
N = max_rank
The image in the first column comes from the query image folder.
The image in the rest of the columns will come from the nearest
matches from the gallery folder.
A blue border is drawn over the images in the first column.
A green border over an image indicates a true positive match.
A red border over an image indicates a false positive match.
This plot is saved using matplotlib at output_file location.
Args:
num_queries (int): Number of queries to plot.
query_maps (list(list)): List of query images mapped with test images with their corresponding match status.
max_rank (int): Max rank to plot.
output_file (str): Output file to plot.
Forward:
Plots a grid of images showcasing the matches found for each query image.
The grid will have a width of max_rank + 1 and a height of num_queries.
Images are color-coded based on their match status. The plot is then saved to the specified output file.
"""
# Create a Mx(N+1) grid.
fig, ax = plt.subplots(num_queries, max_rank + 1)
fig.suptitle('Sampled Matches')
# Shuffle the data for creating a sampled plot
random.shuffle(query_maps)
query_maps = query_maps[:num_queries]
# Iterate through query_maps
for row, collections in enumerate(query_maps):
for col, collection in enumerate(collections):
# Images belongs to column no. 2 to N
if col != 0:
img_path, keep = collection
string = "Rank " + str(col)
if keep: # Correct match
outline = "green"
else: # Incorrect match
outline = "red"
# Image belongs in the 1st column
else:
img_path, _ = collection
outline = "blue"
string = "Query"
img = read_image(img_path)
draw = ImageDraw.Draw(img)
width, height = img.size
draw.rectangle([(0, 0), (width, height)], fill=None, outline=outline, width=10)
ax[row, col].imshow(img)
ax[row, col].tick_params(top=False, bottom=False, left=False, right=False,
labelleft=False, labelbottom=False)
if row == len(query_maps) - 1:
# Beautify the text
ax[row, col].set_xlabel(string, rotation=80)
# Beautify the grid
plt.gcf().subplots_adjust(bottom=0.2)
# Save the plot
plt.savefig(output_file)
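# A minimal, hedged sketch of check_and_create and read_image. The temporary
# directory and the synthetic image are assumptions so the snippet runs
# without an existing dataset.
if __name__ == "__main__":
    import tempfile
    _demo_dir = os.path.join(tempfile.gettempdir(), "reid_common_utils_demo")
    check_and_create(_demo_dir)
    _img_path = os.path.join(_demo_dir, "sample.jpg")
    Image.new("RGB", (64, 128), color=(0, 128, 255)).save(_img_path)
    print(read_image(_img_path).size)  # (64, 128)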
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/re_identification/utils/common_utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Original source taken from https://github.com/michuanhaohao/reid-strong-baseline
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Eval for Re-Identification Module."""
import numpy as np
from nvidia_tao_pytorch.cv.re_identification.utils.common_utils import plot_evaluation_results
def eval_func(cfg, distmat, q_pids, g_pids, q_camids, g_camids, q_img_paths, g_img_paths, prepare_for_training):
"""Evaluates person re-identification (ReID) performance using Market1501 metric.
For each query identity, it discards gallery images from the same camera view.
After that, it calculates the cumulative matching characteristics (CMC) curve and mean Average Precision (mAP).
If the program is not in training mode and if plotting is enabled, it also plots the evaluation results.
Args:
cfg (DictConfig): Configuration file.
distmat (numpy.ndarray): Pairwise distance matrix between query and gallery features.
q_pids (numpy.ndarray): Array containing query person IDs.
g_pids (numpy.ndarray): Array containing gallery person IDs.
q_camids (numpy.ndarray): Array containing query camera IDs.
g_camids (numpy.ndarray): Array containing gallery camera IDs.
q_img_paths (list of str): List containing query image paths.
g_img_paths (list of str): List containing gallery image paths.
prepare_for_training (bool): Flag indicating whether the system is in training mode.
Returns:
list: The Cumulative Matching Characteristics (CMC) rank list.
float: The mean Average Precision (mAP) score.
"""
num_q, num_g = distmat.shape
max_rank = cfg["re_ranking"]["max_rank"]
if num_g < max_rank:
max_rank = num_g
indices = np.argsort(distmat, axis=1)
matches = (g_pids[indices] == q_pids[:, np.newaxis]).astype(np.int32)
# compute cmc curve for each query
all_cmc = []
all_AP = []
num_valid_q = 0. # number of valid query
query_maps = []
for q_idx in range(num_q):
query_map = []
# get query pid and camid
q_pid = q_pids[q_idx]
q_camid = q_camids[q_idx]
q_img_path = q_img_paths[q_idx]
# build the first column of the sampled matches image output
query_map.append([q_img_path, False])
# remove gallery samples that have the same pid and camid with query
order = indices[q_idx]
remove = (g_pids[order] == q_pid) & (g_camids[order] == q_camid)
keep = np.invert(remove)
res_list = list(map(g_img_paths.__getitem__, order))
# build the rest of the columns of the sampled matches image output
for g_img_path, value in zip(res_list[:max_rank], matches[q_idx][:max_rank]):
query_map.append([g_img_path, value])
query_maps.append(query_map)
# compute cmc curve
# binary vector, positions with value 1 are correct matches
orig_cmc = matches[q_idx][keep]
if not np.any(orig_cmc):
# this condition is true when query identity does not appear in gallery
continue
cmc = orig_cmc.cumsum()
cmc[cmc > 1] = 1
all_cmc.append(cmc[:max_rank])
num_valid_q += 1.
# compute average precision
# reference: https://en.wikipedia.org/wiki/Evaluation_measures_(information_retrieval)#Average_precision
num_rel = orig_cmc.sum()
tmp_cmc = orig_cmc.cumsum()
tmp_cmc = [x / (i + 1.) for i, x in enumerate(tmp_cmc)]
tmp_cmc = np.asarray(tmp_cmc) * orig_cmc
AP = tmp_cmc.sum() / num_rel
all_AP.append(AP)
if not prepare_for_training and cfg["evaluate"]["output_sampled_matches_plot"]:
plot_evaluation_results(cfg["re_ranking"]["num_query"], query_maps, max_rank, cfg["evaluate"]["output_sampled_matches_plot"])
assert num_valid_q > 0, "Error: all query identities do not appear in gallery."
all_cmc = np.asarray(all_cmc).astype(np.float32)
all_cmc = all_cmc.sum(0) / num_valid_q
mAP = np.mean(all_AP)
return all_cmc, mAP
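# A minimal, hedged sketch of eval_func on a hand-built 2x4 distance matrix.
# The cfg dictionary is an assumption with only the key read here, and
# prepare_for_training=True skips the sampled-matches plot.
if __name__ == "__main__":
    _cfg = {"re_ranking": {"max_rank": 3}}
    _distmat = np.array([[0.1, 0.9, 0.8, 0.7],
                         [0.9, 0.2, 0.8, 0.7]], dtype=np.float32)
    _q_pids, _g_pids = np.array([0, 1]), np.array([0, 1, 0, 1])
    _q_camids, _g_camids = np.array([0, 0]), np.array([1, 1, 1, 1])
    _cmc, _mAP = eval_func(_cfg, _distmat, _q_pids, _g_pids, _q_camids, _g_camids,
                           ["q0.jpg", "q1.jpg"],
                           ["g0.jpg", "g1.jpg", "g2.jpg", "g3.jpg"],
                           prepare_for_training=True)
    print("CMC:", _cmc, "mAP:", round(float(_mAP), 3))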
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/re_identification/utils/eval_reid.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Re-Identification scripts module."""
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/re_identification/scripts/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Export re-identification model to ONNX."""
import os
import torch
from nvidia_tao_pytorch.core.hydra.hydra_runner import hydra_runner
import nvidia_tao_pytorch.core.loggers.api_logging as status_logging
from nvidia_tao_pytorch.cv.re_identification.config.default_config import ExperimentConfig
from nvidia_tao_pytorch.cv.re_identification.model.pl_reid_model import ReIdentificationModel
from nvidia_tao_pytorch.cv.pose_classification.utils.common_utils import check_and_create
from nvidia_tao_pytorch.core.cookbooks.tlt_pytorch_cookbook import TLTPyTorchCookbook
from nvidia_tao_pytorch.core.utilities import update_results_dir
spec_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Load experiment specification, additionally using schema for validation/retrieving the default values.
# --config_path and --config_name will be provided by the entrypoint script.
@hydra_runner(
config_path=os.path.join(spec_root, "experiment_specs"), config_name="experiment", schema=ExperimentConfig
)
def main(cfg: ExperimentConfig) -> None:
"""CLI wrapper to run export.
This function serves as the entry point for the export script.
It loads the experiment specification, updates the results directory,
and calls the 'run_export' function. It also handles various exceptions
and logs the export status.
Args:
cfg (ExperimentConfig): The experiment configuration retrieved from the Hydra configuration files.
Raises:
KeyboardInterrupt: If the process was interrupted manually.
SystemExit: If the system or the program initiated the exit.
Exception: For any other type of exception that occurred.
"""
try:
cfg = update_results_dir(cfg, task="export")
run_export(cfg, results_dir=cfg.results_dir)
status_logging.get_status_logger().write(
status_level=status_logging.Status.SUCCESS,
message="Export finished successfully"
)
except (KeyboardInterrupt, SystemExit):
status_logging.get_status_logger().write(
message="Export was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
except Exception as e:
status_logging.get_status_logger().write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
raise e
def run_export(args, results_dir):
"""Run the export of the pose classification model to ONNX.
This function handles the export process, including loading the model,
creating dummy input, and exporting the model to an ONNX file.
It also performs encryption on the ONNX file.
Args:
args (dict): Dictionary of parsed arguments to run export.
results_dir (str): Directory to output results.
Raises:
AssertionError: If the default output file already exists.
Exception: If any error occurs during the export process.
"""
check_and_create(results_dir)
# Set status logging
status_file = os.path.join(results_dir, "status.json")
status_logging.set_status_logger(status_logging.StatusLogger(filename=status_file, append=True))
status_logging.get_status_logger().write(status_level=status_logging.Status.STARTED, message="Starting Re-identification export")
gpu_id = args['export']['gpu_id']
torch.cuda.set_device(gpu_id)
model_path = args['export']['checkpoint']
# Parsing command line arguments.
key = args['encryption_key']
# set the encryption key:
TLTPyTorchCookbook.set_passphrase(key)
onnx_file = args['export']['onnx_file']
experiment_config = args
# Set default output filename if the filename
# isn't provided over the command line.
if onnx_file is None:
split_name = os.path.splitext(model_path)[0]
onnx_file = "{}.onnx".format(split_name)
assert not os.path.exists(onnx_file), "Default output file {} already "\
"exists.".format(onnx_file)
# Make an output directory if necessary.
output_root = os.path.dirname(os.path.realpath(onnx_file))
if not os.path.exists(output_root):
os.makedirs(output_root)
# load model
pl_model = ReIdentificationModel.load_from_checkpoint(experiment_config["export"]["checkpoint"],
map_location="cpu",
experiment_spec=experiment_config,
prepare_for_training=False,
export=True)
model = pl_model.model
model.eval()
model.cuda()
input_names = ["input"]
output_names = ["fc_pred"]
# create dummy input
dummy_input = torch.randn(1, experiment_config["model"]["input_channels"],
experiment_config["model"]["input_height"], experiment_config["model"]["input_width"]).cuda()
dynamic_axes = {"input": {0: "batch"}, "fc_pred": {0: "batch", 1: "embedding_size"}}
# export
torch.onnx.export(model,
dummy_input,
onnx_file,
input_names=input_names,
output_names=output_names,
dynamic_axes=dynamic_axes,
verbose=True)
if __name__ == "__main__":
main()
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/re_identification/scripts/export.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Train Re-Identification model."""
import os
import re
from nvidia_tao_pytorch.core.connectors.checkpoint_connector import TLTCheckpointConnector
from nvidia_tao_pytorch.core.cookbooks.tlt_pytorch_cookbook import TLTPyTorchCookbook
from nvidia_tao_pytorch.core.utilities import update_results_dir
from nvidia_tao_pytorch.core.callbacks.loggers import TAOStatusLogger
import nvidia_tao_pytorch.core.loggers.api_logging as status_logging
from nvidia_tao_pytorch.core.hydra.hydra_runner import hydra_runner
from nvidia_tao_pytorch.cv.re_identification.config.default_config import ExperimentConfig
from nvidia_tao_pytorch.cv.re_identification.model.pl_reid_model import ReIdentificationModel
from nvidia_tao_pytorch.cv.re_identification.utils.common_utils import check_and_create
from pytorch_lightning import Trainer
from pytorch_lightning.strategies import DDPStrategy
from pytorch_lightning.callbacks import ModelCheckpoint
def run_experiment(experiment_config,
results_dir,
key):
"""
Start the training process.
This function initializes the re-identification model with the provided experiment configuration.
It sets up the necessary components such as the status logger and checkpoint callbacks.
The training is performed using the PyTorch Lightning Trainer.
Args:
experiment_config (ExperimentConfig): The experiment configuration containing the model, training, and other parameters.
results_dir (str): The directory to save the trained model checkpoints and logs.
key (str): The encryption key for intermediate checkpoints.
Raises:
AssertionError: If checkpoint_interval is greater than num_epochs.
"""
# set the encryption key:
TLTPyTorchCookbook.set_passphrase(key)
reid_model = ReIdentificationModel(experiment_config, prepare_for_training=True)
check_and_create(results_dir)
num_epochs = experiment_config['train']['num_epochs']
checkpoint_interval = experiment_config['train']['checkpoint_interval']
assert checkpoint_interval <= num_epochs, (
f"Checkpoint interval {checkpoint_interval} > Number of epochs {num_epochs}. "
f"Please set experiment_config.train.checkpoint_interval < {num_epochs}"
)
status_logger_callback = TAOStatusLogger(results_dir, append=True, num_epochs=num_epochs)
status_logging.set_status_logger(status_logger_callback.logger)
grad_clip = experiment_config['train']['grad_clip']
gpus_ids = experiment_config['train']["gpu_ids"]
acc_flag = None
if len(gpus_ids) > 1:
acc_flag = DDPStrategy(find_unused_parameters=False)
trainer = Trainer(gpus=gpus_ids,
max_epochs=num_epochs,
check_val_every_n_epoch=experiment_config['train']['checkpoint_interval'],
default_root_dir=results_dir,
num_sanity_val_steps=0,
accelerator='gpu',
strategy=acc_flag,
replace_sampler_ddp=False,
sync_batchnorm=True,
gradient_clip_val=grad_clip)
# Overload connector to enable intermediate ckpt encryption and decryption.
resume_ckpt = experiment_config['train']['resume_training_checkpoint_path']
trainer._checkpoint_connector = TLTCheckpointConnector(trainer)
if resume_ckpt is not None:
trainer._checkpoint_connector.resume_checkpoint_path = resume_ckpt
# setup checkpointer:
ModelCheckpoint.FILE_EXTENSION = ".tlt"
checkpoint_callback = ModelCheckpoint(every_n_epochs=checkpoint_interval,
dirpath=results_dir,
monitor=None,
save_top_k=-1,
filename='reid_model_{epoch:03d}')
if resume_ckpt:
status_logging.get_status_logger().write(
message=f"Resuming training from checkpoint: {resume_ckpt}",
status_level=status_logging.Status.STARTED
)
resumed_epoch = re.search('epoch=(\\d+)', resume_ckpt)
if resumed_epoch:
resumed_epoch = int(resumed_epoch.group(1))
else:
resumed_epoch = 0
status_logger_callback.epoch_counter = resumed_epoch + 1 # make sure callback epoch matches resumed epoch
trainer.callbacks.append(status_logger_callback)
trainer.callbacks.append(checkpoint_callback)
trainer.fit(reid_model)
spec_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Load experiment specification, additionally using schema for validation/retrieving the default values.
# --config_path and --config_name will be provided by the entrypoint script.
@hydra_runner(
config_path=os.path.join(spec_root, "experiment_specs"), config_name="experiment", schema=ExperimentConfig
)
def main(cfg: ExperimentConfig) -> None:
"""
Run the training process.
This function serves as the entry point for the training script.
It loads the experiment specification, obfuscates logs, updates the results directory, and calls the 'run_experiment' function.
Args:
cfg (ExperimentConfig): The experiment configuration retrieved from the Hydra configuration files.
Raises:
KeyboardInterrupt: If the training is interrupted manually.
SystemExit: If the system or program finishes abruptly.
Exception: For any other types of exceptions thrown during training.
"""
try:
cfg = update_results_dir(cfg, task="train")
run_experiment(experiment_config=cfg,
key=cfg.encryption_key,
results_dir=cfg.results_dir)
status_logging.get_status_logger().write(
status_level=status_logging.Status.SUCCESS,
message="Training finished successfully"
)
except (KeyboardInterrupt, SystemExit):
status_logging.get_status_logger().write(
message="Training was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
except Exception as e:
status_logging.get_status_logger().write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
raise e
if __name__ == "__main__":
main()
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/re_identification/scripts/train.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Inference on single patch."""
import os
import torch
from tqdm import tqdm
import json
from nvidia_tao_pytorch.core.hydra.hydra_runner import hydra_runner
import nvidia_tao_pytorch.core.loggers.api_logging as status_logging
from nvidia_tao_pytorch.cv.re_identification.config.default_config import ExperimentConfig
from nvidia_tao_pytorch.cv.re_identification.dataloader.build_data_loader import build_dataloader
from nvidia_tao_pytorch.cv.re_identification.inference.inferencer import Inferencer
from nvidia_tao_pytorch.cv.re_identification.model.pl_reid_model import ReIdentificationModel
from nvidia_tao_pytorch.cv.re_identification.utils.common_utils import check_and_create
from nvidia_tao_pytorch.core.cookbooks.tlt_pytorch_cookbook import TLTPyTorchCookbook
from nvidia_tao_pytorch.core.utilities import update_results_dir
def run_experiment(experiment_config, results_dir, key):
"""
Start the inference process.
This function initializes the necessary components for inference, including the model, data loader,
and inferencer. It performs inference on the provided data and saves the results in the specified output file.
Args:
experiment_config (dict): The experiment configuration containing the model and inference parameters.
results_dir (str): The directory to save the status and log files.
key (str): The encryption key for intermediate checkpoints.
Raises:
Exception: If any error occurs during the inference process.
"""
results_dir = experiment_config.inference.results_dir
check_and_create(results_dir)
# Set status logging
status_file = os.path.join(results_dir, "status.json")
status_logging.set_status_logger(status_logging.StatusLogger(filename=status_file, append=True))
status_logging.get_status_logger().write(
status_level=status_logging.Status.STARTED,
message="Starting Re-identification inference"
)
gpu_id = experiment_config.inference.gpu_id
torch.cuda.set_device(gpu_id)
# set the encryption key:
TLTPyTorchCookbook.set_passphrase(key)
# build dataloader
_, dataloader, _, _ = build_dataloader(experiment_config, is_train=False)
# build inferencer @TODO TRT support
model = ReIdentificationModel.load_from_checkpoint(experiment_config["inference"]["checkpoint"],
map_location="cpu",
experiment_spec=experiment_config,
prepare_for_training=False)
infer = Inferencer(model)
# do inference
progress = tqdm(dataloader)
results = []
with torch.no_grad():
for data, _, _, img_paths in progress:
feats = infer.inference(data)
for img_path, feat in zip(img_paths, feats):
result = {"img_path": img_path, "embedding": feat.cpu().numpy().tolist()}
results.append(result)
# save the output
    with open(experiment_config["inference"]["output_file"], "w") as output_file:
        output_file.write(json.dumps(results, indent=4))
spec_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Load experiment specification, additionally using schema for validation/retrieving the default values.
# --config_path and --config_name will be provided by the entrypoint script.
@hydra_runner(
config_path=os.path.join(spec_root, "experiment_specs"), config_name="experiment", schema=ExperimentConfig
)
def main(cfg: ExperimentConfig) -> None:
"""
Run the inference process.
This function initializes the experiment and sets up logging. It calls run_experiment
to perform inference on the data according to the experiment configuration, and handles
any exceptions that occur during the process.
Args:
cfg (DictConfig): Configuration file.
"""
try:
cfg = update_results_dir(cfg, task="inference")
run_experiment(experiment_config=cfg,
results_dir=cfg.results_dir,
key=cfg.encryption_key)
status_logging.get_status_logger().write(
status_level=status_logging.Status.SUCCESS,
message="Inference finished successfully."
)
except (KeyboardInterrupt, SystemExit):
status_logging.get_status_logger().write(
message="Inference was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
except Exception as e:
status_logging.get_status_logger().write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
raise e
if __name__ == "__main__":
main()
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/re_identification/scripts/inference.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Evaluate a trained re-identification model."""
import os
import torch
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
from tabulate import tabulate
from nvidia_tao_pytorch.core.hydra.hydra_runner import hydra_runner
import nvidia_tao_pytorch.core.loggers.api_logging as status_logging
from nvidia_tao_pytorch.cv.re_identification.config.default_config import ExperimentConfig
from nvidia_tao_pytorch.cv.re_identification.dataloader.build_data_loader import build_dataloader, list_dataset
from nvidia_tao_pytorch.cv.re_identification.inference.inferencer import Inferencer
from nvidia_tao_pytorch.cv.re_identification.model.pl_reid_model import ReIdentificationModel
from nvidia_tao_pytorch.cv.re_identification.utils.common_utils import check_and_create
from nvidia_tao_pytorch.cv.re_identification.utils.reid_metric import R1_mAP, R1_mAP_reranking
from nvidia_tao_pytorch.core.cookbooks.tlt_pytorch_cookbook import TLTPyTorchCookbook
from nvidia_tao_pytorch.core.utilities import update_results_dir
def run_experiment(experiment_config, results_dir, key):
"""
Run the evaluation process.
This function initializes the necessary components for evaluation, including the model, data loader,
and inferencer. It performs evaluation on the test dataset and computes evaluation metrics.
Args:
experiment_config (dict): The experiment configuration containing the model and evaluation parameters.
results_dir (str): The directory to save the evaluation results.
key (str): The encryption key for intermediate checkpoints.
Raises:
Exception: If any error occurs during the evaluation process.
"""
results_dir = experiment_config.evaluate.results_dir
check_and_create(results_dir)
# Set status logging
status_file = os.path.join(results_dir, "status.json")
status_logging.set_status_logger(status_logging.StatusLogger(filename=status_file, append=True))
status_logging.get_status_logger().write(status_level=status_logging.Status.STARTED, message="Starting Re-Identification evaluation")
gpu_id = experiment_config.evaluate.gpu_id
torch.cuda.set_device(gpu_id)
# set the encryption key:
TLTPyTorchCookbook.set_passphrase(key)
# build dataloader
_, dataloader, _, _ = build_dataloader(experiment_config, is_train=False)
model = ReIdentificationModel.load_from_checkpoint(experiment_config["evaluate"]["checkpoint"],
map_location="cpu",
experiment_spec=experiment_config,
prepare_for_training=False)
infer = Inferencer(model)
# do inference
progress = tqdm(dataloader)
query_top_dir = experiment_config["evaluate"]["query_dataset"]
query_dict = list_dataset(query_top_dir)
if experiment_config["re_ranking"]["re_ranking"]:
metrics = R1_mAP_reranking(len(query_dict), experiment_config, False, feat_norm=True)
else:
metrics = R1_mAP(len(query_dict), experiment_config, False, feat_norm=True)
metrics.reset()
for data, pids, camids, img_paths in progress:
with torch.no_grad():
output = infer.inference(data)
metrics.update(output, pids, camids, img_paths)
cmc, mAP = metrics.compute()
table = []
table.append(["mAP", "{:.1%}".format(mAP)])
status_logging.get_status_logger().kpi = {"mAP": round(mAP, 1)}
status_logging.get_status_logger().write(message="Evaluation metrics generated.", status_level=status_logging.Status.RUNNING)
for r in [1, 5, 10]:
# print("CMC curve, Rank-{:<3}:{:.1%}".format(r, cmc[r - 1]))
table.append(["CMC curve, Rank-" + "{:<3}".format(r), "{:.1%}".format(cmc[r - 1])])
print(tabulate(table, headers=["Name", "Score"], floatfmt=".4f", tablefmt="fancy_grid"))
plt.figure()
cmc_percentages = [value * 100 for value in cmc]
plt.xticks(np.arange(len(cmc_percentages)), np.arange(1, len(cmc_percentages) + 1))
plt.plot(cmc_percentages, marker="*")
plt.title('Cumulative Matching Characteristics (CMC) Curve')
plt.grid()
plt.ylabel('Matching Rate[%]')
plt.xlabel('Rank')
plt.savefig(experiment_config["evaluate"]["output_cmc_curve_plot"])
spec_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Load experiment specification, additionally using schema for validation/retrieving the default values.
# --config_path and --config_name will be provided by the entrypoint script.
@hydra_runner(
config_path=os.path.join(spec_root, "experiment_specs"), config_name="experiment", schema=ExperimentConfig
)
def main(cfg: ExperimentConfig) -> None:
"""
Run the evaluation process.
This function serves as the entry point for the evaluation script.
It loads the experiment specification, updates the results directory, and calls the 'run_experiment' function.
Args:
cfg (ExperimentConfig): The experiment configuration retrieved from the Hydra configuration files.
"""
try:
cfg = update_results_dir(cfg, task="evaluate")
run_experiment(experiment_config=cfg,
results_dir=cfg.results_dir,
key=cfg.encryption_key)
status_logging.get_status_logger().write(
status_level=status_logging.Status.SUCCESS,
message="Evaluation finished successfully"
)
except (KeyboardInterrupt, SystemExit):
status_logging.get_status_logger().write(
message="Evaluation was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
except Exception as e:
status_logging.get_status_logger().write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
raise e
if __name__ == "__main__":
main()
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/re_identification/scripts/evaluate.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Entrypoint script for the re-identification task."""
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/re_identification/entrypoint/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""'Entry point' script running subtasks related to re-identification."""
import importlib
import os
import pkgutil
import argparse
import subprocess
import sys
from time import time
import nvidia_tao_pytorch.cv.re_identification.scripts as scripts
from nvidia_tao_pytorch.core.telemetry.nvml_utils import get_device_details
from nvidia_tao_pytorch.core.telemetry.telemetry import send_telemetry_data
def get_subtasks(package):
"""Get supported subtasks for a given task.
This function dynamically discovers all modules in the given package. This is helpful for finding all scripts
that are associated with a specific task in a package.
Args:
package (module): The package from which to gather subtask modules.
Returns:
dict: A dictionary mapping the subtask names (keys) to a dictionary containing the full module name and absolute path
of the subtask module (values).
"""
module_path = package.__path__
modules = {}
# Collect modules dynamically.
for _, task, is_package in pkgutil.walk_packages(module_path):
if is_package:
continue
module_name = package.__name__ + '.' + task
module_details = {
"module_name": module_name,
"runner_path": os.path.abspath(importlib.import_module(module_name).__file__),
}
modules[task] = module_details
return modules
def launch(parser, subtasks, network=None):
"""Launches a subtask based on command line arguments.
This function uses argparse to parse command line arguments for a task. After processing these arguments,
it runs the corresponding subtask script with the parsed arguments. It also collects telemetry data during the
execution of the subtask and sends it upon completion.
Args:
parser (argparse.ArgumentParser): ArgumentParser object to parse command line arguments.
subtasks (dict): A dictionary mapping the subtask names (keys) to a dictionary containing the full module name
and absolute path of the subtask module (values).
network (str, optional): The name of the network running the training. If not provided, defaults to "tao_pytorch".
"""
# Subtasks for a given model.
if network is None:
network = "tao_pytorch"
parser.add_argument(
'subtask', default='train', choices=subtasks.keys(), help="Subtask for a given task/model.",
)
# Add standard TLT arguments.
parser.add_argument(
"-r",
"--results_dir",
help="Path to a folder where the experiment outputs should be written.",
default=None,
required=False,
)
parser.add_argument("-k", "--key", help="User specific encoding key to save or load a .tlt model.")
parser.add_argument("-e", "--experiment_spec_file", help="Path to the experiment spec file.", default=None)
# Parse the arguments.
args, unknown_args = parser.parse_known_args()
script_args = ""
# Process spec file for all commands except the one for getting spec files ;)
# Make sure the user provides spec file.
if args.experiment_spec_file is None:
print("ERROR: The subtask `{}` requires the following argument: -e/--experiment_spec_file".format(args.subtask))
exit(1)
# Make sure the file exists!
if not os.path.exists(args.experiment_spec_file):
print("ERROR: The indicated experiment spec file `{}` doesn't exist!".format(args.experiment_spec_file))
exit(1)
# Split spec file_path into config path and config name.
path, name = os.path.split(args.experiment_spec_file)
if path != '':
script_args += " --config-path " + os.path.realpath(path)
script_args += " --config-name " + name
# And add other params AFTERWARDS!
if args.subtask in ["train"]:
if args.results_dir:
script_args += " results_dir=" + args.results_dir
# Add encryption key.
if args.subtask in ["train", "evaluate", "inference", "export"]:
if args.key is not None:
script_args += " encryption_key=" + args.key
# Find relevant module and pass args.
script = subtasks[args.subtask]["runner_path"]
# Pass unknown args to call
unknown_args_as_str = " ".join(unknown_args)
# Create a system call.
call = "python " + script + script_args + " " + unknown_args_as_str
process_passed = True
start = time()
try:
# Run the script.
subprocess.check_call(call, shell=True, stdout=sys.stdout, stderr=sys.stdout)
except (KeyboardInterrupt, SystemExit):
print("Command was interrupted.")
process_passed = True
except subprocess.CalledProcessError as e:
if e.output is not None:
print(e.output)
process_passed = False
end = time()
time_lapsed = int(end - start)
try:
gpu_data = list()
for device in get_device_details():
gpu_data.append(device.get_config())
send_telemetry_data(
network,
args.subtask,
gpu_data,
num_gpus=1,
time_lapsed=time_lapsed,
pass_status=process_passed
)
except Exception as e:
print("Telemetry data couldn't be sent, but the command ran successfully.")
print(f"[WARNING]: {e}")
pass
if not process_passed:
print("Execution status: FAIL")
exit(1) # returning non zero return code from the process.
print("Execution status: PASS")
def main():
"""Main entry point for the script.
This function creates an argument parser, uses the get_subtasks function to discover all subtasks in the 'scripts'
package, and then uses the launch function to run the chosen subtask based on the parsed command line arguments.
"""
# Create parser for a given task.
parser = argparse.ArgumentParser(
"re_identification", add_help=True, description="Transfer Learning Toolkit"
)
# Build list of subtasks by inspecting the package.
subtasks = get_subtasks(scripts)
# Parse the arguments and launch the subtask.
launch(parser, subtasks, network="re_identification")
if __name__ == '__main__':
main()
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/re_identification/entrypoint/re_identification.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Original source taken from https://github.com/michuanhaohao/reid-strong-baseline
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Triplet Loss for traning."""
import torch
from torch import nn
def normalize(x, axis=-1):
"""Normalize a Tensor to unit length along the specified dimension.
Args:
x (torch.Tensor): The data to normalize.
axis (int, optional): The axis along which to normalize. Defaults to -1.
Returns:
torch.Tensor: The normalized data.
"""
x = 1. * x / (torch.norm(x, 2, axis, keepdim=True).expand_as(x) + 1e-12)
return x
def euclidean_dist(x, y):
"""Compute the euclidean distance between two tensors.
Args:
x (torch.Tensor): The first input tensor.
y (torch.Tensor): The second input tensor.
Returns:
torch.Tensor: The euclidean distance between x and y.
"""
m, n = x.size(0), y.size(0)
xx = torch.pow(x, 2).sum(1, keepdim=True).expand(m, n)
yy = torch.pow(y, 2).sum(1, keepdim=True).expand(n, m).t()
dist = xx + yy
dist.addmm_(x, y.t(), beta=1, alpha=-2)
dist = dist.clamp(min=1e-12).sqrt() # for numerical stability
return dist
def hard_example_mining(dist_mat, labels):
"""Perform hard example mining for Triplet loss.
For each anchor, find the hardest positive and negative samples.
Args:
dist_mat (torch.Tensor): The distance matrix.
labels (torch.Tensor): The labels tensor.
Returns:
torch.Tensor: The hardest positive samples distances for each anchor.
torch.Tensor: The hardest negative samples distances for each anchor.
"""
assert len(dist_mat.size()) == 2, "The distance matrix generated should have a length of 2."
assert dist_mat.size(0) == dist_mat.size(1), "The distance matrix generated should be a square matrix."
N = dist_mat.size(0)
# shape [N, N]
is_pos = labels.expand(N, N).eq(labels.expand(N, N).t())
is_neg = labels.expand(N, N).ne(labels.expand(N, N).t())
# `dist_ap` means distance(anchor, positive)
# both `dist_ap` and `relative_p_inds` with shape [N, 1]
dist_ap, _ = torch.max(
dist_mat[is_pos].contiguous().view(N, -1), 1, keepdim=True)
# `dist_an` means distance(anchor, negative)
# both `dist_an` and `relative_n_inds` with shape [N, 1]
dist_an, _ = torch.min(
dist_mat[is_neg].contiguous().view(N, -1), 1, keepdim=True)
# shape [N]
dist_ap = dist_ap.squeeze(1)
dist_an = dist_an.squeeze(1)
return dist_ap, dist_an
class TripletLoss(object):
"""Triplet Loss for training deep embedding models."""
def __init__(self, margin=None):
"""Initialize TripletLoss module.
Args:
margin (float, optional): Margin for the triplet loss. Defaults to None.
"""
self.margin = margin
if margin is not None:
self.ranking_loss = nn.MarginRankingLoss(margin=margin)
else:
self.ranking_loss = nn.SoftMarginLoss()
def __call__(self, global_feat, labels, normalize_feature=False):
"""Compute the Triplet Loss.
Args:
global_feat (torch.Tensor): The feature embeddings.
labels (torch.Tensor): The corresponding labels.
normalize_feature (bool, optional): Whether to normalize the features or not. Defaults to False.
Returns:
            torch.Tensor: The triplet loss value.
torch.Tensor: The hardest positive samples distances for each anchor.
torch.Tensor: The hardest negative samples distances for each anchor.
"""
if normalize_feature:
global_feat = normalize(global_feat, axis=-1)
dist_mat = euclidean_dist(global_feat, global_feat)
dist_ap, dist_an = hard_example_mining(
dist_mat, labels)
y = dist_an.new().resize_as_(dist_an).fill_(1)
if self.margin is not None:
loss = self.ranking_loss(dist_an, dist_ap, y)
else:
loss = self.ranking_loss(dist_an - dist_ap, y)
return loss, dist_ap, dist_an
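# A minimal, hedged sketch of TripletLoss with hard-example mining on random
# embeddings. The batch layout (two samples per identity) and the margin are
# illustrative assumptions; a PK-style sampler would normally build such a batch.
if __name__ == "__main__":
    torch.manual_seed(0)
    _feats = torch.randn(8, 16)
    _labels = torch.tensor([0, 0, 1, 1, 2, 2, 3, 3])
    _triplet = TripletLoss(margin=0.3)
    _loss, _dist_ap, _dist_an = _triplet(_feats, _labels, normalize_feature=True)
    print(_loss.item(), _dist_ap.shape, _dist_an.shape)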
class CrossEntropyLabelSmooth(nn.Module):
"""Cross entropy loss with label smoothing regularizer.
Reference:
Szegedy et al. Rethinking the Inception Architecture for Computer Vision. CVPR 2016.
Equation: y = (1 - epsilon) * y + epsilon / K.
"""
def __init__(self, num_classes, epsilon=0.1, use_gpu=True):
"""Initialize the CrossEntropyLabelSmooth class.
Args:
num_classes (int): Number of classes.
epsilon (float, optional): Smoothing factor. Defaults to 0.1.
use_gpu (bool, optional): Whether to use gpu for computation. Defaults to True.
"""
super(CrossEntropyLabelSmooth, self).__init__()
self.num_classes = num_classes
self.epsilon = epsilon
self.use_gpu = use_gpu
self.logsoftmax = nn.LogSoftmax(dim=1)
def forward(self, inputs, targets):
"""Compute the loss based on inputs and targets.
Args:
inputs (torch.Tensor): Prediction matrix (before softmax) with shape (batch_size, num_classes).
targets (torch.Tensor): Ground truth labels with shape (num_classes).
Returns:
            torch.Tensor: The label-smoothed cross-entropy loss value.
"""
log_probs = self.logsoftmax(inputs)
targets = torch.zeros(log_probs.size()).scatter_(1, targets.unsqueeze(1).data.cpu(), 1)
if self.use_gpu:
targets = targets.cuda()
targets = (1 - self.epsilon) * targets + self.epsilon / self.num_classes
loss = (- targets * log_probs).mean(0).sum()
return loss
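# Illustrative usage sketch (not part of the original file): CrossEntropyLabelSmooth on random
# logits. The class count, batch size and use_gpu=False are assumptions for demonstration only.
def _example_label_smoothing_loss():
    logits = torch.randn(4, 10)           # (batch_size, num_classes) prediction scores
    targets = torch.randint(0, 10, (4,))  # integer class labels
    criterion = CrossEntropyLabelSmooth(num_classes=10, epsilon=0.1, use_gpu=False)
    return criterion(logits, targets)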
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/re_identification/model/triplet_loss.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model builder interface."""
import torch
from nvidia_tao_pytorch.cv.pose_classification.utils.common_utils import patch_decrypt_checkpoint
from nvidia_tao_pytorch.core.cookbooks.tlt_pytorch_cookbook import TLTPyTorchCookbook
def load_pretrained_weights(pretrained_backbone_path):
"""Load pretrained weights from the provided path.
This function decrypts the encrypted state dictionary if necessary, and restructures the keys to remove the
'model.' prefix from each key (if it exists). If the "state_dict" key is not present in the loaded data,
it simply returns the loaded data.
Args:
pretrained_backbone_path (str): The file path of the pretrained backbone model weights.
Returns:
dict: A dictionary containing the model's state dict, with keys adjusted as necessary.
Raises:
PermissionError: If the loaded state dict is encrypted but no encryption key is provided.
"""
temp = torch.load(pretrained_backbone_path,
map_location="cpu")
if temp.get("state_dict_encrypted", False):
# Retrieve encryption key from TLTPyTorchCookbook.
key = TLTPyTorchCookbook.get_passphrase()
if key is None:
raise PermissionError("Cannot access model state dict without the encryption key")
temp = patch_decrypt_checkpoint(temp, key)
if "state_dict" not in temp:
return temp
new_state_dict = {}
for key, value in list(temp["state_dict"].items()):
if "model" in key:
new_key = ".".join(key.split(".")[1:])
new_state_dict[new_key] = value
else:
new_state_dict[key] = value
return new_state_dict
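# Illustrative usage sketch (not part of the original file): loading backbone weights into a
# model. The checkpoint path and the `model` object below are hypothetical placeholders.
# Example:
#     state_dict = load_pretrained_weights("/path/to/pretrained_backbone.tlt")
#     model.load_state_dict(state_dict, strict=False)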
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/re_identification/model/reid_model.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Original source taken from https://github.com/michuanhaohao/reid-strong-baseline
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Baseline Module for Re-Identification."""
import torch
from torch import nn
from nvidia_tao_pytorch.cv.re_identification.model.resnet import Bottleneck, ResNet, BasicBlock
def weights_init_kaiming(m):
"""Initializes weights using Kaiming Normal initialization.
Args:
m (torch.nn.Module): PyTorch module whose weights are to be initialized.
"""
classname = m.__class__.__name__
if classname.find('Linear') != -1:
nn.init.kaiming_normal_(m.weight, a=0, mode='fan_out')
nn.init.constant_(m.bias, 0.0)
elif classname.find('Conv') != -1:
nn.init.kaiming_normal_(m.weight, a=0, mode='fan_in')
if m.bias is not None:
nn.init.constant_(m.bias, 0.0)
elif classname.find('BatchNorm') != -1:
if m.affine:
nn.init.constant_(m.weight, 1.0)
nn.init.constant_(m.bias, 0.0)
def weights_init_classifier(m):
"""Initializes the weights of a classifier layer.
Args:
m (torch.nn.Module): PyTorch module whose weights are to be initialized.
"""
classname = m.__class__.__name__
if classname.find('Linear') != -1:
nn.init.normal_(m.weight, std=0.001)
        if m.bias is not None:
nn.init.constant_(m.bias, 0.0)
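# Illustrative usage sketch (not part of the original file): applying the initializers to a small
# module tree. The layer sizes are arbitrary assumptions; `nn` is imported at the top of this module.
def _example_weight_init():
    head = nn.Sequential(nn.Conv2d(3, 16, 3), nn.BatchNorm2d(16))
    head.apply(weights_init_kaiming)           # Kaiming init for conv and batch-norm layers
    classifier = nn.Linear(16, 10, bias=False)
    classifier.apply(weights_init_classifier)  # small-std normal init for the classifier
    return head, classifier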
class Baseline(nn.Module):
"""Baseline model for re-identification tasks.
This class generates a model based on the provided configuration. The model
is primarily a ResNet variant, with additional features like bottleneck and classifier
layers. The ResNet architecture can be one of the following variants: 18, 34, 50, 101, 152.
Attributes:
in_planes (int): Dimensionality of the input features.
base (ResNet): Base ResNet model.
gap (torch.nn.AdaptiveAvgPool2d): Global Average Pooling layer.
num_classes (int): Number of output classes.
neck (str): Specifies the neck architecture of the model.
neck_feat (str): Specifies whether neck features are used.
if_flip_feat (bool): Whether to flip the features or not.
classifier (torch.nn.Linear): Classifier layer of the model.
bottleneck (torch.nn.BatchNorm1d): Optional bottleneck layer of the model.
"""
def __init__(self, cfg, num_classes):
"""Initializes the Baseline model with provided configuration and number of classes.
Args:
cfg (DictConfig): Configuration object containing model parameters.
num_classes (int): Number of output classes.
"""
super(Baseline, self).__init__()
self.in_planes = cfg['model']['feat_dim']
if "resnet" in cfg['model']['backbone']:
arch_settings = {
'resnet_18': (BasicBlock, [2, 2, 2, 2]),
'resnet_34': (BasicBlock, [3, 4, 6, 3]),
'resnet_50': (Bottleneck, [3, 4, 6, 3]),
'resnet_101': (Bottleneck, [3, 4, 23, 3]),
'resnet_152': (Bottleneck, [3, 8, 36, 3])
}
            self.base = ResNet(feat_dim=cfg['model']['feat_dim'], last_stride=cfg['model']['last_stride'],
                               block=arch_settings[cfg['model']['backbone']][0],
                               layers=arch_settings[cfg['model']['backbone']][1])
if cfg['model']['pretrain_choice'] == 'imagenet':
if cfg['model']['pretrained_model_path']:
self.base.load_param(cfg['model']['pretrained_model_path'])
print('Loading pretrained ImageNet model......')
self.gap = nn.AdaptiveAvgPool2d(1)
# self.gap = nn.AdaptiveMaxPool2d(1)
self.num_classes = num_classes
self.neck = cfg['model']['neck']
self.neck_feat = cfg['model']['neck_feat']
self.if_flip_feat = cfg['model']['with_flip_feature']
if not self.neck:
self.classifier = nn.Linear(self.in_planes, self.num_classes)
# self.classifier = nn.Linear(self.in_planes, self.num_classes, bias=False) # new add by luo
# self.classifier.apply(weights_init_classifier) # new add by luo
elif self.neck == 'bnneck':
self.bottleneck = nn.BatchNorm1d(self.in_planes)
self.bottleneck.bias.requires_grad_(False) # no shift
self.classifier = nn.Linear(self.in_planes, self.num_classes, bias=False)
self.bottleneck.apply(weights_init_kaiming)
self.classifier.apply(weights_init_classifier)
def forward(self, x):
"""Defines the forward pass of the Baseline model.
Args:
x (torch.Tensor): Input tensor.
Returns:
torch.Tensor: Output tensor after forward pass. This could be feature embeddings
or the sum of feature embeddings in case of flipped features.
"""
if self.training:
return self.__forward(x)
if self.if_flip_feat:
y = torch.flip(x, [3])
feat1 = self.__forward(y)
feat2 = self.__forward(x)
return feat2 + feat1
return self.__forward(x)
def __forward(self, x):
"""Internal method for processing the features through the model.
Args:
x (torch.Tensor): Input tensor.
Returns:
torch.Tensor: Output tensor after processing. This could be the class scores
and global features during training or the feature embeddings during testing.
"""
global_feat = self.gap(self.base(x))
global_feat = global_feat.view(global_feat.shape[0], -1)
if not self.neck:
feat = global_feat
elif self.neck == 'bnneck':
feat = self.bottleneck(global_feat) # normalize for angular softmax
if self.training:
cls_score = self.classifier(feat)
return cls_score, global_feat # global feature for triplet loss
if self.neck_feat == 'after':
# cls_score = self.classifier(feat)
return feat
# return cls_score, global_feat # global feature for triplet loss
return global_feat
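# Illustrative usage sketch (not part of the original file): building a Baseline model from a
# minimal configuration dictionary. The key names mirror those read above, but the values chosen
# here (an arbitrary identity count, no pretrained weights) are assumptions for demonstration only.
def _example_build_baseline():
    cfg = {
        "model": {
            "feat_dim": 2048,
            "backbone": "resnet_50",
            "last_stride": 1,
            "pretrain_choice": "",       # skip loading ImageNet weights
            "pretrained_model_path": "",
            "neck": "bnneck",
            "neck_feat": "after",
            "with_flip_feature": False,
        }
    }
    model = Baseline(cfg, num_classes=751)
    model.eval()
    feats = model(torch.randn(2, 3, 256, 128))  # (batch, channels, height, width)
    return feats.shape                           # expected: torch.Size([2, 2048])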
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/re_identification/model/baseline.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Re-Identification Model Module."""
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/re_identification/model/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Original source taken from https://github.com/michuanhaohao/reid-strong-baseline
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Center Loss for traning."""
from __future__ import absolute_import
import torch
from torch import nn
class CenterLoss(nn.Module):
"""Center loss class for deep learning models.
This class implements Center Loss, a discriminative feature learning approach,
which is beneficial for tasks like face recognition. It computes the loss between
the deep features and their corresponding class centers.
Reference:
Wen et al. A Discriminative Feature Learning Approach for Deep Face Recognition. ECCV 2016.
Attributes:
        num_classes (int): The number of classes in the dataset.
feat_dim (int): The dimension of the feature vector.
use_gpu (bool): If True, CUDA will be used for computation.
centers (nn.Parameter): Parameterized center vectors for each class.
Methods:
forward(x, labels): Computes the loss between feature vectors and their corresponding class centers.
"""
def __init__(self, num_classes, feat_dim=2048, use_gpu=True):
"""Initializes the CenterLoss module.
Args:
            num_classes (int): The number of classes in the dataset.
feat_dim (int, optional): The dimension of the feature vector. Default is 2048.
use_gpu (bool, optional): If True, CUDA will be used for computation. Default is True.
"""
super(CenterLoss, self).__init__()
self.num_classes = num_classes
self.feat_dim = feat_dim
self.use_gpu = use_gpu
if self.use_gpu:
self.centers = nn.Parameter(torch.randn(self.num_classes, self.feat_dim).cuda())
else:
self.centers = nn.Parameter(torch.randn(self.num_classes, self.feat_dim))
def forward(self, x, labels):
"""Computes the loss by passing the feature vectors and labels.
This method calculates the distance between the deep features and their
corresponding class centers. The loss is the mean of these distances.
Args:
x (Tensor): The deep feature vectors of shape (batch_size, feat_dim).
labels (Tensor): The corresponding labels of the deep features of shape (batch_size,).
Returns:
loss (Tensor): A scalar tensor representing the mean loss.
"""
assert x.size(0) == labels.size(0), "Features.size(0) is not equal to Labels.size(0)."
batch_size = x.size(0)
distmat = torch.pow(x, 2).sum(dim=1, keepdim=True).expand(batch_size, self.num_classes) + \
torch.pow(self.centers, 2).sum(dim=1, keepdim=True).expand(self.num_classes, batch_size).t()
distmat.addmm_(x, self.centers.t(), beta=1, alpha=-2)
classes = torch.arange(self.num_classes).long()
if self.use_gpu:
classes = classes.cuda()
labels = labels.unsqueeze(1).expand(batch_size, self.num_classes)
mask = labels.eq(classes.expand(batch_size, self.num_classes))
dist = distmat * mask.float()
loss = dist.clamp(min=1e-12, max=1e+12).sum() / batch_size
return loss
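# Illustrative usage sketch (not part of the original file): CenterLoss on random features. The
# class count, feature dimension and use_gpu=False are assumptions for demonstration only; in the
# training pipeline feat_dim is typically 2048.
def _example_center_loss():
    criterion = CenterLoss(num_classes=10, feat_dim=128, use_gpu=False)
    feats = torch.randn(16, 128)          # deep features for a batch of 16 samples
    labels = torch.randint(0, 10, (16,))  # class label for each sample
    return criterion(feats, labels)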
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/re_identification/model/center_loss.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Original source taken from https://github.com/michuanhaohao/reid-strong-baseline
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Resnet2D backbones for re-identification."""
import math
import torch
import torch.nn as nn
def conv3x3(in_planes, out_planes, stride=1):
"""Creates a 3x3 convolution layer with padding.
Args:
in_planes (int): Number of input planes.
out_planes (int): Number of output planes.
stride (int, optional): Stride size. Defaults to 1.
Returns:
nn.Conv2d: 3x3 Convolutional layer.
"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class BasicBlock(nn.Module):
"""Defines a basic block for ResNet."""
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
"""Initializes the basic block layers.
Args:
inplanes (int): Number of input planes.
planes (int): Number of output planes.
stride (int, optional): Stride size. Defaults to 1.
downsample (nn.Module, optional): Downsample layer, if any. Defaults to None.
"""
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
"""Defines the forward pass for the basic block.
Args:
x (torch.Tensor): Input tensor.
Returns:
torch.Tensor: Output tensor after passing through the basic block.
"""
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
"""Defines a bottleneck block for ResNet."""
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
"""Initializes the bottleneck block layers.
Args:
inplanes (int): Number of input planes.
planes (int): Number of output planes.
stride (int, optional): Stride size. Defaults to 1.
downsample (nn.Module, optional): Downsample layer, if any. Defaults to None.
"""
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
"""Defines the forward pass for the bottleneck block.
Args:
x (torch.Tensor): Input tensor.
Returns:
torch.Tensor: Output tensor after passing through the bottleneck block.
"""
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNet(nn.Module):
"""ResNet2D model."""
def __init__(self, block, layers, last_stride, feat_dim):
"""Initializes the ResNet model.
Args:
block (nn.Module): Type of block to be used in the model, BasicBlock or Bottleneck.
layers (list): Number of layers in each of the 4 blocks of the network.
last_stride (int): Stride for the last convolutional layer.
feat_dim (int): Dimensionality of the output feature embeddings.
"""
self.inplanes = 64
self.feat_dim = feat_dim
super().__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
self.bn1 = nn.BatchNorm2d(64)
# self.relu = nn.ReLU(inplace=True) # add missed relu
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=last_stride)
if self.feat_dim != 2048:
self.feature = nn.Conv2d(2048, feat_dim, kernel_size=1, stride=1)
def _make_layer(self, block, planes, blocks, stride=1):
"""Creates a layer of the ResNet model.
Args:
block (nn.Module): Type of block to be used in the layer, BasicBlock or Bottleneck.
planes (int): Number of planes in each block.
blocks (int): Number of blocks in the layer.
stride (int, optional): Stride size. Defaults to 1.
Returns:
nn.Sequential: The created layer of blocks.
"""
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
"""Defines the forward pass for the ResNet model.
Args:
x (torch.Tensor): Input tensor.
Returns:
torch.Tensor: Output tensor after passing through the ResNet model.
"""
x = self.conv1(x)
x = self.bn1(x)
# x = self.relu(x) # add missed relu
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
if self.feat_dim != 2048:
x = self.feature(x)
return x
def load_param(self, model_path):
"""Loads parameters for the model from the given path.
Args:
model_path (str): Path to the saved model parameters.
"""
param_dict = torch.load(model_path)
for i in param_dict:
j = i.replace("base.", "")
if 'fc' in i:
continue
if j in self.state_dict().keys(): # pylint: disable=E1125 #TODO Fix this
self.state_dict()[j].copy_(param_dict[i]) # pylint: disable=E1125 #TODO Fix this
def random_init(self):
"""Initializes the model with random weights."""
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def cross_modality_pretrain(conv1_weight, orig_channel, target_channel):
"""Computes weights for cross modality.
Args:
conv1_weight (torch.Tensor): Weights of the first convolutional layer.
orig_channel (int): Original number of channels.
target_channel (int): Target number of channels.
Returns:
torch.Tensor: New weights for the first convolutional layer.
"""
# transform the original channel weight to target channel
S = 0
for i in range(orig_channel):
S += conv1_weight[:, i, :, :]
avg = S / orig_channel
new_conv1_weight = torch.FloatTensor(64, target_channel, 7, 7)
for i in range(target_channel):
new_conv1_weight[:, i, :, :] = avg.data
return new_conv1_weight
def weight_transform(model_dict, pretrain_dict, target_channel):
"""Transforms the weights of the first convolutional layer.
Args:
model_dict (dict): Dictionary of the model state.
pretrain_dict (dict): Dictionary of the pretrained model weights.
target_channel (int): Target number of channels.
Returns:
dict: Updated model state dictionary with transformed weights for the first convolutional layer.
"""
weight_dict = {k: v for k, v in pretrain_dict.items() if k in model_dict}
wo = pretrain_dict[list(pretrain_dict.keys())[0]]
orig_channel = wo.shape[1]
if target_channel == orig_channel:
wt = wo
else:
wt = cross_modality_pretrain(wo, orig_channel, target_channel)
weight_dict['conv1.weight'] = wt
model_dict.update(weight_dict)
return model_dict
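# Illustrative usage sketch (not part of the original file): building a ResNet-50 style backbone
# and checking its output feature map. The input resolution and feat_dim are assumptions for
# demonstration only.
def _example_resnet_backbone():
    backbone = ResNet(block=Bottleneck, layers=[3, 4, 6, 3], last_stride=1, feat_dim=2048)
    backbone.random_init()
    out = backbone(torch.randn(1, 3, 256, 128))
    return out.shape  # expected: torch.Size([1, 2048, 16, 8])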
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/re_identification/model/resnet.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Main PTL model file for re-identification."""
from typing import Any, Dict
import pytorch_lightning as pl
import glob
import re
import os
import torch
import torch.nn.functional as F
import torchmetrics
from nvidia_tao_pytorch.cv.pose_classification.utils.common_utils import patch_decrypt_checkpoint
from nvidia_tao_pytorch.cv.re_identification.model.build_nn_model import build_model
from nvidia_tao_pytorch.cv.re_identification.model.triplet_loss import TripletLoss, CrossEntropyLabelSmooth
from nvidia_tao_pytorch.cv.re_identification.model.center_loss import CenterLoss
from nvidia_tao_pytorch.cv.re_identification.dataloader.build_data_loader import build_dataloader
from nvidia_tao_pytorch.cv.re_identification.utils.reid_metric import R1_mAP, R1_mAP_reranking
from nvidia_tao_pytorch.cv.re_identification.utils.scheduler import WarmupMultiStepLR
from nvidia_tao_pytorch.core.cookbooks.tlt_pytorch_cookbook import TLTPyTorchCookbook
import nvidia_tao_pytorch.core.loggers.api_logging as status_logging
# pylint:disable=too-many-ancestors
class ReIdentificationModel(pl.LightningModule):
"""PTL module for single stream re-identification."""
def __init__(self, experiment_spec, prepare_for_training, export=False):
"""Initialize the ReIdentificationModel.
Args:
experiment_spec (DictConfig): Configuration File.
prepare_for_training (bool): Boolean to set model based on training or testing/validation.
export (bool, optional): Export model if True. Defaults to False.
"""
super().__init__()
self.experiment_spec = experiment_spec
self.prepare_for_training = prepare_for_training
# init the model
self._build_model(experiment_spec, export)
self.train_accuracy = torchmetrics.Accuracy()
self.val_accuracy = torchmetrics.Accuracy()
if self.prepare_for_training:
if self.experiment_spec["model"]["with_center_loss"]:
self.my_loss_func, self.center_criterion = self.__make_loss_with_center(experiment_spec, num_classes=self.num_classes)
else:
self.my_loss_func = self.__make_loss(experiment_spec, num_classes=self.num_classes)
self.train_loader, self.val_loader, _, _ = build_dataloader(cfg=self.experiment_spec, is_train=True)
self.status_logging_dict = {"train_loss": 0.0,
"train_acc": 0.0,
"cmc_rank_1": 0.0,
"cmc_rank_5": 0.0,
"cmc_rank_10": 0.0,
"mAP": 0.0}
def _build_model(self, experiment_spec, export):
"""Internal function to build the model.
Args:
experiment_spec (DictConfig): Configuration File.
export (bool): Export model if True.
        Sets:
            self.model (Baseline): Model for re-identification.
"""
if self.prepare_for_training:
directory = experiment_spec["dataset"]["train_dataset_dir"]
data = self.__process_dir(directory, relabel=True)
self.num_classes, _, _ = self.__get_imagedata_info(data)
self.query_dict = experiment_spec["dataset"]["query_dataset_dir"]
else:
self.num_classes = experiment_spec["dataset"]["num_classes"]
self.model = build_model(experiment_spec, self.num_classes)
def train_dataloader(self):
"""Build the dataloader for training.
Returns:
train_loader (Dataloader): Training Data.
"""
return self.train_loader
def val_dataloader(self):
"""Build the dataloader for validation.
Returns:
val_loader (Dataloader): Validation Data.
"""
return self.val_loader
def configure_optimizers(self):
"""Configure optimizers for training.
Returns:
optim_dict (Dict): Optimizer Dictionary.
"""
self.train_config = self.experiment_spec["train"]
self.optim_config = self.train_config["optim"]
optim_dict = {}
if self.experiment_spec["model"]["with_center_loss"]:
optimizer, self.optimizer_center = self.__make_optimizer_with_center(self.center_criterion)
else:
optimizer = self.__make_optimizer()
self.scheduler = WarmupMultiStepLR(optimizer, self.optim_config["steps"],
gamma=self.optim_config["gamma"],
warmup_factor=self.optim_config["warmup_factor"],
warmup_iters=self.optim_config["warmup_iters"],
warmup_method=self.optim_config["warmup_method"])
self.scheduler.step()
optim_dict["optimizer"] = optimizer
optim_dict["lr_scheduler"] = self.scheduler
optim_dict['monitor'] = self.optim_config['lr_monitor']
return optim_dict
def __make_optimizer_with_center(self, center_criterion):
"""Make Optimizer using center loss.
Args:
center_criterion (CenterLoss): Center Loss for training.
Returns:
optimizer (Torch.Optimizer): Optimizer for training.
optimizer_center (Torch.Optimizer): Optimizer for center Loss for training.
"""
params = []
for key, value in self.model.named_parameters():
if not value.requires_grad:
continue
lr = self.optim_config["base_lr"] * len(self.experiment_spec["train"]["gpu_ids"])
weight_decay = self.optim_config["weight_decay"]
if "bias" in key:
lr = self.optim_config["base_lr"] * self.optim_config["bias_lr_factor"]
weight_decay = self.optim_config["weight_decay_bias"]
params += [{"params": [value], "lr": lr, "weight_decay": weight_decay}]
if self.optim_config["name"] == 'SGD':
optimizer = getattr(torch.optim, self.optim_config["name"])(params, momentum=self.optim_config["momentum"])
else:
optimizer = getattr(torch.optim, self.optim_config["name"])(params)
optimizer_center = torch.optim.SGD(center_criterion.parameters(), lr=self.optim_config["center_lr"])
return optimizer, optimizer_center
def __make_optimizer(self):
"""Make Optimizer.
Returns:
optimizer (Torch.Optimizer): Optimizer for training.
"""
params = []
for key, value in self.model.named_parameters():
if not value.requires_grad:
continue
lr = self.optim_config["base_lr"]
weight_decay = self.optim_config["weight_decay"]
if "bias" in key:
lr = self.optim_config["base_lr"] * self.optim_config["bias_lr_factor"]
weight_decay = self.optim_config["weight_decay_bias"]
params += [{"params": [value], "lr": lr, "weight_decay": weight_decay}]
if self.optim_config["name"] == 'SGD':
optimizer = getattr(torch.optim, self.optim_config["name"])(params, momentum=self.optim_config["momentum"])
else:
optimizer = getattr(torch.optim, self.optim_config["name"])(params)
return optimizer
def training_step(self, batch, batch_idx):
"""Training step.
Args:
batch (Tensor): Batch of data.
batch_idx (int): Index of batch.
Returns:
loss (float): Loss value for each step in training.
"""
data, label = batch
data = data.float()
score, feat = self.model(data)
loss = self.my_loss_func(score, feat, label)
self.train_accuracy.update(score, label)
self.log("train_loss", loss, on_step=False, on_epoch=True, prog_bar=True, sync_dist=True, rank_zero_only=True)
self.log("base_lr", self.scheduler.get_lr()[0], on_step=False, on_epoch=True, prog_bar=True, sync_dist=True, rank_zero_only=True)
self.log("train_acc_1", self.train_accuracy, on_step=False, on_epoch=True, prog_bar=True, sync_dist=True, rank_zero_only=True)
return loss
def training_epoch_end(self, training_step_outputs):
"""Log Training metrics to status.json"""
average_train_loss = 0.0
for out in training_step_outputs:
average_train_loss += out['loss'].item()
average_train_loss /= len(training_step_outputs)
self.status_logging_dict["train_loss"] = average_train_loss
self.status_logging_dict["train_acc"] = self.train_accuracy.compute().item()
status_logging.get_status_logger().kpi = self.status_logging_dict
status_logging.get_status_logger().write(
message="Train and Val metrics generated.",
status_level=status_logging.Status.RUNNING
)
def on_train_epoch_start(self):
"""Perform on start of every epoch."""
print('\n')
def on_validation_epoch_start(self):
"""Perform on validation."""
if self.experiment_spec["re_ranking"]["re_ranking"]:
self.metrics = R1_mAP_reranking(len(os.listdir(self.query_dict)), self.experiment_spec, self.prepare_for_training, feat_norm=True)
else:
self.metrics = R1_mAP(len(os.listdir(self.query_dict)), self.experiment_spec, self.prepare_for_training, feat_norm=True)
self.metrics.reset()
def validation_step(self, batch, batch_idx):
"""Validation step."""
data, pids, camids, img_path = batch
output = self.model(data)
self.metrics.update(output, pids, camids, img_path)
def on_validation_epoch_end(self):
"""Validation step end."""
print('\n')
cmc, mAP = self.metrics.compute()
for r in [1, 5, 10]:
self.log(f"cmc_rank_{r}", cmc[r - 1], on_step=False, on_epoch=True, prog_bar=True, sync_dist=True, rank_zero_only=True)
self.status_logging_dict[f"cmc_rank_{r}"] = str(cmc[r - 1])
self.log("mAP", mAP, on_step=False, on_epoch=True, prog_bar=True, sync_dist=True, rank_zero_only=True)
self.status_logging_dict["mAP"] = str(mAP)
def forward(self, x):
"""Forward of the re-identification model.
Args:
x (Tensor): Batch of data.
Returns:
output (Tensor): Output of the model (class score, feats).
"""
output = self.model(x)
return output
def on_load_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
"""Decrypt the checkpoint."""
if checkpoint.get("state_dict_encrypted", False):
# Retrieve encryption key from TLTPyTorchCookbook.
key = TLTPyTorchCookbook.get_passphrase()
if key is None:
raise PermissionError("Cannot access model state dict without the encryption key")
checkpoint = patch_decrypt_checkpoint(checkpoint, key)
def __process_dir(self, dir_path, relabel=False):
"""Process the directory.
Args:
dir_path (str): Directory name.
relabel (bool, optional): Enable relabelling feature if true, else disable. Defaults to False.
Returns:
dataset (Dataloader): Image data for training, testing, and validation.
"""
img_paths = glob.glob(os.path.join(dir_path, '*.jpg'))
pattern = re.compile(r'([-\d]+)_c(\d)')
pid_container = set()
for img_path in img_paths:
pid, _ = map(int, pattern.search(img_path).groups())
if pid == -1:
continue # junk images are just ignored
pid_container.add(pid)
pid2label = {pid: label for label, pid in enumerate(pid_container)}
dataset = []
for img_path in img_paths:
pid, camid = map(int, pattern.search(img_path).groups())
if pid == -1:
continue # junk images are just ignored
# assert 0 <= pid <= 1501, "The number of person IDs should be between 0 and 1501."
# assert 1 <= camid <= 6, "The number of camera IDs should be between 0 and 6."
camid -= 1 # index starts from 0
if relabel:
pid = pid2label[pid]
dataset.append((img_path, pid, camid))
return dataset
def __get_imagedata_info(self, data):
"""Return meta data from the images.
Args:
data (Dataloader): Batch of data.
Returns:
num_pids (int): Number of person IDs.
num_cams (int): Number of camera IDs.
num_imgs (int): Number of images given a folder.
"""
pids, cams = [], []
for _, pid, camid in data:
pids += [pid]
cams += [camid]
pids = set(pids)
cams = set(cams)
num_pids = len(pids)
num_cams = len(cams)
num_imgs = len(data)
return num_pids, num_imgs, num_cams
def __make_loss(self, cfg, num_classes):
"""Create a loss function based on the config.
Args:
cfg (DictConfig): Configuration File.
num_classes (int): Number of classes.
Returns:
loss_func (Function Pointer): Loss function based on the config.
"""
self.optim_config = cfg['train']["optim"]
sampler = cfg['dataset']['sampler']
if "triplet" in cfg['model']['metric_loss_type']:
triplet = TripletLoss(self.optim_config["triplet_loss_margin"]) # triplet loss
else:
            raise ValueError('Expected METRIC_LOSS_TYPE should be triplet, '
                             'but got {}'.format(cfg['model']['metric_loss_type']))
if cfg['model']['label_smooth']:
xent = CrossEntropyLabelSmooth(num_classes=num_classes)
if sampler == 'softmax':
def loss_func(score, feat, target):
return F.cross_entropy(score, target)
elif cfg['dataset']['sampler'] == 'triplet':
def loss_func(score, feat, target):
return triplet(feat, target)[0]
elif cfg['dataset']['sampler'] == 'softmax_triplet':
def loss_func(score, feat, target):
if 'triplet' in cfg['model']['metric_loss_type']:
if cfg['model']['label_smooth']:
return xent(score, target) + triplet(feat, target)[0]
return F.cross_entropy(score, target) + triplet(feat, target)[0]
                raise ValueError('Expected METRIC_LOSS_TYPE should be triplet, '
                                 'but got {}'.format(cfg['model']['metric_loss_type']))
else:
raise ValueError('Expected sampler should be softmax, triplet or softmax_triplet, '
'but got {}'.format(cfg['dataset']['sampler']))
return loss_func
def __make_loss_with_center(self, cfg, num_classes):
"""Create a loss function with center loss based on the config.
Args:
cfg (DictConfig): Configuration File.
num_classes (int): Number of classes.
Returns:
loss_func (Function Pointer): Loss function based on the config.
"""
        if cfg['model']['backbone'] == 'resnet_18' or cfg['model']['backbone'] == 'resnet_34':
feat_dim = 512
else:
feat_dim = cfg['model']['feat_dim']
if cfg['model']['metric_loss_type'] == 'center':
center_criterion = CenterLoss(num_classes=num_classes, feat_dim=feat_dim, use_gpu=True)
elif cfg['model']['metric_loss_type'] == 'triplet_center':
triplet = TripletLoss(self.optim_config['triplet_loss_margin'])
center_criterion = CenterLoss(num_classes=num_classes, feat_dim=feat_dim, use_gpu=True)
else:
            raise ValueError('Expected METRIC_LOSS_TYPE with center should be center or triplet_center, '
                             'but got {}'.format(cfg['model']['metric_loss_type']))
if cfg['model']['label_smooth']:
xent = CrossEntropyLabelSmooth(num_classes=num_classes)
def loss_func(score, feat, target):
if cfg['model']['metric_loss_type'] == 'center':
if cfg['model']['label_smooth']:
return xent(score, target) + \
self.optim_config['center_loss_weight'] * center_criterion(feat, target)
return F.cross_entropy(score, target) + \
self.optim_config['center_loss_weight'] * center_criterion(feat, target)
if cfg['model']['metric_loss_type'] == 'triplet_center':
if cfg['model']['label_smooth']:
return xent(score, target) + \
triplet(feat, target)[0] + \
self.optim_config['center_loss_weight'] * center_criterion(feat, target)
return F.cross_entropy(score, target) + \
triplet(feat, target)[0] + \
self.optim_config['center_loss_weight'] * center_criterion(feat, target)
            raise ValueError('Expected METRIC_LOSS_TYPE with center should be center or triplet_center, '
                             'but got {}'.format(cfg['model']['metric_loss_type']))
return loss_func, center_criterion
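# Illustrative usage sketch (not part of the original file): how this LightningModule is typically
# driven. The experiment spec is a Hydra/OmegaConf configuration with many required sections
# (model, dataset, train, re_ranking, ...); the outline below is hypothetical, not a runnable recipe.
# Example:
#     model = ReIdentificationModel(experiment_spec, prepare_for_training=True)
#     trainer = pl.Trainer(max_epochs=..., gpus=...)
#     trainer.fit(model)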
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/re_identification/model/pl_reid_model.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The top model builder interface."""
from nvidia_tao_pytorch.cv.re_identification.model.baseline import Baseline
def build_model(cfg, num_classes):
"""Build a re-identification model according to provided configuration.
This function builds a re-identification model using the Baseline architecture as per the
provided configuration and number of classes. The Baseline model is primarily a ResNet variant
with additional features like bottleneck and classifier layers.
Args:
cfg (DictConfig): Configuration object containing parameters for the model.
num_classes (int): The number of output classes for the model.
Returns:
Baseline: An instance of the Baseline model configured according to the provided configuration and number of classes.
"""
model = Baseline(cfg, num_classes)
return model
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/re_identification/model/build_nn_model.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Inferencer."""
from nvidia_tao_pytorch.cv.re_identification.utils.common_utils import data_to_device
class Inferencer():
"""A class to perform inference with a PyTorch model.
This class is designed to facilitate the inference process for a given PyTorch model. The model
and data used for inference are assumed to be compatible with GPU processing.
Args:
model (torch.nn.Module): The PyTorch model to be used for inference. The model should already
be in a state ready for inference (i.e., it should already be trained).
Attributes:
model (torch.nn.Module): The PyTorch model for inference.
Methods:
inference(data): Perform inference on the provided data.
"""
def __init__(self, model):
"""Initialize the inferencer with a PyTorch model.
This function prepares the model for inference by setting it to evaluation mode and moving it to GPU.
Args:
model (torch.nn.Module): The PyTorch model to be used for inference. The model should be in a state ready
for inference (i.e., it should already be trained).
"""
self.model = model
self.model.eval()
self.model.cuda()
def inference(self, data):
"""Perform inference on the provided data and return the model's output.
The data is first converted to a float tensor and moved to the device where the model resides (assumed to be a GPU).
Then it is passed through the model, and the output is returned.
Args:
data (torch.Tensor): The input data for the model. The data should be compatible with the model's expected input format.
Returns:
torch.Tensor: The output of the model. For a model trained for re-identification, this would typically be the feature
embeddings for the input images.
"""
data = data.float()
cuda_data = data_to_device(data)
feat = self.model(cuda_data)
return feat
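# Illustrative usage sketch (not part of the original file): running inference on one batch. The
# trained `model`, the `dataloader` and the availability of a GPU are assumptions here; Inferencer
# moves the model and data to CUDA, so a GPU is required.
# Example:
#     inferencer = Inferencer(model)
#     for images, *_ in dataloader:
#         embeddings = inferencer.inference(images)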
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/re_identification/inference/inferencer.py |