python_code | repo_name | file_path
---|---|---|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# This work is made available under the Nvidia Source Code License-NC.
# To view a copy of this license, visit
# https://github.com/NVlabs/FAN/blob/main/LICENSE
"""FAN backbone"""
import math
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as checkpoint
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.models.helpers import build_model_with_cfg
from timm.models.vision_transformer import Mlp as MlpOri
from timm.models.registry import register_model
from timm.models.layers import DropPath, trunc_normal_, to_2tuple
from nvidia_tao_ds.backbone.convnext_utils import _create_hybrid_backbone
from nvidia_tao_ds.backbone.swin_utils import _create_fan_swin_transformer
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None,
'crop_pct': 1.0, 'interpolation': 'bicubic', 'fixed_input_size': True,
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'patch_embed.proj.0.0', 'classifier': 'head',
**kwargs
}
default_cfgs = {
# Patch size 16
'fan_tiny_8_p16_224': _cfg(),
'fan_tiny_12_p16_224': _cfg(),
'fan_small_12_p16_224': _cfg(),
'fan_base_18_p16_224': _cfg(),
'fan_large_24_p16_224': _cfg(),
'fan_xlarge_24_p16_224': _cfg(),
}
class ClassAttn(nn.Module):
"""Class Attention Layer as in CaiT https://arxiv.org/abs/2103.17239"""
def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.):
"""Initialize ClassAttn class"""
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = head_dim ** -0.5
self.fast_attn = hasattr(torch._C._nn, '_scaled_dot_product_attention') # pylint:disable=I1101
self.q = nn.Linear(dim, dim, bias=qkv_bias)
self.k = nn.Linear(dim, dim, bias=qkv_bias)
self.v = nn.Linear(dim, dim, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x):
"""Taken from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
with slight modifications to do CA
"""
B, N, C = x.shape
q = self.q(x[:, 0]).unsqueeze(1).reshape(B, 1, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
k = self.k(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
v = self.v(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
if torch.onnx.is_in_onnx_export() or not self.fast_attn:
q = q * self.scale
attn = (q @ k.transpose(-2, -1))
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x_cls = (attn @ v).transpose(1, 2).reshape(B, 1, C)
else:
# Since Torch 1.14, scaled_dot_product_attention has been optimized for performance
x, _ = F._scaled_dot_product_attention(
q, k, v,
dropout_p=self.attn_drop.p,
)
x_cls = x.transpose(1, 2).reshape(B, 1, C)
x_cls = self.proj(x_cls)
x_cls = self.proj_drop(x_cls)
return x_cls
class PositionalEncodingFourier(nn.Module):
"""Positional encoding relying on a fourier kernel matching the one used in the "Attention is all of Need" paper."""
def __init__(self, hidden_dim=32, dim=768, temperature=10000):
"""Initialize PositionalEncodingFourier class"""
super().__init__()
self.token_projection = nn.Conv2d(hidden_dim * 2, dim, kernel_size=1)
self.scale = 2 * math.pi
self.temperature = temperature
self.hidden_dim = hidden_dim
self.dim = dim
self.eps = 1e-6
def forward(self, B: int, H: int, W: int, fp32=True):
"""Forward function"""
device = self.token_projection.weight.device
y_embed = torch.arange(1, H + 1, dtype=torch.float32 if fp32 else torch.float16, device=device).unsqueeze(1).repeat(1, 1, W)
x_embed = torch.arange(1, W + 1, dtype=torch.float32 if fp32 else torch.float16, device=device).repeat(1, H, 1)
y_embed = y_embed / (y_embed[:, -1:, :] + self.eps) * self.scale
x_embed = x_embed / (x_embed[:, :, -1:] + self.eps) * self.scale
dim_t = torch.arange(self.hidden_dim, dtype=torch.float32 if fp32 else torch.float16, device=device)
dim_t = self.temperature ** (2 * torch.div(dim_t, 2, rounding_mode='floor') / self.hidden_dim)
pos_x = x_embed[:, :, :, None] / dim_t
pos_y = y_embed[:, :, :, None] / dim_t
pos_x = torch.stack([pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()], dim=4).flatten(3)
pos_y = torch.stack([pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()], dim=4).flatten(3)
pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)
pos = self.token_projection(pos)
return pos.repeat(B, 1, 1, 1) # (B, C, H, W)
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution + batch norm"""
return torch.nn.Sequential(
nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False),
nn.BatchNorm2d(out_planes)
)
def sigmoid(x, inplace=False):
"""Sigmoid function"""
return x.sigmoid_() if inplace else x.sigmoid()
def make_divisible(v, divisor=8, min_value=None):
"""Make tensor divisible"""
min_value = min_value or divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v
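# Illustrative behaviour of the rounding above (values chosen for the example):
# make_divisible(30, divisor=8) -> 32, make_divisible(22, divisor=8) -> 24,
# and make_divisible(7, divisor=8) is clamped up to the minimum of 8.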
class SqueezeExcite(nn.Module):
"""SqueezeExcite"""
def __init__(self, in_chs, se_ratio=0.25, reduced_base_chs=None,
act_layer=nn.ReLU, gate_fn=sigmoid, divisor=1, **_):
"""Initialize SqueezeExcite class"""
super(SqueezeExcite, self).__init__()
self.gate_fn = gate_fn
reduced_chs = make_divisible((reduced_base_chs or in_chs) * se_ratio, divisor)
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.conv_reduce = nn.Conv2d(in_chs, reduced_chs, 1, bias=True)
self.act1 = act_layer(inplace=True)
self.conv_expand = nn.Conv2d(reduced_chs, in_chs, 1, bias=True)
def forward(self, x):
"""Forward function"""
x_se = self.avg_pool(x)
x_se = self.conv_reduce(x_se)
x_se = self.act1(x_se)
x_se = self.conv_expand(x_se)
x = x * self.gate_fn(x_se)
return x
class SEMlp(nn.Module):
"""SEMlp"""
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0., linear=False, use_se=True):
"""Initialize SEMlP Class"""
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.dwconv = DWConv(hidden_features)
self.gamma = nn.Parameter(torch.ones(hidden_features), requires_grad=True)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
self.linear = linear
if self.linear:
self.relu = nn.ReLU(inplace=True)
self.se = SqueezeExcite(out_features, se_ratio=0.25) if use_se else nn.Identity()
self.apply(self._init_weights)
def _init_weights(self, m):
"""Initialize weights"""
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
elif isinstance(m, nn.Conv2d):
fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
fan_out //= m.groups
m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
if m.bias is not None:
m.bias.data.zero_()
def forward(self, x, H, W):
"""Forward function"""
B, N, C = x.shape
x = self.fc1(x)
if self.linear:
x = self.relu(x)
x = self.drop(self.gamma * self.dwconv(x, H, W)) + x
x = self.fc2(x)
x = self.drop(x)
x = self.se(x.permute(0, 2, 1).reshape(B, C, H, W)).reshape(B, C, N).permute(0, 2, 1)
return x, H, W
class Mlp(nn.Module):
"""Mlp class used for FAN"""
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
"""Initialize Mlp class"""
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.dwconv = DWConv(hidden_features)
self.gamma = nn.Parameter(torch.ones(hidden_features), requires_grad=True)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
self.apply(self._init_weights)
def _init_weights(self, m):
"""Initialize weights"""
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
elif isinstance(m, nn.Conv2d):
fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
fan_out //= m.groups
m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
if m.bias is not None:
m.bias.data.zero_()
def forward(self, x, H, W):
"""Forward function"""
x = self.fc1(x)
x = self.drop(self.gamma * self.dwconv(x, H, W)) + x
x = self.fc2(x)
x = self.drop(x)
return x
class ConvPatchEmbed(nn.Module):
"""Image to Patch Embedding using multiple convolutional layers"""
def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768, act_layer=nn.GELU):
"""Initialize ConvPatchEmbed class"""
super().__init__()
img_size = to_2tuple(img_size)
num_patches = (img_size[1] // patch_size) * (img_size[0] // patch_size)
self.img_size = img_size
self.patch_size = patch_size
self.num_patches = num_patches
if patch_size == 16:
self.proj = torch.nn.Sequential(
conv3x3(in_chans, embed_dim // 8, 2),
act_layer(),
conv3x3(embed_dim // 8, embed_dim // 4, 2),
act_layer(),
conv3x3(embed_dim // 4, embed_dim // 2, 2),
act_layer(),
conv3x3(embed_dim // 2, embed_dim, 2),
)
elif patch_size == 8:
self.proj = torch.nn.Sequential(
conv3x3(in_chans, embed_dim // 4, 2),
act_layer(),
conv3x3(embed_dim // 4, embed_dim // 2, 2),
act_layer(),
conv3x3(embed_dim // 2, embed_dim, 2),
)
elif patch_size == 4:
self.proj = torch.nn.Sequential(
conv3x3(in_chans, embed_dim // 4, 2),
act_layer(),
conv3x3(embed_dim // 4, embed_dim // 1, 2),
)
else:
            raise ValueError('For convolutional projection, patch size has to be in [4, 8, 16]')
def forward(self, x):
"""Forward function"""
x = self.proj(x)
Hp, Wp = x.shape[2], x.shape[3]
x = x.flatten(2).transpose(1, 2) # (B, N, C)
return x, (Hp, Wp)
class DWConv(nn.Module):
"""Depth-wise convolution"""
def __init__(self, in_features, out_features=None, act_layer=nn.GELU, kernel_size=3):
"""Initialize DWConv class"""
super().__init__()
out_features = out_features or in_features
padding = kernel_size // 2
self.conv1 = torch.nn.Conv2d(
in_features, in_features, kernel_size=kernel_size, padding=padding, groups=in_features)
self.act = act_layer()
self.bn = nn.BatchNorm2d(in_features)
self.conv2 = torch.nn.Conv2d(
in_features, out_features, kernel_size=kernel_size, padding=padding, groups=out_features)
def forward(self, x, H: int, W: int):
"""Forward function"""
B, N, C = x.shape
x = x.permute(0, 2, 1).reshape(B, C, H, W)
x = self.conv1(x)
x = self.act(x)
x = self.bn(x)
x = self.conv2(x)
x = x.reshape(B, C, N).permute(0, 2, 1)
return x
class ClassAttentionBlock(nn.Module):
"""Class Attention Layer as in CaiT https://arxiv.org/abs/2103.17239"""
def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, drop=0., attn_drop=0., drop_path=0.,
act_layer=nn.GELU, norm_layer=nn.LayerNorm, eta=1., tokens_norm=False):
"""Initialize ClassAttentionBlock class"""
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = ClassAttn(
dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
self.mlp = MlpOri(in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, drop=drop)
if eta is not None: # LayerScale Initialization (no layerscale when None)
self.gamma1 = nn.Parameter(eta * torch.ones(dim), requires_grad=True)
self.gamma2 = nn.Parameter(eta * torch.ones(dim), requires_grad=True)
else:
self.gamma1, self.gamma2 = 1.0, 1.0
# See https://github.com/rwightman/pytorch-image-models/pull/747#issuecomment-877795721
self.tokens_norm = tokens_norm
def forward(self, x, return_attention=False):
"""Forward function"""
x_norm1 = self.norm1(x)
if return_attention:
x1, attn = self.attn(x_norm1, use_attn=return_attention)
else:
x1 = self.attn(x_norm1)
x_attn = torch.cat([x1, x_norm1[:, 1:]], dim=1)
x = x + self.drop_path(self.gamma1 * x_attn)
if self.tokens_norm:
x = self.norm2(x)
else:
x = torch.cat([self.norm2(x[:, 0:1]), x[:, 1:]], dim=1)
x_res = x
cls_token = x[:, 0:1]
cls_token = self.gamma2 * self.mlp(cls_token)
x = torch.cat([cls_token, x[:, 1:]], dim=1)
x = x_res + self.drop_path(x)
if return_attention:
return attn
return x
class TokenMixing(nn.Module):
"""Token Mixing"""
def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.,
sr_ratio=1, linear=False, share_atten=False, drop_path=0., emlp=False, sharpen_attn=False,
mlp_hidden_dim=None, act_layer=nn.GELU, drop=None, norm_layer=nn.LayerNorm):
"""Initialize TokenMixing class"""
super().__init__()
assert dim % num_heads == 0, f"dim {dim} should be divided by num_heads {num_heads}."
self.dim = dim
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = qk_scale or head_dim ** -0.5
self.share_atten = share_atten
self.emlp = emlp
cha_sr = 1
self.q = nn.Linear(dim, dim // cha_sr, bias=qkv_bias)
self.kv = nn.Linear(dim, dim * 2 // cha_sr, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
self.linear = linear
self.sr_ratio = sr_ratio
self.apply(self._init_weights)
def _init_weights(self, m):
"""Initialize weights"""
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
elif isinstance(m, nn.Conv2d):
fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
fan_out //= m.groups
m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
if m.bias is not None:
m.bias.data.zero_()
def forward(self, x, H, W, atten=None, return_attention=False):
"""Forward function"""
B, N, C = x.shape
q = self.q(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
kv = self.kv(x).reshape(B, -1, 2, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
k, v = kv[0], kv[1]
attn = (q * self.scale @ k.transpose(-2, -1)) # * self.scale
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x, attn @ v
class HybridEmbed(nn.Module):
""" CNN Feature Map Embedding
Extract feature map from CNN, flatten, project to embedding dim.
"""
def __init__(self, backbone, img_size=224, patch_size=2, feature_size=None, in_chans=3, embed_dim=384):
"""Initialize HybridEmbedding class"""
super().__init__()
assert isinstance(backbone, nn.Module)
img_size = to_2tuple(img_size)
patch_size = to_2tuple(patch_size)
self.img_size = img_size
self.patch_size = patch_size
self.backbone = backbone
if feature_size is None:
with torch.no_grad():
# NOTE Most reliable way of determining output dims is to run forward pass
training = backbone.training
if training:
backbone.eval()
o = self.backbone.forward_features(torch.zeros(1, in_chans, img_size[0], img_size[1]))
if isinstance(o, (list, tuple)):
o = o[-1] # last feature if backbone outputs list/tuple of features
feature_size = o.shape[-2:]
feature_dim = o.shape[1]
backbone.train(training)
else:
feature_size = to_2tuple(feature_size)
if hasattr(self.backbone, 'feature_info'):
feature_dim = self.backbone.feature_info.channels()[-1]
else:
feature_dim = self.backbone.num_features
assert feature_size[0] % patch_size[0] == 0 and feature_size[1] % patch_size[1] == 0
self.grid_size = (feature_size[0] // patch_size[0], feature_size[1] // patch_size[1])
self.num_patches = self.grid_size[0] * self.grid_size[1]
self.proj = nn.Conv2d(feature_dim, embed_dim, kernel_size=patch_size, stride=patch_size)
def forward(self, x):
"""Forward function"""
x = self.backbone.forward_features(x)
if isinstance(x, (list, tuple)):
x = x[-1] # last feature if backbone outputs list/tuple of features
_, _, H, W = x.shape
x = self.proj(x).flatten(2).transpose(1, 2)
return x, (H // self.patch_size[0], W // self.patch_size[1])
class ChannelProcessing(nn.Module):
"""Channel Processing"""
def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0., drop_path=0.,
mlp_hidden_dim=None, act_layer=nn.GELU, drop=None, norm_layer=nn.LayerNorm, cha_sr_ratio=1, c_head_num=None):
"""Initialize ChannelProcessing class"""
super().__init__()
assert dim % num_heads == 0, f"dim {dim} should be divided by num_heads {num_heads}."
self.dim = dim
num_heads = c_head_num or num_heads
self.num_heads = num_heads
self.temperature = nn.Parameter(torch.ones(num_heads, 1, 1))
self.cha_sr_ratio = cha_sr_ratio if num_heads > 1 else 1
# config of mlp for v processing
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.mlp_v = Mlp(in_features=dim // self.cha_sr_ratio, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
self.norm_v = norm_layer(dim // self.cha_sr_ratio)
self.q = nn.Linear(dim, dim, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.apply(self._init_weights)
def _init_weights(self, m):
"""Initialize weights"""
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
elif isinstance(m, nn.Conv2d):
fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
fan_out //= m.groups
m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
if m.bias is not None:
m.bias.data.zero_()
def _gen_attn(self, q, k):
"""Returns attention"""
q = q.softmax(-2).transpose(-1, -2)
_, _, N, _ = k.shape
k = torch.nn.functional.adaptive_avg_pool2d(k.softmax(-2), (N, 1))
attn = torch.nn.functional.sigmoid(q @ k)
return attn * self.temperature
def forward(self, x, H, W, atten=None):
"""Forward functions """
B, N, C = x.shape
v = x.reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
q = self.q(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
k = x.reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
attn = self._gen_attn(q, k)
attn = self.attn_drop(attn)
Bv, Hd, Nv, Cv = v.shape
v = self.norm_v(self.mlp_v(v.transpose(1, 2).reshape(Bv, Nv, Hd * Cv), H, W)).reshape(Bv, Nv, Hd, Cv).transpose(1, 2)
repeat_time = N // attn.shape[-1]
attn = attn.repeat_interleave(repeat_time, dim=-1) if attn.shape[-1] > 1 else attn
x = (attn * v.transpose(-1, -2)).permute(0, 3, 1, 2).reshape(B, N, C)
return x, (attn * v.transpose(-1, -2)).transpose(-1, -2) # attn
@torch.jit.ignore
def no_weight_decay(self):
"""Ignore during weight decay"""
return {'temperature'}
class FANBlock_SE(nn.Module):
"""FAN SE block from https://arxiv.org/abs/2204.12451"""
def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, drop=0., attn_drop=0., sharpen_attn=False, use_se=False,
drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, eta=1., sr_ratio=1., qk_scale=None, linear=False, downsample=None, c_head_num=None):
"""Initialize FANBlock_SE class"""
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = TokenMixing(dim, num_heads=num_heads, qkv_bias=qkv_bias,
attn_drop=attn_drop, proj_drop=drop, drop=drop, drop_path=drop_path, sr_ratio=sr_ratio)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
self.mlp = SEMlp(in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, drop=drop)
self.gamma1 = nn.Parameter(eta * torch.ones(dim), requires_grad=True)
self.gamma2 = nn.Parameter(eta * torch.ones(dim), requires_grad=True)
def forward(self, x, H: int, W: int, attn=None):
"""Forward function"""
x_new = self.attn(self.norm1(x), H, W)
x = x + self.drop_path(self.gamma1 * x_new)
x_new, H, W = self.mlp(self.norm2(x), H, W)
x = x + self.drop_path(self.gamma2 * x_new)
return x, H, W
class FANBlock(nn.Module):
"""FAN block from https://arxiv.org/abs/2204.12451"""
def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, drop=0., attn_drop=0., sharpen_attn=False,
drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, eta=1., sr_ratio=1., downsample=None, c_head_num=None):
"""Initialize FANBlock class"""
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = TokenMixing(dim, num_heads=num_heads, qkv_bias=qkv_bias, mlp_hidden_dim=int(dim * mlp_ratio), sharpen_attn=sharpen_attn,
attn_drop=attn_drop, proj_drop=drop, drop=drop, drop_path=drop_path, sr_ratio=sr_ratio)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
self.mlp = ChannelProcessing(dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop,
drop_path=drop_path, drop=drop, mlp_hidden_dim=int(dim * mlp_ratio), c_head_num=c_head_num)
self.gamma1 = nn.Parameter(eta * torch.ones(dim), requires_grad=True)
self.gamma2 = nn.Parameter(eta * torch.ones(dim), requires_grad=True)
self.downsample = downsample
self.H = None
self.W = None
def forward(self, x, attn=None, return_attention=False):
"""Forward function"""
H, W = self.H, self.W
x_new, attn_s = self.attn(self.norm1(x), H, W)
x = x + self.drop_path(self.gamma1 * x_new)
x_new, _ = self.mlp(self.norm2(x), H, W, atten=attn)
x = x + self.drop_path(self.gamma2 * x_new)
if return_attention:
return x, attn_s
if self.downsample is not None:
x, H, W = self.downsample(x, H, W)
self.H, self.W = H, W
return x
class OverlapPatchEmbed(nn.Module):
""" Image to Patch Embedding"""
def __init__(self, img_size=224, patch_size=7, stride=4, in_chans=3, embed_dim=768):
"""Overlap PatchEmbed"""
super().__init__()
img_size = to_2tuple(img_size)
patch_size = to_2tuple(patch_size)
self.img_size = img_size
self.patch_size = patch_size
self.H, self.W = img_size[0] // patch_size[0], img_size[1] // patch_size[1]
self.num_patches = self.H * self.W
self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=stride,
padding=(patch_size[0] // 2, patch_size[1] // 2))
self.norm = nn.LayerNorm(embed_dim)
self.apply(self._init_weights)
def _init_weights(self, m):
"""Initialize weights"""
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
elif isinstance(m, nn.Conv2d):
fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
fan_out //= m.groups
m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
if m.bias is not None:
m.bias.data.zero_()
def forward(self, x, H, W):
"""Forward functions """
B, _, C = x.shape
x = x.transpose(-1, -2).reshape(B, C, H, W)
x = self.proj(x)
_, _, H, W = x.shape
x = x.flatten(2).transpose(1, 2)
x = self.norm(x)
return x, H, W
class FAN(nn.Module):
"""
Based on timm code bases
https://github.com/rwightman/pytorch-image-models/tree/master/timm
"""
def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12, sharpen_attn=False, channel_dims=None,
num_heads=12, mlp_ratio=4., qkv_bias=True, drop_rate=0., attn_drop_rate=0., drop_path_rate=0., sr_ratio=None, backbone=None, use_checkpoint=False,
act_layer=None, norm_layer=None, se_mlp=False, cls_attn_layers=2, use_pos_embed=True, eta=1., tokens_norm=False, c_head_num=None, hybrid_patch_size=2, head_init_scale=1.0):
"""Initialize FAN class"""
super().__init__()
img_size = to_2tuple(img_size)
self.use_checkpoint = use_checkpoint
        assert (img_size[0] % patch_size == 0) and (img_size[1] % patch_size == 0), \
'`patch_size` should divide image dimensions evenly'
self.num_classes = num_classes
num_heads = [num_heads] * depth if not isinstance(num_heads, list) else num_heads
channel_dims = [embed_dim] * depth if channel_dims is None else channel_dims
norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6)
act_layer = act_layer or nn.GELU
if backbone is None:
self.patch_embed = ConvPatchEmbed(
img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim, act_layer=act_layer)
else:
self.patch_embed = HybridEmbed(backbone=backbone, patch_size=hybrid_patch_size, embed_dim=embed_dim)
self.use_pos_embed = use_pos_embed
if use_pos_embed:
self.pos_embed = PositionalEncodingFourier(dim=embed_dim)
self.pos_drop = nn.Dropout(p=drop_rate)
if se_mlp:
build_block = FANBlock_SE
else:
build_block = FANBlock
self.blocks = nn.ModuleList([])
for i in range(depth):
if i < depth - 1 and channel_dims[i] != channel_dims[i + 1]:
downsample = OverlapPatchEmbed(img_size=img_size,
patch_size=3,
stride=2,
in_chans=channel_dims[i],
embed_dim=channel_dims[i + 1])
else:
downsample = None
self.blocks.append(
build_block(
dim=channel_dims[i], num_heads=num_heads[i], mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, drop=drop_rate, sr_ratio=sr_ratio[i],
attn_drop=attn_drop_rate, drop_path=drop_path_rate, act_layer=act_layer, norm_layer=norm_layer, eta=eta,
downsample=downsample, c_head_num=c_head_num[i] if c_head_num is not None else None))
self.num_features = self.embed_dim = channel_dims[i]
self.cls_token = nn.Parameter(torch.zeros(1, 1, channel_dims[i]))
self.cls_attn_blocks = nn.ModuleList([
ClassAttentionBlock(
dim=channel_dims[-1], num_heads=num_heads[-1], mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, drop=drop_rate,
attn_drop=attn_drop_rate, act_layer=act_layer, norm_layer=norm_layer, eta=eta, tokens_norm=tokens_norm)
for _ in range(cls_attn_layers)])
# Classifier head
self.norm = norm_layer(channel_dims[i])
self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
# Init weights
trunc_normal_(self.cls_token, std=.02)
self.apply(self._init_weights)
self.head.weight.data.mul_(head_init_scale)
self.head.bias.data.mul_(head_init_scale)
def _init_weights(self, m):
"""Initialize weights"""
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
@torch.jit.ignore
def no_weight_decay(self):
"""layers to ignore for weight decay"""
return {'pos_embed', 'cls_token'}
def get_classifier(self):
"""Returns classifier"""
return self.head
def reset_classifier(self, num_classes, global_pool=''):
"""Redefine classifier of FAN"""
self.num_classes = num_classes
self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
def forward_features(self, x):
"""Extract features"""
B = x.shape[0]
x, (Hp, Wp) = self.patch_embed(x)
if self.use_pos_embed:
pos_encoding = self.pos_embed(B, Hp, Wp).reshape(B, -1, x.shape[1]).permute(0, 2, 1)
x = x + pos_encoding
x = self.pos_drop(x)
H, W = Hp, Wp
for blk in self.blocks:
blk.H, blk.W = H, W
if self.use_checkpoint:
x = checkpoint.checkpoint(blk, x)
else:
x = blk(x)
H, W = blk.H, blk.W
cls_tokens = self.cls_token.expand(B, -1, -1)
x = torch.cat((cls_tokens, x), dim=1)
for blk in self.cls_attn_blocks:
x = blk(x)
x = self.norm(x)[:, 0]
return x
def base_forward(self, x):
"""Base forward function"""
B = x.shape[0]
x, (Hp, Wp) = self.patch_embed(x)
if self.use_pos_embed:
pos_encoding = self.pos_embed(B, Hp, Wp).reshape(B, -1, x.shape[1]).permute(0, 2, 1)
x = x + pos_encoding
x = self.pos_drop(x)
H, W = Hp, Wp
for blk in self.blocks:
blk.H, blk.W = H, W
if self.use_checkpoint:
x = checkpoint.checkpoint(blk, x)
else:
x = blk(x)
H, W = blk.H, blk.W
cls_tokens = self.cls_token.expand(B, -1, -1)
x = torch.cat((cls_tokens, x), dim=1)
for blk in self.cls_attn_blocks:
x = blk(x)
x = self.norm(x)
return x
def forward(self, x):
"""Forward functions"""
x = self.forward_features(x)
x = self.head(x)
return x
def get_spatial_feat(self, x):
"""Turn token feature into spatial feature.
Args:
x (torch.Tensor): token feature in [B, 1024+1, 768]
Return:
x (torch.Tensor): feature map in (B, 768, H, W)
"""
b, n, c = x.shape
h, w = int((n - 1 + 1e-6) ** 0.5), int((n - 1 + 1e-6) ** 0.5)
x = x[:, 1:].transpose(2, 1).reshape(b, c, h, w)
return x
def get_last_selfattention(self, x, use_cls_attn=False, layer_idx=11):
"""Returns last self-attention"""
B = x.shape[0]
x, (Hp, Wp) = self.patch_embed(x)
if self.use_pos_embed:
pos_encoding = self.pos_embed(B, Hp, Wp).reshape(B, -1, x.shape[1]).permute(0, 2, 1)
x = x + pos_encoding
x = self.pos_drop(x)
return_idx = layer_idx or len(self.blocks) - 1
for i, blk in enumerate(self.blocks):
if i == return_idx:
x, attn = blk(x, Hp, Wp, return_attention=True)
else:
x, Hp, Wp = blk(x, Hp, Wp)
attn = None
if use_cls_attn:
cls_tokens = self.cls_token.expand(B, -1, -1)
x = torch.cat((cls_tokens, x), dim=1)
for i, blk in enumerate(self.cls_attn_blocks):
if i < len(self.cls_attn_blocks) - 1:
x = blk(x)
else:
attn = blk(x, return_attention=True)
return attn
return attn
def checkpoint_filter_fn(state_dict, model):
"""Filter loaded checkpoints"""
if 'model' in state_dict:
state_dict = state_dict['model']
# For consistency with timm's transformer models while being compatible with official weights source we rename
# pos_embeder to pos_embed. Also account for use_pos_embed == False
use_pos_embed = getattr(model, 'pos_embed', None) is not None
pos_embed_keys = [k for k in state_dict if k.startswith('pos_embed')]
for k in pos_embed_keys:
if use_pos_embed:
state_dict[k.replace('pos_embeder.', 'pos_embed.')] = state_dict.pop(k)
else:
del state_dict[k]
if 'cls_attn_blocks.0.attn.qkv.weight' in state_dict and 'cls_attn_blocks.0.attn.q.weight' in model.state_dict():
num_ca_blocks = len(model.cls_attn_blocks)
for i in range(num_ca_blocks):
qkv_weight = state_dict.pop(f'cls_attn_blocks.{i}.attn.qkv.weight')
qkv_weight = qkv_weight.reshape(3, -1, qkv_weight.shape[-1])
for j, subscript in enumerate('qkv'):
state_dict[f'cls_attn_blocks.{i}.attn.{subscript}.weight'] = qkv_weight[j]
qkv_bias = state_dict.pop(f'cls_attn_blocks.{i}.attn.qkv.bias', None)
if qkv_bias is not None:
qkv_bias = qkv_bias.reshape(3, -1)
for j, subscript in enumerate('qkv'):
state_dict[f'cls_attn_blocks.{i}.attn.{subscript}.bias'] = qkv_bias[j]
return state_dict
def _create_fan(variant, pretrained=False, default_cfg=None, **kwargs):
"""Create FAN backbone"""
default_cfg = default_cfg or default_cfgs[variant]
model = build_model_with_cfg(
FAN, variant, pretrained, pretrained_cfg=default_cfg, pretrained_filter_fn=checkpoint_filter_fn, **kwargs)
return model
# FAN-ViT Models
@register_model
def fan_tiny_12_p16_224(pretrained=False, bn_tf=False, **kwargs):
"""FAN-ViT Tiny"""
depth = 12
sr_ratio = [1] * (depth // 2) + [1] * (depth // 2)
model_kwargs = dict(
patch_size=16, embed_dim=192, depth=depth, num_heads=4, eta=1.0, tokens_norm=True, sharpen_attn=False, **kwargs)
model = _create_fan('fan_tiny_12_p16_224', pretrained=pretrained, sr_ratio=sr_ratio, **model_kwargs)
return model
@register_model
def fan_small_12_p16_224_se_attn(pretrained=False, **kwargs):
"""FAN-ViT SE Small"""
depth = 12
sr_ratio = [1] * (depth // 2) + [1] * (depth // 2)
model_kwargs = dict(
patch_size=16, embed_dim=384, depth=depth, num_heads=8, eta=1.0, tokens_norm=True, sharpen_attn=False, se_mlp=True, **kwargs)
model = _create_fan('fan_small_12_p16_224', pretrained=pretrained, sr_ratio=sr_ratio, **model_kwargs)
return model
@register_model
def fan_small_12_p16_224(pretrained=False, **kwargs):
"""FAN-ViT Small"""
depth = 12
sr_ratio = [1] * depth
model_kwargs = dict(
patch_size=16, embed_dim=384, depth=depth, num_heads=8, eta=1.0, tokens_norm=True, **kwargs)
model = _create_fan('fan_small_12_p16_224', pretrained=pretrained, sr_ratio=sr_ratio, **model_kwargs)
return model
@register_model
def fan_base_18_p16_224(pretrained=False, **kwargs):
"""FAN-ViT Base"""
depth = 18
sr_ratio = [1] * (depth // 2) + [1] * (depth // 2)
model_kwargs = dict(
patch_size=16, embed_dim=448, depth=depth, num_heads=8, eta=1.0, tokens_norm=True, sharpen_attn=False, **kwargs)
model = _create_fan('fan_base_18_p16_224', pretrained=pretrained, sr_ratio=sr_ratio, **model_kwargs)
return model
@register_model
def fan_large_24_p16_224(pretrained=False, **kwargs):
"""FAN-ViT Large"""
depth = 24
sr_ratio = [1] * (depth // 2) + [1] * (depth // 2)
model_kwargs = dict(
patch_size=16, embed_dim=480, depth=depth, num_heads=10, eta=1.0, tokens_norm=True, sharpen_attn=False, **kwargs)
model = _create_fan('fan_large_24_p16_224', pretrained=pretrained, sr_ratio=sr_ratio, **model_kwargs)
return model
# FAN-Hybrid Models
# CNN backbones are based on the ConvNeXt architecture, with only the first two stages used for downsampling.
# This has been verified to be beneficial for downstream tasks.
@register_model
def fan_tiny_8_p4_hybrid(pretrained=False, **kwargs):
"""FAN Hybrid Tiny"""
depth = 8
sr_ratio = [1] * (depth // 2) + [1] * (depth // 2 + 1)
model_args = dict(depths=[3, 3], dims=[128, 256, 512, 1024], use_head=False)
backbone = _create_hybrid_backbone(pretrained=False, pretrained_strict=False, **model_args)
model_kwargs = dict(
patch_size=16, embed_dim=192, depth=depth, num_heads=8, eta=1.0, tokens_norm=True, sharpen_attn=False, **kwargs)
model = _create_fan('fan_tiny_8_p16_224', pretrained=pretrained, sr_ratio=sr_ratio, backbone=backbone, **model_kwargs)
return model
@register_model
def fan_small_12_p4_hybrid(pretrained=False, **kwargs):
"""FAN Hybrid Small"""
depth = 10
channel_dims = [384] * 10 + [384] * (depth - 10)
sr_ratio = [1] * (depth // 2) + [1] * (depth // 2)
model_args = dict(depths=[3, 3], dims=[128, 256, 512, 1024], use_head=False)
backbone = _create_hybrid_backbone(pretrained=False, pretrained_strict=False, **model_args)
model_kwargs = dict(
patch_size=16, embed_dim=384, depth=depth, num_heads=8, eta=1.0, tokens_norm=True, sharpen_attn=False, **kwargs)
model = _create_fan('fan_small_12_p16_224', pretrained=pretrained, sr_ratio=sr_ratio, backbone=backbone,
channel_dims=channel_dims, **model_kwargs)
return model
@register_model
def fan_base_16_p4_hybrid(pretrained=False, **kwargs):
"""FAN Hybrid Base"""
depth = 16
sr_ratio = [1] * (depth // 2) + [1] * (depth // 2)
model_args = dict(depths=[3, 3], dims=[128, 256, 512, 1024], use_head=False)
backbone = _create_hybrid_backbone(pretrained=False, pretrained_strict=False, **model_args)
model_kwargs = dict(
patch_size=16, embed_dim=448, depth=depth, num_heads=8, eta=1.0, tokens_norm=True, sharpen_attn=False, **kwargs)
model = _create_fan('fan_base_18_p16_224', pretrained=pretrained, sr_ratio=sr_ratio, backbone=backbone, **model_kwargs)
return model
@register_model
def fan_large_16_p4_hybrid(pretrained=False, **kwargs):
"""FAN Hybrid Large"""
depth = 22
sr_ratio = [1] * (depth // 2) + [1] * (depth // 2)
model_args = dict(depths=[3, 5], dims=[128, 256, 512, 1024], use_head=False)
backbone = _create_hybrid_backbone(pretrained=False, pretrained_strict=False, **model_args)
model_kwargs = dict(
patch_size=16, embed_dim=480, depth=depth, num_heads=10, eta=1.0, tokens_norm=True, sharpen_attn=False, head_init_scale=0.001, **kwargs)
model = _create_fan('fan_large_24_p16_224', pretrained=pretrained, sr_ratio=sr_ratio, backbone=backbone, **model_kwargs)
return model
@register_model
def fan_Xlarge_16_p4_hybrid(pretrained=False, **kwargs):
"""For those who have enough GPUs, could try this...."""
depth = 23
stage_depth = 20
channel_dims = [528] * stage_depth + [768] * (depth - stage_depth)
num_heads = [11] * stage_depth + [16] * (depth - stage_depth)
sr_ratio = [1] * (depth // 2) + [1] * (depth // 2 + 1)
model_args = dict(depths=[3, 7], dims=[128, 256, 512, 1024], use_head=False)
backbone = _create_hybrid_backbone(pretrained=False, pretrained_strict=False, **model_args)
model_kwargs = dict(
patch_size=16, embed_dim=channel_dims[0], depth=depth, num_heads=num_heads, eta=1.0, tokens_norm=True, sharpen_attn=False, **kwargs)
model = _create_fan('fan_xlarge_24_p16_224', pretrained=pretrained, sr_ratio=sr_ratio, backbone=backbone,
channel_dims=channel_dims, **model_kwargs)
return model
# FAN-Swin Models
@register_model
def fan_swin_tiny_patch4_window7_224(pretrained=False, **kwargs):
    """ Swin-T @ 224x224, trained ImageNet-1k"""
    mlp_type = ['FAN', 'FAN', 'FAN', 'Mlp']
    model_kwargs = dict(
        patch_size=4, window_size=7, embed_dim=96, depths=(2, 2, 6, 2), num_heads=(3, 6, 12, 24), **kwargs)
    return _create_fan_swin_transformer('swin_tiny_patch4_window7_224', pretrained=pretrained, mlp_type=mlp_type, **model_kwargs)
@register_model
def fan_swin_small_patch4_window7_224(pretrained=False, **kwargs):
""" Swin-S @ 224x224, trained ImageNet-1k"""
mlp_type = ['FAN', 'FAN', 'FAN', 'Mlp']
model_kwargs = dict(
patch_size=4, window_size=7, embed_dim=96, depths=(2, 2, 18, 2), num_heads=(3, 6, 12, 24), **kwargs)
return _create_fan_swin_transformer('swin_small_patch4_window7_224', pretrained=pretrained, mlp_type=mlp_type, **model_kwargs)
@register_model
def fan_swin_base_patch4_window7_224(pretrained=False, **kwargs):
""" Swin-S @ 224x224, trained ImageNet-1k"""
mlp_type = ['FAN', 'FAN', 'FAN', 'Mlp']
model_kwargs = dict(
patch_size=4, window_size=7, embed_dim=128, depths=(2, 2, 18, 2), num_heads=(4, 8, 16, 32), **kwargs)
return _create_fan_swin_transformer('swin_base_patch4_window7_224', pretrained=pretrained, mlp_type=mlp_type, **model_kwargs)
@register_model
def fan_swin_large_patch4_window7_224(pretrained=False, **kwargs):
""" Swin-S @ 224x224, trained ImageNet-1k"""
mlp_type = ['FAN', 'FAN', 'FAN', 'Mlp']
model_kwargs = dict(
patch_size=4, window_size=7, embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48), **kwargs)
return _create_fan_swin_transformer('swin_large_patch4_window7_224', pretrained=pretrained, mlp_type=mlp_type, **model_kwargs)
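# --- Usage sketch (not part of the original file) ---
# A minimal, hedged example of exercising one of the registered builders above,
# assuming a timm version compatible with the `pretrained_cfg` argument of
# build_model_with_cfg and that the nvidia_tao_ds backbone utilities are importable.
# Shapes shown are for a 224x224 input to the tiny FAN-ViT variant.
if __name__ == '__main__':
    model = fan_tiny_12_p16_224(pretrained=False)
    model.eval()
    dummy = torch.randn(1, 3, 224, 224)
    with torch.no_grad():
        logits = model(dummy)               # (1, 1000) classification logits
        tokens = model.base_forward(dummy)  # (1, 1 + 14*14, 192) normalized token features
    print(logits.shape, tokens.shape)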
| tao_dataset_suite-main | nvidia_tao_ds/backbone/fan.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# This work is made available under the Nvidia Source Code License-NC.
# To view a copy of this license, visit
#
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Mostly copy-paste from timm library.
https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
"""
import warnings
import math
from functools import partial
import torch
import torch.nn as nn
import pytorch_lightning as pl
def _no_grad_trunc_normal_(tensor, mean, std, a, b):
# Cut & paste from PyTorch official master until it's in a few official releases - RW
# Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
def norm_cdf(x):
# Computes standard normal cumulative distribution function
return (1. + math.erf(x / math.sqrt(2.))) / 2.
if (mean < a - 2 * std) or (mean > b + 2 * std):
warnings.warn("mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
"The distribution of values may be incorrect.",
stacklevel=2)
with torch.no_grad():
# Values are generated by using a truncated uniform distribution and
# then using the inverse CDF for the normal distribution.
# Get upper and lower cdf values
l = norm_cdf((a - mean) / std)
u = norm_cdf((b - mean) / std)
# Uniformly fill tensor with values from [l, u], then translate to
# [2l-1, 2u-1].
tensor.uniform_(2 * l - 1, 2 * u - 1)
# Use inverse cdf transform for normal distribution to get truncated
# standard normal
tensor.erfinv_()
# Transform to proper mean, std
tensor.mul_(std * math.sqrt(2.))
tensor.add_(mean)
# Clamp to ensure it's in the proper range
tensor.clamp_(min=a, max=b)
return tensor
def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.):
# type: (Tensor, float, float, float, float) -> Tensor
return _no_grad_trunc_normal_(tensor, mean, std, a, b)
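# Illustrative use of the helper above: fill a weight tensor in place from a normal
# distribution with std 0.02, truncated to the default bounds [-2, 2]:
#   w = torch.empty(768, 768)
#   trunc_normal_(w, std=.02)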
def drop_path(x, drop_prob: float = 0., training: bool = False):
if drop_prob == 0. or not training:
return x
keep_prob = 1 - drop_prob
shape = (x.shape[0],) + (1,) * (x.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device)
random_tensor.floor_() # binarize
output = x.div(keep_prob) * random_tensor
return output
class DropPath(nn.Module):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
"""
def __init__(self, drop_prob=None):
super(DropPath, self).__init__()
self.drop_prob = drop_prob
def forward(self, x):
return drop_path(x, self.drop_prob, self.training)
class Mlp(nn.Module):
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class ConvStem(nn.Module):
"""ConvStem, from Early Convolutions Help Transformers See Better, Tete et
al.
https://arxiv.org/abs/2106.14881
"""
def __init__(
self,
img_size=224,
patch_size=16,
in_chans=3,
embed_dim=768,
depth=4,
norm_layer=None,
):
super().__init__()
assert embed_dim % 8 == 0, "Embed dimension must be divisible by 8 for ConvStem"
self.img_size = img_size
self.patch_size = patch_size
self.grid_size = (img_size // patch_size, img_size // patch_size)
self.num_patches = self.grid_size[0] * self.grid_size[1]
self.depth = depth
# build stem, similar to the design in https://arxiv.org/abs/2106.14881
stem = []
input_dim, output_dim = in_chans, embed_dim // (2 ** (depth - 1))
for idx in range(depth):
stage_list = [
nn.Conv2d(
input_dim,
output_dim,
kernel_size=3,
stride=2,
padding=1,
bias=False,
),
nn.GroupNorm(1, output_dim, eps=1e-6),
nn.GELU(),
]
if idx == depth - 1:
stage_list.append(nn.Conv2d(output_dim, embed_dim, kernel_size=1))
stage = nn.Sequential(*stage_list)
input_dim = output_dim
output_dim *= 2
stem.append(stage)
self.proj = nn.ModuleList(stem)
self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity()
def forward(self, x):
outputs = []
for i, stage in enumerate(self.proj):
x = stage(x)
B, C, H, W = x.shape # B, 768, 32, 32
x = x.reshape(B, C, H * W).transpose(2, 1)
return x
class Attention(nn.Module):
def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.):
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = qk_scale or head_dim ** -0.5
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x):
B, N, C = x.shape
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2]
attn = (q @ k.transpose(-2, -1)) * self.scale
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x, attn
class Block(nn.Module):
def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm):
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = Attention(
dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
def forward(self, x, return_attention=False):
y, attn = self.attn(self.norm1(x))
x = x + self.drop_path(y)
x = x + self.drop_path(self.mlp(self.norm2(x)))
if return_attention:
return x, attn
else:
return x
class PatchEmbed(nn.Module):
"""Image to Patch Embedding."""
def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768):
super().__init__()
num_patches = (img_size // patch_size) * (img_size // patch_size)
self.img_size = img_size
self.patch_size = patch_size
self.num_patches = num_patches
self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
def forward(self, x):
B, C, H, W = x.shape
x = self.proj(x).flatten(2).transpose(1, 2)
return x
class VisionTransformer(pl.LightningModule):
"""Vision Transformer."""
def __init__(self, img_size=[224], patch_size=16, in_chans=3, num_classes=0, embed_dim=768, depth=12,
num_heads=12, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop_rate=0., attn_drop_rate=0.,
                 drop_path_rate=0., norm_layer=nn.LayerNorm, frozen_stages=(-1, -1), cfg=None, **kwargs):
super().__init__()
self.num_features = self.embed_dim = embed_dim
self.patch_size = patch_size
self.patch_embed = ConvStem(img_size=img_size[0], patch_size=16,
in_chans=in_chans, embed_dim=embed_dim)
num_patches = (img_size[0] // patch_size) ** 2
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim))
self.pos_drop = nn.Dropout(p=drop_rate)
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule
self.blocks = nn.ModuleList([
Block(
dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer)
for i in range(depth)])
self.norm = norm_layer(embed_dim)
# Classifier head
self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity()
trunc_normal_(self.pos_embed, std=.02)
trunc_normal_(self.cls_token, std=.02)
self.apply(self._init_weights)
self.frozen_stages = frozen_stages
if frozen_stages[0] == 0 and frozen_stages[1] > 0:
self.patch_embed.eval()
for param in self.patch_embed.parameters():
param.requires_grad = False
for i in range(max(self.frozen_stages[0], 1), self.frozen_stages[1] + 1):
self.blocks[i-1].eval()
for param in self.blocks[i-1].parameters():
param.requires_grad = False
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
def interpolate_pos_encoding(self, x, w, h):
npatch = x.shape[1] - 1
N = self.pos_embed.shape[1] - 1
if npatch == N and w == h:
return self.pos_embed
class_pos_embed = self.pos_embed[:, 0]
patch_pos_embed = self.pos_embed[:, 1:]
dim = x.shape[-1]
w0 = w // self.patch_embed.patch_size
h0 = h // self.patch_embed.patch_size
# we add a small number to avoid floating point error in the interpolation
# see discussion at https://github.com/facebookresearch/dino/issues/8
w0, h0 = w0 + 0.1, h0 + 0.1
patch_pos_embed = nn.functional.interpolate(
patch_pos_embed.reshape(1, int(math.sqrt(N)), int(math.sqrt(N)), dim).permute(0, 3, 1, 2),
scale_factor=(w0 / math.sqrt(N), h0 / math.sqrt(N)),
mode='bicubic',
)
assert int(w0) == patch_pos_embed.shape[-2] and int(h0) == patch_pos_embed.shape[-1]
patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
return torch.cat((class_pos_embed.unsqueeze(0), patch_pos_embed), dim=1)
def prepare_tokens(self, x):
B, nc, w, h = x.shape
# [B, 1024, embed_dim]
x = self.patch_embed(x) # patch linear embedding
# cls_token: [1, 1, embed_dim]
# add the [CLS] token to the embed patch tokens
cls_tokens = self.cls_token.expand(B, -1, -1)
x = torch.cat((cls_tokens, x), dim=1)
# add positional encoding to each token
x = x + self.interpolate_pos_encoding(x, w, h)
return self.pos_drop(x)
def base_forward(self, x):
"""Base forward pass.
Output size: [B, 1024+1, embed_dim], where 1024=input_h/4 * input_w/4
"""
x = self.prepare_tokens(x)
for blk in self.blocks:
x = blk(x)
x = self.norm(x)
return x
def decoder_forward(self, x):
x = self.decoder_embed(x)
for blk in self.decoder_blocks:
x = blk(x)
x = self.decoder_pred(x)
return x[:, 1:]
def decode_mask(self, x):
b, n, c = x.shape
h1, w1 = int((n + 1e-6) ** 0.5), int((n + 1e-6) ** 0.5)
h0, w0 = int((c + 1e-6) ** 0.5), int((c + 1e-6) ** 0.5)
x = torch.einsum('nchwpq->nhpwqc', x.reshape(b, h1, w1, h0, w0, 1)).reshape(b, 1, h1*h0, w1*w0)
return x
def get_spatial_feat(self, x):
"""Turn token feature into spatial feature.
Args:
x (torch.Tensor): token feature in [B, 1024+1, 768]
Return:
x (torch.Tensor): feature map in (B, 768, H, W)
"""
b, n, c = x.shape
h, w = int((n - 1 + 1e-6) ** 0.5), int((n - 1 + 1e-6) ** 0.5)
x = x[:, 1:].transpose(2, 1).reshape(b, c, h, w)
return x
def forward(self, x):
return self.base_forward(x)
def summary_feat(self, x):
return x[:, 0]
def get_selfattention(self, x, idx):
attentions = []
if idx < 0:
idx = len(self.blocks) + idx
x = self.prepare_tokens(x)
for i, blk in enumerate(self.blocks):
if i < idx:
x = blk(x)
else:
# return attention of the last block
x, attention = blk(x, return_attention=True)
attentions.append(attention)
attentions = torch.cat(attentions, 0)
return attentions
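# --- Usage sketch (not part of the original file) ---
# A minimal, hedged example of running the ConvStem + ViT backbone above on a dummy
# image; `frozen_stages` is passed explicitly as a (start, end) pair because the
# constructor indexes into it. Assumes torch and pytorch_lightning are installed.
if __name__ == '__main__':
    vit = VisionTransformer(img_size=[224], patch_size=16, embed_dim=768, depth=12,
                            num_heads=12, frozen_stages=(-1, -1))
    vit.eval()
    dummy = torch.randn(1, 3, 224, 224)
    with torch.no_grad():
        tokens = vit(dummy)                  # (1, 1 + 14*14, 768) token features
        fmap = vit.get_spatial_feat(tokens)  # (1, 768, 14, 14) spatial feature map
    print(tokens.shape, fmap.shape)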
| tao_dataset_suite-main | nvidia_tao_ds/backbone/vision_transformer.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| tao_dataset_suite-main | nvidia_tao_ds/backbone/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import requests
# constants
API_URL = 'https://api.github.com'
class PullRequest:
"""Pull Request class"""
def __init__(self,
head_owner, head, head_token,
base_owner, repo, base, base_token):
self.head_owner = head_owner
self.head = head
self.base_owner = base_owner
self.repo = repo
self.base = base
self.pulls_url = f'{API_URL}/repos/{self.base_owner}/{self.repo}/pulls'
self._head_auth_headers = {
'Accept': 'application/vnd.github.v3+json',
'Authorization': f"token {head_token}"
}
self._base_auth_headers = {
'Accept': 'application/vnd.github.v3+json',
'Authorization': f"token {base_token}"
}
def get_open(self):
"""get open pull request if existed"""
params = {
'state': 'open',
'head': f"{self.head_owner}:{self.head}",
'base': self.base,
}
r = requests.get(self.pulls_url, headers=self._base_auth_headers, params=params)
if r.status_code == 200:
return r.json()
if r.status_code == 304:
return None
# FAILURE
print('FAILURE - list PR')
print(f'status code: {r.status_code}')
raise Exception(f"Failed to list PR: {r.json()}")
def create(self, params):
"""create a pull request"""
# the token here must have write access to head owner/repo
r = requests.post(self.pulls_url, headers=self._head_auth_headers, json=params)
if r.status_code == 201:
print('SUCCESS - create PR')
pull = r.json()
number = str(pull['number'])
sha = str(pull['head']['sha'])
return number, sha, False
if r.status_code == 422: # early-terminate if no commits between HEAD and BASE
print('SUCCESS - No commits')
print(r.json())
return '', '', True
# FAILURE
print('FAILURE - create PR')
print(f'status code: {r.status_code}')
raise Exception(f"Failed to create PR: {r.json()}")
def merge(self, number, params):
"""merge a pull request"""
# the token here must have write access to base owner/repo
url = f'{self.pulls_url}/{number}/merge'
return requests.put(url, headers=self._head_auth_headers, json=params)
def auto_merge(self, number, sha, merge_method='merge'):
"""merge a auto-merge pull request"""
params = {
'sha': sha,
'merge_method': merge_method,
}
r = self.merge(number, params)
if r.status_code == 200:
self.comment(number, '**SUCCESS** - auto-merge')
print('SUCCESS - auto-merge')
return
else:
print('FAILURE - auto-merge')
self.comment(number=number, content=f"""**FAILURE** - Unable to auto-merge. Manual operation is required.
```
{r.json()}
```
Please use the following steps to fix the merge conflicts manually:
```
# Assume upstream is {self.base_owner}/{self.repo} remote
git fetch upstream {self.head} {self.base}
git checkout -b fix-auto-merge-conflict-{number} upstream/{self.base}
git merge upstream/{self.head}
# Fix any merge conflicts caused by this merge
git commit -am "Merge {self.head} into {self.base}"
git push <personal fork> fix-auto-merge-conflict-{number}
# Open a PR targets {self.base_owner}/{self.repo} {self.base}
```
**IMPORTANT:** Before merging this PR, be sure to change the merging strategy to `Create a merge commit` (repo admin only).
Once this PR is merged, the auto-merge PR should automatically be closed since it contains the same commit hashes
""")
print(f'status code: {r.status_code}')
raise Exception(f"Failed to auto-merge PR: {r.json()}")
def comment(self, number, content):
"""comment in a pull request"""
url = f'{API_URL}/repos/{self.base_owner}/{self.repo}/issues/{number}/comments'
params = {
'body': content
}
r = requests.post(url, headers=self._base_auth_headers, json=params)
if r.status_code == 201:
print('SUCCESS - create comment')
else:
print('FAILURE - create comment')
print(f'status code: {r.status_code}')
raise Exception(f"Failed to create comment: {r.json()}")
def delete_branch(self, owner, branch):
"""delete a branch"""
url = f'{API_URL}/repos/{owner}/{self.repo}/git/refs/heads/{branch}'
r = requests.delete(url, headers=self._base_auth_headers)
if r.status_code == 204:
print(f'SUCCESS - delete {branch}')
else:
print(f'FAILURE - delete {branch}')
print(f'status code: {r.status_code}')
raise Exception(f"Failed to delete {branch}: {r.json()}")
def delete_head(self):
"""delete the HEAD branch in a pull request"""
return self.delete_branch(self.head_owner, self.head)
class EnvDefault(argparse.Action):
"""EnvDefault argparse action class"""
def __init__(self, env, default=None, required=True, **kwargs):
if not default and env:
if env in os.environ:
default = os.environ[env]
if required and default:
required = False
super(EnvDefault, self).__init__(default=default, required=required, **kwargs)
def __call__(self, parser, namespace, values, option_string=None):
setattr(namespace, self.dest, values)
def strtobool(val):
"""Convert a string representation of truth to true (1) or false (0).
    This function is copied from distutils.util to avoid the deprecation warning (https://www.python.org/dev/peps/pep-0632/).
True values are 'y', 'yes', 't', 'true', 'on', and '1'; false values
are 'n', 'no', 'f', 'false', 'off', and '0'. Raises ValueError if
'val' is anything else.
"""
val = val.lower()
if val in ('y', 'yes', 't', 'true', 'on', '1'):
return 1
elif val in ('n', 'no', 'f', 'false', 'off', '0'):
return 0
else:
raise ValueError("invalid truth value %r" % (val,))
| spark-rapids-jni-branch-23.10 | .github/workflows/action-helper/python/utils.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import dask.array as da
import dask.dataframe as ddf
from typing import Dict, List, Tuple, Callable
sys.path.append('./d2geo/attributes')
from d2geo.attributes.CompleTrace import ComplexAttributes
from d2geo.attributes.SignalProcess import SignalProcess
def get_sub_cube(cube, examples_percent):
assert examples_percent > 0 and examples_percent <= 1.0, "Percent must be in (0,1] range."
i_num, x_num, t_num = cube.shape
i_start_idx = int((i_num - (i_num * examples_percent))/2)
i_end_idx = int(i_start_idx + (examples_percent * i_num))
x_start_idx = int((x_num - (x_num * examples_percent))/2)
x_end_idx = int(x_start_idx + (examples_percent * x_num))
t_start_idx = int((t_num - (t_num * examples_percent))/2)
t_end_idx = int(t_start_idx + (examples_percent * t_num))
return cube[i_start_idx:i_end_idx,
x_start_idx:x_end_idx,
t_start_idx:t_end_idx]
def get_default_funcs():
complex_att = ComplexAttributes()
signal_process = SignalProcess()
def amplitude_arr(input_cube):
return da.from_array(input_cube)
# List of tuples with attribute name, the function
# to run (with cube as input) and additional kwargs dict.
funcs = [
('Amplitude', amplitude_arr, {}),
('Envelope', complex_att.envelope, {}),
('Instantaneous Phase', complex_att.instantaneous_phase, {}),
('Instantaneous Frequency', complex_att.instantaneous_frequency, {}),
('Instantaneous Bandwidth', complex_att.instantaneous_bandwidth, {}),
('Dominant Frequency', complex_att.dominant_frequency, {}),
('Cosine Instantaneous Phase', complex_att.cosine_instantaneous_phase, {}),
('Second Derivative', signal_process.second_derivative, {}),
('Reflection Intensity', signal_process.reflection_intensity, {})
]
return funcs
def run_attributes(input_cube, attributes: Optional[List[Tuple[str, Callable, Dict[str, str]]]] = None):
    """Compute the given attributes over input_cube and return them as columns of a dask DataFrame."""
    if attributes is None:
        attributes = get_default_funcs()
datas = [attr_func(input_cube, **attr_kwargs).flatten() for _, attr_func, attr_kwargs in attributes]
datas = da.stack(datas, axis=1)
return ddf.from_dask_array(datas, columns=[attr_name for attr_name, _, _ in attributes])
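# A minimal usage sketch (not part of the original module), assuming `seismic_cube`
# is a 3-D numpy array of amplitudes loaded elsewhere:
#
#     sub_cube = get_sub_cube(seismic_cube, examples_percent=0.5)
#     features_ddf = run_attributes(sub_cube)   # dask DataFrame, one column per attribute
#     features_df = features_ddf.compute()      # materialize to pandas if it fits in memory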
| energy-sdk-master | rapids_seismic_facies/facies_utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Setup script to build the TLT launcher package."""
import os
import setuptools
from release.python.utils import utils
from torch.utils.cpp_extension import BuildExtension
version_locals = utils.get_version_details()
PACKAGE_LIST = [
"nvidia_tao_pytorch",
"third_party"
]
setuptools_packages = []
for package_name in PACKAGE_LIST:
setuptools_packages.extend(utils.find_packages(package_name))
setuptools.setup(
name=version_locals['__package_name__'],
version=version_locals['__version__'],
description=version_locals['__description__'],
author='NVIDIA Corporation',
classifiers=[
'Environment :: Console',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: Apache Software License',
'Natural Language :: English',
'Operating System :: POSIX',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
],
license=version_locals['__license__'],
keywords=version_locals['__keywords__'],
packages=setuptools_packages,
package_data={
'': ['*.pyc', "*.yaml", "*.so", "*.pdf"]
},
include_package_data=True,
zip_safe=False,
entry_points={
'console_scripts': [
# CV entry points
'action_recognition=nvidia_tao_pytorch.cv.action_recognition.entrypoint.action_recognition:main',
'segformer=nvidia_tao_pytorch.cv.segformer.entrypoint.segformer:main',
'classification_pyt=nvidia_tao_pytorch.cv.classification.entrypoint.classification:main',
'deformable_detr=nvidia_tao_pytorch.cv.deformable_detr.entrypoint.deformable_detr:main',
'dino=nvidia_tao_pytorch.cv.dino.entrypoint.dino:main',
'pose_classification=nvidia_tao_pytorch.cv.pose_classification.entrypoint.pose_classification:main',
're_identification=nvidia_tao_pytorch.cv.re_identification.entrypoint.re_identification:main',
'mal=nvidia_tao_pytorch.cv.mal.entrypoint.mal:main',
'ml_recog=nvidia_tao_pytorch.cv.metric_learning_recognition.entrypoint.metric_learning_recognition:main',
'ocrnet=nvidia_tao_pytorch.cv.ocrnet.entrypoint.ocrnet:main',
'ocdnet=nvidia_tao_pytorch.cv.ocdnet.entrypoint.ocdnet:main',
            'optical_inspection=nvidia_tao_pytorch.cv.optical_inspection.entrypoint.optical_inspection:main',
            # Pointpillars entry point
            'pointpillars=nvidia_tao_pytorch.pointcloud.pointpillars.entrypoint.pointpillars:main',
]
},
cmdclass={'build_ext': BuildExtension},
ext_modules=[
utils.make_cuda_ext(
name='iou3d_nms_cuda',
module='nvidia_tao_pytorch.pointcloud.pointpillars.pcdet.ops.iou3d_nms',
sources=[
'src/iou3d_cpu.cpp',
'src/iou3d_nms_api.cpp',
'src/iou3d_nms.cpp',
'src/iou3d_nms_kernel.cu',
]
),
utils.make_cuda_ext(
name='roiaware_pool3d_cuda',
module='nvidia_tao_pytorch.pointcloud.pointpillars.pcdet.ops.roiaware_pool3d',
sources=[
'src/roiaware_pool3d.cpp',
'src/roiaware_pool3d_kernel.cu',
]
),
utils.make_cuda_ext(
name='MultiScaleDeformableAttention',
module='nvidia_tao_pytorch.cv.deformable_detr.model.ops',
sources=[
'src/ms_deform_attn_cpu.cpp',
'src/ms_deform_attn_api.cpp',
'src/ms_deform_attn_cuda.cu'
],
include_dirs=['src'],
define_macros=[("WITH_CUDA", None)],
            extra_flags=utils.get_extra_compile_args()
)
],
)
utils.cleanup()
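# Typical build invocation (a sketch, not part of the original script); building the CUDA
# extensions above requires a matching local CUDA/PyTorch toolchain:
#
#     python setup.py bdist_wheel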
| tao_pytorch_backend-main | setup.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Instantiate the TAO-pytorch docker container for developers."""
import argparse
from distutils.version import LooseVersion
import json
import os
import subprocess
import sys
ROOT_DIR = os.getenv("NV_TAO_PYTORCH_TOP", os.getcwd())
with open(os.path.join(ROOT_DIR, "docker/manifest.json"), "r") as m_file:
docker_config = json.load(m_file)
DOCKER_REGISTRY = docker_config["registry"]
DOCKER_REPOSITORY = docker_config["repository"]
DOCKER_TAG = docker_config["tag"]
DOCKER_COMMAND = "docker"
HOME_PATH = os.path.expanduser("~")
MOUNTS_PATH = os.path.join(HOME_PATH, ".tao_mounts.json")
def get_docker_mounts_from_file(mounts_file=MOUNTS_PATH):
"""Check for docker mounts in ~/.tao_mounts.json."""
if not os.path.exists(mounts_file):
return []
with open(mounts_file, 'r') as mfile:
data = json.load(mfile)
assert "Mounts" in list(data.keys()), "Invalid json file. Requires Mounts key."
return data["Mounts"]
def format_mounts(mount_points):
"""Format mount points to docker recognizable commands."""
formatted_mounts = []
    # Traverse through the mount points and format them for the docker command.
for mount_point in mount_points:
assert "source" in list(mount_point.keys()), "destination" in list(mount_point.keys())
mount = "{}:{}".format(mount_point["source"], mount_point["destination"])
formatted_mounts.append(mount)
return formatted_mounts
def check_image_exists(docker_image):
"""Check if the image exists locally."""
check_command = '{} images | grep "\\<{}\\>" | grep "{}" >/dev/null 2>&1'.format(DOCKER_COMMAND, docker_image, DOCKER_TAG)
rc = subprocess.call(check_command, stdout=sys.stderr, shell=True)
return rc == 0
def pull_base_container(docker_image):
"""Pull the default base container."""
pull_command = "{} pull {}:{}".format(DOCKER_COMMAND, docker_image, DOCKER_TAG)
rc = subprocess.call(pull_command, stdout=sys.stderr, shell=True)
return rc == 0
def get_formatted_mounts(mount_file):
"""Simple function to get default mount points."""
default_mounts = get_docker_mounts_from_file(mount_file)
return format_mounts(default_mounts)
def check_mounts(formatted_mounts):
"""Check the formatted mount commands."""
    assert isinstance(formatted_mounts, list)
for mounts in formatted_mounts:
source_path = mounts.split(":")[0]
if not os.path.exists(source_path):
raise ValueError("Path doesn't exist: {}".format(source_path))
return True
def get_docker_gpus_prefix(gpus):
"""Get the docker command gpu's prefix."""
docker_version = (
subprocess.check_output(
["docker", "version", "--format={{ .Server.APIVersion }}"]
)
.strip()
.decode()
)
if LooseVersion(docker_version) > LooseVersion("1.40"):
# You are using the latest version of docker using
# --gpus instead of the nvidia runtime.
gpu_string = "--gpus "
if gpus == "all":
gpu_string += "all"
else:
gpu_string += "\'\"device={}\"\'".format(gpus)
else:
# Stick to the older version of getting the gpu's using runtime=nvidia
gpu_string = "--runtime=nvidia -e NVIDIA_DRIVER_CAPABILITIES=all "
if gpus != "none":
gpu_string += "-e NVIDIA_VISIBLE_DEVICES={}".format(gpus)
return gpu_string
def create_base_docker():
"""Function to create the base docker."""
create_command = "bash {}/docker/build.sh --build".format(ROOT_DIR)
try:
subprocess.run(create_command, stdout=sys.stderr, shell=True, check=True)
except subprocess.CalledProcessError as e:
raise RuntimeError(f"Container build failed with error {e}")
def instantiate_dev_docker(gpus, mount_file,
mount_cli_list,
env_var_list,
command, ulimit=None,
shm_size="16G", run_as_user=False,
port_mapping=None):
"""Instiate the docker container."""
docker_image = "{}/{}:{}".format(DOCKER_REGISTRY, DOCKER_REPOSITORY, DOCKER_TAG)
# Invoking the nvidia docker.
gpu_string = get_docker_gpus_prefix(gpus)
# Prefix for the run command.
run_command = "{} run -it --rm".format(DOCKER_COMMAND)
# get default mount points.
formatted_mounts = get_formatted_mounts(MOUNTS_PATH)
# get mounts from cli mount file.
formatted_mounts += get_formatted_mounts(mount_file)
if mount_cli_list is not None:
formatted_mounts.extend(mount_cli_list)
    assert check_mounts(formatted_mounts), "Some mounts don't exist. Please make sure all the paths exist."
mount_string = "-v {}:/tao-pt ".format(os.getenv("NV_TAO_PYTORCH_TOP", os.getcwd()))
# Defining env variables.
env_variables = "-e PYTHONPATH={}:$PYTHONPATH ".format("/tao-pt")
for env in env_var_list:
if "=" not in env:
print(f"invalid env variable definition. skipping this {env}")
continue
env_variables += "-e {} ".format(env)
for path in formatted_mounts:
mount_string += "-v {} ".format(path)
# Setting shared memory.
shm_option = "--shm-size {}".format(shm_size)
# Setting ulimits for host
ulimit_options = ""
if ulimit is not None:
for param in ulimit:
ulimit_options += "--ulimit {} ".format(param)
user_option = ""
if run_as_user:
user_option = "--user {}:{}".format(os.getuid(), os.getgid())
port_option = "--net=host"
if port_mapping:
port_option += f" -p {port_mapping}"
final_command = "{} {} {} {} {} {} {} {} {} {}".format(
run_command, gpu_string,
mount_string, env_variables,
shm_option, ulimit_options, user_option,
port_option,
docker_image, " ".join(command)
)
print(final_command)
return subprocess.check_call(final_command, stdout=sys.stderr, shell=True)
def parse_cli_args(args=None):
"""Parse run container command line."""
parser = argparse.ArgumentParser(prog="tao_pt", description="Tool to run the pytorch container.", add_help=True)
parser.add_argument(
"--gpus", default="all", type=str, help="Comma separated GPU indices to be exposed to the docker."
)
parser.add_argument("--volume", action="append", type=str, default=[], help="Volumes to bind.")
parser.add_argument("--env", action="append", type=str, default=[], help="Environment variables to bind.")
parser.add_argument("--mounts_file", help="Path to the mounts file.", default="", type=str)
parser.add_argument("--shm_size", help="Shared memory size for docker", default="16G", type=str)
parser.add_argument("--run_as_user", help="Flag to run as user", action="store_true", default=False)
parser.add_argument("--ulimit", action='append', help="Docker ulimits for the host machine." )
parser.add_argument(
"--port",
type=str,
default=None,
help="Port mapping (e.g. 8889:8889)."
)
args = vars(parser.parse_args(args))
return args
def main(cl_args=None):
"""Start docker container."""
if "--" in cl_args:
index = cl_args.index("--")
# Split args to the tao docker wrapper and the command to be run inside the docker.
tao_pt_args = cl_args[:index]
command_args = cl_args[index + 1:]
else:
tao_pt_args = cl_args
command_args = ""
# parse command line args.
args = parse_cli_args(tao_pt_args)
docker_image = "{}/{}".format(DOCKER_REGISTRY, DOCKER_REPOSITORY)
if not check_image_exists(docker_image):
if not pull_base_container(docker_image):
print("The base container doesn't exist locally and the pull failed. Hence creating the base container")
create_base_docker()
try:
instantiate_dev_docker(
args["gpus"], args["mounts_file"],
args["volume"], args["env"],
command_args, args["ulimit"],
args["shm_size"], args["run_as_user"],
args['port']
)
except subprocess.CalledProcessError:
# Do nothing - the errors are printed in entrypoint launch.
pass
if __name__ == "__main__":
main(sys.argv[1:])
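# Example invocation (a sketch; the mount file and script paths are hypothetical). Everything
# after `--` is run inside the container:
#
#     python runner/tao_pt.py --gpus all --mounts_file ~/.tao_mounts.json \
#         --env MY_VAR=value -- python /tao-pt/scripts/train.py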
| tao_pytorch_backend-main | runner/tao_pt.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| tao_pytorch_backend-main | release/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Launcher ."""
"""TAO Pytorch SDK version"""
MAJOR = "4"
MINOR = "0.0"
PATCH = "01"
PRE_RELEASE = ''
# Getting the build number.
def get_build_info():
"""Get the build version number."""
# required since setup.py runs a version string and global imports aren't executed.
import os # noqa pylint: disable=import-outside-toplevel
build_file = "build.info"
if not os.path.exists(build_file):
raise FileNotFoundError("Build file doesn't exist.")
patch = 0
with open(build_file, 'r') as bfile:
patch = bfile.read().strip()
assert bfile.closed, "Build file wasn't closed properly."
return patch
try:
PATCH = get_build_info()
except FileNotFoundError:
pass
# Use the following formatting: (major, minor, patch, pre-release)
VERSION = (MAJOR, MINOR, PATCH, PRE_RELEASE)
# Version of the library.
__version__ = '.'.join(map(str, VERSION[:3])) + ''.join(VERSION[3:])
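# With the defaults above (MAJOR="4", MINOR="0.0", PATCH="01", empty PRE_RELEASE) and no
# build.info file present, __version__ evaluates to "4.0.0.01".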
# Version of the file format.
__format_version__ = 2
# Other package info.
__package_name__ = "nvidia-tao-pytorch"
__description__ = "NVIDIA's package for DNN implementation on PyTorch for use with TAO Toolkit."
__keywords__ = "nvidia, tao, pytorch"
__contact_names__ = "Varun Praveen"
__contact_emails__ = "[email protected]"
__license__ = "NVIDIA Proprietary Software"
| tao_pytorch_backend-main | release/python/version.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| tao_pytorch_backend-main | release/python/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing utility functions required for packaging TLT related modules."""
| tao_pytorch_backend-main | release/python/utils/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper utils for packaging."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import compileall
import glob
import os
import setuptools
from torch.utils.cpp_extension import CUDAExtension
# Rename all .py files to .py_tmp temporarily.
ignore_list = ['__init__.py', '__version__.py']
LOCAL_DIR = os.path.dirname(os.path.abspath(__file__))
def up_directory(dir_path, n=1):
"""Go up n directories from dir_path."""
dir_up = dir_path
for _ in range(n):
dir_up = os.path.split(dir_up)[0]
return dir_up
TOP_LEVEL_DIR = up_directory(LOCAL_DIR, 3)
def remove_prefix(dir_path):
"""Remove a certain prefix from path."""
max_path = 8
prefix = dir_path
while max_path > 0:
prefix = os.path.split(prefix)[0]
if prefix.endswith('ai_infra'):
return dir_path[len(prefix) + 1:]
max_path -= 1
return dir_path
def get_subdirs(path):
"""Get all subdirs of given path."""
dirs = os.walk(path)
return [remove_prefix(x[0]) for x in dirs]
def rename_py_files(path, ext, new_ext, ignore_files):
"""Rename all .ext files in a path to .new_ext except __init__ files."""
files = glob.glob(path + '/*' + ext)
for ignore_file in ignore_files:
files = [f for f in files if ignore_file not in f]
for filename in files:
os.rename(filename, filename.replace(ext, new_ext))
def get_version_details():
"""Simple function to get packages for setup.py."""
# Define env paths.
LAUNCHER_SDK_PATH = os.path.join(TOP_LEVEL_DIR, "release/python/")
# Get current __version__.
version_locals = {}
with open(os.path.join(LAUNCHER_SDK_PATH, 'version.py')) as version_file:
exec(version_file.read(), {}, version_locals)
return version_locals
def cleanup():
"""Cleanup directories after the build process."""
req_subdirs = get_subdirs(TOP_LEVEL_DIR)
# Cleanup. Rename all .py_tmp files back to .py and delete pyc files
for dir_path in req_subdirs:
dir_path = os.path.join(TOP_LEVEL_DIR, dir_path)
# TODO: @vpraveen Think about removing python files before the final
# release.
rename_py_files(dir_path, '.py_tmp', '.py', ignore_list)
pyc_list = glob.glob(dir_path + '/*.pyc')
for pyc_file in pyc_list:
os.remove(pyc_file)
def make_cuda_ext(name, module, sources, include_dirs=None, define_macros=None, extra_flags=None):
"""Build cuda extensions for custom ops.
Args:
name (str): Name of the op.
module (str): Name of the module with the op.
source (list): List of source files.
extra_flags (dict): Any extra compile flags.
Returns
cuda_ext (torch.utils.cpp_extension.CUDAExtension): Cuda extension for wheeling.
"""
kwargs = {"extra_compile_args": extra_flags}
if include_dirs:
kwargs["include_dirs"] = [
os.path.join(os.path.relpath(TOP_LEVEL_DIR), *module.split('.'), dir)
for dir in include_dirs
]
if define_macros:
kwargs["define_macros"] = define_macros
cuda_ext = CUDAExtension(
name='%s.%s' % (module, name),
sources=[
os.path.join(os.path.relpath(TOP_LEVEL_DIR), *module.split('.'), src)
for src in sources
],
**kwargs,
)
return cuda_ext
def get_extra_compile_args():
"""Function to get extra compile arguments.
Returns:
extra_compile_args (dict): Dictionary of compile flags.
"""
extra_compile_args = {"cxx": []}
extra_compile_args["nvcc"] = [
"-DCUDA_HAS_FP16=1",
"-D__CUDA_NO_HALF_OPERATORS__",
"-D__CUDA_NO_HALF_CONVERSIONS__",
"-D__CUDA_NO_HALF2_OPERATORS__",
]
return extra_compile_args
def find_packages(package_name):
"""List of packages.
Args:
package_name (str): Name of the package.
Returns:
packages (list): List of packages.
"""
packages = setuptools.find_packages(package_name)
packages = [f"{package_name}.{f}" for f in packages]
packages.append(package_name)
return packages
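# For example (a sketch; the exact result depends on the source tree), find_packages("nvidia_tao_pytorch")
# returns every discovered sub-package prefixed with the package name, such as "nvidia_tao_pytorch.cv",
# plus the top-level "nvidia_tao_pytorch" entry appended at the end.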
| tao_pytorch_backend-main | release/python/utils/utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Overrides modules from Torch."""
| tao_pytorch_backend-main | third_party/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Custom onnx modules for Torch."""
| tao_pytorch_backend-main | third_party/onnx/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions to export models into the ONNX IR format.
These models can be loaded with the ONNX library and then
converted to models which run on other deep learning frameworks.
"""
from __future__ import annotations
import contextlib
import copy
import inspect
import io
import re
import textwrap
import typing
import warnings
from typing import (
Any,
Callable,
cast,
Collection,
Dict,
List,
Mapping,
Optional,
Sequence,
Set,
Tuple,
Type,
Union,
)
import torch
import torch._C._onnx as _C_onnx
import torch.jit._trace
import torch.serialization
from torch import _C
from torch.onnx import ( # noqa: F401
_constants,
_exporter_states,
errors,
symbolic_caffe2,
symbolic_helper,
)
from torch.onnx._globals import GLOBALS
from torch.onnx._internal import (
_beartype,
diagnostics,
jit_utils,
onnx_proto_utils,
registration,
)
__all__ = [
"is_in_onnx_export",
"select_model_mode_for_export",
"disable_apex_o2_state_dict_hook",
"setup_onnx_logging",
"exporter_context",
"export",
"warn_on_static_input_change",
"unpack_quantized_tensor",
"export_to_pretty_string",
"unconvertible_ops",
"register_custom_op_symbolic",
"unregister_custom_op_symbolic",
]
def is_in_onnx_export() -> bool:
"""Returns whether it is in the middle of ONNX export."""
return GLOBALS.in_onnx_export
# TODO(justinchuby): Remove dependency to this global variable from constant_fold.cpp
# The type check is skipped because IValue cannot be imported from torch._C
_params_dict = {} # type: ignore[var-annotated]
@contextlib.contextmanager
@_beartype.beartype
def select_model_mode_for_export(model, mode: _C_onnx.TrainingMode):
r"""A context manager to temporarily set the training mode of ``model``
to ``mode``, resetting it when we exit the with-block.
Args:
model: Same type and meaning as ``model`` arg to :func:`export`.
mode: Same type and meaning as ``training`` arg to :func:`export`.
"""
if not isinstance(mode, _C_onnx.TrainingMode):
raise TypeError(
f"'mode' should be a torch.onnx.TrainingMode enum, but got '{type(mode)}'."
)
originally_training: bool = False
if hasattr(model, "training"):
originally_training = model.training
# ONNX opset 12 has better support for training amenable models, with updated
# versions of the dropout and batch_norm operators
if mode == _C_onnx.TrainingMode.TRAINING or (
mode == _C_onnx.TrainingMode.PRESERVE and originally_training
):
GLOBALS.export_training = True
if GLOBALS.export_onnx_opset_version < 12:
warnings.warn(
"You are exporting the model in training mode with onnx opset "
f"version {GLOBALS.export_onnx_opset_version}. "
"Opset versions lower than opset 12 will not be able to export "
"nodes such as Dropout and BatchNorm correctly."
)
else:
GLOBALS.export_training = False
GLOBALS.training_mode = mode
if mode == _C_onnx.TrainingMode.TRAINING:
model.train(True)
elif mode == _C_onnx.TrainingMode.EVAL:
model.train(False)
# else mode == _C_onnx.TrainingMode.PRESERVE, do nothing
try:
yield
finally:
if hasattr(model, "training") and not mode == _C_onnx.TrainingMode.PRESERVE:
model.train(originally_training)
@contextlib.contextmanager
@_beartype.beartype
def disable_apex_o2_state_dict_hook(
model: Union[torch.nn.Module, torch.jit.ScriptFunction]
):
# Apex O2 hook state_dict to return fp16 weights as fp32.
# Exporter cannot identify them as same tensors.
# Since this hook is only used by optimizer, it is safe to
# remove this hook while exporting.
if not isinstance(model, torch.jit.ScriptFunction):
model_hooks = {} # type: ignore[var-annotated]
for module in model.modules():
for key, hook in module._state_dict_hooks.items():
if type(hook).__name__ == "O2StateDictHook":
if module not in model_hooks:
model_hooks[module] = {}
model_hooks[module][key] = hook
if module in model_hooks:
for key in model_hooks[module]:
module._state_dict_hooks.pop(key)
try:
yield
finally:
# Add the hooks back
for module, m_map in model_hooks.items():
for key, hook in m_map.items():
module._state_dict_hooks[key] = hook
else:
try:
yield
finally:
pass
@contextlib.contextmanager
@_beartype.beartype
def setup_onnx_logging(verbose: bool):
is_originally_enabled = torch.onnx.is_onnx_log_enabled()
if is_originally_enabled or verbose:
torch.onnx.enable_log()
try:
yield
finally:
if not is_originally_enabled:
torch.onnx.disable_log()
@contextlib.contextmanager
@_beartype.beartype
def exporter_context(model, mode: _C_onnx.TrainingMode, verbose: bool):
with select_model_mode_for_export(
model, mode
) as mode_ctx, disable_apex_o2_state_dict_hook(
model
) as apex_ctx, setup_onnx_logging(
verbose
) as log_ctx, diagnostics.create_export_diagnostic_context() as diagnostic_ctx:
yield (mode_ctx, apex_ctx, log_ctx, diagnostic_ctx)
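# A minimal usage sketch (assuming `model` is an nn.Module and `dummy_input` a Tensor);
# this mirrors how the exporter wraps graph construction internally:
#
#     with exporter_context(model, _C_onnx.TrainingMode.EVAL, verbose=False):
#         graph, params_dict, torch_out = _model_to_graph(model, (dummy_input,))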
@_beartype.beartype
def export(
model: Union[torch.nn.Module, torch.jit.ScriptModule, torch.jit.ScriptFunction],
args: Union[Tuple[Any, ...], torch.Tensor],
f: Union[str, io.BytesIO],
export_params: bool = True,
verbose: bool = False,
training: _C_onnx.TrainingMode = _C_onnx.TrainingMode.EVAL,
input_names: Optional[Sequence[str]] = None,
output_names: Optional[Sequence[str]] = None,
operator_export_type: _C_onnx.OperatorExportTypes = _C_onnx.OperatorExportTypes.ONNX,
opset_version: Optional[int] = None,
do_constant_folding: bool = True,
dynamic_axes: Optional[
Union[Mapping[str, Mapping[int, str]], Mapping[str, Sequence[int]]]
] = None,
keep_initializers_as_inputs: Optional[bool] = None,
custom_opsets: Optional[Mapping[str, int]] = None,
export_modules_as_functions: Union[bool, Collection[Type[torch.nn.Module]]] = False,
) -> None:
r"""Exports a model into ONNX format.
If ``model`` is not a :class:`torch.jit.ScriptModule` nor a
:class:`torch.jit.ScriptFunction`, this runs
``model`` once in order to convert it to a TorchScript graph to be exported
(the equivalent of :func:`torch.jit.trace`). Thus this has the same limited support
for dynamic control flow as :func:`torch.jit.trace`.
Args:
model (:class:`torch.nn.Module`, :class:`torch.jit.ScriptModule` or :class:`torch.jit.ScriptFunction`):
the model to be exported.
args (tuple or torch.Tensor):
args can be structured either as:
1. ONLY A TUPLE OF ARGUMENTS::
args = (x, y, z)
The tuple should contain model inputs such that ``model(*args)`` is a valid
invocation of the model. Any non-Tensor arguments will be hard-coded into the
exported model; any Tensor arguments will become inputs of the exported model,
in the order they occur in the tuple.
2. A TENSOR::
args = torch.Tensor([1])
This is equivalent to a 1-ary tuple of that Tensor.
3. A TUPLE OF ARGUMENTS ENDING WITH A DICTIONARY OF NAMED ARGUMENTS::
args = (
x,
{
"y": input_y,
"z": input_z
}
)
All but the last element of the tuple will be passed as non-keyword arguments,
and named arguments will be set from the last element. If a named argument is
not present in the dictionary, it is assigned the default value, or None if a
default value is not provided.
.. note::
If a dictionary is the last element of the args tuple, it will be
interpreted as containing named arguments. In order to pass a dict as the
last non-keyword arg, provide an empty dict as the last element of the args
tuple. For example, instead of::
torch.onnx.export(
model,
(
x,
# WRONG: will be interpreted as named arguments
{y: z}
),
"test.onnx.pb"
)
Write::
torch.onnx.export(
model,
(
x,
{y: z},
{}
),
"test.onnx.pb"
)
f: a file-like object (such that ``f.fileno()`` returns a file descriptor)
or a string containing a file name. A binary protocol buffer will be written
to this file.
export_params (bool, default True): if True, all parameters will
be exported. Set this to False if you want to export an untrained model.
In this case, the exported model will first take all of its parameters
as arguments, with the ordering as specified by ``model.state_dict().values()``
verbose (bool, default False): if True, prints a description of the
model being exported to stdout. In addition, the final ONNX graph will include the
            field ``doc_string`` from the exported model which mentions the source code locations
for ``model``. If True, ONNX exporter logging will be turned on.
training (enum, default TrainingMode.EVAL):
* ``TrainingMode.EVAL``: export the model in inference mode.
* ``TrainingMode.PRESERVE``: export the model in inference mode if model.training is
False and in training mode if model.training is True.
* ``TrainingMode.TRAINING``: export the model in training mode. Disables optimizations
which might interfere with training.
input_names (list of str, default empty list): names to assign to the
input nodes of the graph, in order.
output_names (list of str, default empty list): names to assign to the
output nodes of the graph, in order.
operator_export_type (enum, default OperatorExportTypes.ONNX):
* ``OperatorExportTypes.ONNX``: Export all ops as regular ONNX ops
(in the default opset domain).
* ``OperatorExportTypes.ONNX_FALLTHROUGH``: Try to convert all ops
to standard ONNX ops in the default opset domain. If unable to do so
(e.g. because support has not been added to convert a particular torch op to ONNX),
fall back to exporting the op into a custom opset domain without conversion. Applies
to `custom ops <https://pytorch.org/tutorials/advanced/torch_script_custom_ops.html>`_
as well as ATen ops. For the exported model to be usable, the runtime must support
these non-standard ops.
* ``OperatorExportTypes.ONNX_ATEN``: All ATen ops (in the TorchScript namespace "aten")
are exported as ATen ops (in opset domain "org.pytorch.aten").
`ATen <https://pytorch.org/cppdocs/#aten>`_ is PyTorch's built-in tensor library, so
this instructs the runtime to use PyTorch's implementation of these ops.
.. warning::
Models exported this way are probably runnable only by Caffe2.
This may be useful if the numeric differences in implementations of operators are
causing large differences in behavior between PyTorch and Caffe2 (which is more
common on untrained models).
* ``OperatorExportTypes.ONNX_ATEN_FALLBACK``: Try to export each ATen op
(in the TorchScript namespace "aten") as a regular ONNX op. If we are unable to do so
(e.g. because support has not been added to convert a particular torch op to ONNX),
fall back to exporting an ATen op. See documentation on OperatorExportTypes.ONNX_ATEN for
context.
For example::
graph(%0 : Float):
%3 : int = prim::Constant[value=0]()
# conversion unsupported
%4 : Float = aten::triu(%0, %3)
# conversion supported
%5 : Float = aten::mul(%4, %0)
return (%5)
Assuming ``aten::triu`` is not supported in ONNX, this will be exported as::
graph(%0 : Float):
%1 : Long() = onnx::Constant[value={0}]()
# not converted
%2 : Float = aten::ATen[operator="triu"](%0, %1)
# converted
%3 : Float = onnx::Mul(%2, %0)
return (%3)
If PyTorch was built with Caffe2 (i.e. with ``BUILD_CAFFE2=1``), then
Caffe2-specific behavior will be enabled, including special support
              for ops produced by the modules described in
`Quantization <https://pytorch.org/docs/stable/quantization.html>`_.
.. warning::
Models exported this way are probably runnable only by Caffe2.
opset_version (int, default 14): The version of the
`default (ai.onnx) opset <https://github.com/onnx/onnx/blob/master/docs/Operators.md>`_
to target. Must be >= 7 and <= 16.
do_constant_folding (bool, default True): Apply the constant-folding optimization.
Constant-folding will replace some of the ops that have all constant inputs
with pre-computed constant nodes.
dynamic_axes (dict[string, dict[int, string]] or dict[string, list(int)], default empty dict):
By default the exported model will have the shapes of all input and output tensors
set to exactly match those given in ``args``. To specify axes of tensors as
dynamic (i.e. known only at run-time), set ``dynamic_axes`` to a dict with schema:
* KEY (str): an input or output name. Each name must also be provided in ``input_names`` or
``output_names``.
* VALUE (dict or list): If a dict, keys are axis indices and values are axis names. If a
list, each element is an axis index.
For example::
class SumModule(torch.nn.Module):
def forward(self, x):
return torch.sum(x, dim=1)
torch.onnx.export(
SumModule(),
(torch.ones(2, 2),),
"onnx.pb",
input_names=["x"],
output_names=["sum"]
)
Produces::
input {
name: "x"
...
shape {
dim {
dim_value: 2 # axis 0
}
dim {
dim_value: 2 # axis 1
...
output {
name: "sum"
...
shape {
dim {
dim_value: 2 # axis 0
...
While::
torch.onnx.export(
SumModule(),
(torch.ones(2, 2),),
"onnx.pb",
input_names=["x"],
output_names=["sum"],
dynamic_axes={
# dict value: manually named axes
"x": {0: "my_custom_axis_name"},
# list value: automatic names
"sum": [0],
}
)
Produces::
input {
name: "x"
...
shape {
dim {
dim_param: "my_custom_axis_name" # axis 0
}
dim {
dim_value: 2 # axis 1
...
output {
name: "sum"
...
shape {
dim {
dim_param: "sum_dynamic_axes_1" # axis 0
...
keep_initializers_as_inputs (bool, default None): If True, all the
initializers (typically corresponding to parameters) in the
exported graph will also be added as inputs to the graph. If False,
then initializers are not added as inputs to the graph, and only
the non-parameter inputs are added as inputs.
This may allow for better optimizations (e.g. constant folding) by
backends/runtimes.
If ``opset_version < 9``, initializers MUST be part of graph
inputs and this argument will be ignored and the behavior will be
equivalent to setting this argument to True.
If None, then the behavior is chosen automatically as follows:
* If ``operator_export_type=OperatorExportTypes.ONNX``, the behavior is equivalent
to setting this argument to False.
* Else, the behavior is equivalent to setting this argument to True.
custom_opsets (dict[str, int], default empty dict): A dict with schema:
* KEY (str): opset domain name
* VALUE (int): opset version
If a custom opset is referenced by ``model`` but not mentioned in this dictionary,
the opset version is set to 1. Only custom opset domain name and version should be
indicated through this argument.
export_modules_as_functions (bool or set of type of nn.Module, default False): Flag to enable
exporting all ``nn.Module`` forward calls as local functions in ONNX. Or a set to indicate the
particular types of modules to export as local functions in ONNX.
This feature requires ``opset_version`` >= 15, otherwise the export will fail. This is because
``opset_version`` < 15 implies IR version < 8, which means no local function support.
Module variables will be exported as function attributes. There are two categories of function
attributes.
1. Annotated attributes: class variables that have type annotations via
`PEP 526-style <https://www.python.org/dev/peps/pep-0526/#class-and-instance-variable-annotations>`_
will be exported as attributes.
Annotated attributes are not used inside the subgraph of ONNX local function because
they are not created by PyTorch JIT tracing, but they may be used by consumers
to determine whether or not to replace the function with a particular fused kernel.
2. Inferred attributes: variables that are used by operators inside the module. Attribute names
will have prefix "inferred::". This is to differentiate from predefined attributes retrieved from
python module annotations. Inferred attributes are used inside the subgraph of ONNX local function.
* ``False`` (default): export ``nn.Module`` forward calls as fine grained nodes.
* ``True``: export all ``nn.Module`` forward calls as local function nodes.
* Set of type of nn.Module: export ``nn.Module`` forward calls as local function nodes,
only if the type of the ``nn.Module`` is found in the set.
Raises:
:class:`torch.onnx.errors.CheckerError`: If the ONNX checker detects an invalid ONNX graph.
:class:`torch.onnx.errors.UnsupportedOperatorError`: If the ONNX graph cannot be exported because it
uses an operator that is not supported by the exporter.
:class:`torch.onnx.errors.OnnxExporterError`: Other errors that can occur during export.
All errors are subclasses of :class:`errors.OnnxExporterError`.
"""
_export(
model,
args,
f,
export_params,
verbose,
training,
input_names,
output_names,
operator_export_type=operator_export_type,
opset_version=opset_version,
do_constant_folding=do_constant_folding,
dynamic_axes=dynamic_axes,
keep_initializers_as_inputs=keep_initializers_as_inputs,
custom_opsets=custom_opsets,
export_modules_as_functions=export_modules_as_functions,
)
@_beartype.beartype
def _is_constant_tensor_list(node):
if node.kind() != "prim::Constant":
return False
output_type = node.output().type()
if output_type.isSubtypeOf(_C.ListType.ofTensors()):
return True
if output_type.isSubtypeOf(_C.ListType(_C.OptionalType.ofTensor())):
return True
# ONNX can't handle constants that are lists of tensors, which can
# get generated in constant prop. So we split them back into prim::ListConstructs
@_beartype.beartype
def _split_tensor_list_constants(g, block):
for node in block.nodes():
for subblock in node.blocks():
_split_tensor_list_constants(g, subblock)
if _is_constant_tensor_list(node):
inputs = []
for val in node.output().toIValue():
input = g.insertConstant(val)
input.node().moveBefore(node)
input.node().copyMetadata(node)
inputs.append(input)
lc = (
g.create("prim::ListConstruct", inputs)
.insertBefore(node)
.output()
.setType(_C.ListType.ofTensors())
)
lc.node().copyMetadata(node)
node.output().replaceAllUsesWith(lc)
@_beartype.beartype
def _optimize_graph(
graph: _C.Graph,
operator_export_type: _C_onnx.OperatorExportTypes,
_disable_torch_constant_prop: bool = False,
fixed_batch_size: bool = False,
params_dict=None,
dynamic_axes=None,
input_names=None,
module=None,
):
if params_dict is None:
params_dict = {}
# Inline everything
_C._jit_pass_inline(graph)
# Remove fork/wait nodes
_C._jit_pass_inline_fork_wait(graph)
_C._jit_pass_lint(graph)
_C._jit_pass_onnx_autograd_function_process(graph)
_C._jit_pass_lower_all_tuples(graph)
# we now record some ops like ones/zeros
# into a trace where we previously recorded constants.
# use constant prop to maintain our current level of onnx support
# without implementing symbolics for all of them
if _disable_torch_constant_prop is False:
_C._jit_pass_constant_propagation(graph)
_split_tensor_list_constants(graph, graph)
# run dce to eliminate dead parts of the graph that might have been
# left behind by things like symbolic_override
_C._jit_pass_dce(graph)
_C._jit_pass_lint(graph)
# CSE should improve perf when Autocast is used with disabled cache
# Autocast is disabled due to a limitation on tracer as described at https://github.com/pytorch/pytorch/issues/84092
# Must run before _C._jit_pass_erase_number_types to prevent type substitution
if _C._jit_pass_cse(graph):
_C._jit_pass_onnx_lint(graph)
_C._jit_pass_canonicalize_graph_fuser_ops(graph)
_C._jit_pass_lint(graph)
_C._jit_pass_peephole(graph, True)
_C._jit_pass_fuse_addmm(graph)
_C._jit_pass_lint(graph)
_C._jit_pass_peephole(graph, True)
_C._jit_pass_lower_all_tuples(graph)
# in _jit_pass_onnx, symbolic functions are called for each node for conversion.
# However, there are nodes that cannot be converted without additional context.
# For example, the number of outputs from split (and whether it is static or dynamic) is unknown
# until the point where it is unpacked by listUnpack node.
# This pass does a preprocess, and prepares the nodes such that enough context can be received
# by the symbolic function.
_C._jit_pass_onnx_remove_inplace_ops_for_onnx(graph, module)
_C._jit_pass_onnx_preprocess(graph)
# onnx does not support tuples, so try to remove them
_C._jit_pass_lint(graph)
# onnx only supports tensors, but 1 / 2 = 0.5 and tensor(1) / tensor(2) = 0
_C._jit_pass_prepare_division_for_onnx(graph)
_C._jit_pass_onnx_remove_print(graph)
_C._jit_pass_onnx_preprocess_caffe2(graph)
symbolic_helper._quantized_ops.clear()
# Unpack quantized weights for conv and linear ops and insert into graph.
_C._jit_pass_onnx_unpack_quantized_weights(
graph, params_dict, symbolic_helper.is_caffe2_aten_fallback()
)
if symbolic_helper.is_caffe2_aten_fallback():
# Insert permutes before and after each conv op to ensure correct order.
_C._jit_pass_onnx_quantization_insert_permutes(graph, params_dict)
# Find consecutive permutes that are no-ops and remove them.
_C._jit_pass_custom_pattern_based_rewrite_graph(
textwrap.dedent(
"""\
graph(%Pi):
%Pq = quantized::nhwc2nchw(%Pi)
%Pr = quantized::nchw2nhwc(%Pq)
return (%Pr)"""
),
textwrap.dedent(
"""\
graph(%Ri):
return (%Ri)"""
),
graph,
)
# onnx only supports tensors, so we turn all out number types into tensors
_C._jit_pass_erase_number_types(graph)
if GLOBALS.onnx_shape_inference:
input_names = [] if input_names is None else input_names
dynamic_axes = {} if dynamic_axes is None else dynamic_axes
_C._jit_pass_onnx_set_dynamic_input_shape(graph, dynamic_axes, input_names)
_C._jit_pass_onnx_lint(graph)
graph = _C._jit_pass_onnx(graph, operator_export_type)
_C._jit_pass_onnx_lint(graph)
_C._jit_pass_lint(graph)
_C._jit_pass_onnx_scalar_type_analysis(
graph, True, GLOBALS.export_onnx_opset_version
)
_C._jit_pass_lint(graph)
_C._jit_pass_onnx_peephole(
graph, GLOBALS.export_onnx_opset_version, fixed_batch_size
)
_C._jit_pass_lint(graph)
# graph is not a valid jit graph anymore because types have been replaced
# (e.g. int with Tensor), so it now contains operators that don't actually
# exist. We can't run normal dead code elimination because it'd fail trying
# to look up if an operator has side effects, but we can run a dead code
# elimination variant that doesn't need to look up if an op has side effects.
_C._jit_pass_dce_allow_deleting_nodes_with_side_effects(graph)
_C._jit_pass_lint(graph)
graph = _C._jit_pass_canonicalize(graph)
_C._jit_pass_lint(graph)
if GLOBALS.onnx_shape_inference:
_C._jit_pass_onnx_graph_shape_type_inference(
graph, params_dict, GLOBALS.export_onnx_opset_version
)
return graph
@_beartype.beartype
def warn_on_static_input_change(input_states):
"""Warns that changes to input dictionaries and strings won't take effect in the traced ONNX graph.
We accept dictionaries and strings as ONNX inputs, but they should be only for
configuration use. we detect here if these inputs are modified, and if so we warn
the user that the changes won't take effect in the traced ONNX graph.
"""
for input, traced_input in zip(input_states[0], input_states[1]):
if isinstance(input, dict):
if list(input.keys()) != list(traced_input.keys()):
warning = (
"We detected that you are modifying a dictionary that is an input to your "
"model. "
"Note that dictionaries are allowed as inputs in ONNX but they should be "
"handled with care. "
"Usages of dictionaries is not recommended, and should not be used except "
"for configuration use. "
"Also note that the order and values of the keys must remain the same. "
)
warnings.warn(warning)
elif isinstance(input, str):
if input != traced_input:
warning = (
"The model seems to have string inputs/outputs. "
"Note that strings will not appear as inputs/outputs of the ONNX graph. "
)
warnings.warn(warning)
@_beartype.beartype
def _resolve_args_by_export_type(arg_name, arg_value, operator_export_type):
"""Resolves the arguments that are ignored when export_type != operator_export_type.ONNX."""
if (
operator_export_type is not operator_export_type.ONNX
and _C_onnx._CAFFE2_ATEN_FALLBACK
):
if arg_value is True:
warnings.warn(
f"'{arg_name}' can be set to True only when 'operator_export_type' is "
"`ONNX`. Since 'operator_export_type' is not set to 'ONNX', "
f"'{arg_name}' argument will be ignored."
)
arg_value = False
return arg_value
@_beartype.beartype
def _decide_keep_init_as_input(
keep_initializers_as_inputs: Optional[bool],
operator_export_type: _C_onnx.OperatorExportTypes,
opset_version: int,
):
"""Decides whether the initializers in the graph should be listed as ONNX graph inputs.
This method encapsulates the logic to decide whether the initializers in the graph
should be listed as ONNX graph inputs (i.e., whether to choose ONNX IR v3 or v4).
If keep_initializers_as_inputs is not specified (None), then we decide whether to keep
initializers as graph inputs (val_keep_init_as_ip) based on export type. If export type
is ONNX, then do not keep initializers as input (val_keep_init_as_ip=False). For all other
export types keep initializers as input (val_keep_init_as_ip=True).
If keep_initializers_as_inputs is specified, then respect it. Unless opset version <= 8,
in which case it must be ignored because for opset version <= 8, all initializers MUST be
part of graph input (only ONNX IR v3 is allowed), i.e. val_keep_init_as_ip=True.
Special handling is needed for opset version 8 or lower, because irrespective
of user input for keep_initializers_as_inputs, the graph must follow ONNX IR v3
semantics, i.e. all initializers must be listed as ONNX graph input.
"""
if opset_version < 9:
if keep_initializers_as_inputs is False:
warnings.warn(
"Setting 'keep_initializers_as_inputs=False' for opset version"
"8 or lower would lead to an invalid ONNX graph. Therefore, "
"'keep_initializers_as_inputs=False' is ignored during export."
"Exported model will have initializers as graph inputs (compliant "
" to ONNX IR v3)."
)
return True # i.e. True == initializers are part of graph input (ONNX IR v3)
val_keep_init_as_ip = (
True if keep_initializers_as_inputs is None else keep_initializers_as_inputs
)
if (
keep_initializers_as_inputs is None
and operator_export_type is _C_onnx.OperatorExportTypes.ONNX
):
val_keep_init_as_ip = False
return val_keep_init_as_ip
@_beartype.beartype
def _decide_add_node_names(add_node_names, operator_export_type):
return _resolve_args_by_export_type(
"add_node_names", add_node_names, operator_export_type
)
@_beartype.beartype
def _decide_constant_folding(do_constant_folding, operator_export_type, training):
do_constant_folding = _resolve_args_by_export_type(
"do_constant_folding", do_constant_folding, operator_export_type
)
if do_constant_folding and (
training is not None and training is not _C_onnx.TrainingMode.EVAL
):
warnings.warn(
"It is recommended that constant folding be turned off ('do_constant_folding=False') "
"when exporting the model in training-amenable mode, i.e. with 'training=TrainingMode.TRAIN' "
"or 'training=TrainingMode.PRESERVE' (when model is in training mode). Otherwise, some "
"learnable model parameters may not translate correctly in the exported ONNX model "
"because constant folding mutates model parameters. Please consider "
"turning off constant folding or setting the training=TrainingMode.EVAL."
)
return do_constant_folding
@_beartype.beartype
def _signature(model) -> inspect.Signature:
should_be_callable = getattr(model, "forward", model)
if callable(should_be_callable):
return inspect.signature(should_be_callable)
raise ValueError("model has no forward method and is not callable")
@_beartype.beartype
def _decide_input_format(model, args):
try:
sig = _signature(model)
except ValueError as e:
warnings.warn(f"{e}, skipping _decide_input_format")
return args
try:
ordered_list_keys = list(sig.parameters.keys())
if ordered_list_keys[0] == "self":
ordered_list_keys = ordered_list_keys[1:]
args_dict: Dict = {}
if isinstance(args, list):
args_list = args
elif isinstance(args, tuple):
args_list = list(args)
else:
args_list = [args]
if isinstance(args_list[-1], dict):
args_dict = args_list[-1]
args_list = args_list[:-1]
n_nonkeyword = len(args_list)
for optional_arg in ordered_list_keys[n_nonkeyword:]:
if optional_arg in args_dict:
args_list.append(args_dict[optional_arg])
# Check if this arg has a default value
else:
param = sig.parameters[optional_arg]
if param.default != param.empty:
args_list.append(param.default)
args = args_list if isinstance(args, list) else tuple(args_list)
# Cases of models with no input args
except IndexError:
warnings.warn("No input args, skipping _decide_input_format")
except Exception as e:
warnings.warn(f"Skipping _decide_input_format\n {e.args[0]}")
return args
@_beartype.beartype
def _trace(func, args, operator_export_type, return_outs=False):
# Special case for common case of passing a single Tensor
if isinstance(args, torch.Tensor):
args = (args,)
trace_graph, torch_out, inputs_states = torch.jit._get_trace_graph(
func,
args,
strict=False,
_force_outplace=False,
_return_inputs_states=True,
)
warn_on_static_input_change(inputs_states)
trace_graph = _optimize_graph(trace_graph, operator_export_type, params_dict={})
if return_outs:
return trace_graph, torch_out
return trace_graph
@_beartype.beartype
def _trace_and_get_graph_from_model(model, args):
# A basic sanity check: make sure the state_dict keys are the same
# before and after running the model. Fail fast!
orig_state_dict_keys = torch.jit._unique_state_dict(model).keys()
# Disable Autocast cache because it replaces kernel's weight and bias
# by (undesired) constants.
# No perf impact for when there are reused weights since https://github.com/pytorch/pytorch/pull/85665
# TODO: https://github.com/pytorch/pytorch/issues/84092
prev_autocast_cache_enabled = torch.is_autocast_cache_enabled()
torch.set_autocast_cache_enabled(False)
trace_graph, torch_out, inputs_states = torch.jit._get_trace_graph(
model,
args,
strict=False,
_force_outplace=False,
_return_inputs_states=True,
)
torch.set_autocast_cache_enabled(prev_autocast_cache_enabled)
warn_on_static_input_change(inputs_states)
if orig_state_dict_keys != torch.jit._unique_state_dict(model).keys():
raise RuntimeError(
"state_dict changed after running the tracer; "
"something weird is happening in your model!"
)
return trace_graph, torch_out
@_beartype.beartype
def _get_param_count_list(method_graph, args_params):
param_count_list = []
for input_, arg_params_ in zip(method_graph.inputs(), args_params):
if "PackedParams" in str(input_.type()):
in_vars, _ = torch.jit._flatten(arg_params_)
param_count_list.append(len(in_vars))
else:
param_count_list.append(arg_params_ is not None)
return param_count_list
@_beartype.beartype
def _check_flatten_did_not_remove(original, jit_flattened):
"""torch.jit._flatten removes None. Check if it did so in this case."""
@_beartype.beartype
def flatten(x):
if isinstance(x, (list, tuple)):
for inner in x:
yield from flatten(inner)
elif isinstance(x, dict):
for inner in x.values():
yield from flatten(inner)
else:
yield x
flattened_with_none = list(flatten(original))
num_none = len(flattened_with_none) - len(jit_flattened)
assert num_none >= 0
if num_none:
raise ValueError(
f"args contained {num_none} None's after flattening. "
"When exporting a ScriptModule or ScriptFunction, no args may "
"be None because that breaks type propagation."
)
def _create_jit_graph(
model: Union[torch.nn.Module, torch.jit.ScriptFunction], args: Sequence[Any]
) -> Tuple[_C.Graph, List[_C.IValue], Optional[Any], Optional[_C.ScriptModule]]:
if isinstance(model, (torch.jit.ScriptFunction, torch.jit.ScriptModule)):
flattened_args = tuple(torch.jit._flatten(tuple(args))[0])
_check_flatten_did_not_remove(args, flattened_args)
torch_out = None
if isinstance(model, torch.jit.ScriptModule):
try:
graph = model.forward.graph
except AttributeError as e:
raise RuntimeError("'forward' method must be a script method") from e
_C._jit_pass_onnx_function_substitution(graph)
freezed_module = _C._freeze_module(
cast(_C.ScriptModule, model._c), preserveParameters=True
)
module, params = _C._jit_onnx_list_model_parameters(freezed_module)
method_graph = module._get_method("forward").graph
args_params = tuple(args) + tuple(params)
param_count_list = _get_param_count_list(method_graph, args_params)
in_vars, _ = torch.jit._flatten(args_params)
graph = _C._propagate_and_assign_input_shapes(
method_graph, tuple(in_vars), param_count_list, False, False
)
return graph, params, torch_out, module
# torch.jit.ScriptFunction
params = []
graph = model.graph
_C._jit_pass_onnx_function_substitution(graph)
param_count_list = _get_param_count_list(graph, args)
graph = _C._propagate_and_assign_input_shapes(
graph, flattened_args, param_count_list, False, False
)
return graph, params, torch_out, None
graph, torch_out = _trace_and_get_graph_from_model(model, args)
_C._jit_pass_onnx_lint(graph)
state_dict = torch.jit._unique_state_dict(model)
params = list(state_dict.values())
graph_inputs = list(graph.inputs())
user_input_num = len(graph_inputs) - len(state_dict)
param_names = list(state_dict.keys())
for i, inp in enumerate(graph_inputs):
if i >= user_input_num:
inp.setDebugName(param_names[i - user_input_num])
_C._jit_pass_onnx_function_substitution(graph)
return graph, params, torch_out, None
@_beartype.beartype
def _get_named_param_dict(graph, params):
input_and_param_names = [val.debugName() for val in graph.inputs()]
param_names = input_and_param_names[len(input_and_param_names) - len(params) :]
_params_dict = dict(zip(param_names, params))
return _params_dict
@_beartype.beartype
def _get_example_outputs(model, args):
input_args = copy.deepcopy(args)
input_kwargs = {}
if input_args and isinstance(input_args[-1], dict):
input_kwargs = input_args[-1]
input_args = input_args[:-1]
example_outputs = model(*input_args, **input_kwargs)
if isinstance(example_outputs, list):
example_outputs = [example_outputs]
elif not isinstance(example_outputs, tuple):
example_outputs = (example_outputs,)
return example_outputs
_qtype_vtype_map = {
torch.quint8: torch.uint8,
torch.qint8: torch.int8,
torch.qint32: torch.int32,
torch.quint4x2: torch.int8,
}
@_beartype.beartype
def unpack_quantized_tensor(value, cast_onnx_accepted=True):
if isinstance(value, torch.Tensor) and value.dtype in _qtype_vtype_map:
q_value_dequantize = value.dequantize()
q_scale = (
torch.tensor(value.q_scale(), dtype=torch.double)
if cast_onnx_accepted
else torch.tensor(value.q_scale(), dtype=torch.float32)
)
q_zero_point = (
torch.tensor(value.q_zero_point(), dtype=torch.int64)
if cast_onnx_accepted
else torch.tensor(value.q_zero_point(), dtype=_qtype_vtype_map[value.dtype])
)
q_value = q_value_dequantize / q_scale + q_zero_point
q_value = q_value.to(dtype=_qtype_vtype_map[value.dtype])
return q_value, q_scale, q_zero_point
else:
return (value,)
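# Minimal usage sketch for `unpack_quantized_tensor`; the tensor contents,
# scale and zero point below are arbitrary example values.
def _example_unpack_quantized_tensor():
    x = torch.quantize_per_tensor(
        torch.tensor([0.1, 0.2, 0.3]), scale=0.05, zero_point=0, dtype=torch.quint8
    )
    # The quantized tensor is unpacked into its integer representation plus
    # scale and zero point, so ONNX can represent it as three plain tensors.
    q_value, q_scale, q_zero_point = unpack_quantized_tensor(x)
    return q_value, q_scale, q_zero_point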
@_beartype.beartype
def _pre_trace_quant_model(model, args):
r"""Returns `torch.jit.trace(model, args)` if model is quantized. Otherwise do nothing and return
original model.
This is due to https://github.com/pytorch/pytorch/issues/75761.
"""
if any(
hasattr(m, "_packed_params") for m in getattr(model, "modules", lambda: [])()
) or any(getattr(arg, "is_quantized", False) for arg in args):
return torch.jit.trace(model, args)
return model
@_beartype.beartype
def _model_to_graph(
model,
args,
verbose=False,
input_names=None,
output_names=None,
operator_export_type=_C_onnx.OperatorExportTypes.ONNX,
do_constant_folding=True,
_disable_torch_constant_prop=False,
fixed_batch_size=False,
training=_C_onnx.TrainingMode.EVAL,
dynamic_axes=None,
) -> Tuple[
_C.Graph,
Dict[str, torch.Tensor],
Optional[
Union[
torch.Tensor,
Tuple[torch.Tensor, ...],
List[torch.Tensor],
Dict[str, torch.Tensor],
Any, # Can be nested tuples etc.
]
],
]:
"""Converts model into an ONNX graph.
Returns:
graph: A TorchScript IR Graph with ONNX nodes.
params_dict: Dict from input param name to param value.
torch_out: The output tensors resulting from the trace of ``model``.
If ``model`` is a :class:`torch.jit.ScriptModule` or :class:`torch.jit.ScriptFunction`,
this will be None, since we are not doing any tracing.
"""
# TODO: can we simplify this to always return a tuple of Tensor or None?
# Special case for common case of passing a single Tensor
if isinstance(args, (torch.Tensor, int, float, bool)):
args = (args,)
model = _pre_trace_quant_model(model, args)
graph, params, torch_out, module = _create_jit_graph(model, args)
params_dict = _get_named_param_dict(graph, params)
try:
graph = _optimize_graph(
graph,
operator_export_type,
_disable_torch_constant_prop=_disable_torch_constant_prop,
fixed_batch_size=fixed_batch_size,
params_dict=params_dict,
dynamic_axes=dynamic_axes,
input_names=input_names,
module=module,
)
except Exception as e:
torch.onnx.log("Torch IR graph at exception: ", graph)
raise
is_script = isinstance(model, (torch.jit.ScriptFunction, torch.jit.ScriptModule))
if is_script:
example_outputs = _get_example_outputs(model, args)
example_outputs_final = ()
for example_output in example_outputs:
example_outputs_final += unpack_quantized_tensor(example_output)
out_vars, desc = torch.jit._flatten(example_outputs_final)
_C._jit_pass_onnx_assign_output_shape(
graph, out_vars, desc, GLOBALS.onnx_shape_inference, is_script
)
# NB: ONNX requires complete information about output types, which might be
# erased by some optimizations, so we need to set it explicitly again.
else:
if not isinstance(torch_out, (list, tuple)):
output_wrapped = [torch_out]
else:
output_wrapped = torch_out # type: ignore[assignment]
output_tensors, out_desc = torch.jit._flatten(tuple(output_wrapped))
# assign_output_shape pass is not compatible with quantized outputs.
# Quantized outputs are flattened to 3 values in ONNX, while packed as
# single value in PyTorch.
if not any(getattr(out, "is_quantized", False) for out in output_tensors):
_C._jit_pass_onnx_assign_output_shape(
graph,
output_tensors,
out_desc,
GLOBALS.onnx_shape_inference,
is_script,
)
_set_input_and_output_names(graph, input_names, output_names)
params_dict = _get_named_param_dict(graph, params)
if training is None or training == _C_onnx.TrainingMode.EVAL:
params_dict = _C._jit_pass_onnx_eval_peephole(graph, params_dict)
if (
do_constant_folding
and GLOBALS.export_onnx_opset_version
>= _constants.ONNX_CONSTANT_FOLDING_MIN_OPSET
):
params_dict = _C._jit_pass_onnx_constant_fold(
graph, params_dict, GLOBALS.export_onnx_opset_version
)
_C._jit_pass_dce_allow_deleting_nodes_with_side_effects(graph)
if GLOBALS.onnx_shape_inference:
_C._jit_pass_onnx_graph_shape_type_inference(
graph, params_dict, GLOBALS.export_onnx_opset_version
)
params_dict = _C._jit_pass_onnx_eliminate_unused_items(graph, params_dict)
# For ONNX opset < 9, constants only have three data types: float16, float, double.
# In this pass transform constants of other data types to float/double + cast operator.
if GLOBALS.export_onnx_opset_version < 9:
_C._jit_pass_onnx_cast_all_constant_to_floating(graph)
params_dict = _C._jit_pass_filter_non_tensor_arguments(params_dict)
_C._jit_decay_packed_param_input_types(graph)
# If output names lack a proper name and are identified only by their unique id,
# give them a legible name for debugging purposes
_apply_friendly_debug_names(graph, params_dict)
return graph, params_dict, torch_out
@_beartype.beartype
def export_to_pretty_string(
model,
args,
export_params=True,
verbose=False,
training=_C_onnx.TrainingMode.EVAL,
input_names=None,
output_names=None,
operator_export_type=_C_onnx.OperatorExportTypes.ONNX,
export_type=None,
google_printer=False,
opset_version=None,
keep_initializers_as_inputs=None,
custom_opsets=None,
add_node_names=True,
do_constant_folding=True,
dynamic_axes=None,
):
r"""
Similar to :func:`export`, but returns a text representation of the ONNX
model. Only differences in args listed below. All other args are the same
as :func:`export`.
Args:
add_node_names (bool, default True): Whether or not to set
NodeProto.name. This makes no difference unless
``google_printer=True``.
google_printer (bool, default False): If False, will return a custom,
compact representation of the model. If True will return the
protobuf's `Message::DebugString()`, which is more verbose.
Returns:
A UTF-8 str containing a human-readable representation of the ONNX model.
"""
if opset_version is None:
opset_version = _constants.ONNX_DEFAULT_OPSET
if custom_opsets is None:
custom_opsets = {}
GLOBALS.export_onnx_opset_version = opset_version
GLOBALS.operator_export_type = operator_export_type
with exporter_context(model, training, verbose):
val_keep_init_as_ip = _decide_keep_init_as_input(
keep_initializers_as_inputs, operator_export_type, opset_version
)
val_add_node_names = _decide_add_node_names(
add_node_names, operator_export_type
)
val_do_constant_folding = _decide_constant_folding(
do_constant_folding, operator_export_type, training
)
args = _decide_input_format(model, args)
graph, params_dict, torch_out = _model_to_graph(
model,
args,
verbose,
input_names,
output_names,
operator_export_type,
val_do_constant_folding,
training=training,
dynamic_axes=dynamic_axes,
)
return graph._pretty_print_onnx( # type: ignore[attr-defined]
params_dict,
opset_version,
False,
operator_export_type,
google_printer,
val_keep_init_as_ip,
custom_opsets,
val_add_node_names,
)
@_beartype.beartype
def unconvertible_ops(
model,
args,
training: _C_onnx.TrainingMode = _C_onnx.TrainingMode.EVAL,
opset_version: Optional[int] = None,
) -> Tuple[_C.Graph, List[str]]:
"""Returns an approximated list of all ops that are not yet supported by :mod:`torch.onnx`.
The list is approximated because some ops may be removed during the conversion
process and don't need to be converted. Some other ops may have partial support
that will fail conversion with particular inputs. Please open a Github Issue
for op support requests.
Args:
model: Same as the `model` parameter in :func:`torch.onnx.export`.
args: Same as the `args` parameter in :func:`torch.onnx.export`.
training: Same as the `training` parameter in :func:`torch.onnx.export`.
opset_version: Same as the `opset_version` parameter in :func:`torch.onnx.export`.
Returns:
The JIT graph and a list of unconvertible ops in the format of "domain::op".
"""
opset_version = opset_version or _constants.ONNX_DEFAULT_OPSET
GLOBALS.export_onnx_opset_version = opset_version
try:
with exporter_context(model, training, verbose=False):
# Create a mostly clean JIT graph that contains the plain aten and
# other ops we can check with the symbolic registry.
# NOTE: We don't want to actually convert any ops to ONNX or run any
# symbolic functions because there is a higher chance that a pass
# fails or an unconvertible op messes up the graph during ONNX conversion.
# This way we can always generate a list just by looking at the names
# of the ops in the graph.
args = _decide_input_format(model, args)
model = _pre_trace_quant_model(model, args)
graph, _, _, module = _create_jit_graph(model, args)
_C._jit_pass_inline(graph)
_C._jit_pass_onnx_remove_inplace_ops_for_onnx(graph, module)
_C._jit_pass_erase_number_types(graph)
_C._jit_pass_dce_allow_deleting_nodes_with_side_effects(graph)
except Exception as e:
raise errors.OnnxExporterError(
"Failed to discover unconvertible ops because of errors during the JIT graph "
"generation process."
) from e
unsupported_ops = []
for node in graph.nodes():
domain_op = node.kind()
if domain_op.startswith("onnx::") or domain_op.startswith("prim::"):
# We consider onnx and prim ops as supported ops, even though some "prim"
# ops are not implemented as symbolic functions, because they may be
# eliminated in the conversion passes. Users may still see errors caused
# by prim ops even though they don't show up in the list.
continue
if not registration.registry.is_registered_op(
domain_op.rstrip("_"), opset_version
):
# We consider all registered ops supported, even though some of them are
# only partially supported, because there is not yet a good way to check
# if an op is fully supported.
# TODO(justinchuby): Create a way to check if an op is fully supported.
unsupported_ops.append(domain_op)
return graph, unsupported_ops
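# Minimal usage sketch for `unconvertible_ops`; the toy model and opset are
# arbitrary, and the returned list is expected to be empty for these standard layers.
def _example_unconvertible_ops():
    model = torch.nn.Sequential(torch.nn.Linear(4, 2), torch.nn.ReLU())
    args = (torch.randn(1, 4),)
    _, unsupported = unconvertible_ops(model, args, opset_version=13)
    return unsupported  # e.g. [] when every aten op has a registered symbolic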
@_beartype.beartype
def _setup_trace_module_map(
model: Union[torch.nn.Module, torch.jit.ScriptModule],
export_modules_as_functions: Union[bool, Collection[Type[torch.nn.Module]]],
) -> Set[str]:
def __register_attribute_hook():
attr_name = "_onnx_attrs"
def _track_module_attributes_forward_pre_hook(module, input):
setattr(module, attr_name, _get_module_attributes(module))
def _track_module_attributes_forward_hook(module, input, output):
tracing_state = _C._get_tracing_state()
if not tracing_state:
return
graph = tracing_state.graph()
onnx_attrs = {}
if hasattr(module, attr_name):
onnx_attrs = getattr(module, attr_name)
delattr(module, attr_name)
_C._jit_pass_onnx_track_scope_attributes(graph, onnx_attrs)
for m in model.modules():
m.register_forward_hook(_track_module_attributes_forward_hook)
m.register_forward_pre_hook(_track_module_attributes_forward_pre_hook)
def _unqualified_variable_name(qualified_name: str) -> str:
"""
Parse qualified variable name and return the unqualified version.
Pure numeric atoms are considered inadequate, so this function will look past them,
and start from the first non-numeric atom.
Example:
>>> _unqualified_variable_name('__main__.Foo.bar')
'bar'
>>> _unqualified_variable_name('__main__.Foo.bar.0')
'bar.0'
"""
name_atoms = qualified_name.split(".")
for i, atom in reversed(list(enumerate(name_atoms))):
if not atom.isnumeric():
return ".".join(name_atoms[i:])
return qualified_name
trace_module_map = {
_m: torch._C._jit_onnx_create_full_scope_name(
torch.typename(type(_m)), _unqualified_variable_name(_n)
)
for _n, _m in model.named_modules()
}
torch.jit._trace._trace_module_map = trace_module_map
if isinstance(export_modules_as_functions, bool) and export_modules_as_functions:
module_typenames = {torch.typename(type(module)) for module in trace_module_map}
elif isinstance(export_modules_as_functions, set) and export_modules_as_functions:
def _find_typename(v):
if isinstance(v, type):
return torch.typename(v)
else:
raise RuntimeError(
"Only type of the `nn.Module` should be "
"passed in the set for argument `export_modules_as_functions`. "
"Got `%s`." % (type(v).__name__)
)
module_typenames = {_find_typename(v) for v in export_modules_as_functions}
else:
module_typenames = set()
if module_typenames:
__register_attribute_hook()
return module_typenames
@_beartype.beartype
def _reset_trace_module_map():
torch.jit._trace._trace_module_map = None
_C._jit_pass_onnx_clear_scope_records()
@_beartype.beartype
def _get_module_attributes(module):
annotations = typing.get_type_hints(type(module))
base_m_annotations = typing.get_type_hints(torch.nn.Module)
[annotations.pop(k, None) for k in base_m_annotations]
return {k: getattr(module, k) for k in annotations}
@_beartype.beartype
def _export(
model,
args,
f,
export_params=True,
verbose=False,
training=_C_onnx.TrainingMode.EVAL,
input_names=None,
output_names=None,
operator_export_type=_C_onnx.OperatorExportTypes.ONNX,
export_type=None,
opset_version=None,
do_constant_folding=True,
dynamic_axes=None,
keep_initializers_as_inputs=None,
fixed_batch_size=False,
custom_opsets=None,
add_node_names=True,
onnx_shape_inference=True,
export_modules_as_functions=False,
):
assert GLOBALS.in_onnx_export is False
if export_type is None:
export_type = _exporter_states.ExportTypes.PROTOBUF_FILE
if isinstance(model, torch.nn.DataParallel):
raise ValueError(
"torch.nn.DataParallel is not supported by ONNX "
"exporter, please use 'attribute' module to "
"unwrap model from torch.nn.DataParallel. Try "
"torch.onnx.export(model.module, ...)"
)
GLOBALS.onnx_shape_inference = onnx_shape_inference
if opset_version is None:
opset_version = _constants.ONNX_DEFAULT_OPSET
if export_modules_as_functions and opset_version < 15:
raise ValueError(
"`export_modules_as_functions` is not supported for `opset_version` < 15. "
"This is because `opset_version` < 15 implies IR version < 8, which means "
"no local function support. "
)
if not operator_export_type:
if _C_onnx._CAFFE2_ATEN_FALLBACK:
operator_export_type = _C_onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK
else:
operator_export_type = _C_onnx.OperatorExportTypes.ONNX
# By default, training=TrainingMode.EVAL,
# which is good because running a model in training mode could result in
# internal buffers getting updated, dropout getting applied, etc.
# If you really know what you're doing, you can turn
# training=TrainingMode.TRAINING or training=TrainingMode.PRESERVE,
# (to preserve whatever the original training mode was.)
GLOBALS.export_onnx_opset_version = opset_version
GLOBALS.operator_export_type = operator_export_type
try:
GLOBALS.in_onnx_export = True
module_typenames_to_export_as_functions: Set[str] = set()
if isinstance(model, (torch.nn.Module, torch.jit.ScriptModule)):
module_typenames_to_export_as_functions = _setup_trace_module_map(
model, export_modules_as_functions
)
with exporter_context(model, training, verbose):
val_keep_init_as_ip = _decide_keep_init_as_input(
keep_initializers_as_inputs,
operator_export_type,
opset_version,
)
val_add_node_names = _decide_add_node_names(
add_node_names, operator_export_type
)
val_do_constant_folding = _decide_constant_folding(
do_constant_folding, operator_export_type, training
)
# Normally f can be a file-like object, but for large models, the external data format requires a
# valid `model_file_location`. Code in export.cpp will enforce this.
if isinstance(f, str):
model_file_location = f
else:
model_file_location = ""
args = _decide_input_format(model, args)
if dynamic_axes is None:
dynamic_axes = {}
_validate_dynamic_axes(dynamic_axes, model, input_names, output_names)
graph, params_dict, torch_out = _model_to_graph(
model,
args,
verbose,
input_names,
output_names,
operator_export_type,
val_do_constant_folding,
fixed_batch_size=fixed_batch_size,
training=training,
dynamic_axes=dynamic_axes,
)
# TODO: Don't allocate an in-memory string for the protobuf
defer_weight_export = (
export_type is not _exporter_states.ExportTypes.PROTOBUF_FILE
)
if custom_opsets is None:
custom_opsets = {}
_C._jit_pass_dce_allow_deleting_nodes_with_side_effects(graph)
node_attr_to_name = {} # type: ignore[var-annotated]
if module_typenames_to_export_as_functions:
# NOTE: cannot call DCE after this pass. DCE will remove function definition nodes.
node_attr_to_name = _C._jit_pass_onnx_function_extraction(
graph,
module_typenames_to_export_as_functions,
list(params_dict.keys()),
)
params_dict = _C._jit_pass_onnx_deduplicate_initializers( # type: ignore[assignment]
graph, params_dict, getattr(model, "training", False) # type: ignore[arg-type]
)
_C._jit_pass_onnx_assign_scoped_names_for_node_and_value(graph)
if export_params:
(
proto,
export_map,
val_use_external_data_format,
node_names,
) = graph._export_onnx( # type: ignore[attr-defined]
params_dict,
opset_version,
dynamic_axes,
defer_weight_export,
operator_export_type,
not verbose,
val_keep_init_as_ip,
custom_opsets,
val_add_node_names,
model_file_location,
node_attr_to_name,
)
else:
(
proto,
export_map,
val_use_external_data_format,
node_names,
) = graph._export_onnx( # type: ignore[attr-defined]
{},
opset_version,
dynamic_axes,
False,
operator_export_type,
not verbose,
val_keep_init_as_ip,
custom_opsets,
val_add_node_names,
model_file_location,
node_attr_to_name,
)
# insert function_proto into model_proto.
#proto = onnx_proto_utils._add_onnxscript_fn(
# proto,
# custom_opsets,
#)
if verbose:
torch.onnx.log("Exported graph: ", graph)
# onnx_proto_utils._export_file(proto, f, export_type, export_map)
if export_type == _exporter_states.ExportTypes.PROTOBUF_FILE:
assert len(export_map) == 0
with torch.serialization._open_file_like(f, "wb") as opened_file:
opened_file.write(proto)
elif export_type in [
_exporter_states.ExportTypes.ZIP_ARCHIVE,
_exporter_states.ExportTypes.COMPRESSED_ZIP_ARCHIVE,
]:
compression = (
zipfile.ZIP_DEFLATED
if export_type
== _exporter_states.ExportTypes.COMPRESSED_ZIP_ARCHIVE
else zipfile.ZIP_STORED
)
with zipfile.ZipFile(f, "w", compression=compression) as z:
z.writestr(_constants.ONNX_ARCHIVE_MODEL_PROTO_NAME, proto)
for k, v in export_map.items():
z.writestr(k, v)
elif export_type == _exporter_states.ExportTypes.DIRECTORY:
if os.path.exists(f):
assert os.path.isdir(f)
else:
os.makedirs(f)
model_proto_file = os.path.join(
f, _constants.ONNX_ARCHIVE_MODEL_PROTO_NAME
)
with torch.serialization._open_file_like(
model_proto_file, "wb"
) as opened_file:
opened_file.write(proto)
for k, v in export_map.items():
weight_proto_file = os.path.join(f, k)
with torch.serialization._open_file_like(
weight_proto_file, "wb"
) as opened_file:
opened_file.write(v)
else:
raise RuntimeError("Unknown export type")
# The ONNX checker only works for ONNX graph. So if the operator_export_type is not ONNX,
# we can skip this check.
# If large model format export is enabled, proto will only contain data location instead of
# raw data and _check_onnx_proto() will fail because it can only handle the raw ONNX proto
# string in memory.
if (operator_export_type is _C_onnx.OperatorExportTypes.ONNX) and (
not val_use_external_data_format
):
try:
_C._check_onnx_proto(proto, full_check=True)
except RuntimeError as e:
raise errors.CheckerError(e) from e
finally:
assert GLOBALS.in_onnx_export
GLOBALS.in_onnx_export = False
_reset_trace_module_map()
return torch_out
@_beartype.beartype
def _apply_friendly_debug_names(graph, params):
for n in graph.nodes():
for v in n.inputs():
old_name = v.debugName()
if old_name != str(v.unique()):
continue
new_name = f"{n.kind()}_{v.unique()}"
v.setDebugName(new_name)
if old_name in params:
params[new_name] = params.pop(old_name)
@_beartype.beartype
def _set_input_and_output_names(graph, input_names, output_names):
@_beartype.beartype
def set_names(node_list, name_list, descriptor):
if name_list is None:
return
if len(name_list) > len(node_list):
raise RuntimeError(
"number of %s names provided (%d) exceeded number of %ss (%d)"
% (descriptor, len(name_list), descriptor, len(node_list))
)
# Mark if the output node DebugName is set before.
output_node_set = set()
for i, (name, node) in enumerate(zip(name_list, node_list)):
# Duplicated output node, insert onnx::Identity to avoid setting the same DebugName after setDebugName().
if descriptor == "output":
if node in output_node_set:
identity_node = graph.create("onnx::Identity")
identity_node.insertAfter(node.node())
identity_node.addInput(node)
identity_node.output().setType(node.type())
graph.return_node().replaceInput(i, identity_node.output())
node = identity_node.output()
output_node_set.add(node)
if node.debugName() != name:
node.setDebugName(name)
set_names(list(graph.inputs()), input_names, "input")
set_names(list(graph.outputs()), output_names, "output")
@_beartype.beartype
def _run_symbolic_method(g, op_name, symbolic_fn, args):
r"""
This trampoline function gets invoked for every symbolic method
call from C++.
"""
try:
return symbolic_fn(g, *args)
except TypeError as e:
# Handle the specific case where we didn't successfully dispatch
# to symbolic_fn. Otherwise, the backtrace will have the clues
# you need.
e.args = (f"{e.args[0]} (occurred when translating {op_name})",)
raise
@_beartype.beartype
def _add_block(node: _C.Node) -> _C.Block:
return node.addBlock()
@_beartype.beartype
def _add_input_to_block(block: _C.Block):
return block.addInputToBlock() # type: ignore[attr-defined]
@_beartype.beartype
def _add_output_to_block(block: _C.Block, value: _C.Value) -> int:
return block.registerOutput(value)
@_beartype.beartype
def _should_aten_fallback(
name: str, opset_version: int, operator_export_type: _C_onnx.OperatorExportTypes
):
# For BUILD_CAFFE2=0 builds, if domain=="aten" and operator_export_type==ONNX_ATEN,
# an aten::ATen operator is created regardless of symbolics existence
# For BUILD_CAFFE2=1, the same applies only if there is no symbolic available
is_exportable_aten_op = registration.registry.is_registered_op(name, opset_version)
is_onnx_aten_export = operator_export_type == _C_onnx.OperatorExportTypes.ONNX_ATEN
is_aten_fallback_export = (
operator_export_type == _C_onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK
)
is_caffe2_build = _C_onnx._CAFFE2_ATEN_FALLBACK
if not name.startswith("aten::"):
return False
if is_caffe2_build:
if (
is_onnx_aten_export or is_aten_fallback_export
) and not is_exportable_aten_op:
return True
else:
if is_onnx_aten_export or (
is_aten_fallback_export and not is_exportable_aten_op
):
return True
return False
@_beartype.beartype
def _need_symbolic_context(symbolic_fn: Callable) -> bool:
"""Checks if the first argument to symbolic_fn is annotated as type `torch.onnx.SymbolicContext`."""
params = tuple(inspect.signature(symbolic_fn).parameters.values())
# When the annotation is postpone-evaluated, the annotation is a string
# and not a type. We need to use get_type_hints to get the real type.
if not params:
return False
first_param_name = params[0].name
type_hints = typing.get_type_hints(symbolic_fn)
if first_param_name not in type_hints:
return False
param_type = type_hints[first_param_name]
return issubclass(param_type, _exporter_states.SymbolicContext)
@_beartype.beartype
def _symbolic_context_handler(symbolic_fn: Callable) -> Callable:
"""Decorator that provides the symbolic context to the symbolic function if needed."""
if _need_symbolic_context(symbolic_fn):
# TODO(justinchuby): Update the module name of GraphContext when it is public
warnings.warn(
"The first argument to symbolic functions is deprecated in 1.13 and will be "
"removed in the future. Please treat the first argument (g) as a GraphContext "
"and use context information from the object instead.",
category=FutureWarning,
)
def wrapper(graph_context: jit_utils.GraphContext, *args, **kwargs):
symbolic_context = _exporter_states.SymbolicContext(
params_dict=graph_context.params_dict,
env=graph_context.env,
cur_node=graph_context.original_node,
onnx_block=graph_context.block,
)
return symbolic_fn(symbolic_context, graph_context, *args, **kwargs)
return wrapper
return symbolic_fn
@_beartype.beartype
def _get_aten_op_overload_name(n: _C.Node) -> str:
# Returns the `overload_name` attribute of ATen ops on non-Caffe2 builds
schema = n.schema()
if not schema.startswith("aten::") or symbolic_helper.is_caffe2_aten_fallback():
return ""
return _C.parse_schema(schema).overload_name
@_beartype.beartype
def _run_symbolic_function(
graph: _C.Graph,
block: _C.Block,
node: _C.Node,
inputs: Any,
env: Dict[_C.Value, _C.Value],
operator_export_type=_C_onnx.OperatorExportTypes.ONNX,
) -> Optional[Union[_C.Value, Sequence[Optional[_C.Value]]]]:
"""Runs a symbolic function.
The function is used in C++ to export the node to ONNX.
Returns:
A single or a tuple of Values.
None when the node gets cloned as is into the new graph.
"""
opset_version = GLOBALS.export_onnx_opset_version
# See Note [Export inplace]
node_kind = node.kind()
if node_kind.endswith("_"):
# Treat relu_ -> relu; add_ -> add etc.
ns_op_name = node_kind[:-1]
else:
ns_op_name = node_kind
namespace, op_name = jit_utils.parse_node_kind(ns_op_name)
graph_context = jit_utils.GraphContext(
graph=graph,
block=block,
opset=opset_version,
original_node=node,
params_dict=_params_dict,
env=env,
)
# Direct ATen export requested
if _should_aten_fallback(ns_op_name, opset_version, operator_export_type):
attrs = {
k + "_" + node.kindOf(k)[0]: symbolic_helper._node_get(node, k)
for k in node.attributeNames()
}
outputs = node.outputsSize()
attrs["outputs"] = outputs
return graph_context.at(
op_name,
*inputs,
overload_name=_get_aten_op_overload_name(node),
**attrs,
)
try:
# Caffe2-specific: Quantized op symbolics are registered for opset 9 only.
if symbolic_helper.is_caffe2_aten_fallback() and opset_version == 9:
symbolic_caffe2.register_quantized_ops("caffe2", opset_version)
if namespace == "quantized" and symbolic_helper.is_caffe2_aten_fallback():
domain = "caffe2"
else:
domain = namespace
symbolic_function_name = f"{domain}::{op_name}"
symbolic_function_group = registration.registry.get_function_group(
symbolic_function_name
)
if symbolic_function_group is not None:
symbolic_fn = symbolic_function_group.get(opset_version)
if symbolic_fn is not None:
# TODO Wrap almost identical attrs assignment or comment the difference.
attrs = {
k: symbolic_helper._node_get(node, k) for k in node.attributeNames()
}
return symbolic_fn(graph_context, *inputs, **attrs)
attrs = {
k + "_" + node.kindOf(k)[0]: symbolic_helper._node_get(node, k)
for k in node.attributeNames()
}
if namespace == "onnx":
# Clone node to trigger ONNX shape inference
return graph_context.op(op_name, *inputs, **attrs, outputs=node.outputsSize()) # type: ignore[attr-defined]
raise errors.UnsupportedOperatorError(
symbolic_function_name,
opset_version,
symbolic_function_group.get_min_supported()
if symbolic_function_group
else None,
)
except RuntimeError:
if operator_export_type == _C_onnx.OperatorExportTypes.ONNX_FALLTHROUGH:
return None
elif (
operator_export_type == _C_onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK
and not symbolic_helper.is_caffe2_aten_fallback()
):
# Emit ATen op for non-Caffe2 builds when `operator_export_type==ONNX_ATEN_FALLBACK`
attrs = {
k + "_" + node.kindOf(k)[0]: symbolic_helper._node_get(node, k)
for k in node.attributeNames()
}
return graph_context.at(
op_name,
*inputs,
overload_name=_get_aten_op_overload_name(node),
**attrs,
)
raise
except TypeError as e:
# Handle the specific case where we didn't successfully dispatch.
# Otherwise, the backtrace will have the clues you need.
e.args = (f"{e.args[0]} \n(Occurred when translating {op_name}).",)
raise
@_beartype.beartype
def _verify_custom_op_name(symbolic_name: str):
if not re.match(r"^[a-zA-Z0-9-_]+::[a-zA-Z-_]+[a-zA-Z0-9-_]*$", symbolic_name):
raise errors.OnnxExporterError(
f"Failed to register operator {symbolic_name}. "
"The symbolic name must match the format domain::name, "
"and should start with a letter and contain only "
"alphanumerical characters"
)
ns, _ = jit_utils.parse_node_kind(symbolic_name)
if ns == "onnx":
raise ValueError(
f"Failed to register operator {symbolic_name}. {ns} domain cannot be modified."
)
@_beartype.beartype
def register_custom_op_symbolic(
symbolic_name: str,
symbolic_fn: Callable,
opset_version: int,
):
"""Registers a symbolic function for a custom operator.
When the user registers symbolic for custom/contrib ops,
it is highly recommended to add shape inference for that operator via setType API,
otherwise the exported graph may have incorrect shape inference in some extreme cases.
An example of setType is `test_aten_embedding_2` in `test_operators.py`.
See "Custom Operators" in the module documentation for an example usage.
Args:
symbolic_name (str): The name of the custom operator in "<domain>::<op>"
format.
symbolic_fn (Callable): A function that takes in the ONNX graph and
the input arguments to the current operator, and returns new
operator nodes to add to the graph.
opset_version (int): The ONNX opset version in which to register.
"""
if symbolic_name.startswith("::"):
symbolic_name = f"aten{symbolic_name}"
_verify_custom_op_name(symbolic_name)
registration.custom_onnx_symbolic(
symbolic_name,
opset_version,
decorate=[
_symbolic_context_handler,
],
)(symbolic_fn)
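# Minimal usage sketch for `register_custom_op_symbolic`. The operator name
# "mydomain::custom_relu" is hypothetical; the symbolic simply lowers it to a
# standard ONNX Relu node.
def _example_register_custom_relu():
    def custom_relu_symbolic(g, input):
        return g.op("Relu", input)

    register_custom_op_symbolic("mydomain::custom_relu", custom_relu_symbolic, opset_version=13)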
@_beartype.beartype
def unregister_custom_op_symbolic(symbolic_name: str, opset_version: int):
"""Unregisters ``symbolic_name``.
See "Custom Operators" in the module documentation for an example usage.
Args:
symbolic_name (str): The name of the custom operator in "<domain>::<op>"
format.
opset_version (int): The ONNX opset version in which to unregister.
"""
if symbolic_name.startswith("::"):
symbolic_name = f"aten{symbolic_name}"
_verify_custom_op_name(symbolic_name)
registration.registry.unregister(symbolic_name, opset_version)
@_beartype.beartype
def _validate_dynamic_axes(dynamic_axes, model, input_names, output_names):
"""Ensures the dynamic axes argument follows the expected format."""
if len(dynamic_axes) == 0:
return
if hasattr(model, "graph"):
# Extracting set of valid input/output names that shall be used for dynamic_axes
if (input_names is None) or len(input_names) == 0:
input_names = [x.debugName() for x in model.graph.inputs()]
if (output_names is None) or len(output_names) == 0:
output_names = [y.debugName() for y in model.graph.outputs()]
valid_names = set((input_names or []) + (output_names or []))
# If dynamic axes are provided as a list rather than dictionary, they should
# first get converted to a dictionary in expected format. If desired axes names
# are not provided for dynamic axes, automatic names shall be generated for
# provided dynamic axes of specified input/output
for key, value in dynamic_axes.items():
if key not in valid_names:
warnings.warn(
f"Provided key {key} for dynamic axes is not a valid input/output name"
)
if isinstance(value, list):
warnings.warn(
"No names were found for specified dynamic axes of provided input. "
f"Automatically generated names will be applied to each dynamic axis of input {key}"
)
value_dict = {}
for i, x in enumerate(value):
if not isinstance(x, int):
raise ValueError(
"The type of axis index is expected to be an integer"
)
if x in value_dict:
warnings.warn(
f"Duplicate dynamic axis index {x} was provided for input {key}."
)
else:
value_dict[x] = str(key) + "_dynamic_axes_" + str(i + 1)
dynamic_axes[key] = value_dict
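# Minimal usage sketch for `_validate_dynamic_axes`: list-style axes are
# rewritten in place into the dict form with auto-generated names. The
# "input"/"output" names below are arbitrary placeholders.
def _example_validate_dynamic_axes():
    axes = {"input": [0, 2]}
    _validate_dynamic_axes(axes, model=object(), input_names=["input"], output_names=["output"])
    return axes  # {"input": {0: "input_dynamic_axes_1", 2: "input_dynamic_axes_2"}}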
| tao_pytorch_backend-main | third_party/onnx/utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| tao_pytorch_backend-main | nvidia_tao_pytorch/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This script is responsible for copying spec files to the folder indicated by the user."""
from os import makedirs, listdir
from os.path import abspath, dirname, exists, join
import shutil
from omegaconf import MISSING
from dataclasses import dataclass
from nvidia_tao_pytorch.core.hydra.hydra_runner import hydra_runner
from nvidia_tao_pytorch.core.exp_manager import minimal_exp_manager, MinimalExpManagerConfig
from nvidia_tao_pytorch.core.loggers.api_logging import Status, StatusLogger
from nvidia_tao_pytorch.core.tlt_logging import logging, obfuscate_logs
# Usage example - for ASR:
# ==============
"""
python download_specs \
exp_manager.explicit_log_dir=/results/speech_to_text/download_specs/ \
source_data_dir=/home/tkornuta/workspace/tlt-pytorch/asr/experiment_specs \
workflow=asr \
target_data_dir=/specs/asr \
"""
@dataclass
class DefaultConfig:
"""Structured config for the spec download script."""
# Minimalistic experiment manager.
exp_manager: MinimalExpManagerConfig = MinimalExpManagerConfig(task_name="download_specs")
# Input folder where the default configs are.
source_data_dir: str = MISSING
# Output folder path
target_data_dir: str = MISSING
# Name of the workflow.
workflow: str = MISSING
spec_path = dirname(abspath(__file__))
@hydra_runner(config_path=spec_path, config_name="download_specs", schema=DefaultConfig)
def main(cfg: DefaultConfig) -> None:
"""Script to run dataset convert.
Args:
cfg (OmegaConf.DictConf): Hydra parsed config object.
"""
# Obfuscate logs.
obfuscate_logs(cfg)
# Initialize experiment manager (simple logging).
log_dir = minimal_exp_manager(cfg.exp_manager)
status_logger = StatusLogger(
filename=join(log_dir, "status.json"),
append=True
)
status_logger.write(
message=f"Downloading default specs for {cfg.workflow}",
status_level=Status.STARTED
)
if exists(cfg.target_data_dir):
if listdir(cfg.target_data_dir):
raise FileExistsError(f"The target directory `{cfg.target_data_dir}` is not empty!\n"
"In order to avoid overriding the existing spec files please point to a different folder.")
else:
# Create a new folder.
makedirs(cfg.target_data_dir, exist_ok=True)
# Copy files from source to target.
names = [item for item in listdir(cfg.source_data_dir) if item.endswith("yaml")]
for name in names:
srcname = join(cfg.source_data_dir, name)
dstname = join(cfg.target_data_dir, name)
shutil.copy2(srcname, dstname)
# Inform where the logs are.
logging.info(f"Default specification files for {cfg.workflow} downloaded to '{cfg.target_data_dir}'")
status_message = f"Default specification files for {cfg.workflow} downloaded."\
f"List of files: {names}"
status_logger.write(
message=status_message,
status_level=Status.SUCCESS
)
logging.info(f"Experiment logs saved to '{log_dir}'")
if __name__ == "__main__":
main()
| tao_pytorch_backend-main | nvidia_tao_pytorch/core/download_specs.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Version string for the utils module of TLT."""
__version__ = "0.0.1-dev"
| tao_pytorch_backend-main | nvidia_tao_pytorch/core/version.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""File containing functions used to encrypt and decrypt encrypted checkpoints."""
import pickle
from io import BytesIO
from eff.codec import decrypt_stream, encrypt_stream, get_random_encryption
def decrypt_checkpoint(checkpoint, key):
"""Function to decrypt checkpoint using the supplied key.
Args:
checkpoint(dict): Pytorch checkpoint file.
key (str): String key to decrypt the checkpoint file.
Returns:
checkpoint (dict): Checkpoint containing decrypted checkpoint file.
"""
# Get the encrypted model state dict.
encrypted_state_stream = BytesIO(checkpoint["state_dict"])
# Get encryption_type
encryption_type = checkpoint["state_dict_encrypted"]
# Decrypt it to binary stream.
decrypted_state_stream = BytesIO()
decrypt_stream(
input_stream=encrypted_state_stream, output_stream=decrypted_state_stream,
passphrase=key, encryption=encryption_type
)
# Restore state dict from binary stream.
deserialized_state_dict = pickle.loads(decrypted_state_stream.getvalue())
# Overwrite the state.
checkpoint["state_dict"] = deserialized_state_dict
return checkpoint
def encrypt_checkpoint(checkpoint, key):
"""Function to encrypt checkpoint with supplied key.
Args:
checkpoint (dict): Dictionary checkpoint for pytorch.
key (str): Key to encode the checkpoint state dict.
Returns:
checkpoint (dict): Encrypted checkpoint file.
"""
# Get model state dict.
state_dict = checkpoint["state_dict"]
# Get a random encryption type
encryption_type = get_random_encryption()
checkpoint["state_dict_encrypted"] = encryption_type
# Serialize it.
serialized_state_stream = BytesIO(pickle.dumps(state_dict))
# Encrypt it to binary steam.
encrypted_state_stream = BytesIO()
encrypt_stream(
input_stream=serialized_state_stream, output_stream=encrypted_state_stream,
passphrase=key, encryption=encryption_type
)
# Overwrite the state.
checkpoint["state_dict"] = encrypted_state_stream.getvalue()
return checkpoint
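# Minimal usage sketch: encrypting and decrypting a toy checkpoint in memory.
# Assumes the `eff` package is installed; the key and tensor values are
# arbitrary placeholders.
def _example_checkpoint_encryption_roundtrip():
    import torch

    checkpoint = {"state_dict": {"layer.weight": torch.zeros(2, 2)}}
    encrypted = encrypt_checkpoint(checkpoint, key="my-secret-key")
    # "state_dict" is now an encrypted byte string and the encryption type is recorded.
    restored = decrypt_checkpoint(encrypted, key="my-secret-key")
    assert torch.equal(restored["state_dict"]["layer.weight"], torch.zeros(2, 2))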
| tao_pytorch_backend-main | nvidia_tao_pytorch/core/checkpoint_encryption.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Root module for common utilities that could be used across all nlp models."""
| tao_pytorch_backend-main | nvidia_tao_pytorch/core/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common utilities that could be used across all nlp models."""
import importlib
import os
import pkgutil
import subprocess
import shlex
import sys
from time import time
import nvidia_tao_pytorch.core.download_specs as download_specs
from nvidia_tao_pytorch.core.telemetry.nvml_utils import get_device_details
from nvidia_tao_pytorch.core.telemetry.telemetry import send_telemetry_data
from nvidia_tao_pytorch.core.tlt_logging import logging
def get_subtasks(package):
"""Get supported subtasks for a given task.
This function lists out the tasks in the .scripts folder.
Returns:
subtasks (dict): Dictionary of files.
"""
module_path = package.__path__
modules = {}
# Collect modules dynamically.
for _, task, is_package in pkgutil.walk_packages(module_path):
if is_package:
continue
module_name = package.__name__ + '.' + task
module_details = {
"module_name": module_name,
"runner_path": os.path.abspath(importlib.import_module(module_name).__file__),
}
modules[task] = module_details
# Add new command for copying specs.
modules["download_specs"] = {
"source_data_dir": os.path.join(os.path.dirname(module_path[0]), "experiment_specs"),
"runner_path": os.path.abspath(importlib.import_module(download_specs.__name__).__file__),
"workflow": package.__name__.split(".")[0]
}
return modules
def launch(parser, subtasks, network=None):
"""CLI function that executes subtasks.
Args:
parser: Created parser object for a given task.
subtasks: list of subtasks for a given task.
network (str): name of the network running.
"""
# Subtasks for a given model.
parser.add_argument(
'subtask', default='train', choices=subtasks.keys(), help="Subtask for a given task/model.",
)
# Add standard TLT arguments.
parser.add_argument(
"-r",
"--results_dir",
help="Path to a folder where the experiment outputs should be written. (DEFAULT: ./)",
required=True,
)
parser.add_argument("-k", "--key", help="User specific encoding key to save or load a .tlt model.")
parser.add_argument("-e", "--experiment_spec_file", help="Path to the experiment spec file.", default=None)
parser.add_argument(
"-g", "--gpus", help="Number of GPUs to use. The default value is 1.", default=1,
type=int
)
parser.add_argument(
"-m", "--resume_model_weights", help="Path to a pre-trained model or model to continue training."
)
parser.add_argument(
"-o", "--output_specs_dir", help="Path to a target folder where experiment spec files will be downloaded."
)
# Parse the arguments.
args, unknown_args = parser.parse_known_args()
process_passed = True
script_args = ""
# Process spec file for all commands except the one for getting spec files ;)
if args.subtask not in ["download_specs", "pitch_stats"]:
# Make sure the user provides spec file.
if args.experiment_spec_file is None:
print("ERROR: The subtask `{}` requires the following argument: -e/--experiment_spec_file".format(args.subtask))
exit(1)
# Make sure the file exists!
if not os.path.exists(args.experiment_spec_file):
print("ERROR: The indicated experiment spec file `{}` doesn't exist!".format(args.experiment_spec_file))
exit(1)
# Split spec file_path into config path and config name.
path, name = os.path.split(args.experiment_spec_file)
if path != '':
script_args += " --config-path " + os.path.realpath(path)
script_args += " --config-name " + name
# And add other params AFTERWARDS!
# Translate results dir to exp_manager - optional for now! (as 4/6 workflows weren't adapted!)
script_args += " exp_manager.explicit_log_dir=" + args.results_dir
# Set gpus - override only in the case of tasks that use GPUs (assumption for now!).
if args.subtask in ["train", "finetune", "evaluate"]:
script_args += " trainer.gpus=" + str(args.gpus)
# Don't resume for 1) data_convert and 2) train from scratch.
if args.subtask in ["finetune", "evaluate", "infer", "infer_onnx", "export"]:
if args.resume_model_weights is not None:
script_args += " restore_from=" + args.resume_model_weights
# Add encryption key.
if args.subtask in ["train", "finetune", "evaluate", "infer", "infer_onnx", "export"]:
if args.key is not None:
script_args += " encryption_key=" + args.key
if args.subtask == "download_specs":
# Set target_data_dir
if args.output_specs_dir is not None:
script_args += " target_data_dir=" + args.output_specs_dir
else:
print("ERROR: The subtask `{}` requires the following argument: -o/--output_specs_dir".format(args.subtask))
exit(1)
# Set the remaining params.
script_args += " source_data_dir=" + subtasks[args.subtask]["source_data_dir"]
script_args += " workflow=" + subtasks[args.subtask]["workflow"]
# Find relevant module and pass args.
script = subtasks[args.subtask]["runner_path"]
# Pass unknown args to call
unknown_args_as_str = " ".join(unknown_args)
# Create a system call.
call = "python " + script + script_args + " " + unknown_args_as_str
start = time()
try:
# Run the script.
subprocess.check_call(
shlex.split(call),
shell=False,
stdout=sys.stdout,
stderr=sys.stdout
)
except (KeyboardInterrupt, SystemExit) as e:
logging.info("Command was interrupted due to ", e)
process_passed = True
except subprocess.CalledProcessError as e:
if e.output is not None:
logging.info(e.output)
process_passed = False
end = time()
time_lapsed = int(end - start)
try:
gpu_data = list()
for device in get_device_details():
gpu_data.append(device.get_config())
logging.info("Sending telemetry data.")
send_telemetry_data(
network,
args.subtask,
gpu_data,
num_gpus=args.gpus,
time_lapsed=time_lapsed,
pass_status=process_passed
)
except Exception as e:
logging.warning("Telemetry data couldn't be sent, but the command ran successfully.")
logging.warning(f"[Error]: {e}")
pass
if not process_passed:
logging.warning("Execution status: FAIL")
exit(1) # returning non zero return code from the process.
logging.info("Execution status: PASS")
| tao_pytorch_backend-main | nvidia_tao_pytorch/core/entrypoint.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common utilities that could be used in create_tokenizer script."""
from typing import Optional
from omegaconf import MISSING
from dataclasses import dataclass
__all__ = ["TokenizerConfig"]
@dataclass
class TokenizerConfig:
"""Tokenizer config for use in create_tokenizer script."""
# tokenizer type: "spe" or "wpe"
tokenizer_type: str = MISSING
# spe type if tokenizer_type == "spe"
# choose from ['bpe', 'unigram', 'char', 'word']
spe_type: str = MISSING
# spe character coverage, defaults to 1.0
spe_character_coverage: Optional[float] = 1.0
# flag for lower case, defaults to True
lower_case: Optional[bool] = True
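# Minimal usage sketch: building the structured config, e.g. for a SentencePiece
# unigram tokenizer. The field values are arbitrary examples.
def _example_tokenizer_config():
    from omegaconf import OmegaConf

    cfg = OmegaConf.structured(TokenizerConfig(tokenizer_type="spe", spe_type="unigram"))
    return cfg  # spe_character_coverage and lower_case keep their defaults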
| tao_pytorch_backend-main | nvidia_tao_pytorch/core/tokenizer.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""File containing the config for early-stopping."""
from dataclasses import dataclass
@dataclass
class EarlyStoppingConfig:
"""EarlyStopping config."""
monitor: str = ""
patience: int = 3
min_delta: float = 0.0
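# Minimal sketch of one plausible way this config could be turned into a
# PyTorch Lightning callback; the monitored metric name is an arbitrary example.
def _example_build_early_stopping():
    from pytorch_lightning.callbacks import EarlyStopping

    cfg = EarlyStoppingConfig(monitor="val_loss", patience=5, min_delta=0.001)
    return EarlyStopping(monitor=cfg.monitor, patience=cfg.patience, min_delta=cfg.min_delta)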
| tao_pytorch_backend-main | nvidia_tao_pytorch/core/early_stopping.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common utilties for PTL."""
import glob
import os
# Define 1MB for filesize calculation.
MB = 1 << 20
def get_num_trainable_elements(model):
"""Get number of trainable model elements.
Args:
model (ptl.module): Pytorch lightning module.
Return:
size (int): Number of elements in the model.
"""
return sum(p.numel() for p in model.parameters() if p.requires_grad)
def get_model_file_size(model_path):
"""Get the size of the model.
Args:
model_path (str): UNIX path to the model.
Returns:
file_size (float): File size in MB.
"""
if not os.path.exists(model_path):
raise FileNotFoundError(f"Model file wasn't found at {model_path}")
file_size = os.path.getsize(model_path) / MB
return file_size
def update_results_dir(cfg, task):
"""Update global results_dir based on task.results_dir.
This function should be called at the beginning of a pipeline script.
Args:
cfg (Hydra config): Config object loaded by Hydra
task (str): TAO pipeline name
Returns:
Updated cfg
"""
if cfg[task]['results_dir']:
cfg.results_dir = cfg[task]['results_dir']
else:
cfg.results_dir = os.path.join(cfg.results_dir, task)
cfg[task]['results_dir'] = cfg.results_dir
print(f"{task.capitalize()} results will be saved at: {cfg.results_dir}")
return cfg
def get_last_generated_file(folder_path, extension="txt"):
"""Returns the last generated file in the folder.
Args:
folder_path (str): path to the folder
extension (str): file extension
"""
files = glob.glob(os.path.join(folder_path, f"*.{extension}"))
return max(files, key=os.path.getmtime, default=None)
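# Minimal usage sketch for the helpers above; the model and folder are
# placeholders, any nn.Module and directory work.
def _example_utilities():
    import torch

    model = torch.nn.Linear(10, 4)
    num_params = get_num_trainable_elements(model)  # 10 * 4 weights + 4 biases = 44
    latest_log = get_last_generated_file("/tmp", extension="log")  # None if no *.log files exist
    return num_params, latest_log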
| tao_pytorch_backend-main | nvidia_tao_pytorch/core/utilities.py |
#!/usr/bin/env python3
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Abstraction over the EFF/cookbooks machinery.
We only deal in file handles through the CLI.
We want to be able to open an archive and pass around its internal file handles pointing to the
pieces in /tmp without reading and rewriting the contents in this process.
"""
import tempfile
from nvidia_tao_pytorch.core.cookbooks.kenlm_cookbook import KenLMCookbook
from eff.core import Archive
import shutil
import os
INTERMEDIATE = "intermediate.kenlm_intermediate"
VOCABULARY = "vocabulary.txt"
BINARY = "kenlm.arpa"
class KenLMArchive():
"""Implementation of a KenLM EFF archive model file."""
@classmethod
def dumpLMA(cls, archive_path, key, binary=None, vocabulary=None, intermediate=None):
"""Create eff archive for language model.
Args:
archive_path (str): Path to the eff archive.
key (str): Passphrase used to encrypt the archive.
binary (str): Path to the binary file.
vocabulary (str): Path to the vocabulary file.
intermediate (str): Path to the intermediate file.
"""
cb = KenLMCookbook()
cb.set_passphrase(key)
if intermediate:
with open(intermediate, "rb") as f:
c = f.read()
cb.add_class_file_content(name=INTERMEDIATE,
content=c,
description="KenLM intermediate format")
if vocabulary:
with open(vocabulary, "r") as f:
c = f.read()
cb.add_class_file_content(name=VOCABULARY,
content=c,
description="KenLM vocabulary file")
with open(binary, "rb") as f:
c = f.read()
cb.add_class_file_content(name=BINARY,
content=c,
description="KenLM binary .arpa",
binary=True)
cb.save(archive_path)
return cls(archive_path, key)
def __init__(self, archive_path, key):
"""Setup archive_path and key.
Args:
archive_path (str): Path to the KenLM archive object.
key (str): Passphrase to encrypt the model file.
"""
self._archive_path = archive_path
self._key = key
def open(self):
"""
Restore a KenLM archive model file.
The underlying EFF restore extracts artifacts into temporary locations;
this method copies them into a temp directory controlled by this object.
"""
self._tmpdir = tempfile.TemporaryDirectory()
current_dir = self._tmpdir.name
with Archive.restore_from(
restore_path=self._archive_path,
passphrase=self._key
) as archive:
for artifact in archive['artifacts'].keys():
fname = archive['artifacts'][artifact].get_handle()
shutil.copyfile(fname, os.path.join(current_dir, artifact))
def get_tmpdir(self):
"""Return path to the temp directory.
Returns:
str: Path to the temp directory.
"""
return self._tmpdir.name
def get_intermediate(self):
"""Return path of INTERMEDIATE file."""
return os.path.join(self._tmpdir.name, INTERMEDIATE)
def get_vocabulary(self):
"""Return path of VOCABULARY file."""
return os.path.join(self._tmpdir.name, VOCABULARY)
def get_binary(self):
"""Return path of BINARY file."""
return os.path.join(self._tmpdir.name, BINARY)
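# Minimal usage sketch: packaging a prebuilt KenLM model into an EFF archive and
# reading it back. Assumes the `eff` package is installed; all paths and the key
# are placeholder values.
def _example_kenlm_archive():
    archive = KenLMArchive.dumpLMA(
        "/tmp/lm.tlt",
        key="my-secret-key",
        binary="/tmp/kenlm.arpa",
        vocabulary="/tmp/vocabulary.txt",
    )
    archive.open()
    return archive.get_binary()  # path of the extracted binary inside the temp dir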
| tao_pytorch_backend-main | nvidia_tao_pytorch/core/kenlma.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TAO common path utils used across all apps."""
import os
def expand_path(path):
"""Function to resolve a path.
This function takes in a path and returns the absolute path of that path after
expanding the tilde (~) character to the user's home directory to prevent path
traversal vulnerability.
Args:
path (str): The path to expand and make absolute.
Returns:
str: The absolute path with expanded tilde.
"""
return os.path.abspath(os.path.expanduser(path))
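# Minimal usage sketch: "~" is expanded and relative segments are resolved, so
# the returned path is always absolute.
def _example_expand_path():
    return expand_path("~/results/../results/model.tlt")  # e.g. /home/<user>/results/model.tlt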
| tao_pytorch_backend-main | nvidia_tao_pytorch/core/path_utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""File containing a minimalistic experiment manager for TLT scripts not using the PTL trainer/NeMo ExpManager."""
import os
from pathlib import Path
from dataclasses import dataclass
from nvidia_tao_pytorch.core.tlt_logging import logging
@dataclass
class MinimalExpManagerConfig:
"""Minimalistic config enabling setup of log dir and task name."""
task_name: str = "task"
# Log dir creation parameters
explicit_log_dir: str = "./"
def minimal_exp_manager(cfg: MinimalExpManagerConfig) -> Path:
"""Minimalistic experiment manager - sets logging and returns log_dir.
Args:
cfg (MinimalExpManagerConfig): Omega config dictionary for Minimal experiment manager.
Returns:
log_dir(str): String path to the logs.
"""
if Path(cfg.explicit_log_dir).exists():
logging.warning(f"Exp_manager is logging to `{cfg.explicit_log_dir}``, but it already exists.")
# Shortcut.
log_dir = Path(cfg.explicit_log_dir)
# Create the logging directory if it does not exist
os.makedirs(log_dir, exist_ok=True)
# Set output log file.
logging.add_file_handler(os.path.join(log_dir, cfg.task_name + ".log"))
return log_dir
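# Minimal usage sketch: setting up file logging for a standalone script. The
# task name and directory are arbitrary placeholders.
def _example_minimal_exp_manager():
    cfg = MinimalExpManagerConfig(task_name="dataset_convert", explicit_log_dir="/tmp/results")
    log_dir = minimal_exp_manager(cfg)
    logging.info("Logs will be written under %s", log_dir)
    return log_dir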
| tao_pytorch_backend-main | nvidia_tao_pytorch/core/exp_manager.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common utilities useful for logging."""
import logging as _logging
import os
from random import randint
from omegaconf import OmegaConf
_logging.basicConfig(
format='[%(asctime)s - TAO Toolkit - %(name)s - %(levelname)s] %(message)s',
level='INFO'
)
logging = _logging
def obfuscate_logs(cfg):
"""Function obfuscates encryption key if exposed/present in args.
Args:
cfg (OmegaConf.DictConfig): Config that may contain an encryption key to be obfuscated before logging.
"""
# First obfuscate what is being shown as configuration.
config = OmegaConf.to_container(cfg)
if "encryption_key" in config.keys():
config["encryption_key"] = '*' * randint(3, 10)
# Show the experiment configuration.
logging.info(f'Experiment configuration:\n{OmegaConf.to_yaml(config)}')
def remove_logs(log_dir):
"""Function removes the cmd-args and git-info log files from log_dir.
Args:
log_dir(str): Path to the results directory containing the logs.
"""
log_files = ["cmd-args.log", "git-info.log"]
for log in log_files:
logfile = os.path.join(log_dir, log)
if os.path.exists(logfile):
os.remove(logfile)
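# Usage sketch (illustrative only; the config keys and results path are assumptions):
#
#     from omegaconf import OmegaConf
#
#     cfg = OmegaConf.create({"encryption_key": "secret", "num_gpus": 1})
#     obfuscate_logs(cfg)          # logs the config with the key masked
#     remove_logs("/results")      # drops cmd-args.log / git-info.log if present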
| tao_pytorch_backend-main | nvidia_tao_pytorch/core/tlt_logging.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing definitions of all connectors for TAO Toolkit."""
| tao_pytorch_backend-main | nvidia_tao_pytorch/core/connectors/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""File containing Overloaded version of PTL CheckpointCollector."""
import os
from pytorch_lightning.callbacks import Callback
from pytorch_lightning.core.lightning import LightningModule
from pytorch_lightning.trainer.connectors.checkpoint_connector import \
CheckpointConnector
from pytorch_lightning.utilities import rank_zero_info, rank_zero_only, rank_zero_warn
from pytorch_lightning.utilities.cloud_io import get_filesystem
from pytorch_lightning.utilities.cloud_io import load as pl_load
from nvidia_tao_pytorch.core.checkpoint_encryption import decrypt_checkpoint, encrypt_checkpoint
from nvidia_tao_pytorch.core.cookbooks.tlt_pytorch_cookbook import TLTPyTorchCookbook
class TLTCheckpointConnector(CheckpointConnector):
"""
Overloaded version of PTL CheckpointConnector, with additional encryption of intermediate checkpoints.
"""
def restore(self, checkpoint_path: str, on_gpu: bool) -> bool:
"""
Load model/training states from a 'PyTorch-Lightning checkpoint' file through file-read and state-restore.
All restored states are listed in return value description of `dump_checkpoint`.
"""
# Try to read the checkpoint file at `checkpoint_path`. If not exist, do not restore checkpoint.
fs = get_filesystem(checkpoint_path)
if not fs.exists(checkpoint_path):
rank_zero_warn("No checkpoint file exists at `resume_from_checkpoint`. Start from scratch")
return False
# read a checkpoint dictionary object from the 'PyTorch-Lightning checkpoint' file at `checkpoint_path`
checkpoint = pl_load(checkpoint_path, map_location=lambda storage, loc: storage)
# acquire the model
model = self.trainer.get_model()
# restore model and datamodule state
self.restore_model_state(model, checkpoint)
if on_gpu:
model.cuda(self.trainer.root_gpu)
# restore training state
self.restore_training_state(checkpoint)
rank_zero_info(f"Restored states from the checkpoint file at {checkpoint_path}")
return True
def restore_model(self) -> None:
"""
Restores a model's weights from a PyTorch Lightning checkpoint. Hooks are called first to give
the LightningModule a chance to modify the contents, then finally the model gets updated with
the loaded weights.
"""
if not self._loaded_checkpoint:
return
model = self.trainer.lightning_module
# hook: give user access to checkpoint if needed.
model.on_load_checkpoint(self._loaded_checkpoint)
# call hpc specific hook
if self._hpc_resume_path is not None:
model.on_hpc_load(self._loaded_checkpoint)
# restore model state_dict
self.restore_model_state(model, self._loaded_checkpoint)
def restore_model_state(self, model: LightningModule, checkpoint) -> None:
"""
Restore model states from a 'PyTorch-Lightning checkpoint' dictionary object
"""
# restore datamodule states
if self.trainer.datamodule is not None:
self.trainer.datamodule.on_load_checkpoint(checkpoint)
if checkpoint.get("state_dict_encrypted", False):
# Retrieve encryption key from TLTPyTorchCookbook.
key = TLTPyTorchCookbook.get_passphrase()
if key is None:
raise PermissionError("Cannot access model state dict without the encryption key")
checkpoint = decrypt_checkpoint(checkpoint, key)
# hook: give user access to checkpoint if needed.
model.on_load_checkpoint(checkpoint)
# restore model state_dict
model.load_state_dict(checkpoint['state_dict'])
def dump_checkpoint(self, weights_only: bool = False) -> dict:
"""Creating a model checkpoint dictionary object from various component states.
If an encryption key is provided (via TLTPyTorchCookbook), encrypts the model checkpoint.
Args:
weights_only: saving model weights only
Return:
structured dictionary: {
'epoch': training epoch
'global_step': training global step
'pytorch-lightning_version': PyTorch Lightning's version
'callbacks': "callback specific state"[] # if not weights_only
'optimizer_states': "PT optim's state_dict"[] # if not weights_only
'lr_schedulers': "PT sched's state_dict"[] # if not weights_only
'native_amp_scaling_state': PT amp's state_dict # if not weights_only and use native amp
'amp_scaling_state': Apex's state_dict # if not weights_only and use apex amp
'state_dict': Model's state_dict (e.g. network weights)
CHECKPOINT_HYPER_PARAMS_NAME:
CHECKPOINT_HYPER_PARAMS_KEY:
CHECKPOINT_HYPER_PARAMS_TYPE:
something_cool_i_want_to_save: anything you define through model.on_save_checkpoint
LightningDataModule.__class__.__name__: pl DataModule's state
}
"""
# Retrieve checkpoint using the connector.
checkpoint = super().dump_checkpoint(weights_only)
# Retrieve encryption key from TLTPyTorchCookbook.
key = TLTPyTorchCookbook.get_passphrase()
if key is not None:
checkpoint = encrypt_checkpoint(checkpoint, key)
return checkpoint
class TLTModelSaver(Callback):
"""TLT model saver that saves .tlt models every n epochs."""
def __init__(self, dirpath, filename, postfix=".tlt", every_n_epochs=0, **kwargs):
"""Initialize."""
self.dirpath = dirpath
self.filename = filename
self.postfix = postfix
self.every_n_epochs = every_n_epochs
# Call the parent class constructor with the remaining kwargs.
super().__init__(**kwargs)
@rank_zero_only
def on_train_epoch_end(self, trainer, pl_module, unused=None):
"""action on training epoch end."""
if self.every_n_epochs > 0:
current_epochs = trainer.current_epoch
if current_epochs % self.every_n_epochs == 0:
name = self.filename + "_epoch_" + str(current_epochs) + self.postfix
pl_module.save_to(save_path=os.path.join(self.dirpath, name))
def setup_tlt_checkpointer(log_dir, trainer, cfg):
"""Helper function to setup TLT model checkpointer."""
checkpointer = TLTModelSaver(
log_dir,
cfg.exp_manager.name,
postfix=cfg.exp_manager.checkpoint_callback_params.postfix,
every_n_epochs=cfg.tlt_checkpoint_interval
)
trainer.callbacks.append(checkpointer)
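# Usage sketch (illustrative only; the spec layout mirrors the fields read above and is
# an assumption, not a documented schema):
#
#     trainer = Trainer(max_epochs=10)
#     setup_tlt_checkpointer(log_dir="/results", trainer=trainer, cfg=cfg)
#     # cfg must provide cfg.exp_manager.name,
#     # cfg.exp_manager.checkpoint_callback_params.postfix and
#     # cfg.tlt_checkpoint_interval; a TLTModelSaver is appended to trainer.callbacks.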
| tao_pytorch_backend-main | nvidia_tao_pytorch/core/connectors/checkpoint_connector.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Logger class for TAO Toolkit models."""
import atexit
from datetime import datetime
import json
import logging
import os
from torch import distributed as torch_distributed
from pytorch_lightning.utilities import rank_zero_only, rank_zero_warn
logger = logging.getLogger(__name__)
class Verbosity():
"""Verbosity levels."""
DISABLE = 0
DEBUG = 10
INFO = 20
WARNING = 30
ERROR = 40
CRITICAL = 50
# Defining a log level to name dictionary.
log_level_to_name = {
Verbosity.DISABLE: "DISABLE",
Verbosity.DEBUG: 'DEBUG',
Verbosity.INFO: 'INFO',
Verbosity.WARNING: 'WARNING',
Verbosity.ERROR: 'ERROR',
Verbosity.CRITICAL: 'CRITICAL'
}
class Status():
"""Status levels."""
SUCCESS = 0
FAILURE = 1
STARTED = 2
RUNNING = 3
SKIPPED = 4
status_level_to_name = {
Status.SUCCESS: 'SUCCESS',
Status.FAILURE: 'FAILURE',
Status.STARTED: 'STARTED',
Status.RUNNING: 'RUNNING',
Status.SKIPPED: 'SKIPPED'
}
class BaseLogger(object):
"""File logger class."""
def __init__(self, verbosity=Verbosity.DISABLE):
"""Base logger class.
Args:
verbosity (int): Logging level
"""
self.verbosity = verbosity
self.categorical = {}
self.graphical = {}
self.kpi = {}
@property
def date(self):
"""Get date from the status.
Returns:
Formatted string containing mm/dd/yyyy.
"""
date_time = datetime.now()
date_object = date_time.date()
return "{}/{}/{}".format(
date_object.month,
date_object.day,
date_object.year
)
@property
def time(self):
"""Get date from the status.
Returns:
Formatted string with time in hh:mm:ss
"""
date_time = datetime.now()
time_object = date_time.time()
return "{}:{}:{}".format(
time_object.hour,
time_object.minute,
time_object.second
)
@property
def categorical(self):
"""Property getter for categorical data to be logged."""
return self._categorical
@categorical.setter
def categorical(self, value: dict):
"""Set categorical data to be logged."""
self._categorical = value
@property
def graphical(self):
"""Property getter for graphical data to be logged."""
return self._graphical
@graphical.setter
def graphical(self, value: dict):
"""Set graphical data to be logged."""
self._graphical = value
@property
def kpi(self):
"""Set KPI data."""
return self._kpi
@kpi.setter
def kpi(self, value: dict):
"""Set KPI data."""
self._kpi = value
@rank_zero_only
def flush(self):
"""Flush the logger."""
pass
def format_data(self, data: dict):
"""Format the data.
Args:
data(dict): Dictionary data to be formatted to a json string.
Returns
data_string (str): Recursively formatted string.
"""
if isinstance(data, dict):
data_string = []
for key, value in data.items():
data_string.append(
f"{key}: {self.format_data(value)}"
if isinstance(value, dict) else f"{key}: {value}"
)
return ", ".join(data_string)
@rank_zero_only
def log(self, level, string):
"""Log the data string.
This method is implemented only for rank 0 process in a multiGPU
session.
Args:
level (int): Log level requested.
string (string): Message to be written.
"""
if level >= self.verbosity:
logging.log(level, string)
@rank_zero_only
def write(self, data=None,
status_level=Status.RUNNING,
verbosity_level=Verbosity.INFO,
message=None):
"""Write data out to the log file.
Args:
data (dict): Dictionary of data to be written out.
status_level (nvidia_tao_pytorch.core.loggers.api_logging.Status): Current status of the
process being logged. DEFAULT=Status.RUNNING
verbosity_level (nvidia_tao_pytorch.core.loggers.api_logging.Verbosity): Setting
logging level of the Status logger. Default=Verbosity.INFO
"""
if self.verbosity > Verbosity.DISABLE:
if not data:
data = {}
# Define generic data.
data["date"] = self.date
data["time"] = self.time
data["status"] = status_level_to_name.get(status_level, "RUNNING")
data["verbosity"] = log_level_to_name.get(verbosity_level, "INFO")
if message:
data["message"] = message
logging.log(verbosity_level, message)
if self.categorical:
data["categorical"] = self.categorical
if self.graphical:
data["graphical"] = self.graphical
if self.kpi:
data["kpi"] = self.kpi
data_string = self.format_data(data)
self.log(verbosity_level, data_string)
self.flush()
class StatusLogger(BaseLogger):
"""Simple logger to save the status file."""
def __init__(self, filename=None,
verbosity=Verbosity.INFO,
append=True):
"""Logger to write out the status.
Args:
filename (str): Path to the log file.
verbosity (str): Logging level. Default=INFO
append (bool): Flag to open the log file in
append mode or write mode. Default=True
"""
super().__init__(verbosity=verbosity)
self.log_path = os.path.realpath(filename)
if os.path.exists(self.log_path):
rank_zero_warn(
f"Log file already exists at {self.log_path}"
)
# Open the file only if rank == 0.
distributed = torch_distributed.is_initialized() and torch_distributed.is_available()
global_rank_0 = (not distributed) or (distributed and torch_distributed.get_rank() == 0)
if global_rank_0:
self.l_file = open(self.log_path, "a" if append else "w")
atexit.register(self.l_file.close)
@rank_zero_only
def log(self, level, string):
"""Log the data string.
This method is implemented only for rank 0 process in a multiGPU
session.
Args:
level (int): Log level requested.
string (string): Message to be written.
"""
if level >= self.verbosity:
self.l_file.write(string + "\n")
@rank_zero_only
def flush(self):
"""Flush contents of the log file."""
self.l_file.flush()
@staticmethod
def format_data(data):
"""Format the dictionary data.
Args:
data(dict): Dictionary data to be formatted to a json string.
Returns
data_string (str): json formatted string from a dictionary.
"""
if not isinstance(data, dict):
raise TypeError(f"Data must be a dictionary and not type {type(data)}.")
data_string = json.dumps(data)
return data_string
# Define the logger here so it's static.
_STATUS_LOGGER = BaseLogger()
def set_status_logger(status_logger):
"""Set the status logger.
Args:
status_logger: An instance of the logger class.
"""
global _STATUS_LOGGER # pylint: disable=W0603
_STATUS_LOGGER = status_logger
def get_status_logger():
"""Get the status logger."""
global _STATUS_LOGGER # pylint: disable=W0602,W0603
return _STATUS_LOGGER
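# Usage sketch (illustrative only; the file path and KPI value are assumptions):
#
#     from nvidia_tao_pytorch.core.loggers.api_logging import (
#         StatusLogger, Status, set_status_logger, get_status_logger)
#
#     set_status_logger(StatusLogger(filename="/results/status.json", append=True))
#     s_logger = get_status_logger()
#     s_logger.write(message="Starting training", status_level=Status.STARTED)
#     s_logger.kpi = {"accuracy_top-1": 0.91}
#     s_logger.write(message="Epoch complete", status_level=Status.RUNNING)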
| tao_pytorch_backend-main | nvidia_tao_pytorch/core/loggers/api_logging.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Logger module for TAO Toolkit models."""
| tao_pytorch_backend-main | nvidia_tao_pytorch/core/loggers/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Status Logger callback."""
from collections.abc import Iterable
from datetime import timedelta
import os
import time
from pytorch_lightning import Callback
import numpy as np
import six
from nvidia_tao_pytorch.core.loggers.api_logging import (
get_status_logger,
Status,
StatusLogger,
Verbosity
)
# Get default status logger() if it's been previously defined.
logger = get_status_logger()
KEY_MAP = {
"val_loss": "validation_loss",
"val_acc": "validation_accuracy",
"loss": "loss",
"acc": "training_accuracy",
"lr": "learning_rate",
"mAP": "mean average precision"
}
class TAOStatusLogger(Callback):
"""Callback that streams the data training data to a status.json file.
Supports all values that can be represented as a string,
including 1D iterables such as np.ndarray.
# Example
```python
logger = TAOStatusLogger('/path/to/results_dir')
trainer = Trainer(callbacks=[logger])
trainer.fit(model)
```
# Arguments
results_dir (str): The directory where the logs will be saved.
num_epochs (int): Number of epochs to run the training
verbosity (status_logger.verbosity.Verbosity()): Verbosity level.
append (bool): If True, append to the file if it exists (useful for continuing
training); if False, overwrite any existing file.
"""
def __init__(self, results_dir, num_epochs=120,
verbosity=Verbosity.INFO,
append=False):
"""Instantiate the TAOStatusLogger."""
# Make sure that the status logger obtained is always
# an instance of nvidia_tao_pytorch.core.loggers.api_logging.StatusLogger.
# Otherwise, this data gets rendered in stdout.
if isinstance(logger, StatusLogger):
self.logger = logger
else:
self.logger = StatusLogger(
filename=os.path.join(results_dir, "status.json"),
verbosity=verbosity,
append=append
)
self.keys = None
self.max_epochs = num_epochs
self._epoch_start_time = None
self.epoch_counter = 0
super(TAOStatusLogger, self).__init__()
def on_train_start(self, trainer, pl_module):
"""Write data beginning of the training."""
self.logger.write(
status_level=Status.STARTED,
message="Starting Training Loop."
)
@staticmethod
def _handle_value(k):
is_zero_dim_ndarray = isinstance(k, np.ndarray) and k.ndim == 0
if isinstance(k, six.string_types):
return k
if isinstance(k, Iterable) and not is_zero_dim_ndarray:
return '"[%s]"' % (', '.join(map(str, k)))
return k
def on_train_epoch_start(self, trainer, pl_module):
"""Routines to be run at the beginning of the epoch."""
self._epoch_start_time = time.time()
def on_train_epoch_end(self, trainer, pl_module):
"""Collect data at the end of an epoch."""
self.epoch_counter += 1
data = {}
data["epoch"] = self.epoch_counter
data["max_epoch"] = self.max_epochs
epoch_end_time = time.time()
time_per_epoch = epoch_end_time - self._epoch_start_time
eta = (self.max_epochs - self.epoch_counter) * time_per_epoch
data["time_per_epoch"] = str(timedelta(seconds=time_per_epoch))
data["eta"] = str(timedelta(seconds=eta))
self.logger.write(data=data, message="Training loop in progress")
def on_train_end(self, trainer, pl_module):
"""Callback function run at the end of training."""
self.logger.write(
status_level=Status.RUNNING,
message="Training loop complete."
)
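# Usage sketch (illustrative only; the results directory is an assumption):
#
#     from pytorch_lightning import Trainer
#
#     status_cb = TAOStatusLogger(results_dir="/results", num_epochs=50)
#     trainer = Trainer(max_epochs=50, callbacks=[status_cb])
#     # Each epoch appends progress (epoch, time_per_epoch, eta) to /results/status.json.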
| tao_pytorch_backend-main | nvidia_tao_pytorch/core/callbacks/loggers.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing definitions of all the PyTorch callbacks for TAO Toolkit."""
| tao_pytorch_backend-main | nvidia_tao_pytorch/core/callbacks/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""File containing Overloaded version of PTL ModelCheckpoint."""
import os
from copy import deepcopy
import torch
from pytorch_lightning.callbacks import ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from nvidia_tao_pytorch.core.checkpoint_encryption import decrypt_checkpoint
from nvidia_tao_pytorch.core.cookbooks.tlt_pytorch_cookbook import TLTPyTorchCookbook
class TLTModelCheckpoint(ModelCheckpoint):
"""Light wrapper around Lightning's ModelCheckpoint to force a saved checkpoint on train_end.
Args:
prefix (str): Prefix string to be added to the model. Default=model
save_best_model (bool): Flag to save the best model. Default=True
postfix (str): File extension to save the model.
"""
def __init__(self, prefix="model", save_best_model=True, postfix=".etlt", **kwargs):
"""Constructor function for the class."""
# Call the parent class constructor with the remaining kwargs.
super().__init__(**kwargs)
# Parse and store "extended" parameters: save_best model and postfix.
self.save_best_model = save_best_model
self.postfix = postfix
self.previous_best_path = ""
# self.filename = prefix
self.prefix = prefix
@rank_zero_only
def on_save_checkpoint(self, trainer, pl_module, checkpoint):
"""Functions called by PTL when saving checkpoint. Used to save encypted models with EFF
This function is only run for the rank 0 process in a multiGPU training.
Args:
trainer (pytorch_lightning.Trainer): PTL trainer calling the checkpoint callback.
pl_module (pytorch_lightning.LightningModule): Lightning module implementing the model.
checkpoint (dict): Pytorch lightning checkpoint dictionary.
Return:
output (LightningModule.state_dict): Checkpoint containing encrypted state dict.
"""
output = super().on_save_checkpoint(trainer, pl_module, checkpoint)
# Load the best model and then re-save it
if self.save_best_model:
if not os.path.exists(self.best_model_path):
return output
if self.best_model_path == self.previous_best_path:
return output
self.previous_best_path = self.best_model_path
old_state_dict = deepcopy(pl_module.state_dict())
checkpoint = torch.load(self.best_model_path, map_location='cpu')
checkpoint = decrypt_checkpoint(checkpoint, TLTPyTorchCookbook.get_passphrase())
# trainer._checkpoint_connector.restore_model_state(pl_module, checkpoint)
if 'state_dict' in checkpoint:
checkpoint = checkpoint['state_dict']
# get a new instance of the model
# TLTPyTorchCookbook().restore_from_ckpt()
pl_module.load_state_dict(checkpoint)
TLTPyTorchCookbook().save_checkpoint_to(pl_module.netG.module.state_dict(), save_path=os.path.join(self.dirpath, self.prefix + "_best" + self.postfix))
# pl_module.save_to(save_path=os.path.join(self.dirpath, self.prefix + self.postfix))
pl_module.load_state_dict(old_state_dict, strict=True)
else:
TLTPyTorchCookbook().save_checkpoint_to(pl_module.netG.module.state_dict(), save_path=os.path.join(self.dirpath, self.prefix + self.postfix))
# pl_module.save_to(save_path=os.path.join(self.dirpath, self.prefix + self.postfix))
return output
@rank_zero_only
def on_train_end(self, trainer, pl_module):
"""Overriden PTL function to force save of EFF encrypted model.
Args:
trainer (pytorch_lightning.Trainer): PTL trainer calling the checkpoint callback.
pl_module (pytorch_lightning.LightningModule): Lightning module implementing the model.
"""
if trainer.fast_dev_run:
return None
# Load the best model and then re-save it
if self.save_best_model:
checkpoint = torch.load(self.best_model_path, map_location='cpu')
checkpoint = decrypt_checkpoint(checkpoint, TLTPyTorchCookbook.get_passphrase())
# trainer._checkpoint_connector.restore(self.best_model_path, on_gpu=trainer.on_gpu)
TLTPyTorchCookbook().save_checkpoint_to(pl_module.netG.module.state_dict(), save_path=os.path.join(self.dirpath, self.prefix + "_last" + self.postfix))
return None
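# Usage sketch (illustrative only; dirpath/monitor values are assumptions, and the
# wrapped module is expected to expose `netG` as in the save calls above):
#
#     ckpt_cb = TLTModelCheckpoint(
#         prefix="model",
#         save_best_model=True,
#         postfix=".etlt",
#         dirpath="/results/checkpoints",
#         monitor="val_loss",
#     )
#     trainer = Trainer(callbacks=[ckpt_cb])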
| tao_pytorch_backend-main | nvidia_tao_pytorch/core/callbacks/model_checkpoint.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" MMLab Init Module """
| tao_pytorch_backend-main | nvidia_tao_pytorch/core/mmlab/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base MMCV Trainer Class."""
import torch
from mmcv.parallel import MMDistributedDataParallel
from abc import ABC, abstractmethod
class MMCVTrainer(ABC):
"""MMCV Base Trainer"""
def __init__(self,
dataset,
model,
timestamp=None,
meta=None,
result_dir=None,
experiment_spec=None):
"""Init Function.
Args:
dataset (dataset instance): Imagenet Dataset type instance.
model (nn.Module): PyT model instance.
meta (Dict): Contains the env variables.
result_dir (str): Path to the results dir.
experiment_spec (Dict): Contains the hydra exp config parameters.
"""
self.model = model
self.dataset = dataset
self.timestamp = timestamp
self.result_dir = result_dir
self.cfg = experiment_spec
self.model_cfg = experiment_spec["model"]
self.train_cfg = experiment_spec["train"]["train_config"]
self.dataset_cfg = experiment_spec["dataset"]
self.meta = meta
self.evaluation_cfg = experiment_spec["train"]["train_config"]["evaluation"]
@abstractmethod
def set_up_data_loaders(self):
"""Function to generate dataloaders."""
raise NotImplementedError(
"Base Trainer doesn't implement data loader instantiation."
)
@abstractmethod
def validate_runner(self):
"""Function to Add validation hook to training"""
raise NotImplementedError(
"Base Trainer doesn't implement validation for runner instantiation."
)
def set_up_trainer(self):
""" Set up the end-end trainer"""
self.data_loaders = self.set_up_data_loaders()
self.model = self.set_up_model()
self.runner = self.set_up_runner()
if self.train_cfg["validate"]:
# Add the validation hook to the runner if validate is True
self.validate_runner()
def set_up_model(self):
"""Function To Set Up Model"""
# put model on gpus
find_unused_parameters = self.train_cfg["find_unused_parameters"]
# Sets the `find_unused_parameters` parameter in
# torch.nn.parallel.DistributedDataParallel
self.model = MMDistributedDataParallel(
self.model.cuda(),
device_ids=[torch.cuda.current_device()],
broadcast_buffers=False,
find_unused_parameters=find_unused_parameters)
return self.model
@abstractmethod
def set_up_runner(self):
"""Function to Build the Runner."""
raise NotImplementedError(
"Base Trainer doesn't implement data loader instantiation."
)
def fit(self):
"""Runner Fit to Start the training."""
self.runner.run(self.data_loaders, workflow=[('train', 1)])
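# Subclassing sketch (illustrative only; concrete implementations live in the
# task-specific trainers such as MMClsTrainer):
#
#     class MyTrainer(MMCVTrainer):
#         def set_up_data_loaders(self):
#             ...  # build and return the list of train dataloaders
#         def set_up_runner(self):
#             ...  # build and return an mmcv runner
#         def validate_runner(self):
#             ...  # register an evaluation hook on self.runner
#
#     trainer = MyTrainer(dataset, model, result_dir="/results", experiment_spec=cfg)
#     trainer.set_up_trainer()
#     trainer.fit()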
| tao_pytorch_backend-main | nvidia_tao_pytorch/core/mmlab/common/base_trainer.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Init Module Common Hooks for MMLab"""
from nvidia_tao_pytorch.core.mmlab.common.base_tao_status_logger import BaseTaoTextLoggerHook
__all__ = ['BaseTaoTextLoggerHook']
| tao_pytorch_backend-main | nvidia_tao_pytorch/core/mmlab/common/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
# Original source taken from https://github.com/open-mmlab/mmsegmentation
# Copyright 2019 OpenMMLAB
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MMCV Base Tao status logger for segformer """
from collections import OrderedDict
from typing import Dict
import torch
from mmcv.runner.hooks import HOOKS, TextLoggerHook
import nvidia_tao_pytorch.core.loggers.api_logging as status_logging
from abc import abstractmethod
@HOOKS.register_module()
class BaseTaoTextLoggerHook(TextLoggerHook):
""" Logger hook in text. """
def __init__(self, *args, **kwargs):
""" init """
self.s_logger = status_logging.get_status_logger()
super(BaseTaoTextLoggerHook, self).__init__(*args, **kwargs)
self.monitor_data = {}
@abstractmethod
def _status_log(self, log_dict: Dict, runner) -> None:
"""Function to generate dataloaders."""
raise NotImplementedError(
"Base Trainer doesn't implement data loader instantiation."
)
def log(self, runner) -> OrderedDict:
""" log runner """
if 'eval_iter_num' in runner.log_buffer.output:
# this doesn't modify runner.iter and is regardless of by_epoch
cur_iter = runner.log_buffer.output.pop('eval_iter_num')
else:
cur_iter = self.get_iter(runner, inner_iter=True)
log_dict = OrderedDict(
mode=self.get_mode(runner),
epoch=self.get_epoch(runner),
iter=cur_iter)
# only record lr of the first param group
cur_lr = runner.current_lr()
if isinstance(cur_lr, list):
log_dict['lr'] = cur_lr[0]
else:
assert isinstance(cur_lr, dict)
log_dict['lr'] = {}
for k, lr_ in cur_lr.items():
assert isinstance(lr_, list)
log_dict['lr'].update({k: lr_[0]})
if 'time' in runner.log_buffer.output:
# statistic memory
if torch.cuda.is_available():
log_dict['memory'] = self._get_max_memory(runner)
log_dict = dict(log_dict, **runner.log_buffer.output) # type: ignore
self._log_info(log_dict, runner)
self._dump_log(log_dict, runner)
self._status_log(log_dict, runner)
return log_dict
| tao_pytorch_backend-main | nvidia_tao_pytorch/core/mmlab/common/base_tao_status_logger.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utils Function"""
from mmcls.utils import collect_env
from mmcv.runner import get_dist_info, init_dist
import os
import glob
def set_env():
""" Function to Set Environment """
meta = dict()
# log env info
env_info_dict = collect_env()
env_info = '\n'.join([f'{k}: {v}' for k, v in env_info_dict.items()])
meta['env_info'] = env_info
return meta
def set_distributed(experiment_config, phase="train"):
""" Set Distributed Params """
rank, world_size = get_dist_info()
# If distributed these env variables are set by torchrun
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(0)
if "RANK" not in os.environ:
os.environ['RANK'] = str(rank)
if "WORLD_SIZE" not in os.environ:
os.environ['WORLD_SIZE'] = str(world_size)
if "MASTER_PORT" not in os.environ:
os.environ['MASTER_PORT'] = str(experiment_config[phase]["exp_config"]["MASTER_PORT"])
if "MASTER_ADDR" not in os.environ:
os.environ['MASTER_ADDR'] = experiment_config[phase]["exp_config"]["MASTER_ADDR"]
init_dist("pytorch", backend="nccl")
def get_latest_pth_model(results_dir):
"""Utility function to return the latest tlt model in a dir.
Args:
results_dir (str): Path to results dir.
Returns:
Returns the latest checkpoint.
"""
files = list(filter(os.path.isfile, glob.glob(results_dir + "/*.pth")))
if not files:
return None
files.sort(key=lambda x: os.path.getmtime(x))
latest_checkpoint = files[-1]
if not os.path.isfile(latest_checkpoint):
raise FileNotFoundError("Checkpoint file not found at {}".format(latest_checkpoint))
return latest_checkpoint
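# Usage sketch (illustrative only; the results directory is an assumption):
#
#     latest = get_latest_pth_model("/results/train")
#     if latest is None:
#         print("No .pth checkpoint found, starting from scratch")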
| tao_pytorch_backend-main | nvidia_tao_pytorch/core/mmlab/common/utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Model Parameters Mapping Module """
map_params = {"head": {"in_channels": {
"fan_tiny_8_p4_hybrid": 192, # FAN
"fan_small_12_p4_hybrid": 384,
"fan_base_16_p4_hybrid": 448,
"fan_large_16_p4_hybrid": 480,
"fan_Xlarge_16_p4_hybrid": 768,
"fan_base_18_p16_224": 448,
"fan_tiny_12_p16_224": 192,
"fan_small_12_p16_224_se_attn": 384,
"fan_small_12_p16_224": 384,
"fan_large_24_p16_224": 480,
"gc_vit_xxtiny": 512, # GCViT
"gc_vit_xtiny": 512,
"gc_vit_tiny": 512,
"gc_vit_small": 768,
"gc_vit_base": 1024,
"gc_vit_large": 1536,
"gc_vit_large_384": 1536,
}}}
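# Lookup sketch (illustrative only): resolve the classification-head input channels
# for a given backbone, falling back to a default when the backbone is unmapped.
#
#     backbone_type = "fan_small_12_p4_hybrid"
#     in_channels = map_params["head"]["in_channels"].get(backbone_type, 448)  # -> 384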
| tao_pytorch_backend-main | nvidia_tao_pytorch/core/mmlab/mmclassification/model_params_mapping.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
# Original source taken from https://github.com/open-mmlab/mmsegmentation
# Copyright 2019 OpenMMLAB
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Tao status logger for segformer """
import os
from typing import Dict
from mmcv.runner.hooks import HOOKS
from nvidia_tao_pytorch.core.mmlab.common.base_tao_status_logger import BaseTaoTextLoggerHook
import nvidia_tao_pytorch.core.loggers.api_logging as status_logging
import json
from time import strftime, gmtime
STATUS_JSON_FILENAME = "status.json"
@HOOKS.register_module()
class MMClsTaoTextLoggerHook(BaseTaoTextLoggerHook):
"""TAO Epoch based runner.
Overrides mmcv.runner.epoch_based_runner.EpochBaseRunner to save checkpoints
without symlinks which requires root access.
"""
def _status_log(self, log_dict: Dict, runner) -> None:
""" status_log
Args:
log_dict (Dict): Contains the parameters for experiment logging.
runner (Class): Object of TAO Runner
"""
self.monitor_data["mode"] = log_dict["mode"]
if log_dict['mode'] == 'val':
self.monitor_data["accuracy_top-1"] = log_dict["accuracy_top-1"]
self.s_logger.kpi = {
"accuracy_top-1": log_dict["accuracy_top-1"],
}
if log_dict['mode'] == 'train':
running_avg_loss = log_dict["loss"]
self.monitor_data["epoch"] = log_dict["epoch"]
self.monitor_data["loss"] = running_avg_loss
time_sec_avg = self.time_sec_tot / (runner.iter - self.start_iter + 1) # Per iter
time_sec_avg_epoch = len(runner.data_loader) * time_sec_avg
self.monitor_data["time_per_epoch"] = strftime("%H:%M:%S", gmtime(time_sec_avg_epoch))
self.monitor_data["lr"] = log_dict["lr"]
self.s_logger.graphical = {
"loss": running_avg_loss,
}
try:
self.s_logger.write(
data=self.monitor_data,
status_level=status_logging.Status.RUNNING)
except IOError:
# We let this pass because we do not want the json file writing to crash the whole job.
pass
# Save the json file.
filename = os.path.join(runner.work_dir, STATUS_JSON_FILENAME)
try:
with open(filename, "a+") as f:
json.dump(self.monitor_data, f)
f.write('\n')
except IOError:
# We let this pass because we do not want the json file writing to crash the whole job.
pass
| tao_pytorch_backend-main | nvidia_tao_pytorch/core/mmlab/mmclassification/mmclassification_tao_status_logger.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Init module for Classification Common Hooks"""
from nvidia_tao_pytorch.core.mmlab.mmclassification.mmclassification_tao_status_logger import MMClsTaoTextLoggerHook
__all__ = ['MMClsTaoTextLoggerHook']
| tao_pytorch_backend-main | nvidia_tao_pytorch/core/mmlab/mmclassification/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classification Default config file"""
from typing import Optional, List, Dict, Any
from dataclasses import dataclass, field
from omegaconf import MISSING
@dataclass
class ImgNormConfig:
"""Configuration parameters for Img Normalization."""
mean: List[float] = field(default_factory=lambda: [123.675, 116.28, 103.53])
std: List[float] = field(default_factory=lambda: [58.395, 57.12, 57.375])
to_rgb: bool = True
@dataclass
class TrainData:
"""Train Data Dataclass"""
type: str = "ImageNet"
data_prefix: Optional[str] = None
pipeline: List[Any] = field(default_factory=lambda: [{"type": "RandomResizedCrop", "size": 224},
{"type": "RandomFlip", "flip_prob": 0.5, "direction": "horizontal"}])
classes: Optional[str] = None
@dataclass
class ValData:
"""Validation Data Dataclass"""
type: str = "ImageNet"
data_prefix: Optional[str] = None
ann_file: Optional[str] = None
pipeline: List[Any] = field(default_factory=lambda: [{"type": "Resize", "size": (256, -1)},
{"type": "CenterCrop", "crop_size": 224}])
classes: Optional[str] = None
@dataclass
class TestData:
"""Test Data Dataclass"""
type: str = "ImageNet"
data_prefix: Optional[str] = None
ann_file: Optional[str] = None
pipeline: List[Any] = field(default_factory=lambda: [{"type": "Resize", "size": (256, -1)},
{"type": "CenterCrop", "crop_size": 224}])
classes: Optional[str] = None
@dataclass
class DataConfig:
"""Data Config"""
samples_per_gpu: int = 1
workers_per_gpu: int = 2
train: TrainData = TrainData()
val: ValData = ValData()
test: TestData = TestData()
@dataclass
class DatasetConfig:
"""Dataset config."""
img_norm_cfg: ImgNormConfig = ImgNormConfig()
data: DataConfig = DataConfig()
sampler: Optional[Dict[Any, Any]] = None # Allowed sampler : RepeatAugSampler
@dataclass
class DistParams:
"""Distribution Parameters"""
backend: str = "nccl"
@dataclass
class RunnerConfig:
"""Configuration parameters for Runner."""
type: str = "TAOEpochBasedRunner" # Currently We support only Epochbased Runner - Non configurable
max_epochs: int = 20 # Set this if Epoch based runner
@dataclass
class CheckpointConfig:
"""Configuration parameters for Checkpointing."""
interval: int = 1 # Epochs or Iterations accordingly
by_epoch: bool = True # Interval is counted in epochs when True, in iterations when False
# Default Runtime Config
@dataclass
class LogConfig:
"""Configuration parameters for Logging."""
interval: int = 1000
log_dir: str = "logs" # Make sure this directory is created
# Optim and Schedule Config
@dataclass
class ValidationConfig:
"""Validation Config."""
interval: int = 100
@dataclass
class ParamwiseConfig:
"""Configuration parameters for Parameters."""
pos_block: Dict[str, float] = field(default_factory=lambda: {"decay_mult": 0.0})
norm: Dict[str, float] = field(default_factory=lambda: {"decay_mult": 0.0})
head: Dict[str, float] = field(default_factory=lambda: {"lr_mult": 10.0})
@dataclass
class EvaluationConfig:
"""Evaluation Config."""
interval: int = 1
metric: str = "accuracy"
@dataclass
class TrainConfig:
"""Train Config."""
checkpoint_config: CheckpointConfig = CheckpointConfig()
optimizer: Dict[Any, Any] = field(default_factory=lambda: {"type": 'AdamW',
"lr": 10e-4,
"weight_decay": 0.05})
paramwise_cfg: Optional[Dict[Any, Any]] = None # Not a must - needs to be provided in yaml
optimizer_config: Dict[Any, Any] = field(default_factory=lambda: {'grad_clip': None}) # Gradient Accumulation and grad clip
lr_config: Dict[Any, Any] = field(default_factory=lambda: {"policy": 'CosineAnnealing',
"min_lr": 10e-4, "warmup": "linear",
"warmup_iters": 5,
"warmup_ratio": 0.01,
"warmup_by_epoch": True})
runner: RunnerConfig = RunnerConfig()
logging: LogConfig = LogConfig() # By default we add logging
evaluation: EvaluationConfig = EvaluationConfig() # Does not change
find_unused_parameters: bool = False # Does not change
resume_training_checkpoint_path: Optional[str] = None
validate: bool = False
# This param can be omitted if init_cfg is used in model_cfg. Both do the same thing.
load_from: Optional[str] = None # Optional path to pretrained weights to load (including the head)
custom_hooks: List[Any] = field(default_factory=lambda: [])
# Experiment Common Configs
@dataclass
class ExpConfig:
"""Overall Exp Config for Cls."""
manual_seed: int = 47
# If needed, the next line can be commented
MASTER_ADDR: str = "127.0.0.1"
MASTER_PORT: int = 631
@dataclass
class TrainExpConfig:
"""Train experiment config."""
exp_config: ExpConfig = ExpConfig()
validate: bool = False
train_config: TrainConfig = TrainConfig() # Could change across networks
num_gpus: int = 1 # non configurable here
results_dir: Optional[str] = None
@dataclass
class InferenceExpConfig:
"""Inference experiment config."""
num_gpus: int = 1 # non configurable here
batch_size: int = 1
checkpoint: Optional[str] = None
trt_engine: Optional[str] = None
exp_config: ExpConfig = ExpConfig()
results_dir: Optional[str] = None
@dataclass
class EvalExpConfig:
"""Inference experiment config."""
num_gpus: int = 1 # non configurable here
batch_size: int = 1
checkpoint: Optional[str] = None
trt_engine: Optional[str] = None
exp_config: ExpConfig = ExpConfig()
topk: int = 1 # Configurable
results_dir: Optional[str] = None
@dataclass
class TrtConfig:
"""Trt config."""
data_type: str = "FP32"
workspace_size: int = 1024
min_batch_size: int = 1
opt_batch_size: int = 1
max_batch_size: int = 1
@dataclass
class ExportExpConfig:
"""Export experiment config."""
verify: bool = False
opset_version: int = 12
checkpoint: Optional[str] = None
input_channel: int = 3
input_width: int = 224
input_height: int = 224
onnx_file: Optional[str] = None
results_dir: Optional[str] = None
@dataclass
class HeadConfig:
"""Head Config"""
type: str = 'LinearClsHead'
num_classes: int = 1000
in_channels: int = 448 # Mapped to different channels according to the backbone used in fan_model.py
custom_args: Optional[Dict[Any, Any]] = None
loss: Dict[Any, Any] = field(default_factory=lambda: {"type": 'CrossEntropyLoss'})
topk: List[int] = field(default_factory=lambda: [1, ])
@dataclass
class InitCfg:
"""Init Config"""
type: str = "Pretrained"
checkpoint: Optional[str] = None
prefix: Optional[str] = None # E.g., backbone
@dataclass
class BackboneConfig:
"""Configuration parameters for Backbone."""
type: str = "fan_tiny_8_p4_hybrid"
custom_args: Optional[Dict[Any, Any]] = None
@dataclass
class TrainAugCfg:
"""Arguments for Train Config"""
augments: Optional[List[Dict[Any, Any]]] = None
@dataclass
class ModelConfig:
"""Cls model config."""
type: str = "ImageClassifier"
backbone: BackboneConfig = BackboneConfig()
neck: Optional[Dict[Any, Any]] = None
head: HeadConfig = HeadConfig()
init_cfg: InitCfg = InitCfg() # No change
train_cfg: TrainAugCfg = TrainAugCfg()
@dataclass
class GenTrtEngineExpConfig:
"""Gen TRT Engine experiment config."""
results_dir: Optional[str] = None
gpu_id: int = 0
onnx_file: Optional[str] = None
trt_engine: Optional[str] = None
input_channel: int = 3
input_width: int = 224
input_height: int = 224
opset_version: int = 12
batch_size: int = -1
verbose: bool = False
tensorrt: TrtConfig = TrtConfig()
@dataclass
class ExperimentConfig:
"""Experiment config."""
model: ModelConfig = ModelConfig()
dataset: DatasetConfig = DatasetConfig()
train: TrainExpConfig = TrainExpConfig()
evaluate: EvalExpConfig = EvalExpConfig()
inference: InferenceExpConfig = InferenceExpConfig()
gen_trt_engine: GenTrtEngineExpConfig = GenTrtEngineExpConfig()
export: ExportExpConfig = ExportExpConfig()
results_dir: str = MISSING
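# Usage sketch (illustrative only): these dataclasses are typically consumed as a
# Hydra/OmegaConf structured config; the overrides below are assumptions.
#
#     from omegaconf import OmegaConf
#
#     schema = OmegaConf.structured(ExperimentConfig)
#     overrides = OmegaConf.create({"results_dir": "/results",
#                                   "model": {"backbone": {"type": "gc_vit_tiny"}}})
#     cfg = OmegaConf.merge(schema, overrides)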
| tao_pytorch_backend-main | nvidia_tao_pytorch/core/mmlab/mmclassification/classification_default_config.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
# Original source taken from https://github.com/open-mmlab/mmclassification
# Copyright 2019 OpenMMLAB
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Trainer Class for Classification."""
from mmcv.runner import build_optimizer, build_runner, DistSamplerSeedHook
from mmcv.runner.epoch_based_runner import EpochBasedRunner
from mmcv.runner.builder import RUNNERS
from mmcv.runner.checkpoint import save_checkpoint
from mmcls.core import DistEvalHook, DistOptimizerHook
from mmcls.datasets import build_dataloader, build_dataset
from mmcls.utils import get_root_logger
from nvidia_tao_pytorch.core.mmlab.common.base_trainer import MMCVTrainer
from nvidia_tao_pytorch.core.mmlab.mmclassification.utils import load_model
import os.path as osp
@RUNNERS.register_module()
class TAOEpochBasedRunner(EpochBasedRunner):
"""TAO Epoch based runner.
Overrides mmcv.runner.epoch_based_runner.EpochBaseRunner to save checkpoints
without creating symlinks, since symlink creation may require root access.
"""
def __init__(self, *args, **kwargs):
"""Init Function."""
super(TAOEpochBasedRunner, self).__init__(*args, **kwargs)
def save_checkpoint(self, out_dir, filename_tmpl='epoch_{}.pth', save_optimizer=True,
meta=None, create_symlink=False):
"""Checkpoint saver
Args:
out_dir (str): Output dir to save checkpoints
filename_tmpl (str): Checkpoint saving template
save_optimizer (bool): Flag indicating whether to save optimizer states
meta (Dict): Dictionary that has the checkpoint meta variables
create_symlink (bool): Flag whether to create sym link to the latest checkpoint
"""
if meta is None:
meta = {}
elif not isinstance(meta, dict):
raise TypeError(
f'meta should be a dict or None, but got {type(meta)}')
if self.meta is not None:
meta.update(self.meta)
# Note: meta.update(self.meta) should be done before
# meta.update(epoch=self.epoch + 1, iter=self.iter) otherwise
# there will be problems with resumed checkpoints.
# More details in https://github.com/open-mmlab/mmcv/pull/1108
meta.update(epoch=self.epoch + 1, iter=self.iter)
filename = filename_tmpl.format(self.epoch + 1)
filepath = osp.join(out_dir, filename)
optimizer = self.optimizer if save_optimizer else None
save_checkpoint(self.model, filepath, optimizer=optimizer, meta=meta)
class MMClsTrainer(MMCVTrainer):
"""MMClassification Trainer."""
def __init__(self, *args, **kwargs):
"""Init Function."""
super(MMClsTrainer, self).__init__(*args, **kwargs)
def set_up_runner(self):
"""Function to Set Up Runner"""
# build runner
optimizer = self.train_cfg["optimizer"]
lr_config = self.train_cfg["lr_config"]
optimizer_config = self.train_cfg["optimizer_config"]
runner_config = self.train_cfg["runner"]
optimizer = build_optimizer(self.model, optimizer)
logger = get_root_logger(osp.join(self.result_dir, "INFO"))
self.runner = build_runner(
runner_config,
default_args=dict(model=self.model, batch_processor=None, optimizer=optimizer,
work_dir=self.result_dir, logger=logger, meta=self.meta))
if optimizer_config:
optimizer_config = DistOptimizerHook(**optimizer_config)
log_config = dict(interval=self.train_cfg["logging"]["interval"],
hooks=[dict(type='MMClsTaoTextLoggerHook')])
checkpoint_config = dict(interval=self.train_cfg["checkpoint_config"]["interval"])
custom_hooks = self.train_cfg["custom_hooks"]
# register hooks
self.runner.register_training_hooks(lr_config=lr_config, optimizer_config=optimizer_config,
checkpoint_config=checkpoint_config, log_config=log_config,
momentum_config=None,
custom_hooks_config=custom_hooks)
# Register Dist hook for sampler
if runner_config['type'] == 'TAOEpochBasedRunner':
self.runner.register_hook(DistSamplerSeedHook())
# an ugly workaround to make the .log and .log.json filenames the same
self.runner.timestamp = self.timestamp
# register eval hooks
resume_from = self.train_cfg["resume_training_checkpoint_path"]
load_from = self.train_cfg["load_from"]
if resume_from:
modified_ckpt = load_model(resume_from, return_ckpt=True)
self.runner.resume(modified_ckpt)
elif load_from:
self.runner.load_checkpoint(load_from)
return self.runner
def set_up_data_loaders(self):
"""Function to generate dataloaders"""
# prepare data loaders
dataset = self.dataset if isinstance(self.dataset, (list, tuple)) else [self.dataset]
self.data_loaders = [
build_dataloader(
ds,
self.dataset_cfg["data"]["samples_per_gpu"],
self.dataset_cfg["data"]["workers_per_gpu"],
self.cfg["train"]["num_gpus"],
dist=True,
seed=self.cfg["train"]["exp_config"]["manual_seed"],
drop_last=True) for ds in dataset
]
return self.data_loaders
def validate_runner(self):
"""Function to Add validation hook to training"""
val_dataset = build_dataset(self.dataset_cfg["data"]["val"], dict(test_mode=True))
# The specific dataloader settings
val_dataloader = dict(samples_per_gpu=self.dataset_cfg["data"]["samples_per_gpu"], workers_per_gpu=self.dataset_cfg["data"]["workers_per_gpu"])
sampler_cfg = self.dataset_cfg["sampler"]
loader_cfg = dict(num_gpus=self.cfg["train"]["num_gpus"], dist=True, seed=self.cfg["train"]["exp_config"]["manual_seed"],
round_up=True, sampler_cfg=sampler_cfg)
val_loader_cfg = {**loader_cfg, 'shuffle': False, # Not shuffle by default
'sampler_cfg': None, # Not use sampler by default
'drop_last': False, # Not drop last by default
**val_dataloader}
val_dataloader = build_dataloader(val_dataset, **val_loader_cfg)
eval_cfg = dict(interval=self.train_cfg["evaluation"]["interval"], metric="accuracy", metric_options={"topk": (1, 1)})
eval_cfg['by_epoch'] = self.train_cfg["runner"]['type'] != 'IterBasedRunner'
eval_hook = DistEvalHook
# `EvalHook` needs to be executed after `IterTimerHook`.
# Otherwise, it will cause a bug if use `IterBasedRunner`.
# Refers to https://github.com/open-mmlab/mmcv/issues/1261
self.runner.register_hook(eval_hook(val_dataloader, **eval_cfg), priority='LOW')
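# Usage sketch (illustrative only; dataset/model construction is elided):
#
#     trainer = MMClsTrainer(dataset, model, timestamp=timestamp,
#                            meta=meta, result_dir="/results", experiment_spec=cfg)
#     trainer.set_up_trainer()   # dataloaders + DDP wrap + TAOEpochBasedRunner + hooks
#     trainer.fit()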
| tao_pytorch_backend-main | nvidia_tao_pytorch/core/mmlab/mmclassification/classification_trainer.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utils Function"""
import os
import os.path as osp
import tempfile
import time
import torch
import torch.distributed as dist
from abc import abstractmethod
import dataclasses
from omegaconf import OmegaConf
from nvidia_tao_pytorch.core.mmlab.mmclassification.model_params_mapping import map_params
import mmcv
from mmcv.runner import get_dist_info, load_checkpoint
from mmcls.apis.test import collect_results_gpu, collect_results_cpu
from mmcls.models import build_classifier
ROOT_DIR = os.getenv("NV_TLT_PYTORCH_TOP", os.getcwd())
class MMClsConfig(object):
"""Classification Config Class to convert Hydra config to MMcls config"""
def __init__(self,
config,
phase="train"):
"""Init Function."""
self.config = dataclasses.asdict(OmegaConf.to_object(config))
self.phase = phase
self.update_config(phase=phase)
def update_config(self, phase="train"):
""" Function to update hydra config to mmlab based config"""
self.config = self.update_dataset_config(self.config)
self.config = self.update_model_config(self.config)
if phase == "train":
self.config = self.update_train_params_config(self.config)
@abstractmethod
def update_custom_args(self, cfg):
"""Function to upate any custom args"""
custom_args = cfg.get("custom_args", None)
if custom_args:
cfg.update(custom_args)
cfg.pop("custom_args")
return cfg
@abstractmethod
def assign_arch_specific_params(self, cfg, map_params, backbone_type):
"""Function to assign arch specific parameters from the PARAMS json
Args:
map_params (Dict): Dictionary that has the mapping of the various classes.
backbone_type (str): Backbone type.
"""
if cfg and map_params:
params = map_params.keys()
for param in params:
orig = cfg[param]
map_params_tmp = map_params[param]
cfg[param] = map_params_tmp.get(backbone_type, orig)
return cfg
@abstractmethod
def update_dataset_config(self, cfg):
"""Update the dataset config"""
# Update Dataset config
# Update train data pipeline
img_norm_cfg = cfg["dataset"]["img_norm_cfg"]
pipeline = cfg["dataset"]["data"]["train"]["pipeline"] # Augmentations
pipeline_updated = [dict(type='LoadImageFromFile')] + pipeline + [dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(type='ToTensor', keys=['gt_label']),
dict(type='Collect', keys=['img', 'gt_label'])]
cfg["dataset"]["data"]["train"]["pipeline"] = pipeline_updated
# Update test pipeline
test_pipeline = []
test_pipeline_tmp = cfg["dataset"]["data"]["test"]["pipeline"]
# Convert resize size to tuple for mmcv loader
for aug in test_pipeline_tmp:
if aug["type"] == "Resize":
aug["size"] = tuple(aug["size"])
test_pipeline.append(aug)
test_pipeline_updated = [dict(type='LoadImageFromFile')] + test_pipeline + [dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img'])]
cfg["dataset"]["data"]["test"]["pipeline"] = test_pipeline_updated
cfg["dataset"]["data"]["val"]["pipeline"] = test_pipeline_updated
return cfg
@abstractmethod
def update_model_config(self, cfg):
"""Update the model config"""
# Update Model Config
# Head Update
# topk should be a tuple. Hydra converts it to a list by default
cfg["model"]["head"]["topk"] = tuple(cfg["model"]["head"]["topk"])
# init_cfg should be removed if checkpoint is none
if not cfg["model"]["init_cfg"]["checkpoint"]:
cfg["model"].pop("init_cfg")
# Update head params from the map json
map_params_head = map_params.get("head", None)
cfg["model"]["head"] = self.assign_arch_specific_params(cfg["model"]["head"], map_params_head, cfg["model"]["backbone"]["type"])
map_params_head = map_params.get("backbone", None)
# Update backbone params from the map json
cfg["model"]["backbone"] = self.assign_arch_specific_params(cfg["model"]["backbone"], map_params_head, cfg["model"]["backbone"]["type"])
if cfg["model"]["neck"]: # Neck config is not must. Hence we do this check
map_params_neck = map_params.get("neck", None)
cfg["model"]["neck"] = self.assign_arch_specific_params(cfg["model"]["neck"], map_params_neck, cfg["model"]["backbone"]["type"])
cfg["model"]["head"] = self.update_custom_args(cfg["model"]["head"])
cfg["model"]["backbone"] = self.update_custom_args(cfg["model"]["backbone"])
return cfg
def update_train_params_config(self, cfg):
"""Update train parameters"""
# Update Train Params
paramwise_cfg = cfg["train"]["train_config"].get("paramwise_cfg", None)
if paramwise_cfg:
cfg["train"]["train_config"]["optim_cfg"].update(paramwise_cfg)
return cfg
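# --- Editor's illustrative sketch (not part of the original module) ----------
# A minimal example of how MMClsConfig might be used to turn a Hydra/OmegaConf
# experiment config into an mmcls-style dict. `experiment_cfg` is a hypothetical
# structured config carrying the fields this class expects (dataset, model,
# train); it is not defined in this repository. Kept as an uncalled helper so
# importing this module has no side effects.
def _example_build_mmcls_config(experiment_cfg, phase="train"):
    """Illustrative only: convert a Hydra config to an mmcls config dict."""
    mmcls_cfg = MMClsConfig(experiment_cfg, phase=phase)
    # The converted dictionary is exposed via the `config` attribute.
    return mmcls_cfg.config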
def multi_gpu_test(model, data_loader, tmpdir=None, gpu_collect=False):
"""Test model with multiple gpus.
This method tests model with multiple gpus and collects the results
under two different modes: gpu and cpu. By setting 'gpu_collect=True'
it encodes results to gpu tensors and uses gpu communication for results
collection. In cpu mode it saves the results from the different gpus to 'tmpdir'
and the rank 0 worker collects them.
Args:
model (nn.Module): Model to be tested.
data_loader (nn.Dataloader): Pytorch data loader.
tmpdir (str): Path of directory to save the temporary results from
different gpus under cpu mode.
gpu_collect (bool): Option to use either gpu or cpu to collect results.
Returns:
list: The prediction results.
"""
model.eval()
results = []
dataset = data_loader.dataset
rank, world_size = get_dist_info()
if rank == 0:
# Check if tmpdir is valid for cpu_collect
if (not gpu_collect) and (tmpdir is not None and osp.exists(tmpdir)):
raise OSError(f'The tmpdir {tmpdir} already exists. Since tmpdir will be deleted after testing, please make sure you specify an empty one.')
prog_bar = mmcv.ProgressBar(len(dataset))
time.sleep(2)
dist.barrier()
img_names = []
for _, data in enumerate(data_loader):
img_names += [f["filename"] for f in data["img_metas"].data[0]]
with torch.no_grad():
result = model(return_loss=False, **data)
if isinstance(result, list):
results.extend(result)
else:
results.append(result)
if rank == 0:
batch_size = data['img'].size(0)
for _ in range(batch_size * world_size):
prog_bar.update()
# collect results from all ranks
if gpu_collect:
results = collect_results_gpu(results, len(dataset))
else:
results = collect_results_cpu(results, len(dataset), tmpdir)
return results, img_names
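# --- Editor's illustrative sketch (not part of the original module) ----------
# A minimal sketch of how `multi_gpu_test` might be wired up once a distributed
# process group has already been initialized (e.g. via torch.distributed.launch)
# and the model has been wrapped for distributed inference. The scratch_dir
# value is a hypothetical path. Kept as an uncalled helper so importing this
# module has no side effects.
def _example_distributed_eval(dist_model, data_loader, scratch_dir="/tmp/mmcls_eval"):
    """Illustrative only: run distributed evaluation, collecting results in cpu mode."""
    results, img_names = multi_gpu_test(dist_model, data_loader, tmpdir=scratch_dir, gpu_collect=False)
    return results, img_names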
def load_model(model_path, mmcls_config=None, return_ckpt=False):
"""Load state dict from the model path
Args:
model_path (str): Path to the model checkpoint to load.
mmcls_config (Dict): Dictionary containing MMCls config parameters.
return_ckpt (Bool): Whether to return the path of the cleaned (EMA-stripped) checkpoint instead of the model.
Returns:
The loaded model instance, or the temporary checkpoint path if return_ckpt is True.
"""
temp = tempfile.NamedTemporaryFile(suffix='.pth', delete=False)
tmp_model_path = temp.name
# Remove EMA related items from the state_dict
new_state_dict = {}
checkpoint = torch.load(model_path)
for k, v in checkpoint["state_dict"].items():
if 'ema_' not in k:
new_state_dict[k] = v
checkpoint['state_dict'] = new_state_dict
torch.save(checkpoint, tmp_model_path)
if return_ckpt:
return tmp_model_path
model_to_test = build_classifier(mmcls_config["model"])
_ = load_checkpoint(model_to_test, tmp_model_path, map_location='cpu')
temp.close()
return model_to_test
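# --- Editor's illustrative sketch (not part of the original module) ----------
# Shows how `load_model` might be combined with MMClsConfig when preparing a
# model for evaluation. `experiment_cfg` and the checkpoint path below are
# hypothetical placeholders, not artifacts shipped with this repository.
def _example_load_trained_classifier(experiment_cfg, checkpoint_path="checkpoint.pth"):
    """Illustrative only: strip EMA weights from a checkpoint and build the classifier."""
    mmcls_cfg = MMClsConfig(experiment_cfg, phase="eval").config
    return load_model(checkpoint_path, mmcls_config=mmcls_cfg)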
| tao_pytorch_backend-main | nvidia_tao_pytorch/core/mmlab/mmclassification/utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities using the NVML library for GPU devices."""
import json
import pynvml
BRAND_NAMES = {
pynvml.NVML_BRAND_UNKNOWN: "Unknown",
pynvml.NVML_BRAND_QUADRO: "Quadro",
pynvml.NVML_BRAND_TESLA: "Tesla",
pynvml.NVML_BRAND_NVS: "NVS",
pynvml.NVML_BRAND_GRID: "Grid",
pynvml.NVML_BRAND_TITAN: "Titan",
pynvml.NVML_BRAND_GEFORCE: "GeForce",
pynvml.NVML_BRAND_NVIDIA_VAPPS: "NVIDIA Virtual Applications",
pynvml.NVML_BRAND_NVIDIA_VPC: "NVIDIA Virtual PC",
pynvml.NVML_BRAND_NVIDIA_VCS: "NVIDIA Virtual Compute Server",
pynvml.NVML_BRAND_NVIDIA_VWS: "NVIDIA RTX Virtual Workstation",
pynvml.NVML_BRAND_NVIDIA_VGAMING: "NVIDIA Cloud Gaming",
pynvml.NVML_BRAND_QUADRO_RTX: "Quadro RTX",
pynvml.NVML_BRAND_NVIDIA_RTX: "NVIDIA RTX",
pynvml.NVML_BRAND_NVIDIA: "NVIDIA",
pynvml.NVML_BRAND_GEFORCE_RTX: "GeForce RTX",
pynvml.NVML_BRAND_TITAN_RTX: "TITAN RTX",
}
class GPUDevice:
"""Data structure to represent a GPU device."""
def __init__(self, pci_bus_id,
device_name,
device_brand,
memory,
cuda_compute_capability):
"""Data structure representing a GPU device.
Args:
pci_bus_id (hex): PCI bus ID of the GPU.
device_name (str): Name of the device GPU.
device_brand (int): Brand of the GPU.
memory (int): Total memory of the GPU in bytes.
cuda_compute_capability (tuple): CUDA compute capability of the GPU.
"""
self.name = device_name
self.pci_bus_id = pci_bus_id
if device_brand in BRAND_NAMES.keys():
self.brand = BRAND_NAMES[device_brand]
else:
self.brand = None
self.defined = True
self.memory = memory
self.cuda_compute_capability = cuda_compute_capability
def get_config(self):
"""Get json config of the device.
Returns:
config_dict (dict): Dictionary containing data about the device.
"""
assert self.defined, "Device wasn't defined."
config_dict = {}
config_dict["name"] = self.name.decode().replace(" ", "-")
config_dict["pci_bus_id"] = self.pci_bus_id
config_dict["brand"] = self.brand
config_dict["memory"] = self.memory
config_dict["cuda_compute_capability"] = self.cuda_compute_capability
return config_dict
def __str__(self):
"""Generate a printable representation of the device."""
config = self.get_config()
data_string = json.dumps(config, indent=2)
return data_string
def pynvml_context(fn):
"""Simple decorator to setup python nvml context.
Args:
fn: Function pointer.
Returns:
Output of fn.
"""
def _fn_wrapper(*args, **kwargs):
"""Wrapper setting up nvml context."""
try:
pynvml.nvmlInit()
return fn(*args, **kwargs)
finally:
pynvml.nvmlShutdown()
return _fn_wrapper
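# --- Editor's illustrative sketch (not part of the original module) ----------
# The decorator can wrap any helper that needs a live NVML session. The function
# below is a hypothetical example (it is not used elsewhere in this codebase);
# it only queries the installed driver version and requires an NVIDIA driver.
@pynvml_context
def _example_get_driver_version():
    """Illustrative only: return the NVIDIA driver version string."""
    return pynvml.nvmlSystemGetDriverVersion()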
@pynvml_context
def get_number_gpus_available():
"""Get the number of GPU's attached to the machine.
Returns:
num_gpus (int): Number of GPUs in the machine.
"""
num_gpus = pynvml.nvmlDeviceGetCount()
return num_gpus
@pynvml_context
def get_device_details():
"""Get details about each device.
Returns:
device_list (list): List of GPUDevice objects.
"""
num_gpus = pynvml.nvmlDeviceGetCount()
device_list = list()
assert num_gpus > 0, "Atleast 1 GPU is required for TAO Toolkit to run."
for idx in range(num_gpus):
handle = pynvml.nvmlDeviceGetHandleByIndex(idx)
pci_info = pynvml.nvmlDeviceGetPciInfo(handle)
device_name = pynvml.nvmlDeviceGetName(handle)
brand_name = pynvml.nvmlDeviceGetBrand(handle)
memory = pynvml.nvmlDeviceGetMemoryInfo(handle)
cuda_compute_capability = pynvml.nvmlDeviceGetCudaComputeCapability(handle)
device_list.append(
GPUDevice(
pci_info.busId,
device_name,
brand_name,
memory.total,
cuda_compute_capability
)
)
return device_list
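if __name__ == "__main__":
    # Editor's illustrative sketch (not part of the original module): print a
    # short summary of the visible GPUs when this file is run directly. Assumes
    # an NVIDIA driver is installed; otherwise pynvml will raise.
    print(f"GPUs available: {get_number_gpus_available()}")
    for device in get_device_details():
        print(device)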
| tao_pytorch_backend-main | nvidia_tao_pytorch/core/telemetry/nvml_utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TAO utils for uploading telemetry data."""
| tao_pytorch_backend-main | nvidia_tao_pytorch/core/telemetry/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilties to send data to the TAO Toolkit Telemetry Remote Service."""
import os
import shutil
import subprocess
import sys
import tarfile
import tempfile
import urllib
import requests
import urllib3
TELEMETRY_TIMEOUT = int(os.getenv("TELEMETRY_TIMEOUT", "30"))
def get_url_from_variable(variable, default=None):
"""Get the Telemetry Server URL."""
url = os.getenv(variable, default)
return url
def url_exists(url):
"""Check if a URL exists.
Args:
url (str): String to be verified as a URL.
Returns:
valid (bool): True/False
"""
url_request = urllib.request.Request(url)
url_request.get_method = lambda: 'HEAD'
try:
urllib.request.urlopen(url_request) # noqa pylint: disable=R1732
return True
except urllib.request.URLError:
return False
def get_certificates():
"""Download the cacert.pem file and return the path.
Returns:
path (str): UNIX path to the certificates.
"""
certificates_url = get_url_from_variable("TAO_CERTIFICATES_URL")
if not url_exists(certificates_url):
raise urllib.request.URLError("Url for the certificates not found.")
tmp_dir = tempfile.mkdtemp()
download_command = f"wget {certificates_url} -P {tmp_dir} --quiet"
try:
subprocess.check_call(
download_command, shell=True, stdout=sys.stdout
)
except Exception as exc:
raise urllib.request.URLError("Download certificates.tar.gz failed.") from exc
tarfile_path = os.path.join(tmp_dir, "certificates.tar.gz")
assert tarfile.is_tarfile(tarfile_path), (
"The downloaded file isn't a tar file."
)
with tarfile.open(name=tarfile_path, mode="r:gz") as tar_file:
filenames = tar_file.getnames()
for memfile in filenames:
member = tar_file.getmember(memfile)
tar_file.extract(member, tmp_dir)
file_list = [item for item in os.listdir(tmp_dir) if item.endswith(".pem")]
assert file_list, (
f"Didn't get pem files. Directory contents {file_list}"
)
return tmp_dir
def send_telemetry_data(network, action, gpu_data, num_gpus=1, time_lapsed=None, pass_status=False):
"""Wrapper to send TAO telemetry data.
Args:
network (str): Name of the network being run.
action (str): Subtask of the network called.
gpu_data (list): List of dictionaries containing data about the GPUs in the machine.
num_gpus (int): Number of GPUs used in the job.
time_lapsed (int): Time lapsed.
pass_status (bool): Job passed or failed.
Returns:
No explicit returns.
"""
urllib3.disable_warnings(urllib3.exceptions.SubjectAltNameWarning)
if os.getenv('TELEMETRY_OPT_OUT', "no").lower() in ["no", "false", "0"]:
url = get_url_from_variable("TAO_TELEMETRY_SERVER")
data = {
"version": os.getenv("TAO_TOOLKIT_VERSION", "4.0.0"),
"action": action,
"network": network,
"gpu": [device["name"] for device in gpu_data[:num_gpus]],
"success": pass_status
}
if time_lapsed is not None:
data["time_lapsed"] = time_lapsed
certificate_dir = get_certificates()
cert = ('client-cert.pem', 'client-key.pem')
requests.post(
url,
json=data,
cert=tuple([os.path.join(certificate_dir, item) for item in cert]), # noqa pylint: disable=R1728
timeout=TELEMETRY_TIMEOUT
)
shutil.rmtree(certificate_dir)
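# --- Editor's illustrative sketch (not part of the original module) ----------
# Shows how a task might report a finished run. Gathering GPU data reuses the
# NVML helpers from this package; the network/action names and time_lapsed
# value below are hypothetical. Sending requires TAO_TELEMETRY_SERVER and
# TAO_CERTIFICATES_URL to be set, so this is kept as an uncalled helper.
def _example_report_run(network="classification", action="train", time_lapsed=60):
    """Illustrative only: send a success record for a finished job."""
    from nvidia_tao_pytorch.core.telemetry.nvml_utils import get_device_details
    gpu_data = [device.get_config() for device in get_device_details()]
    send_telemetry_data(network, action, gpu_data, num_gpus=len(gpu_data),
                        time_lapsed=time_lapsed, pass_status=True)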
| tao_pytorch_backend-main | nvidia_tao_pytorch/core/telemetry/telemetry.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Custom nn modules for Torch."""
| tao_pytorch_backend-main | nvidia_tao_pytorch/core/modules/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Custom activation modules for Torch."""
| tao_pytorch_backend-main | nvidia_tao_pytorch/core/modules/activation/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Activation modules for Torch."""
import math
import warnings
from typing import Optional, Tuple
import torch
from torch import Tensor
from torch.nn.functional import (
dropout,
linear,
_in_projection_packed,
_in_projection,
_mha_shape_check,
softmax,
pad
)
from torch.nn.init import xavier_uniform_
from torch.nn.init import constant_
from torch.nn.init import xavier_normal_
from torch.nn import Linear, Module
from torch.nn.parameter import Parameter
from torch.overrides import (
has_torch_function, handle_torch_function
)
class NonDynamicallyQuantizableLinear(Linear):
"""Non Dynamically Quantized Linear activation.
This class exists solely to avoid triggering an obscure error when scripting
an improperly quantized attention layer. See this issue for details:
https://github.com/pytorch/pytorch/issues/58969
TODO: fail fast on quantization API usage error, then remove this class
and replace uses of it with plain Linear.
"""
def __init__(self, in_features: int, out_features: int, bias: bool = True,
device=None, dtype=None) -> None:
"""Constructor class for NonDynamicallyQuantizedLinear class."""
super().__init__(in_features, out_features, bias=bias,
device=device, dtype=dtype)
class MultiheadAttention(Module):
r"""Allows the model to jointly attend to information
from different representation subspaces as described in the paper:
`Attention Is All You Need <https://arxiv.org/abs/1706.03762>`_.
Multi-Head Attention is defined as:
.. math::
\text{MultiHead}(Q, K, V) = \text{Concat}(head_1,\dots,head_h)W^O
where :math:`head_i = \text{Attention}(QW_i^Q, KW_i^K, VW_i^V)`.
``forward()`` will use a special optimized implementation if all of the following
conditions are met:
- self attention is being computed (i.e., ``query``, ``key``, and ``value`` are the same tensor. This
restriction will be loosened in the future.)
- Either autograd is disabled (using ``torch.inference_mode`` or ``torch.no_grad``) or no tensor argument ``requires_grad``
- training is disabled (using ``.eval()``)
- dropout is 0
- ``add_bias_kv`` is ``False``
- ``add_zero_attn`` is ``False``
- ``batch_first`` is ``True`` and the input is batched
- ``kdim`` and ``vdim`` are equal to ``embed_dim``
- at most one of ``key_padding_mask`` or ``attn_mask`` is passed
- if a `NestedTensor <https://pytorch.org/docs/stable/nested.html>`_ is passed, neither ``key_padding_mask``
nor ``attn_mask`` is passed
If the optimized implementation is in use, a
`NestedTensor <https://pytorch.org/docs/stable/nested.html>`_ can be passed for
``query``/``key``/``value`` to represent padding more efficiently than using a
padding mask. In this case, a `NestedTensor <https://pytorch.org/docs/stable/nested.html>`_
will be returned, and an additional speedup proportional to the fraction of the input
that is padding can be expected.
Examples::
>>> # xdoctest: +SKIP
>>> multihead_attn = nn.MultiheadAttention(embed_dim, num_heads)
>>> attn_output, attn_output_weights = multihead_attn(query, key, value)
"""
__constants__ = ['batch_first']
bias_k: Optional[torch.Tensor]
bias_v: Optional[torch.Tensor]
def __init__(self, embed_dim, num_heads, dropout=0., bias=True, add_bias_kv=False, add_zero_attn=False,
kdim=None, vdim=None, batch_first=False, device=None, dtype=None) -> None:
"""Constructor for MultiHeadAttention activation.
Args:
embed_dim: Total dimension of the model.
num_heads: Number of parallel attention heads. Note that ``embed_dim`` will be split
across ``num_heads`` (i.e. each head will have dimension ``embed_dim // num_heads``).
dropout: Dropout probability on ``attn_output_weights``. Default: ``0.0`` (no dropout).
bias: If specified, adds bias to input / output projection layers. Default: ``True``.
add_bias_kv: If specified, adds bias to the key and value sequences at dim=0. Default: ``False``.
add_zero_attn: If specified, adds a new batch of zeros to the key and value sequences at dim=1.
Default: ``False``.
kdim: Total number of features for keys. Default: ``None`` (uses ``kdim=embed_dim``).
vdim: Total number of features for values. Default: ``None`` (uses ``vdim=embed_dim``).
batch_first: If ``True``, then the input and output tensors are provided
as (batch, seq, feature). Default: ``False`` (seq, batch, feature).
"""
factory_kwargs = {'device': device, 'dtype': dtype}
super(MultiheadAttention, self).__init__()
self.embed_dim = embed_dim
self.kdim = kdim if kdim is not None else embed_dim
self.vdim = vdim if vdim is not None else embed_dim
self._qkv_same_embed_dim = self.kdim == embed_dim and self.vdim == embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.batch_first = batch_first
self.head_dim = embed_dim // num_heads
assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads"
if not self._qkv_same_embed_dim:
self.q_proj_weight = Parameter(torch.empty((embed_dim, embed_dim), **factory_kwargs))
self.k_proj_weight = Parameter(torch.empty((embed_dim, self.kdim), **factory_kwargs))
self.v_proj_weight = Parameter(torch.empty((embed_dim, self.vdim), **factory_kwargs))
self.register_parameter('in_proj_weight', None)
else:
self.in_proj_weight = Parameter(torch.empty((3 * embed_dim, embed_dim), **factory_kwargs))
self.register_parameter('q_proj_weight', None)
self.register_parameter('k_proj_weight', None)
self.register_parameter('v_proj_weight', None)
if bias:
self.in_proj_bias = Parameter(torch.empty(3 * embed_dim, **factory_kwargs))
else:
self.register_parameter('in_proj_bias', None)
self.out_proj = NonDynamicallyQuantizableLinear(embed_dim, embed_dim, bias=bias, **factory_kwargs)
if add_bias_kv:
self.bias_k = Parameter(torch.empty((1, 1, embed_dim), **factory_kwargs))
self.bias_v = Parameter(torch.empty((1, 1, embed_dim), **factory_kwargs))
else:
self.bias_k = self.bias_v = None
self.add_zero_attn = add_zero_attn
self._reset_parameters()
def _reset_parameters(self):
if self._qkv_same_embed_dim:
xavier_uniform_(self.in_proj_weight)
else:
xavier_uniform_(self.q_proj_weight)
xavier_uniform_(self.k_proj_weight)
xavier_uniform_(self.v_proj_weight)
if self.in_proj_bias is not None:
constant_(self.in_proj_bias, 0.)
constant_(self.out_proj.bias, 0.)
if self.bias_k is not None:
xavier_normal_(self.bias_k)
if self.bias_v is not None:
xavier_normal_(self.bias_v)
def __setstate__(self, state):
"""Set state of the module."""
if '_qkv_same_embed_dim' not in state:
state['_qkv_same_embed_dim'] = True
super(MultiheadAttention, self).__setstate__(state)
def forward(self, query: Tensor, key: Tensor, value: Tensor, key_padding_mask: Optional[Tensor] = None,
need_weights: bool = True, attn_mask: Optional[Tensor] = None,
average_attn_weights: bool = True) -> Tuple[Tensor, Optional[Tensor]]:
r"""Forward pass method for the activation module.
Args:
query: Query embeddings of shape :math:`(L, E_q)` for unbatched input, :math:`(L, N, E_q)` when ``batch_first=False``
or :math:`(N, L, E_q)` when ``batch_first=True``, where :math:`L` is the target sequence length,
:math:`N` is the batch size, and :math:`E_q` is the query embedding dimension ``embed_dim``.
Queries are compared against key-value pairs to produce the output.
See "Attention Is All You Need" for more details.
key: Key embeddings of shape :math:`(S, E_k)` for unbatched input, :math:`(S, N, E_k)` when ``batch_first=False``
or :math:`(N, S, E_k)` when ``batch_first=True``, where :math:`S` is the source sequence length,
:math:`N` is the batch size, and :math:`E_k` is the key embedding dimension ``kdim``.
See "Attention Is All You Need" for more details.
value: Value embeddings of shape :math:`(S, E_v)` for unbatched input, :math:`(S, N, E_v)` when
``batch_first=False`` or :math:`(N, S, E_v)` when ``batch_first=True``, where :math:`S` is the source
sequence length, :math:`N` is the batch size, and :math:`E_v` is the value embedding dimension ``vdim``.
See "Attention Is All You Need" for more details.
key_padding_mask: If specified, a mask of shape :math:`(N, S)` indicating which elements within ``key``
to ignore for the purpose of attention (i.e. treat as "padding"). For unbatched `query`, shape should be :math:`(S)`.
Binary and byte masks are supported.
For a binary mask, a ``True`` value indicates that the corresponding ``key`` value will be ignored for
the purpose of attention. For a byte mask, a non-zero value indicates that the corresponding ``key``
value will be ignored.
need_weights: If specified, returns ``attn_output_weights`` in addition to ``attn_outputs``.
Default: ``True``.
attn_mask: If specified, a 2D or 3D mask preventing attention to certain positions. Must be of shape
:math:`(L, S)` or :math:`(N\cdot\text{num\_heads}, L, S)`, where :math:`N` is the batch size,
:math:`L` is the target sequence length, and :math:`S` is the source sequence length. A 2D mask will be
broadcasted across the batch while a 3D mask allows for a different mask for each entry in the batch.
Binary, byte, and float masks are supported. For a binary mask, a ``True`` value indicates that the
corresponding position is not allowed to attend. For a byte mask, a non-zero value indicates that the
corresponding position is not allowed to attend. For a float mask, the mask values will be added to
the attention weight.
average_attn_weights: If true, indicates that the returned ``attn_weights`` should be averaged across
heads. Otherwise, ``attn_weights`` are provided separately per head. Note that this flag only has an
effect when ``need_weights=True``. Default: ``True`` (i.e. average weights across heads)
Outputs:
- **attn_output** - Attention outputs of shape :math:`(L, E)` when input is unbatched,
:math:`(L, N, E)` when ``batch_first=False`` or :math:`(N, L, E)` when ``batch_first=True``,
where :math:`L` is the target sequence length, :math:`N` is the batch size, and :math:`E` is the
embedding dimension ``embed_dim``.
- **attn_output_weights** - Only returned when ``need_weights=True``. If ``average_attn_weights=True``,
returns attention weights averaged across heads of shape :math:`(L, S)` when input is unbatched or
:math:`(N, L, S)`, where :math:`N` is the batch size, :math:`L` is the target sequence length, and
:math:`S` is the source sequence length. If ``average_attn_weights=False``, returns attention weights per
head of shape :math:`(\text{num\_heads}, L, S)` when input is unbatched or :math:`(N, \text{num\_heads}, L, S)`.
.. note::
`batch_first` argument is ignored for unbatched inputs.
"""
is_batched = query.dim() == 3
why_not_fast_path = ''
if not is_batched:
why_not_fast_path = f"input not batched; expected query.dim() of 3 but got {query.dim()}"
elif query is not key or key is not value:
# When lifting this restriction, don't forget to either
# enforce that the dtypes all match or test cases where
# they don't!
why_not_fast_path = "non-self attention was used (query, key, and value are not the same Tensor)"
elif self.in_proj_bias is not None and query.dtype != self.in_proj_bias.dtype:
why_not_fast_path = f"dtypes of query ({query.dtype}) and self.in_proj_bias ({self.in_proj_bias.dtype}) don't match"
elif self.in_proj_weight is not None and query.dtype != self.in_proj_weight.dtype:
# this case will fail anyway, but at least they'll get a useful error message.
why_not_fast_path = f"dtypes of query ({query.dtype}) and self.in_proj_weight ({self.in_proj_weight.dtype}) don't match"
elif self.training:
why_not_fast_path = "training is enabled"
elif not self.batch_first:
why_not_fast_path = "batch_first was not True"
elif self.bias_k is not None:
why_not_fast_path = "self.bias_k was not None"
elif self.bias_v is not None:
why_not_fast_path = "self.bias_v was not None"
elif self.dropout:
why_not_fast_path = f"dropout was {self.dropout}, required zero"
elif self.add_zero_attn:
why_not_fast_path = "add_zero_attn was enabled"
elif not self._qkv_same_embed_dim:
why_not_fast_path = "_qkv_same_embed_dim was not True"
elif attn_mask is not None:
why_not_fast_path = "attn_mask was not None"
elif query.is_nested and key_padding_mask is not None:
why_not_fast_path = "key_padding_mask is not supported with NestedTensor input"
if not why_not_fast_path:
tensor_args = (
query,
key,
value,
self.in_proj_weight,
self.in_proj_bias,
self.out_proj.weight,
self.out_proj.bias,
)
# We have to use list comprehensions below because TorchScript does not support
# generator expressions.
if torch.overrides.has_torch_function(tensor_args):
why_not_fast_path = "some Tensor argument has_torch_function"
elif not all([(x.is_cuda or 'cpu' in str(x.device)) for x in tensor_args]):
why_not_fast_path = "some Tensor argument is neither CUDA nor CPU"
elif torch.is_grad_enabled() and any([x.requires_grad for x in tensor_args]):
why_not_fast_path = ("grad is enabled and at least one of query or the "
"input/output projection weights or biases requires_grad")
if not why_not_fast_path:
return torch._native_multi_head_attention(
query,
key,
value,
self.embed_dim,
self.num_heads,
self.in_proj_weight,
self.in_proj_bias,
self.out_proj.weight,
self.out_proj.bias,
key_padding_mask if key_padding_mask is not None else attn_mask,
need_weights,
average_attn_weights,
1 if key_padding_mask is not None else 0 if attn_mask is not None else None)
any_nested = query.is_nested or key.is_nested or value.is_nested
assert not any_nested, ("MultiheadAttention does not support NestedTensor outside of its fast path. " +
f"The fast path was not hit because {why_not_fast_path}")
if self.batch_first and is_batched:
# make sure that the transpose op does not affect the "is" property
if key is value:
if query is key:
query = key = value = query.transpose(1, 0)
else:
query, key = [x.transpose(1, 0) for x in (query, key)]
value = key
else:
query, key, value = [x.transpose(1, 0) for x in (query, key, value)]
if not self._qkv_same_embed_dim:
attn_output, attn_output_weights = multi_head_attention_forward(
query, key, value, self.embed_dim, self.num_heads,
self.in_proj_weight, self.in_proj_bias,
self.bias_k, self.bias_v, self.add_zero_attn,
self.dropout, self.out_proj.weight, self.out_proj.bias,
training=self.training,
key_padding_mask=key_padding_mask, need_weights=need_weights,
attn_mask=attn_mask, use_separate_proj_weight=True,
q_proj_weight=self.q_proj_weight, k_proj_weight=self.k_proj_weight,
v_proj_weight=self.v_proj_weight, average_attn_weights=average_attn_weights)
else:
attn_output, attn_output_weights = multi_head_attention_forward(
query, key, value, self.embed_dim, self.num_heads,
self.in_proj_weight, self.in_proj_bias,
self.bias_k, self.bias_v, self.add_zero_attn,
self.dropout, self.out_proj.weight, self.out_proj.bias,
training=self.training,
key_padding_mask=key_padding_mask, need_weights=need_weights,
attn_mask=attn_mask, average_attn_weights=average_attn_weights)
if self.batch_first and is_batched:
return attn_output.transpose(1, 0), attn_output_weights
return attn_output, attn_output_weights
def _scaled_dot_product_attention(
q: Tensor,
k: Tensor,
v: Tensor,
attn_mask: Optional[Tensor] = None,
dropout_p: float = 0.0,
) -> Tuple[Tensor, Tensor]:
r"""
Computes scaled dot product attention on query, key and value tensors, using
an optional attention mask if passed, and applying dropout if a probability
greater than 0.0 is specified.
Returns a tensor pair containing attended values and attention weights.
Args:
q, k, v: query, key and value tensors. See Shape section for shape details.
attn_mask: optional tensor containing mask values to be added to calculated
attention. May be 2D or 3D; see Shape section for details.
dropout_p: dropout probability. If greater than 0.0, dropout is applied.
Shape:
- q: :math:`(B, Nt, E)` where B is batch size, Nt is the target sequence length,
and E is embedding dimension.
- key: :math:`(B, Ns, E)` where B is batch size, Ns is the source sequence length,
and E is embedding dimension.
- value: :math:`(B, Ns, E)` where B is batch size, Ns is the source sequence length,
and E is embedding dimension.
- attn_mask: either a 3D tensor of shape :math:`(B, Nt, Ns)` or a 2D tensor of
shape :math:`(Nt, Ns)`.
- Output: attention values have shape :math:`(B, Nt, E)`; attention weights
have shape :math:`(B, Nt, Ns)`
"""
_, _, E = q.shape
q = q / math.sqrt(E)
# (B, Nt, E) x (B, E, Ns) -> (B, Nt, Ns)
if attn_mask is not None:
attn = torch.baddbmm(attn_mask, q, k.transpose(-2, -1))
else:
attn = torch.bmm(q, k.transpose(-2, -1))
attn = softmax(attn, dim=-1)
if dropout_p > 0.0:
attn = dropout(attn, p=dropout_p)
# (B, Nt, Ns) x (B, Ns, E) -> (B, Nt, E)
output = torch.bmm(attn, v)
return output, attn
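# --- Editor's illustrative sketch (not part of the original module) ----------
# A small shape check for `_scaled_dot_product_attention`: with q of shape
# (B, Nt, E) and k/v of shape (B, Ns, E), the output is (B, Nt, E) and the
# attention weights are (B, Nt, Ns). Kept as an uncalled helper.
def _example_sdpa_shapes(batch=2, tgt_len=4, src_len=6, embed=8):
    """Illustrative only: run the helper on random tensors and return the shapes."""
    q = torch.randn(batch, tgt_len, embed)
    k = torch.randn(batch, src_len, embed)
    v = torch.randn(batch, src_len, embed)
    output, attn = _scaled_dot_product_attention(q, k, v)
    assert output.shape == (batch, tgt_len, embed)
    assert attn.shape == (batch, tgt_len, src_len)
    return output.shape, attn.shape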
def multi_head_attention_forward(
query: Tensor,
key: Tensor,
value: Tensor,
embed_dim_to_check: int,
num_heads: int,
in_proj_weight: Optional[Tensor],
in_proj_bias: Optional[Tensor],
bias_k: Optional[Tensor],
bias_v: Optional[Tensor],
add_zero_attn: bool,
dropout_p: float,
out_proj_weight: Tensor,
out_proj_bias: Optional[Tensor],
training: bool = True,
key_padding_mask: Optional[Tensor] = None,
need_weights: bool = True,
attn_mask: Optional[Tensor] = None,
use_separate_proj_weight: bool = False,
q_proj_weight: Optional[Tensor] = None,
k_proj_weight: Optional[Tensor] = None,
v_proj_weight: Optional[Tensor] = None,
static_k: Optional[Tensor] = None,
static_v: Optional[Tensor] = None,
average_attn_weights: bool = True,
) -> Tuple[Tensor, Optional[Tensor]]:
r"""
Args:
query, key, value: map a query and a set of key-value pairs to an output.
See "Attention Is All You Need" for more details.
embed_dim_to_check: total dimension of the model.
num_heads: parallel attention heads.
in_proj_weight, in_proj_bias: input projection weight and bias.
bias_k, bias_v: bias of the key and value sequences to be added at dim=0.
add_zero_attn: add a new batch of zeros to the key and
value sequences at dim=1.
dropout_p: probability of an element to be zeroed.
out_proj_weight, out_proj_bias: the output projection weight and bias.
training: apply dropout if is ``True``.
key_padding_mask: if provided, specified padding elements in the key will
be ignored by the attention. This is an binary mask. When the value is True,
the corresponding value on the attention layer will be filled with -inf.
need_weights: output attn_output_weights.
attn_mask: 2D or 3D mask that prevents attention to certain positions. A 2D mask will be broadcasted for all
the batches while a 3D mask allows to specify a different mask for the entries of each batch.
use_separate_proj_weight: the function accept the proj. weights for query, key,
and value in different forms. If false, in_proj_weight will be used, which is
a combination of q_proj_weight, k_proj_weight, v_proj_weight.
q_proj_weight, k_proj_weight, v_proj_weight, in_proj_bias: input projection weight and bias.
static_k, static_v: static key and value used for attention operators.
average_attn_weights: If true, indicates that the returned ``attn_weights`` should be averaged across heads.
Otherwise, ``attn_weights`` are provided separately per head. Note that this flag only has an effect
when ``need_weights=True.``. Default: True
Shape:
Inputs:
- query: :math:`(L, E)` or :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is
the embedding dimension.
- key: :math:`(S, E)` or :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is
the embedding dimension.
- value: :math:`(S, E)` or :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is
the embedding dimension.
- key_padding_mask: :math:`(S)` or :math:`(N, S)` where N is the batch size, S is the source sequence length.
If a ByteTensor is provided, the non-zero positions will be ignored while the zero positions
will be unchanged. If a BoolTensor is provided, the positions with the
value of ``True`` will be ignored while the position with the value of ``False`` will be unchanged.
- attn_mask: 2D mask :math:`(L, S)` where L is the target sequence length, S is the source sequence length.
3D mask :math:`(N*num_heads, L, S)` where N is the batch size, L is the target sequence length,
S is the source sequence length. attn_mask ensures that position i is allowed to attend the unmasked
positions. If a ByteTensor is provided, the non-zero positions are not allowed to attend
while the zero positions will be unchanged. If a BoolTensor is provided, positions with ``True``
are not allowed to attend while ``False`` values will be unchanged. If a FloatTensor
is provided, it will be added to the attention weight.
- static_k: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length,
N is the batch size, E is the embedding dimension. E/num_heads is the head dimension.
- static_v: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length,
N is the batch size, E is the embedding dimension. E/num_heads is the head dimension.
Outputs:
- attn_output: :math:`(L, E)` or :math:`(L, N, E)` where L is the target sequence length, N is the batch size,
E is the embedding dimension.
- attn_output_weights: Only returned when ``need_weights=True``. If ``average_attn_weights=True``, returns
attention weights averaged across heads of shape :math:`(L, S)` when input is unbatched or
:math:`(N, L, S)`, where :math:`N` is the batch size, :math:`L` is the target sequence length, and
:math:`S` is the source sequence length. If ``average_attn_weights=False``, returns attention weights per
head of shape :math:`(num_heads, L, S)` when input is unbatched or :math:`(N, num_heads, L, S)`.
"""
tens_ops = (query, key, value, in_proj_weight, in_proj_bias, bias_k, bias_v, out_proj_weight, out_proj_bias)
if has_torch_function(tens_ops):
return handle_torch_function(
multi_head_attention_forward,
tens_ops,
query,
key,
value,
embed_dim_to_check,
num_heads,
in_proj_weight,
in_proj_bias,
bias_k,
bias_v,
add_zero_attn,
dropout_p,
out_proj_weight,
out_proj_bias,
training=training,
key_padding_mask=key_padding_mask,
need_weights=need_weights,
attn_mask=attn_mask,
use_separate_proj_weight=use_separate_proj_weight,
q_proj_weight=q_proj_weight,
k_proj_weight=k_proj_weight,
v_proj_weight=v_proj_weight,
static_k=static_k,
static_v=static_v,
average_attn_weights=average_attn_weights,
)
is_batched = _mha_shape_check(query, key, value, key_padding_mask, attn_mask, num_heads)
# For unbatched input, we unsqueeze at the expected batch-dim to pretend that the input
# is batched, run the computation and before returning squeeze the
# batch dimension so that the output doesn't carry this temporary batch dimension.
if not is_batched:
# unsqueeze if the input is unbatched
query = query.unsqueeze(1)
key = key.unsqueeze(1)
value = value.unsqueeze(1)
if key_padding_mask is not None:
key_padding_mask = key_padding_mask.unsqueeze(0)
# set up shape vars
tgt_len, bsz, embed_dim = query.shape
src_len, _, _ = key.shape
assert embed_dim == embed_dim_to_check, \
f"was expecting embedding dimension of {embed_dim_to_check}, but got {embed_dim}"
if isinstance(embed_dim, torch.Tensor):
# embed_dim can be a tensor when JIT tracing
head_dim = embed_dim.div(num_heads, rounding_mode='trunc')
else:
head_dim = embed_dim // num_heads
assert head_dim * num_heads == embed_dim, f"embed_dim {embed_dim} not divisible by num_heads {num_heads}"
if use_separate_proj_weight:
# allow MHA to have different embedding dimensions when separate projection weights are used
assert key.shape[:2] == value.shape[:2], \
f"key's sequence and batch dims {key.shape[:2]} do not match value's {value.shape[:2]}"
else:
assert key.shape == value.shape, f"key shape {key.shape} does not match value shape {value.shape}"
#
# compute in-projection
#
if not use_separate_proj_weight:
assert in_proj_weight is not None, "use_separate_proj_weight is False but in_proj_weight is None"
q, k, v = _in_projection_packed(query, key, value, in_proj_weight, in_proj_bias)
else:
assert q_proj_weight is not None, "use_separate_proj_weight is True but q_proj_weight is None"
assert k_proj_weight is not None, "use_separate_proj_weight is True but k_proj_weight is None"
assert v_proj_weight is not None, "use_separate_proj_weight is True but v_proj_weight is None"
if in_proj_bias is None:
b_q = b_k = b_v = None
else:
b_q, b_k, b_v = in_proj_bias.chunk(3)
q, k, v = _in_projection(query, key, value, q_proj_weight, k_proj_weight, v_proj_weight, b_q, b_k, b_v)
# prep attention mask
if attn_mask is not None:
if attn_mask.dtype == torch.uint8:
warnings.warn("Byte tensor for attn_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead.")
attn_mask = attn_mask.to(torch.bool)
else:
assert attn_mask.is_floating_point() or attn_mask.dtype == torch.bool, \
f"Only float, byte, and bool types are supported for attn_mask, not {attn_mask.dtype}"
# ensure attn_mask's dim is 3
if attn_mask.dim() == 2:
correct_2d_size = (tgt_len, src_len)
if attn_mask.shape != correct_2d_size:
raise RuntimeError(f"The shape of the 2D attn_mask is {attn_mask.shape}, but should be {correct_2d_size}.")
attn_mask = attn_mask.unsqueeze(0)
elif attn_mask.dim() == 3:
correct_3d_size = (bsz * num_heads, tgt_len, src_len)
if attn_mask.shape != correct_3d_size:
raise RuntimeError(f"The shape of the 3D attn_mask is {attn_mask.shape}, but should be {correct_3d_size}.")
else:
raise RuntimeError(f"attn_mask's dimension {attn_mask.dim()} is not supported")
# prep key padding mask
if key_padding_mask is not None and key_padding_mask.dtype == torch.uint8:
warnings.warn("Byte tensor for key_padding_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead.")
key_padding_mask = key_padding_mask.to(torch.bool)
# add bias along batch dimension (currently second)
if bias_k is not None and bias_v is not None:
assert static_k is None, "bias cannot be added to static key."
assert static_v is None, "bias cannot be added to static value."
k = torch.cat([k, bias_k.repeat(1, bsz, 1)])
v = torch.cat([v, bias_v.repeat(1, bsz, 1)])
if attn_mask is not None:
attn_mask = pad(attn_mask, (0, 1))
if key_padding_mask is not None:
key_padding_mask = pad(key_padding_mask, (0, 1))
else:
assert bias_k is None
assert bias_v is None
#
# reshape q, k, v for multihead attention and make em batch first
#
q = q.contiguous().view(tgt_len, bsz * num_heads, head_dim).transpose(0, 1)
if static_k is None:
k = k.contiguous().view(k.shape[0], bsz * num_heads, head_dim).transpose(0, 1)
else:
# TODO finish disentangling control flow so we don't do in-projections when statics are passed
assert static_k.size(0) == bsz * num_heads, \
f"expecting static_k.size(0) of {bsz * num_heads}, but got {static_k.size(0)}"
assert static_k.size(2) == head_dim, \
f"expecting static_k.size(2) of {head_dim}, but got {static_k.size(2)}"
k = static_k
if static_v is None:
v = v.contiguous().view(v.shape[0], bsz * num_heads, head_dim).transpose(0, 1)
else:
# TODO finish disentangling control flow so we don't do in-projections when statics are passed
assert static_v.size(0) == bsz * num_heads, \
f"expecting static_v.size(0) of {bsz * num_heads}, but got {static_v.size(0)}"
assert static_v.size(2) == head_dim, \
f"expecting static_v.size(2) of {head_dim}, but got {static_v.size(2)}"
v = static_v
# add zero attention along batch dimension (now first)
if add_zero_attn:
zero_attn_shape = (bsz * num_heads, 1, head_dim)
k = torch.cat([k, torch.zeros(zero_attn_shape, dtype=k.dtype, device=k.device)], dim=1)
v = torch.cat([v, torch.zeros(zero_attn_shape, dtype=v.dtype, device=v.device)], dim=1)
if attn_mask is not None:
attn_mask = pad(attn_mask, (0, 1))
if key_padding_mask is not None:
key_padding_mask = pad(key_padding_mask, (0, 1))
# update source sequence length after adjustments
src_len = k.size(1)
# merge key padding and attention masks
if key_padding_mask is not None:
assert key_padding_mask.shape == (bsz, src_len), \
f"expecting key_padding_mask shape of {(bsz, src_len)}, but got {key_padding_mask.shape}"
key_padding_mask = key_padding_mask.view(bsz, 1, 1, src_len). \
expand(-1, num_heads, -1, -1).reshape(bsz * num_heads, 1, src_len)
if attn_mask is None:
attn_mask = key_padding_mask
elif attn_mask.dtype == torch.bool:
attn_mask = attn_mask.logical_or(key_padding_mask)
else:
attn_mask = attn_mask.masked_fill(key_padding_mask, float("-inf"))
# convert mask to float
if attn_mask is not None and attn_mask.dtype == torch.bool:
new_attn_mask = torch.zeros_like(attn_mask, dtype=q.dtype)
new_attn_mask.masked_fill_(attn_mask, float("-inf"))
attn_mask = new_attn_mask
# adjust dropout probability
if not training:
dropout_p = 0.0
#
# (deep breath) calculate attention and out projection
#
attn_output, attn_output_weights = _scaled_dot_product_attention(q, k, v, attn_mask, dropout_p)
attn_output = attn_output.transpose(0, 1).contiguous().view(tgt_len * bsz, embed_dim)
attn_output = linear(attn_output, out_proj_weight, out_proj_bias)
attn_output = attn_output.view(tgt_len, bsz, attn_output.size(1))
if need_weights:
# optionally average attention weights over heads
attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len)
if average_attn_weights:
attn_output_weights = attn_output_weights.sum(dim=1) / num_heads
if not is_batched:
# squeeze the output if input was unbatched
attn_output = attn_output.squeeze(1)
attn_output_weights = attn_output_weights.squeeze(0)
return attn_output, attn_output_weights
if not is_batched:
# squeeze the output if input was unbatched
attn_output = attn_output.squeeze(1)
return attn_output, None
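if __name__ == "__main__":
    # Editor's illustrative sketch (not part of the original module): a minimal
    # self-attention smoke test. Shapes and sizes are arbitrary; depending on the
    # installed torch version the optimized fast path or the Python fallback
    # defined above may be taken.
    torch.manual_seed(0)
    mha = MultiheadAttention(embed_dim=16, num_heads=4, batch_first=True)
    mha.eval()
    tokens = torch.randn(2, 5, 16)  # (batch, seq, embed)
    with torch.no_grad():
        attn_out, attn_weights = mha(tokens, tokens, tokens)
    print(attn_out.shape, attn_weights.shape)  # expected: (2, 5, 16) and (2, 5, 5)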
| tao_pytorch_backend-main | nvidia_tao_pytorch/core/modules/activation/activation.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""File containing vocabulary cookbook for PyTorch models."""
import operator
# Import the required libraries.
from dataclasses import dataclass
from eff.validator.conditions import Expression
from nvidia_tao_pytorch.core.cookbooks.cookbook import Cookbook
__all__ = ["VocabularyCondition", "VocabularyCookbook"]
@dataclass
class VocabularyCondition:
"""Condition used to activate the recipe that restores a vocab from archive.
Expressions that must be satisfied: EFF format version (=1), vocab_archive_format (=1).
"""
format_version: Expression = Expression(operator.eq, 1)
vocab_archive_format: Expression = Expression(operator.eq, 1)
class VocabularyCookbook(Cookbook):
""" Class providing recipes for storing/restoring dicts. """
@classmethod
def to_csv(cls, obj: list) -> str:
"""
Serializes a vocabulary list to a "csv"-style string: one word per line, no commas.
Args:
obj: vocabulary (list)
Returns:
"Serialized" list.
"""
return "\n".join(obj)
@classmethod
def restore_from(cls, restore_path: str, filename: str) -> dict:
"""Restores and deserializes vocabulary.
Assumes simple format: one word per line.
Args:
restore_path: Path to the archive.
filename: Name of the file in the archive.
Returns:
Vocabulary as a list of words.
"""
# @zeyuz: must process NeMo prefix
if 'nemo:' in restore_path:
restore_path = restore_path.split('nemo:')[1]
# Try to get the content of the file.
content = cls.get_file_content(archive_path=restore_path, filename=filename)
# Return vocabulary - list of words.
return content.split("\n")
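# --- Editor's illustrative sketch (not part of the original module) ----------
# `to_csv` simply joins the vocabulary with newlines; `restore_from` performs
# the inverse from an EFF archive (the archive and file names below are
# hypothetical placeholders).
if __name__ == "__main__":
    print(VocabularyCookbook.to_csv(["hello", "world"]))  # -> "hello\nworld"
    # words = VocabularyCookbook.restore_from("model.tlt", "vocab.txt")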
| tao_pytorch_backend-main | nvidia_tao_pytorch/core/cookbooks/vocabulary_cookbook.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""File containing generic cookbook for PyTorch models."""
import operator
from dataclasses import dataclass
from eff.core import Archive
from eff.validator.conditions import Expression
from eff.validator.validator import validate_metadata
from nvidia_tao_pytorch.core.cookbooks.cookbook import Cookbook
_TLT_PYTORCH_MODEL = "tlt_pytorch_model.pth"
@dataclass
class TLTPyTorchCheckpointCondition:
"""Condition used to activate the recipes able to save/restore full model (metadata, model_checkpoints, config)"""
# EFF format version
format_version: Expression = Expression(operator.eq, 1)
# Version of the TLT archive
tlt_archive_version: Expression = Expression(operator.eq, 1)
# Runtime that can execute the model stored in the archive
runtime: Expression = Expression(operator.eq, 'PyTorch')
# indicate that if the model_checkpoints can be used to resume training
resume_training: bool = True
@dataclass
class TLTPyTorchPretrainCondition:
"""Condition used to activate the recipes able to save/restore full model (metadata, model_checkpoints, config)"""
# EFF format version
format_version: Expression = Expression(operator.eq, 1)
# Version of the TLT archive
tlt_archive_version: Expression = Expression(operator.eq, 1)
# Runtime that can execute the model stored in the archive
runtime: Expression = Expression(operator.eq, 'PyTorch')
# indicate that if the model_checkpoints can be used to resume training
resume_training: bool = False
class TLTPyTorchCookbook(Cookbook):
""" Class providing recipes for storing/restoring TLT-PyTorch models. """
def save_checkpoint_to(self, ckpt, save_path: str, force: bool = True):
"""
Saves model instance (weights and configuration) into an EFF archive.
Method creates an EFF-based file that is an archive (tar.gz) with the following:
manifest.yaml - yaml file describing the content of the archive.
model_ckpt.pth - model checkpoint
.. note::
For NVIDIA TLT the EFF archives will use .tlt postfix.
Args:
ckpt: Model checkpoint dictionary to be serialized into the archive.
save_path: Path to archive file where model instance should be saved.
force: Setting to True enables to overwrite the existing properties/files with the ones coming from
class (DEFAULT:True)
"""
# Create EFF archive. Set some standard fields, most importantly:
# * obj_cls - fully classified class name (with modules).
with Archive.create(
save_path=save_path,
encryption_key=TLTPyTorchCookbook.get_passphrase(),
origin='TLT',
runtime='PyTorch',
tlt_archive_version=1,
resume_training=True,
) as effa:
# Add additional metadata stored by the TLT PyTorch class.
effa.add_metadata(force=force, **self.class_metadata) # pylint: disable=not-a-mapping
# Add model weights to archive - encrypt when the encryption key is provided.
model_ckpt_file = effa.create_file_handle(
name=_TLT_PYTORCH_MODEL,
description="File containing model weights and states to resume training",
encrypted=(TLTPyTorchCookbook.get_passphrase() is not None),
)
import torch
# Save models state using torch save.
torch.save(ckpt, model_ckpt_file)
if not validate_metadata(TLTPyTorchCheckpointCondition, metadata=effa.metadata):
raise TypeError("Archive doesn't have the required format, version or object class type")
def save_pretrain_to(self, model_state_dict, save_path: str, force: bool = True):
"""
Saves model instance (weights and configuration) into an EFF archive.
Method creates an EFF-based file that is an archive (tar.gz) with the following:
manifest.yaml - yaml file describing the content of the archive.
model_weights.pth - model weights only
.. note::
For NVIDIA TLT the EFF archives will use .tlt postfix.
Args:
model_state_dict: Model state dict (weights only) to be serialized into the archive.
save_path: Path to archive file where model instance should be saved.
force: Setting to True enables to overwrite the existing properties/files with the ones coming from
class (DEFAULT:True)
"""
# Create EFF archive. Set some standard fields, most importantly:
# * obj_cls - fully classified class name (with modules).
with Archive.create(
save_path=save_path,
encryption_key=TLTPyTorchCookbook.get_passphrase(),
origin='TLT',
runtime='PyTorch',
tlt_archive_version=1,
resume_training=False,
) as effa:
# Add additional metadata stored by the TLT PyTorch class.
effa.add_metadata(force=force, **self.class_metadata) # pylint: disable=not-a-mapping
# Add model weights to archive - encrypt when the encryption key is provided.
model_file = effa.create_file_handle(
name=_TLT_PYTORCH_MODEL,
description="File containing only model weights",
encrypted=(TLTPyTorchCookbook.get_passphrase() is not None),
)
import torch
# Save models state using torch save.
torch.save(model_state_dict, model_file)
if not validate_metadata(TLTPyTorchPretrainCondition, metadata=effa.metadata):
raise TypeError("Archive doesn't have the required format, version or object class type")
def restore_from_ckpt(
self,
restore_path: str,
):
"""
Restores model checkpoint from EFF Archive.
.. note::
For NVIDIA TLT the EFF archives will use .tlt postfix.
Args:
restore_path: path to file from which model should be instantiated
Returns:
model checkpoint
"""
# Restore the archive.
with Archive.restore_from(
restore_path=restore_path, passphrase=TLTPyTorchCookbook.get_passphrase()
) as restored_effa:
# Validate the indicated archive using the conditions associated with this recipe.
if not validate_metadata(TLTPyTorchCheckpointCondition, metadata=restored_effa.metadata):
raise TypeError("Archive doesn't have the required runtime, format, version or resume training status")
# Restore the model checkpoint.
import torch
model_ckpt_file, _ = restored_effa.retrieve_file_handle(name=_TLT_PYTORCH_MODEL)
model_ckpt = torch.load(model_ckpt_file)
return model_ckpt
def restore_from_pretrain(
self,
restore_path: str,
):
"""
Restores pretrained model weights from an EFF Archive.
.. note::
For NVIDIA TLT the EFF archives will use .tlt postfix.
Args:
restore_path: path to file from which model should be instantiated
Returns:
model state dict (weights only)
"""
# Restore the archive.
with Archive.restore_from(
restore_path=restore_path, passphrase=TLTPyTorchCookbook.get_passphrase()
) as restored_effa:
# Validate the indicated archive using the conditions associated with this recipe.
if not validate_metadata(TLTPyTorchPretrainCondition, metadata=restored_effa.metadata):
raise TypeError("Archive doesn't have the required runtime, format, version or resume training status")
# Restore the model checkpoint.
import torch
model_state_dict_file, _ = restored_effa.retrieve_file_handle(name=_TLT_PYTORCH_MODEL)
model_state_dict = torch.load(model_state_dict_file)
return model_state_dict
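# --- Editor's illustrative sketch (not part of the original module) ----------
# A hedged round-trip example: save a checkpoint dict to an encrypted .tlt
# archive and read it back. The passphrase and archive name are hypothetical,
# and the helper is never called, so importing this module has no side effects.
def _example_checkpoint_round_trip(ckpt, archive_path="model.tlt", passphrase="nvidia_tao"):
    """Illustrative only: write `ckpt` to an EFF archive and restore it."""
    TLTPyTorchCookbook.set_passphrase(passphrase)
    cookbook = TLTPyTorchCookbook()
    cookbook.save_checkpoint_to(ckpt, archive_path)
    return cookbook.restore_from_ckpt(archive_path)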
| tao_pytorch_backend-main | nvidia_tao_pytorch/core/cookbooks/tlt_pytorch_cookbook.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing definitions of all cookbooks for TAO Toolkit."""
| tao_pytorch_backend-main | nvidia_tao_pytorch/core/cookbooks/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""File containing cookbook base class and methods"""
import os
from typing import Union
from eff.callbacks import BinaryContentCallback, StringContentCallback
from eff.core.archive import Archive
from eff.utils.object_class import generate_obj_cls
from eff.validator.validator import validate_metadata
from ruamel.yaml import YAML, yaml_object
__all__ = ['Cookbook', 'ArtifactPathType']
yaml = YAML()
@yaml_object(yaml)
class ArtifactPathType():
"""
ArtifactPathType refers to the type of the path that the artifact is located at.
LOCAL_PATH: A user local filepath that exists on the file system.
TAR_PATH: A (generally flattened) filepath that exists inside of an archive (that may have its own full path).
"""
LOCAL_PATH = 'LOCAL_PATH'
TAR_PATH = 'TAR_PATH'
class classproperty(object):
"""Helper class, defining a combined classmethod+property decorator """
def __init__(self, fget):
self.fget = fget
def __get__(self, owner_self, owner_cls):
return self.fget(owner_cls)
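# --- Editor's illustrative note (not part of the original module) ------------
# `classproperty` combines @classmethod and @property: the getter receives the
# owning class, so the value can be read directly from the class object. A
# hypothetical example:
#
#     class Demo:
#         _default = "demo"
#
#         @classproperty
#         def default(cls):
#             return cls._default
#
#     assert Demo.default == "demo"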
class Cookbook(object):
"""Parent, abstract cookbook class.
This class cannot be used as is. You will need to derive a class for your application.
"""
def __init_subclass__(cls, **kwargs):
""" Adds required properties to the (sub)classes. """
# Class attribute: a phrase used to generate encryption/decryption key.
cls._passphrase = None
# Class attribute: additional metadata that will be added to any instantiated object of that class.
cls._class_metadata = {}
# Class attribute: additional files that will be added to any instantiated object of that class.
cls._class_file_content = {}
@classmethod
def get_passphrase(cls):
"""Property getter, returning the passphrase (scope: given class)."""
return cls._passphrase
@classmethod
def set_passphrase(cls, passphrase: str):
"""Property setter, setting the passphrase (scope: given class).
Args:
passphrase (str): a phrase used to generate encryption/decryption key.
"""
cls._passphrase = passphrase
@classmethod
def add_class_metadata(cls, force: bool = False, **metadata) -> None:
"""
        Method responsible for adding new key-value pairs to class metadata.
        These pairs will then be added to every instance of the class, so they can be used in recipes, e.g.
added to every created archive.
Args:
            force: Setting to True allows overwriting the values of existing keys (DEFAULT: False)
metadata: keyword args (key-value pairs) that will be added to metadata.
Raises:
            KeyError when a variable with a given key is already present in the cookbook metadata (unless force=True)
"""
# if self._state != Archive.States.SAVE_INIT:
# raise xxx
# Iterate through named arguments one by one.
for key, value in metadata.items():
# Handle the key duplicate.
if not force and key in cls._class_metadata.keys():
raise KeyError("Variable `{}` already present in class metadata".format(key))
# Add argument to metadata.
cls._class_metadata[key] = value
@classproperty
def class_metadata(cls):
"""
Property getter for class_metadata.
Returns:
Class metadata.
"""
return cls._class_metadata
@classmethod
def add_class_file_content(
cls,
name: str,
content: str,
description: str,
encryption: Union[bool, str] = False,
binary: bool = False,
**properties
    ) -> None:
"""
        Method responsible for adding a new file (virtual file, i.e. a "name" with content and additional properties) to the class.
Those files will be passed to Archive on save.
Args:
name: Name of the file (can be relative/absolute path).
content: Content of the file.
description: Description of the content of the file
encryption: flag indicating whether file will be encrypted or not.
binary: flag indicating whether file will be binary or text (DEFAULT: False).
properties: list of additional named params that will be added to file properties.
Raises:
KeyError when file with a given name is already present in the archive.
"""
# Use file name as key (not the whole path).
_, file_key = os.path.split(name)
# Handle the key duplicate.
        if file_key in cls._class_file_content.keys():
raise KeyError("File `{}` already present in the cookbook".format(name))
# Add "default" file properties.
props = {"description": description, "encryption": encryption, "binary": binary}
        # Iterate through the additional properties.
for key, value in properties.items():
props[key] = value
# Set properties.
cls._class_file_content[file_key] = (content, props)
def add_class_file_properties(self, name: str, force: bool = False, **properties) -> None:
"""
        Method responsible for adding new key-value pairs to a given class file's properties.
Args:
name: Name of the file (can be relative/absolute path).
            force: Setting to True allows overwriting the values of existing keys (DEFAULT: False)
properties: list of additional named params that will be added to file properties.
Raises:
KeyError: when file with a name is not present in the archive.
KeyError: when property with a given name is already present in file properties.
"""
# if self._state != Archive.States.SAVE_INIT:
# raise xxx
# Use file name as key (not the whole path).
_, file_key = os.path.split(name)
# Check if file exists.
if file_key not in self._class_file_content.keys():
raise KeyError("Class file `{}` not present in the archive".format(file_key))
# Iterate through named arguments one by one.
for key, value in properties.items():
# Handle the key duplicate.
if not force and key in self._class_file_content[file_key][1].keys():
raise KeyError("Variable `{}` already present in file `{}` properties".format(key, file_key))
# Add/update properties.
self._class_file_content[file_key][1][key] = value
@classproperty
def class_file_content(cls):
"""
Property getter for class_file_content.
Returns:
            Class dict with "files with content", key = filename: (content, properties).
"""
return cls._class_file_content
@classmethod
def validate_archive(cls, restore_path: str, *v_conditions, obj_cls=None, **kv_conditions) -> bool:
"""Opens the indicated archive and tries to validate it by comparing the metadata agains the provided conditions.
Args:
restore_path: Path to the file/archive to be validated.
obj_cls: Object class, if not None, it will be used as additional condition (DEFAULT: None)
v_conditions: Conditions.
kv_conditions: List of named conditions (key-values, key-Expressions)
Returns:
            True if all conditions are fulfilled.
"""
# Extend key-value conditions.
conds = kv_conditions
if obj_cls is not None:
# Add target class to be validated.
conds["obj_cls"] = generate_obj_cls(obj_cls)
# Try to retrieve the manifest from the archive.
manifest = Archive.restore_manifest(restore_path=restore_path)
# Validate metadata using the provided conditions.
return validate_metadata(*v_conditions, metadata=manifest["metadata"], **conds)
@classmethod
def get_metadata(cls, archive_path: str):
"""
Elementary class method enabling the user to access metadata of the existing archive.
Args:
archive_path: Path to the archive.
Returns:
Dictionary showing the current content of the metadata object.
Note changes to this object won't affect the original metadata.
"""
# Open the archive, using the class encryption key.
with Archive.restore_from(restore_path=archive_path, passphrase=cls.get_passphrase()) as effa:
return effa.metadata
@classmethod
def get_files(cls, archive_path: str, **filter_properties):
"""
Elementary class method enabling the user to access list of files of the existing archive.
Args:
archive_path: Path to the archive.
filter_properties: key-value pairs that will be used to filter the files.
Returns:
Dictionary showing the files, in format (filename:properties), where properties is a dictionary, containing
file properties, starting from `description`.
"""
# Open the archive, using the class encryption key.
with Archive.restore_from(restore_path=archive_path, passphrase=cls.get_passphrase()) as effa:
return effa.artifacts.filter(filter_properties)
@classmethod
def get_file_properties(cls, archive_path: str, filename: str):
"""
Elementary class method enabling the user to access properties of a given file in the existing archive.
Args:
archive_path: Path to the archive.
filename: Name of the file in the archive.
Returns:
file properties, as dict of (key:value) pairs.
"""
# Open the archive, using the class encryption key.
with Archive.restore_from(restore_path=archive_path, passphrase=cls.get_passphrase()) as effa:
# Retrieve the "properties" of the file in archive.
return effa.artifacts[filename].properties
@classmethod
def get_file_content(cls, archive_path: str, filename: str, binary: bool = False):
"""
Elementary class method enabling the user to access content of a given file in the existing archive.
Args:
archive_path: Path to the archive.
filename: Name of the file in the archive.
binary: Flag indicating that we want to read/return the content in the binary format (DEFAULT: False).
Returns:
File content, as a "raw" string or bytes (depending on binary).
Raises:
UnicodeDecodeError: When trying to return the binary file in the text form.
"""
# Open the archive, using the class encryption key.
with Archive.restore_from(restore_path=archive_path, passphrase=cls.get_passphrase()) as effa:
if binary:
content_callback = BinaryContentCallback()
else:
content_callback = StringContentCallback()
# Return the content.
return effa.artifacts[filename].get_content(content_callback=content_callback)
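# --- Illustrative usage sketch (not part of the original module). It shows how a
# hypothetical derived cookbook could be configured and then used to inspect an
# archive; the subclass name, passphrase, metadata keys and archive path below are
# placeholders, not values required by the library.
if __name__ == "__main__":
    class ExampleCookbook(Cookbook):
        """Hypothetical application-specific cookbook."""
        pass
    ExampleCookbook.set_passphrase("example-passphrase")
    ExampleCookbook.add_class_metadata(runtime="pytorch", format_version=1.0)
    ExampleCookbook.add_class_file_content(
        name="experiment.yaml",
        content="lr: 0.01",
        description="hypothetical training hyper-parameters",
    )
    archive_path = "/path/to/model.tlt"  # placeholder path to an existing archive
    if ExampleCookbook.validate_archive(archive_path, runtime="pytorch"):
        print(ExampleCookbook.get_metadata(archive_path))
        print(ExampleCookbook.get_file_content(archive_path, "experiment.yaml"))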
| tao_pytorch_backend-main | nvidia_tao_pytorch/core/cookbooks/cookbook.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Hydra runner."""
| tao_pytorch_backend-main | nvidia_tao_pytorch/core/hydra/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Original source taken from https://github.com/NVIDIA/NeMo
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility class to work with hydra config files."""
import functools
import os
import sys
from typing import Any, Callable, Optional
from hydra._internal.utils import _run_hydra, get_args_parser
from hydra.core.config_store import ConfigStore
from hydra.types import TaskFunction
from omegaconf import DictConfig
def hydra_runner(
config_path: Optional[str] = ".", config_name: Optional[str] = None, schema: Optional[Any] = None
) -> Callable[[TaskFunction], Any]:
"""
Decorator used for passing the Config paths to main function.
Optionally registers a schema used for validation/providing default values.
Args:
config_path: Optional path that will be added to config search directory.
NOTE: The default value of `config_path` has changed between Hydra 1.0 and Hydra 1.1+.
Please refer to https://hydra.cc/docs/next/upgrades/1.0_to_1.1/changes_to_hydra_main_config_path/
for details.
config_name: Pathname of the config file.
schema: Structured config type representing the schema used for validation/providing default values.
"""
def decorator(task_function: TaskFunction) -> Callable[[], None]:
@functools.wraps(task_function)
def wrapper(cfg_passthrough: Optional[DictConfig] = None) -> Any:
            # Check if a config was passed in directly.
if cfg_passthrough is not None:
return task_function(cfg_passthrough)
args = get_args_parser()
# Parse arguments in order to retrieve overrides
parsed_args = args.parse_args()
# Get overriding args in dot string format
overrides = parsed_args.overrides # type: list
# Disable the creation of .hydra subdir
# https://hydra.cc/docs/tutorials/basic/running_your_app/working_directory
overrides.append("hydra.output_subdir=null")
# Hydra logging outputs only to stdout (no log file).
# https://hydra.cc/docs/configure_hydra/logging
overrides.append("hydra/job_logging=stdout")
# Set run.dir ONLY for ExpManager "compatibility" - to be removed.
overrides.append("hydra.run.dir=.")
# Check if user set the schema.
if schema is not None:
# Create config store.
cs = ConfigStore.instance()
# Get the correct ConfigStore "path name" to "inject" the schema.
if parsed_args.config_name is not None:
path, name = os.path.split(parsed_args.config_name)
# Make sure the path is not set - as this will disable validation scheme.
if path != '':
sys.stderr.write(
"ERROR Cannot set config file path using `--config-name` when "
"using schema. Please set path using `--config-path` and file name using "
"`--config-name` separately.\n"
)
sys.exit(1)
else:
name = config_name
# Register the configuration as a node under the name in the group.
cs.store(name=name, node=schema) # group=group,
# Wrap a callable object with name `parse_args`
# This is to mimic the ArgParser.parse_args() API.
def parse_args(self, args=None, namespace=None):
return parsed_args
# Overwriting the default definition of parse_args
# function in argparse.Namespace.
parsed_args.parse_args = parse_args
            # no return value from run_hydra() as it may sometimes actually run the task_function
# multiple times (--multirun)
# argparse_wrapper = _argparse_wrapper(args)
argparse_wrapper = parsed_args
_run_hydra(
args=argparse_wrapper,
args_parser=args,
task_function=task_function,
config_path=config_path,
config_name=config_name,
)
return wrapper
return decorator
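# --- Illustrative usage sketch (not part of the original module). It shows how a task
# entry point might combine `hydra_runner` with a structured config schema; the config
# directory/name, the schema fields and the body of `example_main` are hypothetical
# placeholders.
if __name__ == "__main__":
    from dataclasses import dataclass

    @dataclass
    class ExampleExperimentConfig:
        """Hypothetical schema providing defaults and validation."""
        num_epochs: int = 10
        lr: float = 1e-3

    @hydra_runner(config_path="conf", config_name="experiment", schema=ExampleExperimentConfig)
    def example_main(cfg: ExampleExperimentConfig) -> None:
        """Hypothetical task function that just prints the resolved config."""
        print(cfg.num_epochs, cfg.lr)
    example_main()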
| tao_pytorch_backend-main | nvidia_tao_pytorch/core/hydra/hydra_runner.py |
# Copyright (c) 2017-2022, NVIDIA CORPORATION. All rights reserved.
"""EULA for TAO PYTORCH."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_pytorch_backend-main | nvidia_tao_pytorch/license/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Version string for the PyTorch pruning."""
__version__ = "0.2.7"
| tao_pytorch_backend-main | nvidia_tao_pytorch/pruning/version.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Root module for PyTorch model pruning."""
from nvidia_tao_pytorch.pruning.version import __version__ # noqa: F401
| tao_pytorch_backend-main | nvidia_tao_pytorch/pruning/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Torch Pruning module."""
from .dependency import * # noqa: F401, F403
from .prune import * # noqa: F401, F403
from . import utils, prune_config # noqa: F401
| tao_pytorch_backend-main | nvidia_tao_pytorch/pruning/torch_pruning/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utils for pruning."""
from .dependency import TORCH_CONV, TORCH_BATCHNORM, TORCH_PRELU, TORCH_LINEAR
from . import prune
import torch
def count_prunable_params(module):
"""Count prunable parameters."""
if isinstance(module, (TORCH_CONV, TORCH_LINEAR)):
num_params = module.weight.numel()
if module.bias is not None:
num_params += module.bias.numel()
return num_params
if isinstance(module, TORCH_BATCHNORM):
num_params = module.running_mean.numel() + module.running_var.numel()
if module.affine:
num_params += module.weight.numel() + module.bias.numel()
return num_params
if isinstance(module, TORCH_PRELU):
if len(module.weight) == 1:
return 0
        return module.weight.numel()
return 0
def count_prunable_channels(module):
"""Count prunable channels."""
if isinstance(module, TORCH_CONV):
return module.weight.shape[0]
if isinstance(module, TORCH_LINEAR):
return module.out_features
if isinstance(module, TORCH_BATCHNORM):
return module.num_features
if isinstance(module, TORCH_PRELU):
if len(module.weight) == 1:
return 0
return len(module.weight)
return 0
def count_params(module):
"""Count parameters."""
return sum([p.numel() for p in module.parameters()])
def ln_normalize_scores(weight, p=2):
"""Compute ln cumsum-normalized socres
Args:
        weight (torch.Tensor): weights of a torch module
p (int, optional): 1 for l1 norm, 2 for l2 norm. Defaults to 2
Returns:
scores: normalized ln scores
"""
# compute l2 norm of each output filter:
scores = torch.norm(weight.view(len(weight), -1), p=p, dim=1)
# sort scores in an ascending order
sorted_scores, sorted_idx = scores.view(-1).sort(descending=False)
# compute cumulative sum
scores_cumsum_temp = sorted_scores.cumsum(dim=0)
scores_cumsum = torch.zeros(scores_cumsum_temp.shape, device=scores.device)
scores_cumsum[1:] = scores_cumsum_temp[:len(scores_cumsum_temp) - 1]
# normalize by cumulative sum
sorted_scores /= (scores.sum() - scores_cumsum)
# tidy up and output
new_scores = torch.zeros(scores_cumsum.shape, device=scores.device)
new_scores[sorted_idx] = sorted_scores
return new_scores.view(scores.shape)
def get_global_thresh(model, prune_ratio, prunable_list=[torch.nn.Conv2d], p=2):
"""Get global thresh and importance socres of modules to be pruned
Args:
model (torch.module): Model to be pruned
prunable_list (list of torch.module): basic module to be pruned
Returns:
global_thresh (float): threshold for pruning
module2scores (dict): dict mapping module to the corresponding scores.
"""
total_scores = []
module2scores = {}
total_filters = 0
for _, m in model.named_modules():
if isinstance(m, tuple(prunable_list)):
scores = ln_normalize_scores(m.weight, p=p)
total_scores.append(scores)
module2scores[m] = scores
total_filters += len(m.weight)
concat_scores = torch.cat(total_scores, dim=0)
topks, _ = torch.topk(concat_scores, int(total_filters * (1 - prune_ratio)))
global_thresh = topks[-1]
return global_thresh, module2scores
def execute_custom_score_prune(model,
global_thresh,
module2scores,
dep_graph,
granularity=8,
prunable_list=[torch.nn.Conv2d],
excluded_layers=[],
merged_sets=None):
"""Execute pruning algorithm
Args:
model (nn.Module): The model to be pruned
global_thresh (float): the threshold to prune the model
module2scores (Dict[string, list[float]]): the dict mapping module to its pruning scores
        dep_graph : DependencyGraph of the model
        granularity (int, optional): the pruning granularity. The number of pruned channels should be divisible by the granularity. Defaults to 8
prunable_list (list, optional): the list of module that will be pruned. Defaults to [torch.nn.Conv2d]
excluded_layers (list, optional): the layers will not be pruned. Defaults to []
        merged_sets (dict, optional): dict mapping a module to the set of modules that must be pruned together with it. Defaults to None.
"""
pruned_module = set()
strategy = prune.strategy.CustomScoreStrategy()
for _, m in model.named_modules():
if isinstance(m, tuple(prunable_list)) and m not in excluded_layers and m not in pruned_module:
if m in merged_sets:
pruned_module.add(m)
score_list = []
score_list.append(module2scores[m])
merged_set = merged_sets[m]
for dep_m in merged_set:
score_list.append(module2scores[dep_m])
pruned_module.add(dep_m)
scores = torch.max(torch.stack(score_list), dim=0).values
merged_idxs = strategy(scores=scores, thresh=global_thresh, round_to=granularity)
else:
merged_idxs = strategy(scores=module2scores[m], thresh=global_thresh, round_to=granularity)
if isinstance(m, TORCH_CONV):
prune_func = prune.prune_conv
elif isinstance(m, TORCH_LINEAR):
prune_func = prune.prune_linear
pruning_plan = dep_graph.get_pruning_plan(m, prune_func, idxs=merged_idxs)
pruning_plan.exec()
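# --- Illustrative sketch (not part of the original module): how the helpers above can
# be wired into a global-threshold pruning flow. The function name, the default 30%
# ratio and the restriction to regular Conv2d layers are assumptions made for this
# example only.
def _example_global_prune(model, example_inputs, prune_ratio=0.3, granularity=8):
    """Prune `model` in place with a global LN threshold and return the new parameter count."""
    from .dependency import DependencyGraph, find_merged_set
    # Trace the forward pass so coupled layers (batchnorms, residual adds, ...) stay consistent.
    dep_graph = DependencyGraph().build_dependency(model, example_inputs=example_inputs)
    # Global threshold over all Conv2d filters plus the per-module normalized scores.
    global_thresh, module2scores = get_global_thresh(model, prune_ratio, p=2)
    # For every conv, collect the other convs that must be pruned together with it.
    merged_sets = {}
    for m in model.modules():
        if isinstance(m, torch.nn.Conv2d) and m in dep_graph.module_to_node:
            root_node = dep_graph.module_to_node[m]
            merged_sets[m] = find_merged_set(root_node, prune.prune_conv) - {m}
    execute_custom_score_prune(
        model, global_thresh, module2scores, dep_graph,
        granularity=granularity, merged_sets=merged_sets,
    )
    return count_params(model)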
| tao_pytorch_backend-main | nvidia_tao_pytorch/pruning/torch_pruning/utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dependency of nodes in the graph."""
import torch
import torch.nn as nn
import typing
from . import prune
from enum import IntEnum
from numbers import Number
__all__ = ['PruningPlan', 'Dependency', 'DependencyGraph']
TORCH_CONV = nn.modules.conv._ConvNd
TORCH_BATCHNORM = nn.modules.batchnorm._BatchNorm
TORCH_PRELU = nn.PReLU
TORCH_LINEAR = nn.Linear
class OPTYPE(IntEnum):
"""OP Type."""
CONV = 0
BN = 1
LINEAR = 2
PRELU = 3
GROUP_CONV = 4
CONCAT = 5
SPLIT = 6
CUSTOMIZED = 7
ELEMENTWISE = 8
def _get_module_type(module): # noqa pylint: disable=R0911
if isinstance(module, TORCH_CONV):
if module.groups > 1:
return OPTYPE.GROUP_CONV
return OPTYPE.CONV
if isinstance(module, TORCH_BATCHNORM):
return OPTYPE.BN
if isinstance(module, TORCH_PRELU):
return OPTYPE.PRELU
if isinstance(module, TORCH_LINEAR):
return OPTYPE.LINEAR
if isinstance(module, _ConcatOp):
return OPTYPE.CONCAT
if isinstance(module, _SplitOp):
return OPTYPE.SPLIT
if isinstance(module, _CustomizedOp):
return OPTYPE.CUSTOMIZED
return OPTYPE.ELEMENTWISE
def _get_node_out_channel(node): # noqa pylint: disable=R0911
if node.type == OPTYPE.CONV or node.type == OPTYPE.GROUP_CONV:
return node.module.out_channels
if node.type == OPTYPE.BN:
return node.module.num_features
if node.type == OPTYPE.LINEAR:
return node.module.out_features
if node.type == OPTYPE.PRELU:
if node.module.num_parameters == 1:
return None
return node.module.num_parameters
if node.type == OPTYPE.CUSTOMIZED:
return node.customized_op_fn['get_out_ch_fn'](node.module)
return None
def _get_node_in_channel(node): # noqa pylint: disable=R0911
if node.type == OPTYPE.CONV or node.type == OPTYPE.GROUP_CONV:
return node.module.in_channels # noqa pylint: disable=R0911
if node.type == OPTYPE.BN:
return node.module.num_features # noqa pylint: disable=R0911
if node.type == OPTYPE.LINEAR:
return node.module.in_features # noqa pylint: disable=R0911
if node.type == OPTYPE.PRELU:
if node.module.num_parameters == 1:
return None # noqa pylint: disable=R0911
return node.module.num_parameters # noqa pylint: disable=R0911
if node.type == OPTYPE.CUSTOMIZED:
return node.customized_op_fn['get_in_ch_fn'](node.module) # noqa pylint: disable=R0911
return None
# Dummy Pruning fn
def _prune_concat(layer, *args, **kargs):
return layer, 0
def _prune_split(layer, *args, **kargs):
return layer, 0
def _prune_elementwise_op(layer, *args, **kargs):
return layer, 0
class _CustomizedOp(nn.Module):
"""Custom op."""
def __init__(self, op_class):
"""Initialize."""
self.op_cls = op_class
def __repr__(self):
"""Format to string."""
return "CustomizedOp(%s)" % (str(self.op_cls))
# Dummy module
class _ConcatOp(nn.Module):
def __init__(self):
super(_ConcatOp, self).__init__()
self.offsets = None
def __repr__(self):
return "_ConcatOp(%s)" % (self.offsets)
class _SplitOp(nn.Module):
def __init__(self):
super(_SplitOp, self).__init__()
self.offsets = None
def __repr__(self):
return "_SplitOp(%s)" % (self.offsets)
class _ElementWiseOp(nn.Module):
def __repr__(self):
return "_ElementWiseOp()"
class _FlattenIndexTransform(object):
def __init__(self, stride=1, reverse=False):
self._stride = stride
self.reverse = reverse
def __call__(self, idxs):
new_idxs = []
if self.reverse:
for i in idxs:
new_idxs.append(i // self._stride)
new_idxs = list(set(new_idxs))
else:
for i in idxs:
new_idxs.extend(list(range(i * self._stride, (i + 1) * self._stride)))
return new_idxs
class _ConcatIndexTransform(object):
def __init__(self, offset, reverse=False):
self.offset = offset
self.reverse = reverse
def __call__(self, idxs):
if self.reverse:
new_idxs = [i - self.offset[0] for i in idxs if (self.offset[0] <= i < self.offset[1])]
else:
new_idxs = [i + self.offset[0] for i in idxs]
return new_idxs
class _SplitIndexTransform(object):
def __init__(self, offset, reverse=False):
self.offset = offset
self.reverse = reverse
def __call__(self, idxs):
if self.reverse:
new_idxs = [i + self.offset[0] for i in idxs]
else:
new_idxs = [i - self.offset[0] for i in idxs if (self.offset[0] <= i < self.offset[1])]
return new_idxs
class Node(object):
"""Node."""
def __init__(self, module, grad_fn, node_name=None):
"""Initialize."""
self.module = module
self.grad_fn = grad_fn
self.inputs = []
self.outputs = []
self.dependencies = []
self._node_name = node_name
self.type = _get_module_type(module)
@property
def node_name(self):
"""Node name."""
return "%s (%s)" % (self._node_name, str(self.module)) if self._node_name is not None else str(self.module)
def add_input(self, node):
"""Add input."""
if node not in self.inputs:
self.inputs.append(node)
def add_output(self, node):
"""Add output."""
if node not in self.outputs:
self.outputs.append(node)
def __repr__(self):
"""Format to string."""
return "<Node: (%s, %s)>" % (self.node_name, self.grad_fn)
def __str__(self):
"""Convert to string."""
return "<Node: (%s, %s)>" % (self.node_name, self.grad_fn)
def details(self):
"""Details."""
fmt = "<Node: (%s, %s)>\n" % (self.node_name, self.grad_fn)
fmt += ' ' * 4 + 'IN:\n'
for in_node in self.inputs:
fmt += ' ' * 8 + '%s\n' % (in_node)
fmt += ' ' * 4 + 'OUT:\n'
for out_node in self.outputs:
fmt += ' ' * 8 + '%s\n' % (out_node)
fmt += ' ' * 4 + 'DEP:\n'
for dep in self.dependencies:
fmt += ' ' * 8 + "%s\n" % (dep)
return fmt
class Dependency(object):
"""Graph Dependency."""
def __init__(self, trigger, handler, broken_node: Node, index_transform: typing.Callable = None):
""" Layer dependency in structed neural network pruning.
Parameters:
trigger (Callable or None): a pruning function which will break the dependency
handler (Callable): a pruning function to fix the broken dependency
broken_node (nn.Module): the broken layer
"""
self.trigger = trigger
self.handler = handler
self.broken_node = broken_node
self.index_transform = index_transform
def __call__(self, idxs: list, dry_run: bool = False):
"""Call method."""
result = self.handler(self.broken_node.module, idxs, dry_run=dry_run)
return result
def __repr__(self):
"""Format to string."""
return str(self)
def __str__(self):
"""Convert to string."""
return "<DEP: %s => %s on %s>" % ("None" if self.trigger is None else self.trigger.__name__, self.handler.__name__, self.broken_node.node_name)
def is_triggered_by(self, pruning_fn):
"""Is triggered by."""
return pruning_fn == self.trigger
def __eq__(self, other):
"""Equals to."""
return ((self.trigger == other.trigger) and
self.handler == other.handler and
self.broken_node == other.broken_node)
class PruningPlan(object):
""" Pruning plan.
Args:
dry_run (Callable or None): only return the info about pruning.
module_to_name (dict): mapping nn.module to a readable name. It will be filled by DependencyGraph.
"""
def __init__(self):
"""Initialize."""
self._plans = list()
def add_plan(self, dep, idxs):
"""Add plan."""
self._plans.append((dep, idxs))
@property
def plan(self):
"""Plan"""
return self._plans
def exec(self, dry_run=False):
"""Execute plan."""
num_pruned = 0
for dep, idxs in self._plans:
_, n = dep(idxs, dry_run=dry_run)
num_pruned += n
return num_pruned
def has_dep(self, dep):
"""Has dependency."""
for _dep, _ in self._plans:
if dep == _dep:
return True
return False
def has_pruning_op(self, dep, idxs):
"""Has pruning op."""
for _dep, _idxs in self._plans:
if _dep.broken_node == dep.broken_node and _dep.handler == dep.handler and _idxs == idxs:
return True
return False
def add_plan_and_merge(self, dep, idxs):
"""Add plan and merge."""
for i, (_dep, _idxs) in enumerate(self._plans):
if _dep.broken_node == dep.broken_node and _dep.handler == dep.handler:
self._plans[i] = (_dep, list(set(_idxs + idxs)))
return
self.add_plan(dep, idxs)
def __str__(self):
"""Convert to string."""
fmt = ""
fmt += "\n-------------\n"
totally_pruned = 0
for dep, idxs in self._plans:
_, n_pruned = dep(idxs, dry_run=True)
totally_pruned += n_pruned
fmt += "[%s, Index=%s, NumPruned=%d]\n" % (dep, idxs, n_pruned)
fmt += "%d parameters will be pruned\n" % (totally_pruned)
fmt += "-------------\n"
return fmt
class DependencyGraph(object):
"""Dependency Graph."""
PRUNABLE_MODULES = [nn.modules.conv._ConvNd, nn.modules.batchnorm._BatchNorm, nn.Linear, nn.PReLU]
HANDLER = { # pruning function that changes: 1. in_channel, 2. out_channel
OPTYPE.CONV: (prune.prune_related_conv, prune.prune_conv),
OPTYPE.BN: (prune.prune_batchnorm, prune.prune_batchnorm),
OPTYPE.PRELU: (prune.prune_prelu, prune.prune_prelu),
OPTYPE.LINEAR: (prune.prune_related_linear, prune.prune_linear),
OPTYPE.GROUP_CONV: (prune.prune_group_conv, prune.prune_group_conv),
OPTYPE.CONCAT: (_prune_concat, _prune_concat),
OPTYPE.SPLIT: (_prune_split, _prune_split),
OPTYPE.ELEMENTWISE: (_prune_elementwise_op, _prune_elementwise_op),
OPTYPE.CUSTOMIZED: (None, None)}
OUTPUT_NODE_RULES = {}
INPUT_NODE_RULES = {}
for t1 in HANDLER.keys():
for t2 in HANDLER.keys():
OUTPUT_NODE_RULES[(t1, t2)] = (HANDLER[t1][1], HANDLER[t2][0]) # change in_channels of output layer
INPUT_NODE_RULES[(t1, t2)] = (HANDLER[t1][0], HANDLER[t2][1]) # change out_channels of input layer
CUSTOMIZED_OP_FN = {}
def build_dependency(self,
model: torch.nn.Module,
example_inputs: typing.Union[torch.Tensor, typing.Sequence],
output_transform: typing.Callable = None,
verbose: bool = True):
""" Build a dependency graph through forwarding.
Parameters:
model (class): the model to be pruned.
example_inputs (torch.Tensor or List): dummy inputs for the model.
output_transform (Callable): A function to transform network outputs.
            verbose (bool): verbose mode.
"""
self.verbose = verbose
# get module name
self._module_to_name = {module: name for (name, module) in model.named_modules()}
# build dependency graph
self.module_to_node = self._obtain_forward_graph(model, example_inputs, output_transform=output_transform)
self._build_dependency(self.module_to_node)
self.update_index()
return self
def register_customized_layer(self, layer_type, in_ch_pruning_fn, out_ch_pruning_fn, get_in_ch_fn, get_out_ch_fn):
""" Register a customized layer for pruning.
Parameters:
layer_type (class): the type of layer
in_ch_pruning_fn (Callable): A function to prune channels/dimensions of input tensor
out_ch_pruning_fn (Callable): A function to prune channels/dimensions of output tensor
get_in_ch_fn (Callable): estimate the n_channel of layer input. Return None if the layer does not change tensor shape.
            get_out_ch_fn (Callable): estimate the n_channel of layer output. Return None if the layer does not change tensor shape.
"""
self.CUSTOMIZED_OP_FN[layer_type] = {
"in_ch_pruning_fn": in_ch_pruning_fn,
"out_ch_pruning_fn": out_ch_pruning_fn,
"get_in_ch_fn": get_in_ch_fn,
"get_out_ch_fn": get_out_ch_fn,
}
self.PRUNABLE_MODULES.append(layer_type)
def get_pruning_plan(self, module: nn.Module, pruning_fn: typing.Callable, idxs: typing.Union[list, tuple]):
""" Get a pruning plan from the dependency graph, according to user's pruning operations.
Parameters:
module (nn.Module): the module to be pruned.
pruning_fn (Callable): the pruning function.
            idxs (list or tuple): the indices of parameters to be pruned.
"""
if isinstance(module, TORCH_CONV) and module.groups > 1:
pruning_fn = prune.prune_group_conv
if isinstance(idxs, Number):
idxs = [idxs]
self.update_index()
plan = PruningPlan()
# the user pruning operation
# change by nvidia
if module not in self.module_to_node.keys():
return None
root_node = self.module_to_node[module]
plan.add_plan(Dependency(pruning_fn, pruning_fn, root_node), idxs)
visited = set()
def _fix_denpendency_graph(node, fn, indices):
visited.add(node)
for dep in node.dependencies:
if dep.is_triggered_by(fn): # and dep.broken_node not in visited:
if dep.index_transform is not None:
new_indices = dep.index_transform(indices)
else:
new_indices = indices
if len(new_indices) == 0:
continue
if dep.broken_node in visited and plan.has_pruning_op(dep, new_indices):
continue
else:
plan.add_plan(dep, new_indices)
_fix_denpendency_graph(dep.broken_node, dep.handler, new_indices)
_fix_denpendency_graph(root_node, pruning_fn, idxs)
# merge pruning ops
merged_plan = PruningPlan()
for dep, pidxs in plan.plan:
merged_plan.add_plan_and_merge(dep, pidxs)
return merged_plan
def _build_dependency(self, module_to_node):
for _, node in module_to_node.items():
for in_node in node.inputs:
in_node_rule = self.INPUT_NODE_RULES.get((node.type, in_node.type), None)
if in_node_rule is not None:
trigger = in_node_rule[0]
handler = in_node_rule[1]
if trigger is None:
trigger = self.CUSTOMIZED_OP_FN[type(node.module)]['in_ch_pruning_fn']
if handler is None:
handler = self.CUSTOMIZED_OP_FN[type(in_node.module)]['out_ch_pruning_fn']
dep = Dependency(trigger=trigger, handler=handler, broken_node=in_node)
node.dependencies.append(dep)
for out_node in node.outputs:
out_node_rule = self.OUTPUT_NODE_RULES.get((node.type, out_node.type), None)
if out_node_rule is not None:
trigger = out_node_rule[0]
handler = out_node_rule[1]
if trigger is None:
trigger = self.CUSTOMIZED_OP_FN[type(node.module)]['out_ch_pruning_fn']
if handler is None:
handler = self.CUSTOMIZED_OP_FN[type(out_node.module)]['in_ch_pruning_fn']
dep = Dependency(trigger=trigger, handler=handler, broken_node=out_node)
node.dependencies.append(dep)
def _obtain_forward_graph(self, model, example_inputs, output_transform):
# module_to_node = { m: Node(m) for m in model.modules() if isinstance(m, self.PRUNABLE_MODULES) }
# Get grad_fn from prunable modules
grad_fn_to_module = {}
visited = {}
def _record_module_grad_fn(module, inputs, outputs):
if module not in visited:
visited[module] = 1
else:
visited[module] += 1
grad_fn_to_module[outputs.grad_fn] = module
hooks = [m.register_forward_hook(_record_module_grad_fn) for m in model.modules() if isinstance(m, tuple(self.PRUNABLE_MODULES))]
if isinstance(example_inputs, (tuple, list)):
out = model(*example_inputs)
elif isinstance(example_inputs, dict):
out = model(example_inputs)
elif isinstance(example_inputs, torch.Tensor):
out = model(example_inputs)
for hook in hooks:
hook.remove()
reused = [m for (m, count) in visited.items() if count > 1]
# create nodes and dummy modules
module_to_node = {}
def _build_graph(grad_fn):
module = grad_fn_to_module.get(grad_fn, None)
if module is not None and module in module_to_node and module not in reused:
return module_to_node[module]
if module is None:
if not hasattr(grad_fn, 'name'):
module = _ElementWiseOp() # skip customized modules
if self.verbose:
print("[Warning] Unrecognized operation: %s. It will be treated as element-wise op" % (str(grad_fn)))
elif 'catbackward' in grad_fn.name().lower(): # concat op
module = _ConcatOp()
elif 'splitbackward' in grad_fn.name().lower():
module = _SplitOp()
else:
module = _ElementWiseOp() # All other ops are treated as element-wise ops
grad_fn_to_module[grad_fn] = module # record grad_fn
if module not in module_to_node:
node = Node(module, grad_fn, self._module_to_name.get(module, None))
if type(module) in self.CUSTOMIZED_OP_FN.keys(): # mark it as a customized OP
node.type = OPTYPE.CUSTOMIZED
node.customized_op_fn = self.CUSTOMIZED_OP_FN[type(module)]
module_to_node[module] = node
else:
node = module_to_node[module]
if hasattr(grad_fn, 'next_functions'):
for f in grad_fn.next_functions:
if f[0] is not None:
if hasattr(f[0], 'name') and 'accumulategrad' in f[0].name().lower(): # skip leaf variables
continue
input_node = _build_graph(f[0])
node.add_input(input_node)
input_node.add_output(node)
return node
if output_transform is not None:
out = output_transform(out)
out = out[0]
for o in flatten_as_list(out):
_build_graph(o.grad_fn)
return module_to_node
def update_index(self):
"""Update index."""
for _, node in self.module_to_node.items():
if node.type == OPTYPE.LINEAR:
self._set_fc_index_transform(node)
if node.type == OPTYPE.CONCAT:
self._set_concat_index_transform(node)
if node.type == OPTYPE.SPLIT:
self._set_split_index_transform(node)
def _set_fc_index_transform(self, fc_node: Node):
if fc_node.type != OPTYPE.LINEAR:
return
fc_in_features = fc_node.module.in_features
feature_channels = _get_out_channels_of_in_node(fc_node.inputs[0])
if feature_channels <= 0: # the first layer: https://github.com/VainF/Torch-Pruning/issues/21
return
stride = fc_in_features // feature_channels
if stride > 1:
for in_node in fc_node.inputs:
for dep in fc_node.dependencies:
if dep.broken_node == in_node:
dep.index_transform = _FlattenIndexTransform(stride=stride, reverse=True)
for dep in in_node.dependencies:
if dep.broken_node == fc_node:
dep.index_transform = _FlattenIndexTransform(stride=stride, reverse=False)
def _set_concat_index_transform(self, cat_node: Node):
if cat_node.type != OPTYPE.CONCAT:
return
chs = []
for n in cat_node.inputs:
chs.append(_get_out_channels_of_in_node(n))
offsets = [0]
for ch in chs:
offsets.append(offsets[-1] + ch)
cat_node.module.offsets = offsets
for i, in_node in enumerate(cat_node.inputs):
for dep in cat_node.dependencies:
if dep.broken_node == in_node:
dep.index_transform = _ConcatIndexTransform(offset=offsets[i:i + 2], reverse=True)
for dep in in_node.dependencies:
if dep.broken_node == cat_node:
dep.index_transform = _ConcatIndexTransform(offset=offsets[i:i + 2], reverse=False)
def _set_split_index_transform(self, split_node: Node):
if split_node.type != OPTYPE.SPLIT:
return
chs = []
for n in split_node.outputs:
chs.append(_get_in_channels_of_out_node(n))
offsets = [0]
for ch in chs:
offsets.append(offsets[-1] + ch)
split_node.module.offsets = offsets
for i, out_node in enumerate(split_node.outputs):
for dep in split_node.dependencies:
if dep.broken_node == out_node:
dep.index_transform = _SplitIndexTransform(offset=offsets[i:i + 2], reverse=False)
for dep in out_node.dependencies:
if dep.broken_node == split_node:
dep.index_transform = _SplitIndexTransform(offset=offsets[i:i + 2], reverse=True)
def _get_out_channels_of_in_node(node):
ch = _get_node_out_channel(node)
if ch is None:
ch = 0
for in_node in node.inputs:
if node.type == OPTYPE.CONCAT:
ch += _get_out_channels_of_in_node(in_node)
else:
ch = _get_out_channels_of_in_node(in_node)
return ch
def _get_in_channels_of_out_node(node):
ch = _get_node_in_channel(node)
if ch is None:
ch = 0
for out_node in node.outputs:
if node.type == OPTYPE.SPLIT:
ch += _get_in_channels_of_out_node(out_node)
else:
ch = _get_in_channels_of_out_node(out_node)
return ch
def flatten_as_list(obj):
"""Flatten as list."""
if isinstance(obj, torch.Tensor):
return [obj]
if isinstance(obj, (list, tuple)):
flattened_list = []
for sub_obj in obj:
flattened_list.extend(flatten_as_list(sub_obj))
return flattened_list
if isinstance(obj, dict):
flattened_list = []
for sub_obj in obj.values():
flattened_list.extend(flatten_as_list(sub_obj))
return flattened_list
return obj
def find_merged_set(root_node, pruning_fn):
fake_idxs = [0]
print(root_node)
plan = PruningPlan()
plan.add_plan(Dependency(pruning_fn, pruning_fn, root_node), fake_idxs)
visited = set()
def _find_denpendency_graph(node, fn, indices):
visited.add(node)
for dep in node.dependencies:
if dep.is_triggered_by(fn): # and dep.broken_node not in visited:
if dep.index_transform is not None:
new_indices = dep.index_transform(indices)
else:
new_indices = indices
if len(new_indices) == 0:
continue
if dep.broken_node in visited and plan.has_pruning_op(dep, new_indices):
continue
else:
plan.add_plan(dep, new_indices)
_find_denpendency_graph(dep.broken_node, dep.handler, new_indices)
_find_denpendency_graph(root_node, pruning_fn, fake_idxs)
merged_set = set()
for dep, _ in plan.plan:
if dep.handler == pruning_fn:
merged_set.add(dep.broken_node.module)
return merged_set
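# --- Illustrative usage sketch (not part of the original module): build the dependency
# graph for a torchvision ResNet-18, request a pruning plan for one convolution and
# execute it. The model choice, the pruned layer and the channel indices are
# hypothetical placeholders, and torchvision is assumed to be available.
def _example_prune_one_conv():
    """Prune output channels 0-3 of the first conv of a ResNet-18 and return the pruned parameter count."""
    import torchvision
    model = torchvision.models.resnet18()
    example_inputs = torch.randn(1, 3, 224, 224)
    # Trace the forward pass to discover how layers are coupled.
    DG = DependencyGraph().build_dependency(model, example_inputs=example_inputs)
    # A plan bundles the user op with every dependent op (batchnorms, downstream convs, ...).
    plan = DG.get_pruning_plan(model.conv1, prune.prune_conv, idxs=[0, 1, 2, 3])
    print(plan)  # dry-run summary of every affected layer
    return plan.exec()  # actually remove the channels and return the pruned count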
| tao_pytorch_backend-main | nvidia_tao_pytorch/pruning/torch_pruning/dependency.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Default config file."""
from typing import Optional
from dataclasses import dataclass
@dataclass
class PruneConfig:
"""Prune config."""
mode: str = "amount" # [amount, threshold, experimental_hybrid]
amount: Optional[float] = None
threshold: Optional[float] = None
granularity: int = 8
raw_prune_score: str = "L1" # [L1, L2]
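# --- Illustrative sketch (not part of the original module): two hypothetical ways a task
# could fill in this config, one pruning by global amount and one by threshold.
if __name__ == "__main__":
    amount_cfg = PruneConfig(mode="amount", amount=0.3, granularity=8, raw_prune_score="L1")
    thresh_cfg = PruneConfig(mode="threshold", threshold=0.1, raw_prune_score="L2")
    print(amount_cfg, thresh_cfg)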
| tao_pytorch_backend-main | nvidia_tao_pytorch/pruning/torch_pruning/prune_config.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unstructured pruning."""
import torch
from copy import deepcopy
__all__ = ['mask_weight', 'mask_bias']
def _mask_weight_hook(module, inp):
if hasattr(module, 'weight_mask'):
module.weight.data *= module.weight_mask
def _mask_bias_hook(module, inp):
if module.bias is not None and hasattr(module, 'bias_mask'):
module.bias.data *= module.bias_mask
def mask_weight(layer, mask, inplace=True):
"""Unstructed pruning for convolution layer
Args:
layer: a convolution layer.
mask: 0-1 mask.
"""
if not inplace:
layer = deepcopy(layer)
if mask.shape != layer.weight.shape:
return layer
mask = torch.tensor(mask, dtype=layer.weight.dtype, device=layer.weight.device, requires_grad=False)
if hasattr(layer, 'weight_mask'):
mask = mask + layer.weight_mask
mask[mask > 0] = 1
layer.weight_mask = mask
else:
layer.register_buffer('weight_mask', mask)
layer.register_forward_pre_hook(_mask_weight_hook)
return layer
def mask_bias(layer, mask, inplace=True):
"""Unstructed pruning for convolution layer
Args:
layer: a convolution layer.
mask: 0-1 mask.
"""
if not inplace:
layer = deepcopy(layer)
if layer.bias is None or mask.shape != layer.bias.shape:
return layer
mask = torch.tensor(mask, dtype=layer.weight.dtype, device=layer.weight.device, requires_grad=False)
if hasattr(layer, 'bias_mask'):
mask = mask + layer.bias_mask
mask[mask > 0] = 1
layer.bias_mask = mask
else:
layer.register_buffer('bias_mask', mask)
layer.register_forward_pre_hook(_mask_bias_hook)
return layer
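# --- Illustrative usage sketch (not part of the original module): zero out one output
# filter of a conv layer with 0-1 masks. The layer shape and the masked index are
# hypothetical placeholders.
if __name__ == "__main__":
    conv = torch.nn.Conv2d(3, 8, kernel_size=3, bias=True)
    # The weight mask must match the weight shape; zero the first output filter.
    weight_mask = torch.ones_like(conv.weight)
    weight_mask[0] = 0.0
    mask_weight(conv, weight_mask)
    # The bias mask has the same shape as the bias vector.
    bias_mask = torch.ones_like(conv.bias)
    bias_mask[0] = 0.0
    mask_bias(conv, bias_mask)
    # The registered pre-forward hooks apply the masks before every forward pass.
    out = conv(torch.randn(1, 3, 16, 16))
    print(out.shape)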
| tao_pytorch_backend-main | nvidia_tao_pytorch/pruning/torch_pruning/prune/unstructured.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Prune module."""
from .structured import * # noqa: F401, F403
from .unstructured import * # noqa: F401, F403
from . import strategy # noqa: F401
| tao_pytorch_backend-main | nvidia_tao_pytorch/pruning/torch_pruning/prune/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Strategy of pruning."""
import torch
from abc import abstractclassmethod, ABC
from typing import Sequence
import random
def round_pruning_amount(total_parameters, n_to_prune, round_to):
"""round the parameter amount after pruning to an integer multiple of `round_to`.
"""
round_to = int(round_to)
if round_to <= 1:
return n_to_prune
after_pruning = total_parameters - n_to_prune
compensation = after_pruning % round_to
# round to the nearest (round_to * N)
# avoid negative n_to_prune
if (compensation < round_to // 2 and after_pruning > round_to) or round_to > n_to_prune:
n_to_prune = n_to_prune + compensation # floor
else:
n_to_prune = n_to_prune - round_to + compensation # ceiling
return n_to_prune
class BaseStrategy(ABC):
"""Base Strategy class."""
def __call__(self, *args, **kwargs):
"""Call method."""
return self.apply(*args, **kwargs)
@abstractclassmethod
def apply(cls, weights, amount=0.0, round_to=1) -> Sequence[int]: # return index
""" Apply the strategy on weights with user specified pruning percentage.
Parameters:
weights (torch.Parameter): weights to be pruned.
            amount (float or int): the percentage of weights to be pruned (amount < 1.0) or the number of weights to be pruned (amount >= 1.0)
round_to (int): the number to which the number of pruned channels is rounded.
"""
raise NotImplementedError
class RandomStrategy(BaseStrategy):
"""Random Strategy class."""
def apply(self, weights, amount=0.0, round_to=1) -> Sequence[int]: # return index
"""Apply the strategy."""
if amount <= 0:
return []
n = len(weights)
n_to_prune = int(amount * n) if amount < 1.0 else amount
n_to_prune = round_pruning_amount(n, n_to_prune, round_to)
if n_to_prune == 0:
return []
indices = random.sample(list(range(n)), k=n_to_prune)
return indices
class LNStrategy(BaseStrategy):
"""LN magnitude based pruning strategy.
    Two modes of the LN-magnitude-based (L1 or L2) pruning strategy are provided through this class:
- "amount": The pruning algorithm in original Torch-pruning. "amount" means the ratio of
number of filters to be pruned to the total number of filters. Suppose the total number of
filters is N, then the number of filters to be pruned is N * amount. The filters are sorted
    by the LN-magnitude of each filter and the smallest N * amount filters will be pruned.
- "thresh": The pruning algorithm in tao-keras. The filter with smaller LN-magnitude than
a threshold will be pruned.
Common tricks:
- granularity. The pruned number of filters will be divisible by the granularity number.
"""
def __init__(self, p, mode="amount"):
"""Constructor for LNS strategy."""
self.p = p
self.mode = mode
if self.mode not in ["amount", "thresh"]:
raise ValueError("Only support \"amount\" and \"thresh\" mode")
def apply(self, weights, amount=0.0, round_to=1, scores=None) -> Sequence[int]: # return index
"""Apply the pruning."""
if amount <= 0:
return []
n = len(weights)
if scores is None:
l1_norm = torch.norm(weights.view(n, -1), p=self.p, dim=1)
else:
l1_norm = scores
if self.mode == "amount":
n_to_prune = int(amount * n) if amount < 1.0 else amount
n_to_prune = round_pruning_amount(n, n_to_prune, round_to)
if n_to_prune == 0:
return []
threshold = torch.kthvalue(l1_norm, k=n_to_prune).values
indices = torch.nonzero(l1_norm <= threshold).view(-1).tolist()
elif self.mode == "thresh":
# Thresh is the strategy in tao-tf
l1_norm /= torch.max(l1_norm)
remained_idx = torch.nonzero(l1_norm > amount).view(-1).tolist()
num_remained = len(remained_idx)
# Granularity
if num_remained % round_to > 0:
num_remained += round_to - (num_remained % round_to)
num_remained = min(num_remained, n)
if num_remained == n:
return []
sorted_idx = torch.argsort(-l1_norm)
indices = torch.sort(sorted_idx[num_remained:])[0].view(-1).tolist()
return indices
class CustomScoreStrategy(BaseStrategy):
"""Custom Score Strategy.
A helper class to execute sorting and filtering with any pruning score.
common trick:
- granularity. The pruned number of filters will be divisible by the granularity number.
"""
def apply(self, scores, thresh=0.0, round_to=1) -> Sequence[int]:
"""Apply the pruning."""
if thresh <= 0:
return []
n = len(scores)
remained_idx = torch.nonzero(scores > thresh).view(-1).tolist()
num_remained = len(remained_idx)
# Granularity
if num_remained % round_to > 0:
num_remained += round_to - (num_remained % round_to)
# keep the min idxs
num_remained = max(num_remained, round_to)
num_remained = min(num_remained, n)
if num_remained == n:
return []
sorted_idx = torch.argsort(-scores)
indices = torch.sort(sorted_idx[num_remained:])[0].view(-1).tolist()
return indices
class L1Strategy(LNStrategy):
"""L1 Strategy class."""
def __init__(self):
"""Initialize."""
super(L1Strategy, self).__init__(p=1)
class L2Strategy(LNStrategy):
"""L2 Strategy class."""
def __init__(self):
"""Initialize."""
super(L2Strategy, self).__init__(p=2)
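# --- Illustrative usage sketch (not part of the original module): pick filter indices to
# prune from a conv weight tensor with the L1 strategy, in both "amount" and "thresh"
# modes. The weight shape, ratios and granularity are hypothetical placeholders.
if __name__ == "__main__":
    weights = torch.randn(32, 16, 3, 3)  # e.g. a Conv2d weight with 32 output filters
    # "amount" mode: prune roughly half of the 32 filters, rounded to a multiple of 8
    # by round_pruning_amount.
    amount_idxs = L1Strategy()(weights, amount=0.5, round_to=8)
    # "thresh" mode: drop filters whose max-normalized L1 norm falls below 0.2.
    thresh_idxs = LNStrategy(p=1, mode="thresh")(weights, amount=0.2, round_to=8)
    print(len(amount_idxs), len(thresh_idxs))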
| tao_pytorch_backend-main | nvidia_tao_pytorch/pruning/torch_pruning/prune/strategy.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Structured pruning."""
import torch
import torch.nn as nn
from copy import deepcopy
from functools import reduce
from operator import mul
from abc import ABC, abstractstaticmethod
from typing import Sequence, Tuple
class BasePruningFunction(ABC):
"""Base pruning function
"""
@classmethod
def apply(cls, layer: nn.Module, idxs: Sequence[int], inplace: bool = True, dry_run: bool = False) -> Tuple[nn.Module, int]:
"""Apply the pruning function."""
idxs = list(set(idxs))
cls.check(layer, idxs)
nparams_to_prune = cls.calc_nparams_to_prune(layer, idxs)
if dry_run:
return layer, nparams_to_prune
if not inplace:
layer = deepcopy(layer)
layer = cls.prune_params(layer, idxs)
return layer, nparams_to_prune
@staticmethod
def check(layer: nn.Module, idxs: Sequence[int]) -> None:
"""check."""
pass
@abstractstaticmethod
def prune_params(layer: nn.Module, idxs: Sequence[int]) -> nn.Module:
"""Prune parameters."""
pass
@abstractstaticmethod
def calc_nparams_to_prune(layer: nn.Module, idxs: Sequence[int]) -> int:
"""Compute number of parameters to prune."""
pass
class ConvPruning(BasePruningFunction):
"""Conv Pruning."""
@staticmethod
def prune_params(layer: nn.Module, idxs: Sequence[int]) -> nn.Module:
"""Prune parameters."""
keep_idxs = list(set(range(layer.out_channels)) - set(idxs))
layer.out_channels = layer.out_channels - len(idxs)
if not layer.transposed:
layer.weight = torch.nn.Parameter(layer.weight.data.clone()[keep_idxs])
else:
layer.weight = torch.nn.Parameter(layer.weight.data.clone()[:, keep_idxs])
if layer.bias is not None:
layer.bias = torch.nn.Parameter(layer.bias.data.clone()[keep_idxs])
return layer
@staticmethod
def calc_nparams_to_prune(layer: nn.Module, idxs: Sequence[int]) -> int:
"""Compute number of parameters to prune."""
nparams_to_prune = len(idxs) * reduce(mul, layer.weight.shape[1:]) + (len(idxs) if layer.bias is not None else 0)
return nparams_to_prune
class GroupConvPruning(ConvPruning):
"""Group Conv pruning."""
@staticmethod
def check(layer, idxs) -> nn.Module:
"""Check."""
if layer.groups > 1:
assert layer.groups == layer.in_channels and layer.groups == layer.out_channels, "only group conv with in_channel==groups==out_channels is supported"
@staticmethod
def prune_params(layer: nn.Module, idxs: Sequence[int]) -> nn.Module:
"""Prune parameters."""
keep_idxs = list(set(range(layer.out_channels)) - set(idxs))
layer.out_channels = layer.out_channels - len(idxs)
layer.in_channels = layer.in_channels - len(idxs)
layer.groups = layer.groups - len(idxs)
layer.weight = torch.nn.Parameter(layer.weight.data.clone()[keep_idxs])
if layer.bias is not None:
layer.bias = torch.nn.Parameter(layer.bias.data.clone()[keep_idxs])
return layer
class RelatedConvPruning(BasePruningFunction):
"""Related Conv Pruning."""
@staticmethod
def prune_params(layer: nn.Module, idxs: Sequence[int]) -> nn.Module:
"""Prune parameters."""
keep_idxs = list(set(range(layer.in_channels)) - set(idxs))
layer.in_channels = layer.in_channels - len(idxs)
if not layer.transposed:
layer.weight = torch.nn.Parameter(layer.weight.data.clone()[:, keep_idxs])
else:
layer.weight = torch.nn.Parameter(layer.weight.data.clone()[keep_idxs])
# no bias pruning because it does not change the output size
return layer
@staticmethod
def calc_nparams_to_prune(layer: nn.Module, idxs: Sequence[int]) -> int:
"""Compute number of parameters to prune."""
nparams_to_prune = len(idxs) * layer.weight.shape[0] * reduce(mul, layer.weight.shape[2:])
return nparams_to_prune
class LinearPruning(BasePruningFunction):
"""Linear Pruning."""
@staticmethod
def prune_params(layer: nn.Module, idxs: Sequence[int]) -> nn.Module:
"""Prune parameters."""
keep_idxs = list(set(range(layer.out_features)) - set(idxs))
layer.out_features = layer.out_features - len(idxs)
layer.weight = torch.nn.Parameter(layer.weight.data.clone()[keep_idxs])
if layer.bias is not None:
layer.bias = torch.nn.Parameter(layer.bias.data.clone()[keep_idxs])
return layer
@staticmethod
def calc_nparams_to_prune(layer: nn.Module, idxs: Sequence[int]) -> int:
"""Compute number of parameters to prune."""
nparams_to_prune = len(idxs) * layer.weight.shape[1] + (len(idxs) if layer.bias is not None else 0)
return nparams_to_prune
class RelatedLinearPruning(BasePruningFunction):
"""Related Linear Pruning."""
@staticmethod
def prune_params(layer: nn.Module, idxs: Sequence[int]) -> nn.Module:
"""Prune parameters."""
keep_idxs = list(set(range(layer.in_features)) - set(idxs))
layer.in_features = layer.in_features - len(idxs)
layer.weight = torch.nn.Parameter(layer.weight.data.clone()[:, keep_idxs])
return layer
@staticmethod
def calc_nparams_to_prune(layer: nn.Module, idxs: Sequence[int]) -> int:
"""Compute number of parameters to prune."""
nparams_to_prune = len(idxs) * layer.weight.shape[0]
return nparams_to_prune
class BatchnormPruning(BasePruningFunction):
"""BatchNorm Pruning."""
@staticmethod
def prune_params(layer: nn.Module, idxs: Sequence[int]) -> nn.Module:
"""Prune parameters."""
keep_idxs = list(set(range(layer.num_features)) - set(idxs))
layer.num_features = layer.num_features - len(idxs)
layer.running_mean = layer.running_mean.data.clone()[keep_idxs]
layer.running_var = layer.running_var.data.clone()[keep_idxs]
if layer.affine:
layer.weight = torch.nn.Parameter(layer.weight.data.clone()[keep_idxs])
layer.bias = torch.nn.Parameter(layer.bias.data.clone()[keep_idxs])
return layer
@staticmethod
def calc_nparams_to_prune(layer: nn.Module, idxs: Sequence[int]) -> int:
"""Compute number of parameters to prune."""
nparams_to_prune = len(idxs) * (2 if layer.affine else 1)
return nparams_to_prune
class PReLUPruning(BasePruningFunction):
"""PReLU pruning."""
@staticmethod
def prune_params(layer: nn.PReLU, idxs: list) -> nn.Module:
"""Prune parameters."""
if layer.num_parameters == 1:
return layer
keep_idxs = list(set(range(layer.num_parameters)) - set(idxs))
layer.num_parameters = layer.num_parameters - len(idxs)
layer.weight = torch.nn.Parameter(layer.weight.data.clone()[keep_idxs])
return layer
@staticmethod
def calc_nparams_to_prune(layer: nn.PReLU, idxs: Sequence[int]) -> int:
"""Compute number of parameters to prune."""
nparams_to_prune = 0 if layer.num_parameters == 1 else len(idxs)
return nparams_to_prune
# Functional
def prune_conv(layer: nn.modules.conv._ConvNd, idxs: list, inplace: bool = True, dry_run: bool = False) -> Tuple[nn.Module, int]:
"""Prune conv."""
return ConvPruning.apply(layer, idxs, inplace, dry_run)
def prune_related_conv(layer: nn.modules.conv._ConvNd, idxs: list, inplace: bool = True, dry_run: bool = False) -> Tuple[nn.Module, int]:
"""Prune related conv."""
return RelatedConvPruning.apply(layer, idxs, inplace, dry_run)
def prune_group_conv(layer: nn.modules.conv._ConvNd, idxs: list, inplace: bool = True, dry_run: bool = False) -> Tuple[nn.Module, int]:
"""Prune group conv."""
return GroupConvPruning.apply(layer, idxs, inplace, dry_run)
def prune_batchnorm(layer: nn.modules.batchnorm._BatchNorm, idxs: list, inplace: bool = True, dry_run: bool = False) -> Tuple[nn.Module, int]:
"""Prune Batch Norm."""
return BatchnormPruning.apply(layer, idxs, inplace, dry_run)
def prune_linear(layer: nn.Linear, idxs: list, inplace: bool = True, dry_run: bool = False) -> Tuple[nn.Module, int]:
"""Prune Linear."""
return LinearPruning.apply(layer, idxs, inplace, dry_run)
def prune_related_linear(layer: nn.Linear, idxs: list, inplace: bool = True, dry_run: bool = False) -> Tuple[nn.Module, int]:
"""Prune related linear."""
return RelatedLinearPruning.apply(layer, idxs, inplace, dry_run)
def prune_prelu(layer: nn.PReLU, idxs: list, inplace: bool = True, dry_run: bool = False) -> Tuple[nn.Module, int]:
"""Prune prelu."""
return PReLUPruning.apply(layer, idxs, inplace, dry_run)
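# Minimal usage sketch for the functional wrappers above (illustrative only; the
# layer sizes and pruned channel indices are arbitrary and not taken from TAO).
if __name__ == "__main__":
    _fc = nn.Linear(in_features=8, out_features=16)
    _fc, _ = prune_linear(_fc, idxs=[0, 3], inplace=True)
    assert _fc.out_features == 14 and _fc.weight.shape[0] == 14
    _bn = nn.BatchNorm2d(num_features=16)
    _bn, _ = prune_batchnorm(_bn, idxs=[0, 3], inplace=True)
    assert _bn.num_features == 14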
| tao_pytorch_backend-main | nvidia_tao_pytorch/pruning/torch_pruning/prune/structured.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Version string for the CV models/tasks."""
__version__ = "0.0.1-dev"
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/version.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Root module for CV models/tasks."""
import re
import torch
from nvidia_tao_pytorch.cv.version import __version__ # noqa: F401
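# Parse the running torch version string (e.g. "1.14.0" or "1.14.0a0+<build>")
# into integer major/minor components; any local build suffix after "+" is ignored.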
numbering = re.search(r"^(\d+).(\d+).(\d+)([^\+]*)(\+\S*)?$", torch.__version__)
major_version, minor_version = [int(numbering.group(n)) for n in range(1, 3)]
if major_version >= 1 and minor_version >= 14:
from third_party.onnx.utils import _export
# Monkey Patch ONNX Export to disable onnxscript
torch.onnx.utils._export = _export
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Init Module for Classification. """
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/classification/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Init Module for tools """
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/classification/tools/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
# Original source taken from https://github.com/open-mmlab/mmclassification
# Copyright 2019 OpenMMLAB
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ONNX related tools."""
import onnx
import torch
import numpy as np
from functools import partial
import onnxruntime as rt
from mmcv.onnx import register_extra_symbolics
def _demo_mm_inputs(input_shape, num_classes):
"""Create a superset of inputs needed to run test or train batches.
Args:
input_shape (tuple):
input batch dimensions
num_classes (int):
number of classification classes
"""
(N, _, _, _) = input_shape
rng = np.random.RandomState(0)
imgs = rng.rand(*input_shape)
gt_labels = rng.randint(
low=0, high=num_classes, size=(N, 1)).astype(np.uint8)
mm_inputs = {
'imgs': torch.FloatTensor(imgs).requires_grad_(True),
'gt_labels': torch.LongTensor(gt_labels),
}
return mm_inputs
def pytorch_to_onnx(model,
input_shape,
num_classes,
opset_version=11,
show=False,
output_file='tmp.onnx',
verify=False,
logger=None):
"""Export Pytorch model to ONNX model and verify the outputs are same
between Pytorch and ONNX.
Args:
model (nn.Module): Pytorch model we want to export.
input_shape (tuple): Use this input shape to construct
the corresponding dummy input and execute the model.
opset_version (int): The onnx op version. Default: 11.
        show (bool): Whether to print the computation graph. Default: False.
output_file (string): The path to where we store the output ONNX model.
Default: `tmp.onnx`.
        verify (bool): Whether to compare the outputs between Pytorch and ONNX.
Default: False.
"""
model.cpu().eval()
if hasattr(model.head, 'num_classes'):
num_classes = model.head.num_classes
# Some backbones use `num_classes=-1` to disable top classifier.
elif getattr(model.backbone, 'num_classes', -1) > 0:
num_classes = model.backbone.num_classes
else:
raise AttributeError('Cannot find "num_classes" in both head and '
'backbone, please check the config file.')
mm_inputs = _demo_mm_inputs(input_shape, num_classes)
imgs = mm_inputs.pop('imgs')
img_list = [img[None, :] for img in imgs]
# replace original forward function
origin_forward_test = model.forward_test
model.forward = partial(model.forward_test)
register_extra_symbolics(opset_version)
# support dynamic shape export
dynamic_axes = {
'input_1': {
0: 'batch',
},
'probs': {
0: 'batch'
}
}
with torch.no_grad():
torch.onnx.export(
model, (img_list, ),
output_file,
input_names=['input_1'],
output_names=['probs'],
export_params=True,
keep_initializers_as_inputs=True,
do_constant_folding=True,
dynamic_axes=dynamic_axes,
verbose=show,
opset_version=opset_version)
if logger:
logger.info(f'Successfully exported ONNX model: {output_file}')
model.forward_test = origin_forward_test
if verify:
# check by onnx
onnx_model = onnx.load(output_file)
onnx.checker.check_model(onnx_model)
# test the dynamic model
dynamic_test_inputs = _demo_mm_inputs(
(input_shape[0], input_shape[1], input_shape[2],
input_shape[3]), model.head.num_classes)
imgs = dynamic_test_inputs.pop('imgs')
img_list = [img[None, :] for img in imgs]
# check the numerical value
# get pytorch output
pytorch_result = model(img_list)[0]
# get onnx output
input_all = [node.name for node in onnx_model.graph.input]
input_initializer = [
node.name for node in onnx_model.graph.initializer
]
net_feed_input = list(set(input_all) - set(input_initializer))
assert (len(net_feed_input) == 1), "The input dimension is not equal to one"
sess = rt.InferenceSession(output_file)
onnx_result = sess.run(
None, {net_feed_input[0]: img_list[0].detach().numpy()})[0][0]
        if not np.allclose(pytorch_result, onnx_result, rtol=1e-04):
raise ValueError(
'The outputs are different between Pytorch and ONNX')
if logger:
            logger.info('The outputs are the same between Pytorch and ONNX')
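# Illustrative call sketch for the exporter above (not part of the original module;
# the classifier object, input shape and output path are hypothetical):
#
#     pytorch_to_onnx(classifier,
#                     input_shape=(1, 3, 224, 224),
#                     num_classes=2,
#                     opset_version=11,
#                     output_file='/results/export/classifier.onnx',
#                     verify=True)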
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/classification/tools/onnx_utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model Conversion Tool for timm (GCViT & FAN) to MMCls Weights."""
import argparse
import os
from collections import OrderedDict
import torch
def convert_timm(model_path, output_file):
""" Convert timm (GCViT & FAN) Model """
tmp = torch.load(model_path, map_location='cpu')
if 'state_dict' in tmp:
model = tmp['state_dict']
else:
model = tmp
state_dict = OrderedDict()
for k, v in model.items():
if not k.startswith('head'):
state_dict['backbone.' + k] = v
else:
state_dict['head.fc.' + k[5:]] = v
torch.save({"state_dict": state_dict}, output_file)
def build_command_line_parser(parser=None):
"""Build command line parser for model_convert."""
if parser is None:
parser = argparse.ArgumentParser(
prog='model_converter',
description='Convert timm to mmclassification.'
)
parser.add_argument(
'-m',
'--model_path',
required=True,
help='Path to timm pth file.')
parser.add_argument(
"-o",
"--out_file",
type=str,
required=True,
help="Path to the result mmcls pretrained weights."
)
return parser
def parse_command_line_args(cl_args=None):
"""Parse sys.argv arguments from commandline.
Args:
cl_args: List of command line arguments.
Returns:
args: list of parsed arguments.
"""
parser = build_command_line_parser()
args = parser.parse_args(cl_args)
return args
def main(args=None):
"""
    Convert a timm model to MMCls weights.
Args:
args(list): list of arguments to be parsed if called from another module.
"""
args = parse_command_line_args(cl_args=args)
# Defining the results directory.
results_dir = os.path.abspath(os.path.join(args.out_file, os.pardir))
if results_dir is not None:
if not os.path.exists(results_dir):
os.makedirs(results_dir)
try:
convert_timm(args.model_path, args.out_file)
print("Successfully Converted !")
except Exception as e:
raise e
if __name__ == '__main__':
try:
main()
except Exception as e:
raise e
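# Example invocation (paths are hypothetical):
#     python convert_model.py -m /workspace/fan_hybrid_tiny.pth -o /workspace/fan_hybrid_tiny_mmcls.pth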
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/classification/tools/convert_model.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Init Module for FAN """
from .fan_linear_head import FANLinearClsHead
__all__ = ["FANLinearClsHead"]
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/classification/heads/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
# Original source taken from https://github.com/open-mmlab/mmsegmentation
# Copyright 2019 OpenMMLAB
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" FAN Linear Class Head """
import torch.nn as nn
import torch.nn.functional as F
from mmcls.models.builder import HEADS
from mmcls.models.heads import ClsHead
@HEADS.register_module()
class FANLinearClsHead(ClsHead):
"""Linear classifier head.
Args:
num_classes (int): Number of categories excluding the background
category.
in_channels (int): Number of channels in the input feature map.
init_cfg (dict | optional): The extra init config of layers.
Defaults to use dict(type='Normal', layer='Linear', std=0.01).
"""
def __init__(self,
num_classes,
in_channels,
head_init_scale,
backbone=None,
init_cfg=None,
*args, # noqa pylint: disable=W1113
**kwargs # noqa pylint: disable=W1113
):
""" Init Module """
super(FANLinearClsHead, self).__init__(init_cfg=init_cfg, *args, **kwargs)
self.in_channels = in_channels
self.num_classes = num_classes
self.head_init_scale = head_init_scale
if self.num_classes <= 0:
raise ValueError(
f'num_classes={num_classes} must be a positive integer')
self.fc = nn.Linear(self.in_channels, self.num_classes)
self.fc.weight.data.mul_(head_init_scale)
self.fc.bias.data.mul_(head_init_scale)
def pre_logits(self, x):
""" Function to Get the Features """
if isinstance(x, tuple):
x = x[-1]
return x
def simple_test(self, x, softmax=True, post_process=True):
"""Inference without augmentation.
Args:
x (tuple[Tensor]): The input features.
Multi-stage inputs are acceptable but only the last stage will
be used to classify. The shape of every item should be
``(num_samples, in_channels)``.
softmax (bool): Whether to softmax the classification score.
post_process (bool): Whether to do post processing the
inference results. It will convert the output to a list.
Returns:
Tensor | list: The inference results.
- If no post processing, the output is a tensor with shape
``(num_samples, num_classes)``.
                - If post processing, the output is a multi-dimensional list of
float and the dimensions are ``(num_samples, num_classes)``.
"""
x = self.pre_logits(x)
cls_score = self.fc(x)
if softmax:
pred = (
F.softmax(cls_score, dim=1) if cls_score is not None else None)
else:
pred = cls_score
if post_process:
return self.post_process(pred)
return pred
def forward_train(self, x, gt_label, **kwargs):
""" Forward Train Module """
x = self.pre_logits(x)
cls_score = self.fc(x)
losses = self.loss(cls_score, gt_label, **kwargs)
return losses
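# Example head section of an MMClassification model config using this head
# (all values below are illustrative, not TAO defaults):
#
#     head = dict(
#         type='FANLinearClsHead',
#         num_classes=2,
#         in_channels=384,
#         head_init_scale=1.0,
#         loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
#         topk=(1,))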
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/classification/heads/fan_linear_head.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# This work is made available under the Nvidia Source Code License-NC.
# To view a copy of this license, visit
# https://github.com/NVlabs/FAN/blob/main/LICENSE
""" FAN Model Module """
import math
from functools import partial
import torch
import torch.nn as nn
import torch.utils.checkpoint as checkpoint
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.models.layers import DropPath, trunc_normal_, to_2tuple
from mmcls.models.builder import BACKBONES
from mmcv.runner import BaseModule
from nvidia_tao_pytorch.cv.backbone.convnext_utils import _create_hybrid_backbone
from nvidia_tao_pytorch.cv.backbone.fan import (TokenMixing, SqueezeExcite, OverlapPatchEmbed,
PositionalEncodingFourier, ConvPatchEmbed,
ClassAttentionBlock, DWConv, adaptive_avg_pool)
import warnings
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None,
'crop_pct': 1.0, 'interpolation': 'bicubic', 'fixed_input_size': True,
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'patch_embed.proj.0.0', 'classifier': 'head',
**kwargs
}
class SEMlp(nn.Module):
""" SE Mlp Model Module """
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0., linear=False, use_se=True):
""" Init Module """
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.dwconv = DWConv(hidden_features)
self.gamma = nn.Parameter(torch.ones(hidden_features), requires_grad=True)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
self.linear = linear
if self.linear:
self.relu = nn.ReLU(inplace=True)
self.se = SqueezeExcite(out_features, se_ratio=0.25) if use_se else nn.Identity()
self.apply(self._init_weights)
def _init_weights(self, m):
""" Initialize Weights """
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
elif isinstance(m, nn.Conv2d):
fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
fan_out //= m.groups
m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
if m.bias is not None:
m.bias.data.zero_()
def forward(self, x, H, W):
""" Forward Function """
B, N, C = x.shape
x = self.fc1(x)
if self.linear:
x = self.relu(x)
x = self.drop(self.gamma * self.dwconv(x, H, W)) + x
x = self.fc2(x)
x = self.drop(x)
x = self.se(x.permute(0, 2, 1).reshape(B, C, H, W)).reshape(B, C, N).permute(0, 2, 1)
return x, H, W
class Mlp(nn.Module):
""" MLP Module """
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0., linear=False):
"""Init Function"""
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.dwconv = DWConv(hidden_features)
self.gamma = nn.Parameter(torch.ones(hidden_features), requires_grad=True)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
self.linear = linear
if self.linear:
self.relu = nn.ReLU(inplace=True)
self.apply(self._init_weights)
def _init_weights(self, m):
""" Init Weights """
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
elif isinstance(m, nn.Conv2d):
fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
fan_out //= m.groups
m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
if m.bias is not None:
m.bias.data.zero_()
def forward(self, x, H, W):
""" Forward Function """
x = self.fc1(x)
if self.linear:
x = self.relu(x)
x = self.drop(self.gamma * self.dwconv(x, H, W)) + x
x = self.fc2(x)
x = self.drop(x)
return x
class HybridEmbed(nn.Module):
""" CNN Feature Map Embedding
Extract feature map from CNN, flatten, project to embedding dim.
"""
def __init__(self, backbone, img_size=224, patch_size=2, feature_size=None, in_chans=3, embed_dim=384):
""" Init Function """
super().__init__()
assert isinstance(backbone, nn.Module)
img_size = to_2tuple(img_size)
patch_size = to_2tuple(patch_size)
self.img_size = img_size
self.patch_size = patch_size
self.backbone = backbone
if feature_size is None:
with torch.no_grad():
# NOTE Most reliable way of determining output dims is to run forward pass
training = backbone.training
if training:
backbone.eval()
o = self.backbone.forward_features(torch.zeros(1, in_chans, img_size[0], img_size[1]))
if isinstance(o, (list, tuple)):
o = o[-1] # last feature if backbone outputs list/tuple of features
feature_size = o.shape[-2:]
feature_dim = o.shape[1]
backbone.train(training)
else:
feature_size = to_2tuple(feature_size)
if hasattr(self.backbone, 'feature_info'):
feature_dim = self.backbone.feature_info.channels()[-1]
else:
feature_dim = self.backbone.num_features
assert feature_size[0] % patch_size[0] == 0 and feature_size[1] % patch_size[1] == 0
self.grid_size = (feature_size[0] // patch_size[0], feature_size[1] // patch_size[1])
self.num_patches = self.grid_size[0] * self.grid_size[1]
self.proj = nn.Conv2d(feature_dim, embed_dim, kernel_size=patch_size, stride=patch_size)
def forward(self, x):
""" Forward Function """
x = self.backbone.forward_features(x)
if isinstance(x, (list, tuple)):
x = x[-1] # last feature if backbone outputs list/tuple of features
_, _, H, W = x.shape
x = self.proj(x).flatten(2).transpose(1, 2)
return x, (H // self.patch_size[0], W // self.patch_size[1])
class ChannelProcessing(nn.Module):
""" Channel Processing in FAN Module """
def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0., linear=False, drop_path=0.,
mlp_hidden_dim=None, act_layer=nn.GELU, drop=None, norm_layer=nn.LayerNorm, cha_sr_ratio=1, c_head_num=None):
""" Init Function """
super().__init__()
assert dim % num_heads == 0, f"dim {dim} should be divided by num_heads {num_heads}."
self.dim = dim
num_heads = c_head_num or num_heads
self.num_heads = num_heads
self.temperature = nn.Parameter(torch.ones(num_heads, 1, 1))
self.cha_sr_ratio = cha_sr_ratio if num_heads > 1 else 1
# config of mlp for v processing
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.mlp_v = Mlp(in_features=dim // self.cha_sr_ratio, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop, linear=linear)
self.norm_v = norm_layer(dim // self.cha_sr_ratio)
self.q = nn.Linear(dim, dim, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.apply(self._init_weights)
def _init_weights(self, m):
""" Init Weights """
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
elif isinstance(m, nn.Conv2d):
fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
fan_out //= m.groups
m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
if m.bias is not None:
m.bias.data.zero_()
def _gen_attn(self, q, k):
""" Function to Get Attention """
_, _, N, _ = k.shape
if torch.onnx.is_in_onnx_export():
            # If the softmax dim is not the last dimension, PyTorch decomposes the softmax op into
# smaller ops like ReduceMax, ReduceSum, Sub, and Div.
# As a result, ONNX export fails for opset_version >= 12.
# Here, we rearrange the transpose so that softmax is done over the last dimension.
q = q.transpose(-1, -2).softmax(-1)
k = k.transpose(-1, -2).softmax(-1)
warnings.warn("Replacing default adatpive_avg_pool2d to custom implementation for ONNX export")
# adaptive_avg_pool2d is not supported for torch to onnx export
k = adaptive_avg_pool(k.transpose(-1, -2), (N, 1))
else:
q = q.softmax(-2).transpose(-1, -2)
k = torch.nn.functional.adaptive_avg_pool2d(k.softmax(-2), (N, 1))
attn = torch.nn.functional.sigmoid(q @ k)
return attn * self.temperature
def forward(self, x, H, W, atten=None):
""" Forward Function """
B, N, C = x.shape
v = x.reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
q = self.q(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
k = x.reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
attn = self._gen_attn(q, k)
attn = self.attn_drop(attn)
Bv, Hd, Nv, Cv = v.shape
v = self.norm_v(self.mlp_v(v.transpose(1, 2).reshape(Bv, Nv, Hd * Cv), H, W)).reshape(Bv, Nv, Hd, Cv).transpose(1, 2)
repeat_time = N // attn.shape[-1]
attn = attn.repeat_interleave(repeat_time, dim=-1) if attn.shape[-1] > 1 else attn
x = (attn * v.transpose(-1, -2)).permute(0, 3, 1, 2).reshape(B, N, C)
return x, (attn * v.transpose(-1, -2)).transpose(-1, -2) # attn
@torch.jit.ignore
def no_weight_decay(self):
""" Ignore Weight Decay """
return {'temperature'}
class FANBlock_SE(nn.Module):
""" FAN Block SE """
def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, drop=0., attn_drop=0., sharpen_attn=False, use_se=False,
drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, eta=1., sr_ratio=1., qk_scale=None, linear=False, downsample=None, c_head_num=None):
""" Init Module """
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = TokenMixing(dim, num_heads=num_heads, qkv_bias=qkv_bias, mlp_hidden_dim=int(dim * mlp_ratio), sharpen_attn=sharpen_attn,
attn_drop=attn_drop, proj_drop=drop, drop=drop, drop_path=drop_path, sr_ratio=sr_ratio, linear=linear, emlp=False)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
self.mlp = SEMlp(in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, drop=drop)
self.gamma1 = nn.Parameter(eta * torch.ones(dim), requires_grad=True)
self.gamma2 = nn.Parameter(eta * torch.ones(dim), requires_grad=True)
def forward(self, x, H: int, W: int, attn=None):
""" Forward Function """
x_new, _ = self.attn(self.norm1(x), H, W)
x = x + self.drop_path(self.gamma1 * x_new)
x_new, H, W = self.mlp(self.norm2(x), H, W)
x = x + self.drop_path(self.gamma2 * x_new)
return x, H, W
class FANBlock(nn.Module):
"""FAN block from https://arxiv.org/abs/2204.12451"""
def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, drop=0., attn_drop=0., sharpen_attn=False,
drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, eta=1., sr_ratio=1., downsample=None, c_head_num=None):
"""Initialize FANBlock class"""
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = TokenMixing(dim, num_heads=num_heads, qkv_bias=qkv_bias, mlp_hidden_dim=int(dim * mlp_ratio), sharpen_attn=sharpen_attn,
attn_drop=attn_drop, proj_drop=drop, drop=drop, drop_path=drop_path, sr_ratio=sr_ratio)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
self.mlp = ChannelProcessing(dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop,
drop_path=drop_path, drop=drop, mlp_hidden_dim=int(dim * mlp_ratio), c_head_num=c_head_num)
self.gamma1 = nn.Parameter(eta * torch.ones(dim), requires_grad=True)
self.gamma2 = nn.Parameter(eta * torch.ones(dim), requires_grad=True)
self.downsample = downsample
self.H = None
self.W = None
def forward(self, x, attn=None, return_attention=False):
"""Forward function"""
H, W = self.H, self.W
x_new, attn_s = self.attn(self.norm1(x), H, W)
x = x + self.drop_path(self.gamma1 * x_new)
x_new, _ = self.mlp(self.norm2(x), H, W, atten=attn)
x = x + self.drop_path(self.gamma2 * x_new)
if return_attention:
return x, attn_s
if self.downsample is not None:
x, H, W = self.downsample(x, H, W)
self.H, self.W = H, W
return x
class FAN(BaseModule):
"""Based on timm code bases
https://github.com/rwightman/pytorch-image-models/tree/master/timm
"""
def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12, sharpen_attn=False, channel_dims=None,
num_heads=12, mlp_ratio=4., qkv_bias=True, drop_rate=0., attn_drop_rate=0., drop_path_rate=0., sr_ratio=None, backbone=None, use_checkpoint=False,
act_layer=None, norm_layer=None, se_mlp=False, cls_attn_layers=2, use_pos_embed=True, eta=1., tokens_norm=False, c_head_num=None, hybrid_patch_size=2, head_init_scale=1.0,
init_cfg=None, **kwargs):
""" Init Module """
super().__init__(init_cfg=init_cfg)
img_size = to_2tuple(img_size)
self.head_init_scale = head_init_scale
self.use_checkpoint = use_checkpoint
        assert (img_size[0] % patch_size == 0) and (img_size[1] % patch_size == 0), \
'`patch_size` should divide image dimensions evenly'
self.num_classes = num_classes
num_heads = [num_heads] * depth if not isinstance(num_heads, list) else num_heads
channel_dims = [embed_dim] * depth if channel_dims is None else channel_dims
norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6)
act_layer = act_layer or nn.GELU
if backbone is None:
self.patch_embed = ConvPatchEmbed(
img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim, act_layer=act_layer)
else:
self.patch_embed = HybridEmbed(backbone=backbone, patch_size=hybrid_patch_size, embed_dim=embed_dim)
self.use_pos_embed = use_pos_embed
if use_pos_embed:
self.pos_embed = PositionalEncodingFourier(dim=embed_dim)
self.pos_drop = nn.Dropout(p=drop_rate)
if se_mlp:
build_block = FANBlock_SE
else:
build_block = FANBlock
self.blocks = nn.ModuleList([])
for i in range(depth):
if i < depth - 1 and channel_dims[i] != channel_dims[i + 1]:
downsample = OverlapPatchEmbed(img_size=img_size,
patch_size=3,
stride=2,
in_chans=channel_dims[i],
embed_dim=channel_dims[i + 1])
else:
downsample = None
self.blocks.append(build_block(dim=channel_dims[i], num_heads=num_heads[i], mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, drop=drop_rate, sr_ratio=sr_ratio[i],
attn_drop=attn_drop_rate, drop_path=drop_path_rate, act_layer=act_layer, norm_layer=norm_layer, eta=eta,
downsample=downsample, c_head_num=c_head_num[i] if c_head_num is not None else None))
self.num_features = self.embed_dim = channel_dims[i]
self.cls_token = nn.Parameter(torch.zeros(1, 1, channel_dims[i]))
self.cls_attn_blocks = nn.ModuleList([ClassAttentionBlock(dim=channel_dims[-1], num_heads=num_heads[-1], mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, drop=drop_rate,
attn_drop=attn_drop_rate, act_layer=act_layer, norm_layer=norm_layer, eta=eta, tokens_norm=tokens_norm)
for _ in range(cls_attn_layers)])
# Classifier head
self.norm = norm_layer(channel_dims[i])
# Init weights
trunc_normal_(self.cls_token, std=.02)
self.apply(self._init_weights)
def _init_weights(self, m):
"""Initialize weights"""
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
@torch.jit.ignore
def no_weight_decay(self):
"""layers to ignore for weight decay"""
return {'pos_embed', 'cls_token'} # , 'patch_embed'}
def get_classifier(self):
"""Returns classifier"""
return self.head
def forward_features(self, x):
"""Extract features"""
B = x.shape[0]
x, (Hp, Wp) = self.patch_embed(x)
if self.use_pos_embed:
pos_encoding = self.pos_embed(B, Hp, Wp).reshape(B, -1, x.shape[1]).permute(0, 2, 1)
x = x + pos_encoding
x = self.pos_drop(x)
H, W = Hp, Wp
for blk in self.blocks:
blk.H, blk.W = H, W
if self.use_checkpoint:
x = checkpoint.checkpoint(blk, x)
else:
x = blk(x)
H, W = blk.H, blk.W
cls_tokens = self.cls_token.expand(B, -1, -1)
x = torch.cat((cls_tokens, x), dim=1)
for blk in self.cls_attn_blocks:
x = blk(x)
x = self.norm(x)[:, 0]
return x
def forward(self, x):
"""Base forward function"""
x = self.forward_features(x)
return x
def get_last_selfattention(self, x, use_cls_attn=False, layer_idx=11):
""" Output of Self-Attention """
B = x.shape[0]
x, (Hp, Wp) = self.patch_embed(x)
if self.use_pos_embed:
pos_encoding = self.pos_embed(B, Hp, Wp).reshape(B, -1, x.shape[1]).permute(0, 2, 1)
x = x + pos_encoding
x = self.pos_drop(x)
return_idx = layer_idx or len(self.blocks) - 1
for i, blk in enumerate(self.blocks):
if i == return_idx:
x, attn = blk(x, Hp, Wp, return_attention=True)
else:
x, Hp, Wp = blk(x, Hp, Wp)
if use_cls_attn:
cls_tokens = self.cls_token.expand(B, -1, -1)
x = torch.cat((cls_tokens, x), dim=1)
for i, blk in enumerate(self.cls_attn_blocks):
if i < len(self.cls_attn_blocks) - 1:
x = blk(x)
else:
attn = blk(x, return_attention=True)
return attn
return attn
# FAN-ViT Models
@BACKBONES.register_module()
class fan_tiny_12_p16_224(FAN):
""" FAN Tiny ViT """
def __init__(self, **kwargs):
""" Init Function """
depth = 12
sr_ratio = [1] * (depth // 2) + [1] * (depth // 2)
model_kwargs = dict(
patch_size=16, embed_dim=192, depth=depth, num_heads=4, eta=1.0, tokens_norm=True, sharpen_attn=False, sr_ratio=sr_ratio, **kwargs)
super(fan_tiny_12_p16_224, self).__init__(**model_kwargs)
@BACKBONES.register_module()
class fan_small_12_p16_224_se_attn(FAN):
""" FAN Small SE ViT """
def __init__(self, **kwargs):
""" Init Module """
depth = 12
sr_ratio = [1] * (depth // 2) + [1] * (depth // 2)
model_kwargs = dict(patch_size=16, embed_dim=384, depth=depth, num_heads=8, eta=1.0,
tokens_norm=True, sharpen_attn=False, se_mlp=True, sr_ratio=sr_ratio, **kwargs)
super(fan_small_12_p16_224_se_attn, self).__init__(**model_kwargs)
@BACKBONES.register_module()
class fan_small_12_p16_224(FAN):
""" FAN Small ViT """
def __init__(self, **kwargs):
""" Init Module """
depth = 12
sr_ratio = [1] * depth
model_kwargs = dict(
patch_size=16, embed_dim=384, depth=depth, num_heads=8, eta=1.0, tokens_norm=True, sr_ratio=sr_ratio, **kwargs)
super(fan_small_12_p16_224, self).__init__(**model_kwargs)
@BACKBONES.register_module()
class fan_base_18_p16_224(FAN):
""" FAN Base ViT """
def __init__(self, **kwargs):
""" Init Module """
depth = 18
sr_ratio = [1] * (depth // 2) + [1] * (depth // 2)
model_kwargs = dict(
patch_size=16, embed_dim=448, depth=depth, num_heads=8, eta=1.0, tokens_norm=True, sharpen_attn=False, sr_ratio=sr_ratio, **kwargs)
super(fan_base_18_p16_224, self).__init__(**model_kwargs)
@BACKBONES.register_module()
class fan_large_24_p16_224(FAN):
""" FAN Large ViT """
def __init__(self, **kwargs):
""" Init Module """
depth = 24
sr_ratio = [1] * (depth // 2) + [1] * (depth // 2)
model_kwargs = dict(
patch_size=16, embed_dim=480, depth=depth, num_heads=10, eta=1.0, tokens_norm=True, sharpen_attn=False, sr_ratio=sr_ratio, **kwargs)
super(fan_large_24_p16_224, self).__init__(**model_kwargs)
# FAN-Hybrid Models
# CNN backbones are based on ConvNeXt architecture with only first two stages for downsampling purpose
# This has been verified to be beneficial for downstream tasks
@BACKBONES.register_module()
class fan_tiny_8_p4_hybrid(FAN):
""" FAN Tiny Hybrid """
def __init__(self, **kwargs):
""" Init Module """
depth = 8
sr_ratio = [1] * (depth // 2) + [1] * (depth // 2 + 1)
model_args = dict(depths=[3, 3], dims=[128, 256, 512, 1024], use_head=False)
backbone = _create_hybrid_backbone(pretrained=False, pretrained_strict=False, **model_args)
model_kwargs = dict(
patch_size=16, embed_dim=192, depth=depth, num_heads=8, eta=1.0, tokens_norm=True, sharpen_attn=False, sr_ratio=sr_ratio, backbone=backbone, **kwargs)
super(fan_tiny_8_p4_hybrid, self).__init__(**model_kwargs)
@BACKBONES.register_module()
class fan_small_12_p4_hybrid(FAN):
""" FAN Small Hybrid """
def __init__(self, **kwargs):
"""Init Module"""
depth = 10
channel_dims = [384] * 10 + [384] * (depth - 10)
sr_ratio = [1] * (depth // 2) + [1] * (depth // 2)
model_args = dict(depths=[3, 3], dims=[128, 256, 512, 1024], use_head=False)
backbone = _create_hybrid_backbone(pretrained=False, pretrained_strict=False, **model_args)
model_kwargs = dict(
patch_size=16, embed_dim=384, depth=depth, num_heads=8, eta=1.0, channel_dims=channel_dims, tokens_norm=True, sharpen_attn=False, backbone=backbone, sr_ratio=sr_ratio, **kwargs)
super(fan_small_12_p4_hybrid, self).__init__(**model_kwargs)
@BACKBONES.register_module()
class fan_base_16_p4_hybrid(FAN):
""" FAN Base Hybrid """
def __init__(self, **kwargs):
""" Init Module """
depth = 16
sr_ratio = [1] * (depth // 2) + [1] * (depth // 2)
model_args = dict(depths=[3, 3], dims=[128, 256, 512, 1024], use_head=False)
backbone = _create_hybrid_backbone(pretrained=False, pretrained_strict=False, **model_args)
model_kwargs = dict(
patch_size=16, embed_dim=448, depth=depth, num_heads=8, eta=1.0, tokens_norm=True, sharpen_attn=False, sr_ratio=sr_ratio, backbone=backbone, **kwargs)
super(fan_base_16_p4_hybrid, self).__init__(**model_kwargs)
@BACKBONES.register_module()
class fan_large_16_p4_hybrid(FAN):
""" FAN Large Hybrid """
def __init__(self, **kwargs):
"""Init Module"""
depth = 22
sr_ratio = [1] * (depth // 2) + [1] * (depth // 2)
model_args = dict(depths=[3, 5], dims=[128, 256, 512, 1024], use_head=False)
backbone = _create_hybrid_backbone(pretrained=False, pretrained_strict=False, **model_args)
model_kwargs = dict(
patch_size=16, embed_dim=480, depth=depth, num_heads=10, eta=1.0, tokens_norm=True, sharpen_attn=False, head_init_scale=0.001, backbone=backbone, sr_ratio=sr_ratio, **kwargs)
super(fan_large_16_p4_hybrid, self).__init__(**model_kwargs)
@BACKBONES.register_module()
class fan_Xlarge_16_p4_hybrid(FAN):
"""FAN XLarge hybrid"""
def __init__(self, **kwargs):
"""Init Module"""
depth = 23
stage_depth = 20
channel_dims = [528] * stage_depth + [768] * (depth - stage_depth)
num_heads = [11] * stage_depth + [16] * (depth - stage_depth)
sr_ratio = [1] * (depth // 2) + [1] * (depth // 2 + 1)
model_args = dict(depths=[3, 7], dims=[128, 256, 512, 1024], use_head=False)
backbone = _create_hybrid_backbone(pretrained=False, pretrained_strict=False, **model_args)
model_kwargs = dict(
patch_size=16, embed_dim=channel_dims[0], depth=depth, num_heads=num_heads, eta=1.0, tokens_norm=True, sharpen_attn=False, sr_ratio=sr_ratio, channel_dims=channel_dims, backbone=backbone, **kwargs)
super(fan_Xlarge_16_p4_hybrid, self).__init__(**model_kwargs)
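# Illustrative use of the registered FAN backbones above (the config dict and
# input size are hypothetical; mmcls must be able to import this module first):
#
#     from mmcls.models import build_backbone
#     backbone = build_backbone(dict(type='fan_small_12_p4_hybrid'))
#     feats = backbone(torch.randn(1, 3, 224, 224))  # pooled class token, shape (1, 384)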
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/classification/models/fan.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model Init Module."""
from nvidia_tao_pytorch.cv.classification.models.fan import FAN
from nvidia_tao_pytorch.cv.classification.models.gc_vit import GCViT
__all__ = ["FAN", "GCViT"]
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/classification/models/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" GCViT Model Module """
import torch
import torch.nn as nn
from timm.models.layers import trunc_normal_
from mmcls.models.builder import BACKBONES
from mmcv.runner import BaseModule
from nvidia_tao_pytorch.cv.backbone.gc_vit import PatchEmbed, GCViTLayer, _to_channel_first
class GCViT(BaseModule):
"""
GCViT based on: "Hatamizadeh et al.,
Global Context Vision Transformers <https://arxiv.org/abs/2206.09959>"
"""
def __init__(self,
dim,
depths,
window_size,
mlp_ratio,
num_heads,
resolution=224,
drop_path_rate=0.2,
in_chans=3,
num_classes=1000,
qkv_bias=True,
qk_scale=None,
drop_rate=0.,
attn_drop_rate=0.,
norm_layer=nn.LayerNorm,
layer_scale=None,
use_rel_pos_bias=True,
**kwargs):
"""
Args:
dim: feature size dimension.
depths: number of layers in each stage.
window_size: window size in each stage.
mlp_ratio: MLP ratio.
num_heads: number of heads in each stage.
resolution: input image resolution.
drop_path_rate: drop path rate.
in_chans: number of input channels.
num_classes: number of classes.
qkv_bias: bool argument for query, key, value learnable bias.
qk_scale: bool argument to scaling query, key.
drop_rate: dropout rate.
attn_drop_rate: attention dropout rate.
norm_layer: normalization layer.
layer_scale: layer scaling coefficient.
use_rel_pos_bias: set bias for relative positional embedding
"""
super().__init__()
num_features = int(dim * 2 ** (len(depths) - 1))
self.num_classes = num_classes
self.patch_embed = PatchEmbed(in_chans=in_chans, dim=dim)
self.pos_drop = nn.Dropout(p=drop_rate)
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]
self.levels = nn.ModuleList()
for i in range(len(depths)):
level = GCViTLayer(dim=int(dim * 2 ** i),
depth=depths[i],
num_heads=num_heads[i],
window_size=window_size[i],
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate,
drop_path=dpr[sum(depths[:i]):sum(depths[:i + 1])],
norm_layer=norm_layer,
downsample=(i < len(depths) - 1),
layer_scale=layer_scale,
input_resolution=int(2 ** (-2 - i) * resolution),
image_resolution=resolution,
use_rel_pos_bias=use_rel_pos_bias)
self.levels.append(level)
self.norm = norm_layer(num_features)
self.avgpool = nn.AdaptiveAvgPool2d(1)
self.apply(self._init_weights)
def _init_weights(self, m):
"""Initialize weights"""
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
@torch.jit.ignore
def no_weight_decay_keywords(self):
"""Returns eywords to ignore during weight decay"""
return {'rpb'}
def forward_features(self, x):
"""Extract features"""
x = self.patch_embed(x)
x = self.pos_drop(x)
for level in self.levels:
x = level(x)
x = self.norm(x)
x = _to_channel_first(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
return x
def forward(self, x):
"""Forward function."""
x = self.forward_features(x)
return x
@BACKBONES.register_module()
class gc_vit_xxtiny(GCViT):
"""GCViT-XXTiny model."""
def __init__(self, **kwargs):
"""Initialize"""
model_kwargs = dict(depths=[2, 2, 6, 2],
num_heads=[2, 4, 8, 16],
window_size=[7, 7, 14, 7],
dim=64,
mlp_ratio=3,
drop_path_rate=0.2,
**kwargs)
super(gc_vit_xxtiny, self).__init__(**model_kwargs)
@BACKBONES.register_module()
class gc_vit_xtiny(GCViT):
"""GCViT-XTiny model."""
def __init__(self, **kwargs):
"""Initialize"""
model_kwargs = dict(depths=[3, 4, 6, 5],
num_heads=[2, 4, 8, 16],
window_size=[7, 7, 14, 7],
dim=64,
mlp_ratio=3,
drop_path_rate=0.2,
**kwargs)
super(gc_vit_xtiny, self).__init__(**model_kwargs)
@BACKBONES.register_module()
class gc_vit_tiny(GCViT):
"""GCViT-Tiny model."""
def __init__(self, **kwargs):
"""Initialize"""
model_kwargs = dict(depths=[3, 4, 19, 5],
num_heads=[2, 4, 8, 16],
window_size=[7, 7, 14, 7],
dim=64,
mlp_ratio=3,
drop_path_rate=0.2,
**kwargs)
super(gc_vit_tiny, self).__init__(**model_kwargs)
@BACKBONES.register_module()
class gc_vit_small(GCViT):
"""GCViT-Small model."""
def __init__(self, **kwargs):
"""Initialize"""
model_kwargs = dict(depths=[3, 4, 19, 5],
num_heads=[3, 6, 12, 24],
window_size=[7, 7, 14, 7],
dim=96,
mlp_ratio=2,
drop_path_rate=0.3,
layer_scale=1e-5,
**kwargs)
super(gc_vit_small, self).__init__(**model_kwargs)
@BACKBONES.register_module()
class gc_vit_base(GCViT):
"""GCViT-Base model."""
def __init__(self, **kwargs):
"""Initialize"""
model_kwargs = dict(depths=[3, 4, 19, 5],
num_heads=[4, 8, 16, 32],
window_size=[7, 7, 14, 7],
dim=128,
mlp_ratio=2,
drop_path_rate=0.5,
layer_scale=1e-5,
**kwargs)
super(gc_vit_base, self).__init__(**model_kwargs)
@BACKBONES.register_module()
class gc_vit_large(GCViT):
"""GCViT-Large model."""
def __init__(self, **kwargs):
"""Initialize"""
model_kwargs = dict(depths=[3, 4, 19, 5],
num_heads=[6, 12, 24, 48],
window_size=[7, 7, 14, 7],
dim=192,
mlp_ratio=2,
drop_path_rate=0.5,
layer_scale=1e-5,
**kwargs)
super(gc_vit_large, self).__init__(**model_kwargs)
@BACKBONES.register_module()
class gc_vit_large_384(GCViT):
"""GCViT-Large Input Resolution 384 model."""
def __init__(self, **kwargs):
"""Initialize"""
model_kwargs = dict(depths=[3, 4, 19, 5],
num_heads=[6, 12, 24, 48],
window_size=[12, 12, 24, 12],
dim=192,
mlp_ratio=2,
drop_path_rate=0.5,
layer_scale=1e-5,
**kwargs)
super(gc_vit_large_384, self).__init__(**model_kwargs)
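# Illustrative use of the registered GCViT backbones above (the config dict and
# input size are hypothetical):
#
#     from mmcls.models import build_backbone
#     backbone = build_backbone(dict(type='gc_vit_tiny'))
#     feats = backbone(torch.randn(1, 3, 224, 224))  # pooled features, shape (1, 512)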
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/classification/models/gc_vit.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Init Module for Scripts """
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/classification/scripts/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
# Original source taken from https://github.com/open-mmlab/mmclassification
# Copyright 2019 OpenMMLAB
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Export of Classification model.
"""
import datetime
import os
from mmcls.utils import get_root_logger
import nvidia_tao_pytorch.core.loggers.api_logging as status_logging
from nvidia_tao_pytorch.cv.segformer.utils.common_utils import check_and_create
from nvidia_tao_pytorch.cv.classification.models import * # noqa pylint: disable=W0401, W0614
from nvidia_tao_pytorch.cv.classification.heads import * # noqa pylint: disable=W0401, W0614
from nvidia_tao_pytorch.cv.classification.tools.onnx_utils import pytorch_to_onnx
from nvidia_tao_pytorch.core.mmlab.mmclassification.classification_default_config import ExperimentConfig
from nvidia_tao_pytorch.core.hydra.hydra_runner import hydra_runner
from nvidia_tao_pytorch.core.mmlab.mmclassification.utils import MMClsConfig, load_model
def run_experiment(experiment_config, results_dir):
"""Start the Export."""
check_and_create(results_dir)
# Set status logging
status_file = os.path.join(results_dir, "status.json")
status_logging.set_status_logger(
status_logging.StatusLogger(
filename=status_file,
append=True
)
)
status_logging.get_status_logger().write(
status_level=status_logging.Status.STARTED,
message="Starting Classification Export"
)
status_logger = status_logging.get_status_logger()
mmcls_config_obj = MMClsConfig(experiment_config, phase="eval")
mmcls_config = mmcls_config_obj.config
# Set the logger
log_file = os.path.join(results_dir, 'log_export_{}.txt'.format(datetime.datetime.now().strftime('%Y%m%d-%H%M%S')))
logger = get_root_logger(log_file=log_file, log_level="INFO")
# log to file
logger.info('********************** Start logging for Export **********************')
status_logger.write(message="**********************Start logging for Export**********************.")
model_path = mmcls_config["export"]["checkpoint"]
if not model_path:
raise ValueError("You need to provide the model path for Export.")
model_to_test = load_model(model_path, mmcls_config)
output_file = mmcls_config["export"]["onnx_file"]
if not output_file:
onnx_name = model_path.split("/")[-1]
onnx_name = onnx_name.replace(".pth", ".onnx")
onnx_path = os.path.join(results_dir, onnx_name)
else:
onnx_path = output_file
input_channel = mmcls_config["export"]["input_channel"]
input_height = mmcls_config["export"]["input_height"]
input_width = mmcls_config["export"]["input_width"]
input_shape = [1] + [input_channel, input_height, input_width]
opset_version = mmcls_config["export"]["opset_version"]
# export
pytorch_to_onnx(
model_to_test,
input_shape,
opset_version=opset_version,
show=False,
output_file=onnx_path,
verify=mmcls_config["export"]["verify"],
num_classes=mmcls_config["model"]["head"]["num_classes"],
logger=logger)
status_logger.write(message="Completed Export.", status_level=status_logging.Status.SUCCESS)
spec_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Load experiment specification, additionally using schema for validation/retrieving the default values.
# --config_path and --config_name will be provided by the entrypoint script.
@hydra_runner(
config_path=os.path.join(spec_root, "experiment_specs"), config_name="test_cats_and_dogs", schema=ExperimentConfig
)
def main(cfg: ExperimentConfig) -> None:
"""Run the Export."""
try:
if cfg.export.results_dir is not None:
results_dir = cfg.export.results_dir
else:
results_dir = os.path.join(cfg.results_dir, "export")
run_experiment(experiment_config=cfg,
results_dir=results_dir)
status_logging.get_status_logger().write(status_level=status_logging.Status.SUCCESS,
message="Export finished successfully.")
except Exception as e:
status_logging.get_status_logger().write(message=str(e),
status_level=status_logging.Status.FAILURE)
raise e
if __name__ == "__main__":
main()
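# Example launch via the hydra entrypoint (spec name and paths are hypothetical):
#     python export.py export.checkpoint=/results/train/model_latest.pth \
#         export.onnx_file=/results/export/model.onnx export.opset_version=11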
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/classification/scripts/export.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" MMClassification Train Module """
from mmcls.utils import get_root_logger
from mmcls.models import build_classifier
from mmcls.datasets import build_dataset
from nvidia_tao_pytorch.cv.segformer.utils.common_utils import check_and_create
from nvidia_tao_pytorch.core.hydra.hydra_runner import hydra_runner
from nvidia_tao_pytorch.core.mmlab.mmclassification.classification_default_config import ExperimentConfig
from nvidia_tao_pytorch.core.mmlab.mmclassification.classification_trainer import MMClsTrainer
from nvidia_tao_pytorch.core.mmlab.mmclassification.utils import MMClsConfig
from nvidia_tao_pytorch.core.mmlab.common.utils import set_env, set_distributed, get_latest_pth_model
from nvidia_tao_pytorch.cv.classification.heads import * # noqa pylint: disable=W0401, W0614
from nvidia_tao_pytorch.cv.classification.models import * # noqa pylint: disable=W0401, W0614
import nvidia_tao_pytorch.core.loggers.api_logging as status_logging
import warnings
import json
import time
import datetime
import os
def run_experiment(experiment_config, results_dir):
"""Start the training."""
check_and_create(results_dir)
# Set status logging
status_file = os.path.join(results_dir, "status.json")
status_logging.set_status_logger(
status_logging.StatusLogger(
filename=status_file,
append=True
)
)
status_logging.get_status_logger().write(
status_level=status_logging.Status.STARTED,
message="Starting Classification Train"
)
status_logger = status_logging.get_status_logger()
mmcls_config_obj = MMClsConfig(experiment_config)
mmcls_config = mmcls_config_obj.config
resume_checkpoint_local = get_latest_pth_model(results_dir)
resume_checkpoint_config = mmcls_config["train"]["train_config"]["resume_training_checkpoint_path"]
if not resume_checkpoint_config: # If no resume ckpt was provided in the config
mmcls_config["train"]["train_config"]["resume_training_checkpoint_path"] = resume_checkpoint_local
# Set the logger
log_file = os.path.join(results_dir, 'log_train_{}.txt'.format(datetime.datetime.now().strftime('%Y%m%d-%H%M%S')))
logger = get_root_logger(log_file=log_file, log_level="INFO")
# log to file
logger.info('**********************Start logging for Training**********************')
status_logger.write(message="**********************Start logging for Training**********************.")
meta = set_env()
set_distributed(mmcls_config)
# set the encryption key:
seed = mmcls_config["train"]["exp_config"]["manual_seed"]
meta['seed'] = seed
datasets = [build_dataset(mmcls_config["dataset"]["data"]["train"])]
status_logger.write(message="Completed Data Module Construction", status_level=status_logging.Status.RUNNING)
model = build_classifier(
mmcls_config["model"])
model.init_weights()
status_logger.write(message="Model Classifier Construction", status_level=status_logging.Status.RUNNING)
exp_params_file = os.path.join(results_dir, "experiment_params.json")
try:
with open(exp_params_file, 'w') as fp:
json.dump(mmcls_config, fp)
except Exception as e:
logger.info(e)
warnings.warn("The expeirment spec paras could not be dumped into file.")
meta["CLASSES"] = datasets[0].CLASSES
timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
cls_trainer = MMClsTrainer(
datasets,
model,
timestamp=timestamp,
meta=meta,
result_dir=results_dir,
experiment_spec=mmcls_config)
cls_trainer.set_up_trainer() # This will setup dataloader, model, runner
cls_trainer.fit()
status_logger.write(message="Completed Train.", status_level=status_logging.Status.SUCCESS)
spec_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Load experiment specification, additionally using schema for validation/retrieving the default values.
# --config_path and --config_name will be provided by the entrypoint script.
@hydra_runner(
config_path=os.path.join(spec_root, "experiment_specs"), config_name="train_cats_dogs_new_fan", schema=ExperimentConfig
)
def main(cfg: ExperimentConfig) -> None:
"""Run the training process."""
try:
if cfg.train.results_dir is not None:
results_dir = cfg.train.results_dir
else:
results_dir = os.path.join(cfg.results_dir, "train")
run_experiment(cfg, results_dir=results_dir)
except (KeyboardInterrupt, SystemExit):
status_logging.get_status_logger().write(
message="Training was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
except Exception as e:
status_logging.get_status_logger().write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
raise e
if __name__ == "__main__":
main()
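# Example launch via the hydra entrypoint (spec file and paths are hypothetical):
#     python train.py --config-name train_cats_dogs_new_fan train.results_dir=/results/train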
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/classification/scripts/train.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Inference of Classification model.
"""
from nvidia_tao_pytorch.cv.segformer.utils.common_utils import check_and_create
from nvidia_tao_pytorch.core.hydra.hydra_runner import hydra_runner
from nvidia_tao_pytorch.core.mmlab.common.utils import set_env, set_distributed
from nvidia_tao_pytorch.cv.classification.models import * # noqa pylint: disable=W0401, W0614
from nvidia_tao_pytorch.cv.classification.heads import * # noqa pylint: disable=W0401, W0614
from nvidia_tao_pytorch.core.mmlab.mmclassification.classification_default_config import ExperimentConfig
from nvidia_tao_pytorch.core.mmlab.mmclassification.utils import MMClsConfig, multi_gpu_test, load_model
import nvidia_tao_pytorch.core.loggers.api_logging as status_logging
from mmcls.datasets import build_dataloader, build_dataset
from mmcls.utils import get_root_logger
from mmcv.parallel import MMDistributedDataParallel
import torch
import datetime
import os
import numpy as np
import pandas as pd
def run_experiment(experiment_config, results_dir):
"""Start the Inference."""
check_and_create(results_dir)
# Set status logging
status_file = os.path.join(results_dir, "status.json")
status_logging.set_status_logger(
status_logging.StatusLogger(
filename=status_file,
append=True
)
)
status_logging.get_status_logger().write(
status_level=status_logging.Status.STARTED,
message="Starting Classification inference"
)
status_logger = status_logging.get_status_logger()
mmcls_config_obj = MMClsConfig(experiment_config, phase="eval")
mmcls_config = mmcls_config_obj.config
# Set the logger
    log_file = os.path.join(results_dir, 'log_inference_{}.txt'.format(datetime.datetime.now().strftime('%Y%m%d-%H%M%S')))
logger = get_root_logger(log_file=log_file, log_level="INFO")
# log to file
    logger.info('**********************Start logging for Inference**********************')
status_logger.write(message="**********************Start logging for Inference**********************.")
meta = set_env()
set_distributed(mmcls_config, "inference")
    # set the manual seed:
seed = mmcls_config["inference"]["exp_config"]["manual_seed"]
meta['seed'] = seed
test_dataset = build_dataset(mmcls_config["dataset"]["data"]["test"])
# Dataloader building
data_loader = build_dataloader(
test_dataset,
samples_per_gpu=mmcls_config["dataset"]["data"]["samples_per_gpu"],
workers_per_gpu=mmcls_config["dataset"]["data"]["workers_per_gpu"],
dist=True,
shuffle=False)
model_path = experiment_config["inference"]["checkpoint"]
if not model_path:
raise ValueError("You need to provide the model path for Inference.")
model_to_test = load_model(model_path, mmcls_config)
model_to_test = MMDistributedDataParallel(
model_to_test.cuda(),
device_ids=[torch.cuda.current_device()],
broadcast_buffers=False)
outputs, img_names = multi_gpu_test(model_to_test, data_loader, None,
gpu_collect=True)
rank = os.environ['LOCAL_RANK']
predictions = []
if int(rank) == 0:
for idx, img_name in enumerate(img_names):
assert (len(outputs[idx]) == len(test_dataset.CLASSES)), "The number of classes in the prediction: {} \
does not match with the number of classes in the test dataset: {}. Please ensure to provide \
the classes text file in the dataset config.".format(len(outputs[idx]), len(test_dataset.CLASSES))
class_index = np.argmax(outputs[idx])
class_label = test_dataset.CLASSES[class_index]
class_conf = outputs[idx][class_index]
predictions.append((img_name, class_label, class_conf))
result_csv_path = os.path.join(results_dir, 'result.csv')
with open(result_csv_path, 'w', encoding='utf-8') as csv_f:
# Write predictions to file
df = pd.DataFrame(predictions)
df.to_csv(csv_f, header=False, index=False)
logger.info("The inference result is saved at: %s", result_csv_path)
status_logger.write(message="Completed Inference.", status_level=status_logging.Status.SUCCESS)
spec_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Load experiment specification, additionally using schema for validation/retrieving the default values.
# --config_path and --config_name will be provided by the entrypoint script.
@hydra_runner(
config_path=os.path.join(spec_root, "experiment_specs"), config_name="test_cats_and_dogs", schema=ExperimentConfig
)
def main(cfg: ExperimentConfig) -> None:
"""Run the inference process."""
try:
if cfg.inference.results_dir is not None:
results_dir = cfg.inference.results_dir
else:
results_dir = os.path.join(cfg.results_dir, "inference")
run_experiment(experiment_config=cfg,
results_dir=results_dir)
status_logging.get_status_logger().write(status_level=status_logging.Status.SUCCESS,
message="Inference finished successfully.")
except (KeyboardInterrupt, SystemExit):
status_logging.get_status_logger().write(message="Inference was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE)
except Exception as e:
status_logging.get_status_logger().write(message=str(e),
status_level=status_logging.Status.FAILURE)
raise e
if __name__ == "__main__":
main()
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/classification/scripts/inference.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Evaluation of Classification model.
"""
from nvidia_tao_pytorch.cv.segformer.utils.common_utils import check_and_create
from nvidia_tao_pytorch.core.hydra.hydra_runner import hydra_runner
from nvidia_tao_pytorch.core.mmlab.common.utils import set_env, set_distributed
from nvidia_tao_pytorch.cv.classification.models import * # noqa pylint: disable=W0401, W0614
from nvidia_tao_pytorch.cv.classification.heads import * # noqa pylint: disable=W0401, W0614
from nvidia_tao_pytorch.core.mmlab.mmclassification.classification_default_config import ExperimentConfig
from nvidia_tao_pytorch.core.mmlab.mmclassification.utils import MMClsConfig, load_model
import nvidia_tao_pytorch.core.loggers.api_logging as status_logging
from mmcls.datasets import build_dataloader, build_dataset
from mmcls.apis import multi_gpu_test
from mmcls.utils import get_root_logger
from mmcv.parallel import MMDistributedDataParallel
import torch
import datetime
import os
import numpy as np
from numbers import Number
def run_experiment(experiment_config, results_dir):
"""Start the Evaluation."""
check_and_create(results_dir)
# Set status logging
status_file = os.path.join(results_dir, "status.json")
status_logging.set_status_logger(
status_logging.StatusLogger(
filename=status_file,
append=True
)
)
status_logging.get_status_logger().write(
status_level=status_logging.Status.STARTED,
message="Starting Classification evaluation"
)
status_logger = status_logging.get_status_logger()
mmcls_config_obj = MMClsConfig(experiment_config, phase="eval")
mmcls_config = mmcls_config_obj.config
# Set the logger
log_file = os.path.join(results_dir, 'log_evaluation_{}.txt'.format(datetime.datetime.now().strftime('%Y%m%d-%H%M%S')))
logger = get_root_logger(log_file=log_file, log_level="INFO")
# log to file
logger.info('**********************Start logging for Evaluation**********************')
    status_logger.write(message="**********************Start logging for Evaluation**********************.")
meta = set_env()
set_distributed(mmcls_config, "evaluate")
    # set the manual seed:
seed = mmcls_config["evaluate"]["exp_config"]["manual_seed"]
meta['seed'] = seed
test_dataset = build_dataset(mmcls_config["dataset"]["data"]["test"])
# Dataloader building
data_loader = build_dataloader(
test_dataset,
samples_per_gpu=mmcls_config["dataset"]["data"]["samples_per_gpu"],
workers_per_gpu=mmcls_config["dataset"]["data"]["workers_per_gpu"],
dist=True,
shuffle=False)
model_path = experiment_config["evaluate"]["checkpoint"]
if not model_path:
raise ValueError("You need to provide the model path for Evaluation.")
model_to_test = load_model(model_path, mmcls_config)
model_to_test = MMDistributedDataParallel(
model_to_test.cuda(),
device_ids=[torch.cuda.current_device()],
broadcast_buffers=False)
outputs = multi_gpu_test(model_to_test, data_loader, None,
gpu_collect=True)
rank = os.environ['LOCAL_RANK']
if int(rank) == 0:
results = {}
logger = get_root_logger()
eval_results = test_dataset.evaluate(
results=outputs,
metric=["accuracy", "precision", "recall"],
metric_options={"topk": mmcls_config["evaluate"]["topk"]},
logger=logger)
results.update(eval_results)
for k, v in eval_results.items():
if isinstance(v, np.ndarray):
v = [round(out, 2) for out in v.tolist()]
elif isinstance(v, Number):
v = round(v, 2)
else:
                raise ValueError(f'Unsupported metric type: {type(v)}')
print(f'\n{k} : {v}')
status_logger.write(message="Completed Evaluation.", status_level=status_logging.Status.SUCCESS)
spec_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Load experiment specification, additionally using schema for validation/retrieving the default values.
# --config_path and --config_name will be provided by the entrypoint script.
@hydra_runner(
config_path=os.path.join(spec_root, "experiment_specs"), config_name="test_cats_and_dogs", schema=ExperimentConfig
)
def main(cfg: ExperimentConfig) -> None:
"""Run the Evaluate process."""
try:
if cfg.evaluate.results_dir is not None:
results_dir = cfg.evaluate.results_dir
else:
results_dir = os.path.join(cfg.results_dir, "evaluate")
run_experiment(experiment_config=cfg,
results_dir=results_dir)
status_logging.get_status_logger().write(status_level=status_logging.Status.SUCCESS,
message="Evaluation finished successfully.")
except (KeyboardInterrupt, SystemExit):
status_logging.get_status_logger().write(message="Evaluation was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE)
except Exception as e:
status_logging.get_status_logger().write(message=str(e),
status_level=status_logging.Status.FAILURE)
raise e
if __name__ == "__main__":
main()
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/classification/scripts/evaluate.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""'Entry point script running subtasks related to Classification."""
import importlib
import os
import pkgutil
import argparse
import subprocess # nosec B404
import sys
from time import time
import nvidia_tao_pytorch.cv.classification.scripts as scripts
from nvidia_tao_pytorch.core.telemetry.nvml_utils import get_device_details
from nvidia_tao_pytorch.core.telemetry.telemetry import send_telemetry_data
def get_subtasks(package):
"""Get supported subtasks for a given task.
    This function lists out the tasks in the .scripts folder.
Returns:
subtasks (dict): Dictionary of files.
"""
module_path = package.__path__
modules = {}
# Collect modules dynamically.
for _, task, is_package in pkgutil.walk_packages(module_path):
if is_package:
continue
module_name = package.__name__ + '.' + task
module_details = {
"module_name": module_name,
"runner_path": os.path.abspath(importlib.import_module(module_name).__file__),
}
modules[task] = module_details
return modules
def launch(parser, subtasks, network=None):
"""CLI function that executes subtasks.
Args:
parser: Created parser object for a given task.
subtasks: list of subtasks for a given task.
network: Name of the network running training.
"""
if network is None:
network = "tao_pytorch"
# Subtasks for a given model.
parser.add_argument(
'subtask', default='train', choices=subtasks.keys(), help="Subtask for a given task/model.",
)
# Add standard TLT arguments.
parser.add_argument(
"-r",
"--results_dir",
help="Path to a folder where the experiment outputs should be written.",
default=None,
required=False,
)
parser.add_argument(
"--num_nodes",
help="Number of nodes to run the train subtask.",
default=1,
type=int
)
parser.add_argument("-e", "--experiment_spec_file", help="Path to the experiment spec file.", default=None)
parser.add_argument("--gpus", "-g", type=int, default=1, help="Number of GPUs")
# Parse the arguments.
args, unknown_args = parser.parse_known_args()
script_args = ""
# Process spec file for all commands except the one for getting spec files ;)
# Make sure the user provides spec file.
if args.experiment_spec_file is None:
print("ERROR: The subtask `{}` requires the following argument: -e/--experiment_spec_file".format(args.subtask))
exit(1)
# Make sure the file exists!
if not os.path.exists(args.experiment_spec_file):
print("ERROR: The indicated experiment spec file `{}` doesn't exist!".format(args.experiment_spec_file))
exit(1)
# Split spec file_path into config path and config name.
path, name = os.path.split(args.experiment_spec_file)
if path != '':
script_args += " --config-path " + os.path.realpath(path)
script_args += " --config-name " + name
if args.gpus > 1:
if args.subtask == "export":
raise ValueError("Export does not support multi-gpu")
else:
if args.subtask in ["train", "evaluate", "inference"]:
if args.gpus:
script_args += f" {args.subtask}.num_gpus={args.gpus}"
# And add other params AFTERWARDS!
if args.subtask in ["train"]:
if args.results_dir:
script_args += " results_dir=" + args.results_dir
# Find relevant module and pass args.
script = subtasks[args.subtask]["runner_path"]
# Pass unknown args to call
unknown_args_as_str = " ".join(unknown_args)
# Create a system call.
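    # Export runs as a single process through plain `python`; every other subtask is
    # launched through torchrun so that multi-GPU and multi-node execution work out of the box.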
if args.subtask == "export":
call = (
"python " + script + script_args + " " + unknown_args_as_str
)
else:
call = (
f"torchrun --nproc_per_node={args.gpus} --nnodes={args.num_nodes} " + script + script_args + " " + unknown_args_as_str
)
process_passed = True
start = time()
try:
# Run the script.
subprocess.check_call(call, shell=True, stdout=sys.stdout, stderr=sys.stdout) # nosec B602
except (KeyboardInterrupt, SystemExit):
print("Command was interrupted.")
process_passed = True
except subprocess.CalledProcessError as e:
if e.output is not None:
print(e.output)
process_passed = False
end = time()
time_lapsed = int(end - start)
try:
gpu_data = list()
for device in get_device_details():
gpu_data.append(device.get_config())
send_telemetry_data(
network,
args.subtask,
gpu_data,
num_gpus=1,
time_lapsed=time_lapsed,
pass_status=process_passed
)
except Exception as e:
print("Telemetry data couldn't be sent, but the command ran successfully.")
print(f"[WARNING]: {e}")
pass
if not process_passed:
print("Execution status: FAIL")
exit(1) # returning non zero return code from the process.
print("Execution status: PASS")
def main():
"""Main entrypoint wrapper."""
# Create parser for a given task.
parser = argparse.ArgumentParser(
"classification_pyt", add_help=True, description="TAO Toolkit"
)
# Build list of subtasks by inspecting the package.
subtasks = get_subtasks(scripts)
# Parse the arguments and launch the subtask.
launch(parser, subtasks, network="classification_pyt")
if __name__ == '__main__':
main()
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/classification/entrypoint/classification.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Init File for Classification"""
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/classification/entrypoint/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Metric-learning recognition root module."""
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/metric_learning_recognition/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Metric-learning recognition config module."""
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/metric_learning_recognition/config/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Default config file."""
from typing import Optional, List, Dict
from dataclasses import dataclass, field
from omegaconf import MISSING
@dataclass
class GaussianBlur:
"""Gaussian Blur configuration template."""
enabled: bool = True
kernel: List[int] = field(default_factory=lambda: [15, 15])
sigma: List[float] = field(default_factory=lambda: [0.3, 0.7])
@dataclass
class ColorAugmentation:
"""Color Augmentation configuration template."""
enabled: bool = True
brightness: float = 0.5
contrast: float = 0.3
saturation: float = 0.1
hue: float = 0.1
@dataclass
class DatasetConfig:
"""Metric Learning Recognition Dataset configuration template."""
train_dataset: Optional[str] = None
val_dataset: Optional[Dict[str, str]] = None
workers: int = 8
class_map: Optional[str] = None
pixel_mean: List[float] = field(default_factory=lambda: [0.485, 0.456, 0.406])
pixel_std: List[float] = field(default_factory=lambda: [0.226, 0.226, 0.226])
prob: float = 0.5
re_prob: float = 0.5
gaussian_blur: GaussianBlur = GaussianBlur()
color_augmentation: ColorAugmentation = ColorAugmentation()
random_rotation: bool = False
num_instance: int = 4
@dataclass
class ModelConfig:
"""Metric Learning Recognition model configuration for training, testing & validation."""
backbone: str = "resnet_50"
pretrain_choice: Optional[str] = "imagenet"
pretrained_model_path: Optional[str] = None
input_width: int = 224
input_height: int = 224
input_channels: int = 3
feat_dim: int = 256
@dataclass
class LRConfig:
"""Optimizer learning rate configuration for the LR scheduler."""
bias_lr_factor: float = 1
base_lr: float = 0.00035
momentum: float = 0.9
weight_decay: float = 0.0005
weight_decay_bias: float = 0.0005
@dataclass
class OptimConfig:
"""Optimizer configuration for the LR scheduler."""
name: str = "Adam"
steps: List[int] = field(default_factory=lambda: [40, 70])
gamma: float = 0.1
warmup_factor: float = 0.01
warmup_iters: int = 10
warmup_method: str = 'linear'
triplet_loss_margin: float = 0.3
embedder: LRConfig = LRConfig()
trunk: LRConfig = LRConfig()
miner_function_margin: float = 0.1
@dataclass
class TrainConfig:
"""Train experiment configuration template."""
gpu_ids: List[int] = field(default_factory=lambda: [0])
optim: OptimConfig = OptimConfig()
num_epochs: int = 1
checkpoint_interval: int = 5
clip_grad_norm: float = 0.0
resume_training_checkpoint_path: Optional[str] = None
report_accuracy_per_class: bool = True
smooth_loss: bool = True
batch_size: int = 64
val_batch_size: int = 64
results_dir: Optional[str] = None
@dataclass
class EvalConfig:
"""Evaluation experiment configuration template."""
checkpoint: Optional[str] = None
trt_engine: Optional[str] = None
gpu_id: int = 0
topk: int = 1 # TODO: add topk support
batch_size: int = 64
report_accuracy_per_class: bool = True
results_dir: Optional[str] = None
@dataclass
class InferenceConfig:
"""Inference experiment configuration template."""
checkpoint: Optional[str] = None
trt_engine: Optional[str] = None
input_path: str = MISSING # a image file or a folder
inference_input_type: str = "image_folder" # possible values are "image", "image_folder" and "classification_folder"
gpu_id: int = 0
batch_size: int = 64
results_dir: Optional[str] = None
topk: int = 1
@dataclass
class ExportConfig:
"""Export experiment configuraiton template."""
batch_size: int = -1
checkpoint: Optional[str] = None
gpu_id: int = 0
onnx_file: Optional[str] = None
on_cpu: bool = False
opset_version: int = 14
verbose: bool = True
results_dir: Optional[str] = None
@dataclass
class CalibrationConfig:
"""Calibration config."""
cal_cache_file: Optional[str] = None
cal_batch_size: int = 1
cal_batches: int = 1
cal_image_dir: Optional[List[str]] = field(default_factory=lambda: [])
@dataclass
class TrtConfig:
"""Trt config."""
data_type: str = "FP32"
workspace_size: int = 1024
min_batch_size: int = 1
opt_batch_size: int = 1
max_batch_size: int = 1
calibration: CalibrationConfig = CalibrationConfig()
@dataclass
class TrtEngineConfig:
"""Gen TRT Engine experiment config."""
results_dir: Optional[str] = None
gpu_id: int = 0
onnx_file: str = MISSING
trt_engine: Optional[str] = None
batch_size: int = -1
verbose: bool = False
tensorrt: TrtConfig = TrtConfig()
@dataclass
class ExperimentConfig:
"""Experiment config."""
train: TrainConfig = TrainConfig()
model: ModelConfig = ModelConfig()
evaluate: EvalConfig = EvalConfig()
dataset: DatasetConfig = DatasetConfig()
export: ExportConfig = ExportConfig()
gen_trt_engine: TrtEngineConfig = TrtEngineConfig() # placeholder
inference: InferenceConfig = InferenceConfig()
results_dir: Optional[str] = None
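if __name__ == "__main__":
    # Hedged usage sketch (not part of the original file): the dataclasses above act as a
    # structured schema. Merging a user-provided spec into it validates field names and fills
    # in defaults, which is roughly what the hydra_runner decorator in the entrypoint scripts
    # does internally. The spec values below are made up for illustration.
    from omegaconf import OmegaConf
    schema = OmegaConf.structured(ExperimentConfig)
    user_spec = OmegaConf.create({"train": {"num_epochs": 120}, "model": {"backbone": "resnet_50"}})
    cfg = OmegaConf.merge(schema, user_spec)
    print(cfg.train.num_epochs, cfg.model.feat_dim)  # 120 256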
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/metric_learning_recognition/config/default_config.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Metric-learning recognition utils module."""
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/metric_learning_recognition/utils/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Match finder for metric-learning recognition."""
from pytorch_metric_learning.utils.inference import (FaissKNN, try_gpu, return_results)
from pytorch_metric_learning.utils import common_functions as c_f
class EmbeddingKNN(FaissKNN):
"""Uses the faiss library to compute k-nearest-neighbors.
Inherits from `pytorch_metric_learning.utils.inference.FaissKNN` but removes logging
function when calling the object.
Attributes:
reset_before (Boolean): Reset the faiss index before knn is computed
reset_after (Boolean): Reset the faiss index after knn is computed (good for clearing memory)
index_init_fn (Callable, optional): A callable that takes in the embedding dimensionality and returns a faiss index. The default is faiss.IndexFlatL2
gpus (List[int], optional): A list of gpu indices to move the faiss index onto. The default is to use all available gpus, if the input tensors are also on gpus
"""
def __call__(
self,
query,
k,
reference=None,
embeddings_come_from_same_source=False,
):
"""Calculates the K nearest neighghbors.
Args:
query (torch.Tensor): Query embeddings.
k (int): The k in k-nearest-neighbors.
reference (torch.Tensor, optional): The embeddings to search.
embeddings_come_from_same_source (Boolean, optional): Whether or not query and reference share datapoints.
Returns:
distances (torch.Tensor): the distances of k-nearest-neighbors in increasing order.
indices (torch.Tensor): the indices of k-nearest-neighbors in dataset.
"""
if embeddings_come_from_same_source:
k = k + 1
device = query.device
is_cuda = query.is_cuda
d = query.shape[1]
if self.reset_before:
self.index = self.index_init_fn(d)
distances, indices = try_gpu(
self.index,
query,
reference,
k,
is_cuda,
self.gpus,
)
distances = c_f.to_device(distances, device=device)
indices = c_f.to_device(indices, device=device)
if self.reset_after:
self.reset()
return return_results(distances, indices, embeddings_come_from_same_source)
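if __name__ == "__main__":
    # Hedged usage sketch (not part of the original file): look up the 3 nearest reference
    # embeddings for a handful of random queries. Assumes torch and the faiss library used
    # by FaissKNN are installed; shapes and dimensions below are arbitrary.
    import torch
    reference = torch.randn(100, 64)   # 100 reference embeddings of dimension 64
    query = torch.randn(5, 64)         # 5 query embeddings
    knn = EmbeddingKNN(reset_before=True, reset_after=True)
    distances, indices = knn(query, k=3, reference=reference)
    print(distances.shape, indices.shape)  # torch.Size([5, 3]) torch.Size([5, 3])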
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/metric_learning_recognition/utils/match_finder.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utils for metric-learning recognition."""
import os
def no_folders_in(path_to_parent):
"""Checks whether folders exist in the directory.
Args:
path_to_parent (String): a directory for an image file or folder.
Returns:
        no_folders (Boolean): If true, the directory is an image folder, otherwise it's a classification folder.
"""
no_folders = True
for fname in os.listdir(path_to_parent):
if os.path.isdir(os.path.join(path_to_parent, fname)):
no_folders = False
break
return no_folders
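if __name__ == "__main__":
    # Hedged usage sketch (not part of the original file): build a throwaway directory layout
    # to show both outcomes of the check.
    import tempfile
    with tempfile.TemporaryDirectory() as root:
        open(os.path.join(root, "img.jpg"), "w").close()
        print(no_folders_in(root))   # True -> treated as an "image_folder"
        os.makedirs(os.path.join(root, "class_a"))
        print(no_folders_in(root))   # False -> treated as a "classification_folder"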
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/metric_learning_recognition/utils/common_utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Auto logging for Metric Learning Recognition subtasks."""
import os
import nvidia_tao_pytorch.core.loggers.api_logging as status_logging
from functools import wraps
def monitor_status(name='Metric Learning Recognition', mode='train'):
"""Status monitoring decorator."""
def inner(runner):
@wraps(runner)
def _func(cfg, **kwargs):
if cfg[mode]["results_dir"]:
results_dir = cfg[mode]["results_dir"]
elif cfg.results_dir:
results_dir = os.path.join(cfg.results_dir, mode)
else:
raise ValueError("You need to set at least one of following fields: results_dir, {mode}.results_dir")
os.makedirs(results_dir, exist_ok=True)
status_file = os.path.join(results_dir, "status.json")
status_logging.set_status_logger(
status_logging.StatusLogger(
filename=status_file,
verbosity=1,
append=True
)
)
s_logger = status_logging.get_status_logger()
try:
s_logger.write(
status_level=status_logging.Status.STARTED,
message=f"Starting {name} {mode}."
)
runner(cfg, **kwargs)
s_logger.write(
status_level=status_logging.Status.SUCCESS,
message=f"{mode.capitalize()} finished successfully."
)
except (KeyboardInterrupt, SystemError):
s_logger.write(
message=f"{mode.capitalize()} was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
except Exception as e:
s_logger.write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
raise e
return _func
return inner
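if __name__ == "__main__":
    # Hedged usage sketch (not part of the original file): wrap a minimal runner with the
    # decorator using a plain dict config. The results directory below is a hypothetical
    # path chosen for illustration only.
    @monitor_status(name="Demo", mode="train")
    def demo_runner(cfg):
        print("running with results_dir:", cfg["train"]["results_dir"])

    demo_cfg = {"train": {"results_dir": "/tmp/mlrecog_demo/train"}}
    demo_runner(demo_cfg)  # writes /tmp/mlrecog_demo/train/status.json, then runs demo_runner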
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/metric_learning_recognition/utils/decorators.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Metric-learning recognition scripts module."""
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/metric_learning_recognition/scripts/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Export metric-learning recognition model to ONNX."""
import os
import tempfile
import torch
from onnxsim import simplify
import onnx
from nvidia_tao_pytorch.core.hydra.hydra_runner import hydra_runner
from nvidia_tao_pytorch.cv.metric_learning_recognition.config.default_config import ExperimentConfig
from nvidia_tao_pytorch.cv.metric_learning_recognition.model.pl_ml_recog_model import MLRecogModel
from nvidia_tao_pytorch.core.tlt_logging import obfuscate_logs
from nvidia_tao_pytorch.cv.metric_learning_recognition.utils.decorators import monitor_status
import nvidia_tao_pytorch.core.loggers.api_logging as status_logging
spec_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
def run_export(args):
"""Wrapper to run export of .pth checkpoints.
Args:
args (DictConfig): Configuration dictionary
"""
experiment_config = args
# no need to check `else` as it's verified in the decorator already
if experiment_config['export']["results_dir"]:
results_dir = experiment_config['export']["results_dir"]
elif experiment_config["results_dir"]:
results_dir = os.path.join(experiment_config["results_dir"], "export")
if experiment_config['export']["on_cpu"]:
device = "cpu"
elif torch.cuda.is_available():
device = "cuda"
gpu_id = experiment_config.export.gpu_id
torch.cuda.set_device(gpu_id)
else:
error_msg = "No GPU available for export."
raise ValueError(error_msg)
checkpoint = experiment_config["export"]["checkpoint"]
if checkpoint is not None:
status_logging.get_status_logger().write(
message=f"Loading checkpoint: {experiment_config['export']['checkpoint']}",
status_level=status_logging.Status.STARTED)
pl_model = MLRecogModel.load_from_checkpoint(experiment_config["export"]["checkpoint"],
map_location="cpu",
experiment_spec=experiment_config,
results_dir=results_dir,
subtask="export")
# Set default output filename if the filename
# isn't provided over the command line.
if experiment_config['export']['onnx_file'] is None:
split_name = os.path.splitext(os.path.basename(checkpoint))[0]
output_file = os.path.join(results_dir, f"{split_name}.onnx")
else:
output_file = experiment_config['export']['onnx_file']
else:
pl_model = MLRecogModel(experiment_config,
results_dir,
subtask="export")
if experiment_config['export']['onnx_file'] is None:
output_file = os.path.join(results_dir, "metric_learning_recognition.onnx")
else:
output_file = experiment_config['export']['onnx_file']
assert not os.path.exists(output_file), f"output file {output_file} "\
"already exists."
# Make an output directory if necessary.
output_root = os.path.dirname(os.path.realpath(output_file))
if not os.path.exists(output_root):
os.makedirs(output_root)
model = pl_model.model
model.eval()
model.to(device)
input_names = ["input"]
output_names = ["fc_pred"]
# create dummy input
input_channels = experiment_config["model"]["input_channels"]
input_width = experiment_config["model"]["input_width"]
input_height = experiment_config["model"]["input_height"]
batch_size = experiment_config["export"]["batch_size"]
if batch_size == -1:
dynamic_axes = {"input": {0: "batch"}, "fc_pred": {0: "batch"}}
dummy_input = torch.randn(
1, input_channels, input_width, input_height).to(device)
elif batch_size >= 1:
dynamic_axes = None
dummy_input = torch.randn(
batch_size, input_channels, input_width, input_height).to(device)
else:
raise ValueError("`export.batch_size` must be greater than 0 or -1.")
# export
status_logging.get_status_logger().write(
message="Exporting model to ONNX",
status_level=status_logging.Status.STARTED)
os_handle, tmp_onnx_file = tempfile.mkstemp(suffix=".onnx")
os.close(os_handle)
torch.onnx.export(model,
dummy_input,
tmp_onnx_file,
input_names=input_names,
output_names=output_names,
opset_version=experiment_config["export"]["opset_version"],
dynamic_axes=dynamic_axes,
verbose=experiment_config["export"]["verbose"])
# add simplification
status_logging.get_status_logger().write(
message="Simplifying ONNX model",
status_level=status_logging.Status.STARTED)
simplified_model, _ = simplify(
tmp_onnx_file,
test_input_shapes={'input': (1, input_channels, input_width, input_height)},
check_n=3)
onnx.save(simplified_model, output_file)
status_logging.get_status_logger().write(
message=f"ONNX model saved at {output_file}",
status_level=status_logging.Status.SUCCESS)
# Load experiment specification, additionally using schema for validation/retrieving the default values.
# --config_path and --config_name will be provided by the entrypoint script.
@hydra_runner(
config_path=os.path.join(spec_root, "experiment_specs"), config_name="export", schema=ExperimentConfig
)
@monitor_status(mode="export")
def main(cfg: ExperimentConfig) -> None:
"""CLI wrapper to run export.
    This function parses the command line interface for export, instantiates the exporter,
    and serializes the trained model to an ONNX file. The exported graph is also run through
    onnx-simplifier before being saved.
    Args:
        cfg (ExperimentConfig): Hydra config object.
Returns:
No explicit returns.
"""
obfuscate_logs(cfg)
run_export(cfg)
if __name__ == "__main__":
main()
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/metric_learning_recognition/scripts/export.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Train metric-learning recognition model."""
import os
import re
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import ModelCheckpoint
from nvidia_tao_pytorch.core.callbacks.loggers import TAOStatusLogger
from nvidia_tao_pytorch.core.hydra.hydra_runner import hydra_runner
import nvidia_tao_pytorch.core.loggers.api_logging as status_logging
from nvidia_tao_pytorch.core.tlt_logging import obfuscate_logs
from nvidia_tao_pytorch.core.utilities import get_last_generated_file
from nvidia_tao_pytorch.cv.metric_learning_recognition.config.default_config import ExperimentConfig
from nvidia_tao_pytorch.cv.metric_learning_recognition.model.pl_ml_recog_model import MLRecogModel
from nvidia_tao_pytorch.cv.metric_learning_recognition.utils.decorators import monitor_status
def run_experiment(experiment_config):
"""Starts the training.
Args:
experiment_config (DictConfig): Configuration dictionary
"""
# no need to check `else` as it's verified in the decorator already
if experiment_config['train']["results_dir"]:
results_dir = experiment_config['train']["results_dir"]
elif experiment_config["results_dir"]:
results_dir = os.path.join(experiment_config["results_dir"], "train")
status_logger_callback = TAOStatusLogger(
results_dir, append=True,
num_epochs=experiment_config['train']['num_epochs'])
status_logging.set_status_logger(status_logger_callback.logger)
metric_learning_recognition = MLRecogModel(
experiment_config,
results_dir,
subtask="train")
total_epochs = experiment_config['train']['num_epochs']
clip_grad = experiment_config['train']['clip_grad_norm']
gpus_ids = experiment_config['train']["gpu_ids"]
acc_flag = None
if len(gpus_ids) > 1:
acc_flag = "ddp"
ckpt_inter = experiment_config['train']['checkpoint_interval']
trainer = Trainer(gpus=gpus_ids,
max_epochs=total_epochs,
check_val_every_n_epoch=ckpt_inter,
val_check_interval=0.99,
default_root_dir=results_dir,
num_sanity_val_steps=0,
accelerator='gpu',
strategy=acc_flag,
gradient_clip_val=clip_grad)
# setup checkpointer:
ModelCheckpoint.FILE_EXTENSION = ".pth"
checkpoint_callback = ModelCheckpoint(every_n_epochs=ckpt_inter,
dirpath=results_dir,
monitor=None,
save_top_k=-1,
filename='ml_model_{epoch:03d}',
save_on_train_epoch_end=True)
trainer.callbacks.append(checkpoint_callback)
if experiment_config['train']['resume_training_checkpoint_path']:
resume_training_checkpoint_path = experiment_config['train']['resume_training_checkpoint_path']
resume_training_checkpoint_path = get_last_generated_file(results_dir, extension="pth") # None if no pth files found
if resume_training_checkpoint_path:
status_logging.get_status_logger().write(
message=f"Resuming training from checkpoint: {resume_training_checkpoint_path}",
status_level=status_logging.Status.STARTED
)
resumed_epoch = re.search('epoch=(\\d+)', resume_training_checkpoint_path)
if resumed_epoch:
resumed_epoch = int(resumed_epoch.group(1))
else:
resumed_epoch = 0
status_logger_callback.epoch_counter = resumed_epoch + 1 # make sure callback epoch matches resumed epoch
trainer.callbacks.append(status_logger_callback)
trainer.fit(metric_learning_recognition, ckpt_path=resume_training_checkpoint_path)
spec_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Load experiment specification, additionally using schema for validation/retrieving the default values.
# --config_path and --config_name will be provided by the entrypoint script.
@hydra_runner(
config_path=os.path.join(spec_root, "experiment_specs"), config_name="train", schema=ExperimentConfig
)
@monitor_status(mode="train")
def main(cfg: ExperimentConfig) -> None:
"""Run the training process.
Args:
cfg (DictConfig): Hydra config object.
"""
obfuscate_logs(cfg)
run_experiment(experiment_config=cfg)
if __name__ == "__main__":
main()
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/metric_learning_recognition/scripts/train.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Inference on single patch."""
import os
from nvidia_tao_pytorch.core.hydra.hydra_runner import hydra_runner
from nvidia_tao_pytorch.cv.metric_learning_recognition.config.default_config import ExperimentConfig
from nvidia_tao_pytorch.cv.metric_learning_recognition.inference.inferencer import Inferencer
from nvidia_tao_pytorch.core.tlt_logging import obfuscate_logs
from nvidia_tao_pytorch.cv.metric_learning_recognition.utils.decorators import monitor_status
def run_experiment(experiment_config):
"""Starts the inference.
Args:
experiment_config (DictConfig): Configuration dictionary
"""
    # no need to check `else` as it's verified in the decorator already
if experiment_config["inference"]["results_dir"]:
results_dir = experiment_config["inference"]["results_dir"]
elif experiment_config["results_dir"]:
results_dir = os.path.join(experiment_config["results_dir"], "inference")
inferencer = Inferencer(experiment_config, results_dir)
inferencer.infer()
spec_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Load experiment specification, additionally using schema for validation/retrieving the default values.
# --config_path and --config_name will be provided by the entrypoint script.
@hydra_runner(
config_path=os.path.join(spec_root, "experiment_specs"), config_name="infer", schema=ExperimentConfig
)
@monitor_status(mode="inference")
def main(cfg: ExperimentConfig) -> None:
"""Run the training process.
Args:
cfg (DictConfig): Hydra config object.
"""
# Obfuscate logs.
obfuscate_logs(cfg)
run_experiment(experiment_config=cfg)
if __name__ == "__main__":
main()
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/metric_learning_recognition/scripts/inference.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Evaluate a trained Metric Learning Recognition model."""
import os
import torch
from nvidia_tao_pytorch.core.hydra.hydra_runner import hydra_runner
from nvidia_tao_pytorch.cv.metric_learning_recognition.config.default_config import ExperimentConfig
from nvidia_tao_pytorch.cv.metric_learning_recognition.dataloader.build_data_loader import build_dataloader
from nvidia_tao_pytorch.cv.metric_learning_recognition.model.pl_ml_recog_model import MLRecogModel
from nvidia_tao_pytorch.core.tlt_logging import obfuscate_logs
from nvidia_tao_pytorch.cv.metric_learning_recognition.utils.decorators import monitor_status
import nvidia_tao_pytorch.core.loggers.api_logging as status_logging
def run_experiment(experiment_config):
"""Starts the evaluate.
Args:
experiment_config (DictConfig): Configuration dictionary
"""
device = "cuda" if torch.cuda.is_available() else "cpu"
if device == "cuda":
gpu_id = experiment_config["evaluate"].gpu_id
torch.cuda.set_device(gpu_id)
# no need to check `else` as it's verified in the decorator already
if experiment_config['evaluate']["results_dir"]:
results_dir = experiment_config['evaluate']["results_dir"]
elif experiment_config["results_dir"]:
results_dir = os.path.join(experiment_config["results_dir"], "evaluate")
# get datasets
_, _, _, dataset_dict = build_dataloader(experiment_config, mode="eval")
status_logging.get_status_logger().write(
message=f"Loading checkpoint: {experiment_config['evaluate']['checkpoint']}",
status_level=status_logging.Status.STARTED)
metric_learning_recognition = MLRecogModel.load_from_checkpoint(
experiment_config["evaluate"]["checkpoint"],
map_location="cpu",
experiment_spec=experiment_config,
results_dir=results_dir,
subtask="evaluate")
metric_learning_recognition.load_tester()
metric_learning_recognition.dataset_dict = dataset_dict
metric_learning_recognition.class_dict = dataset_dict["query"].class_dict
metric_learning_recognition.to(torch.device(device))
all_acc = metric_learning_recognition.get_query_accuracy()
# df = metric_learning_recognition.report_accuracies(all_acc, save_results=True)
print("******************* Evaluation results **********************")
ami = all_acc['query']['AMI_level0']
nmi = all_acc['query']['NMI_level0']
mean_avg_prec = all_acc['query']['mean_average_precision_level0']
mean_reciprocal_rank = all_acc['query']['mean_reciprocal_rank_level0']
mean_r_precision = all_acc['query']['r_precision_level0']
val_accuracy = all_acc['query']['precision_at_1_level0']
status_logging_dict = {}
status_logging_dict['AMI'] = ami
status_logging_dict['NMI'] = nmi
if experiment_config["evaluate"]["report_accuracy_per_class"]:
status_logging_dict['Mean Average Precision'] = sum(mean_avg_prec) / len(mean_avg_prec)
status_logging_dict['Mean Reciprocal Rank'] = sum(mean_reciprocal_rank) / len(mean_reciprocal_rank)
status_logging_dict['r-Precision'] = sum(mean_r_precision) / len(mean_r_precision)
status_logging_dict['Precision at Rank 1'] = sum(val_accuracy) / len(val_accuracy)
else:
status_logging_dict['Mean Average Precision'] = mean_avg_prec
status_logging_dict['Mean Reciprocal Rank'] = mean_reciprocal_rank
status_logging_dict['r-Precision'] = mean_r_precision
status_logging_dict['Precision at Rank 1'] = val_accuracy
status_logging.get_status_logger().kpi = status_logging_dict
for metric in status_logging_dict:
print(f"{metric}: {status_logging_dict[metric]:.4f}")
if experiment_config["evaluate"]["report_accuracy_per_class"]:
print("\n******************* Accuracy per class **********************")
for k, v in all_acc['query'].items():
if "level0" in k:
if isinstance(v, list):
print(f"{k[:-7]}:")
for i, acc in enumerate(v):
print(f" {metric_learning_recognition.class_dict[i]}: {acc:.4f}")
print("*************************************************************")
spec_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Load experiment specification, additionally using schema for validation/retrieving the default values.
# --config_path and --config_name will be provided by the entrypoint script.
@hydra_runner(
config_path=os.path.join(spec_root, "experiment_specs"), config_name="evaluate", schema=ExperimentConfig
)
@monitor_status(mode="evaluate")
def main(cfg: ExperimentConfig) -> None:
"""Run the training process.
Args:
cfg (DictConfig): Hydra config object.
"""
obfuscate_logs(cfg)
run_experiment(experiment_config=cfg)
if __name__ == "__main__":
main()
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/metric_learning_recognition/scripts/evaluate.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""'Entry point' script running subtasks related to metric-learning recognition."""
import os
import argparse
import subprocess
import sys
from time import time
from nvidia_tao_pytorch.core.telemetry.nvml_utils import get_device_details
from nvidia_tao_pytorch.core.telemetry.telemetry import send_telemetry_data
from nvidia_tao_pytorch.cv.metric_learning_recognition import scripts
from nvidia_tao_pytorch.cv.re_identification.entrypoint.re_identification import get_subtasks
def launch(parser, subtasks, network=None):
"""CLI function that executes subtasks for Metric Learning Recognition model.
Args:
parser: Created parser object for a given task.
subtasks: list of subtasks for a given task.
"""
# Subtasks for a given model.
if network is None:
network = "tao_pytorch"
parser.add_argument(
'subtask', default='train', choices=subtasks.keys(), help="Subtask for a given task/model.",
)
# Add standard TAO arguments.
parser.add_argument(
"-r",
"--results_dir",
help="Path to a folder where the experiment outputs should be written.",
default=None,
required=False,
)
parser.add_argument("-e", "--experiment_spec_file", help="Path to the experiment spec file.", default=None)
# Parse the arguments.
args, unknown_args = parser.parse_known_args()
script_args = ""
# Process spec file for all commands except the one for getting spec files ;)
# Make sure the user provides spec file.
if args.experiment_spec_file is None:
print("ERROR: The subtask `{}` requires the following argument: -e/--experiment_spec_file".format(args.subtask))
exit(1)
# Make sure the file exists!
if not os.path.exists(args.experiment_spec_file):
print("ERROR: The indicated experiment spec file `{}` doesn't exist!".format(args.experiment_spec_file))
exit(1)
# Split spec file_path into config path and config name.
path, name = os.path.split(args.experiment_spec_file)
if path != '':
script_args += " --config-path " + os.path.realpath(path)
script_args += " --config-name " + name
# And add other params AFTERWARDS!
# Add results dir.
if args.subtask in ["train", "evaluate", "inference", "export"]:
if args.results_dir is not None:
script_args += " results_dir=" + args.results_dir
# Find relevant module and pass args.
script = subtasks[args.subtask]["runner_path"]
# Pass unknown args to call
unknown_args_as_str = " ".join(unknown_args)
# Create a system call.
call = "python " + script + script_args + " " + unknown_args_as_str
process_passed = True
start = time()
try:
# Run the script.
subprocess.check_call(call, shell=True, stdout=sys.stdout, stderr=sys.stdout)
except (KeyboardInterrupt, SystemExit):
print("Command was interrupted.")
process_passed = True
except subprocess.CalledProcessError as e:
if e.output is not None:
print(e.output)
process_passed = False
end = time()
time_lapsed = int(end - start)
try:
gpu_data = list()
for device in get_device_details():
gpu_data.append(device.get_config())
send_telemetry_data(
network,
args.subtask,
gpu_data,
num_gpus=1,
time_lapsed=time_lapsed,
pass_status=process_passed
)
except Exception as e:
print("Telemetry data couldn't be sent, but the command ran successfully.")
print(f"[WARNING]: {e}")
pass
if not process_passed:
print("Execution status: FAIL")
exit(1) # returning non zero return code from the process.
print("Execution status: PASS")
def main():
"""Main entrypoint wrapper."""
# Create parser for a given task.
parser = argparse.ArgumentParser(
"ml_recog", add_help=True, description="TAO Toolkit"
)
# Build list of subtasks by inspecting the package.
subtasks = get_subtasks(scripts)
# Parse the arguments and launch the subtask.
launch(parser, subtasks, network="ml_recog")
if __name__ == '__main__':
main()
| tao_pytorch_backend-main | nvidia_tao_pytorch/cv/metric_learning_recognition/entrypoint/metric_learning_recognition.py |