import argparse
import itertools
import math
import os
import subprocess
import sys
import time
import yaml
def get_factors(n):
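    """Return the sorted factors of n, collecting divisor pairs up to sqrt(n).

    Example: get_factors(12) -> [1, 2, 3, 4, 6, 12]
    """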
factors = []
for i in range(1, n + 1):
if i > math.sqrt(n):
break
if n % i == 0:
factors.append(i)
if (n // i != i):
factors.append(n // i)
return sorted(factors)
def load_yaml_config(yaml_file, config_name):
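    """Return the configuration block named config_name from a YAML file."""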
with open(yaml_file, 'r') as f:
config = yaml.safe_load(f)[config_name]
return config
def generate_command_lines(config, args):
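    """Build the list of benchmark command lines to run.

    Iterates over every (pr, pc) factorization of the GPU count crossed with
    the Cartesian product of the configured argument values; pr = pc = 0 and
    backend 0 entries are added when autotuning is requested.
    """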
generic_cmd = f"{args.launcher_cmd}"
generic_cmd += " {0}"
generic_cmd += " --pr {1} --pc {2}"
if config["skip_correctness_testing"]:
generic_cmd += " -s"
generic_cmd += f" --gx {args.gx} --gy {args.gy} --gz {args.gz}"
cmds = []
prs = get_factors(args.ngpu)
pcs = [args.ngpu // x for x in prs]
if config['run_autotuning']:
prs = [0] + prs
pcs = [0] + pcs
        if 0 not in config['backend']:
config['backend'].append(0)
for pr, pc in zip(prs, pcs):
for vals in itertools.product(*[config[x] for x in config['args']]):
arg_dict = dict(zip(config['args'], vals))
cmd = generic_cmd.format(f"{config['executable_prefix']}", pr, pc)
cmd += " " + " ".join([f"--{x} {y}" for x, y in arg_dict.items()])
# Only run full (grid and backend) autotuning cases
if (config['run_autotuning']):
if ((pr == 0 and pc == 0) and arg_dict["backend"] != 0):
continue
elif ((pr != 0 and pc != 0) and arg_dict["backend"] == 0):
continue
extra_flags = []
extra_flags.append(['-m' if x else '' for x in config['managed_memory']])
extra_flags.append(['-o' if x else '' for x in config['out_of_place']])
extra_flags.append(['--acx 1 --acy 1 --acz 1' if x else '' for x in config['axis_contiguous']])
for extras in itertools.product(*extra_flags):
cmds.append(f"{cmd} {' '.join(filter(lambda x: x != '', extras))}")
return cmds
def setup_env(config, args):
print("Setting environment variables...")
# Environment variables from config
for var, val in config['env_vars'].items():
os.environ[var] = f"{val}"
print(f"Set {var} = {val}")
# Dynamically setting NVSHMEM_SYMMETRIC_SIZE based on expected workspace size
# for input grid dimensions (with 5% margin)
symmetric_size = 0
wordsize = 8 if "_f" in config["executable_prefix"] else 16
if "c2c" in config["executable_prefix"]:
symmetric_size = 2 * (args.gx * args.gy * args.gz * wordsize) // args.ngpu
else:
        symmetric_size = 2 * ((args.gx // 2 + 1) * args.gy * args.gz * wordsize) // args.ngpu
symmetric_size = int(1.05 * symmetric_size)
os.environ["NVSHMEM_SYMMETRIC_SIZE"] = f"{symmetric_size}"
print(f"Set NVSHMEM_SYMMETRIC_SIZE = {symmetric_size}")
def add_csv_entry(csvfile, cmd, stdout_str):
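    """Parse the benchmark's stdout and append one row of results to csvfile.

    Creates the file with a header on first use, and collects any
    "CUDECOMP:" autotuning trial lines into the at_results column.
    """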
if not os.path.exists(csvfile):
# Create file with header
with open(csvfile, 'w') as f:
f.write("nx,ny,nz,fft_mode,precision,pr,pc,backend,"
"acx,acy,acz,out_of_place,managed,"
"tmin,tmax,tavg,tstd,gfmin,gfmax,gfavg,gfstd,"
"at_grid,at_backend,at_results\n")
at_grid = "--pr 0" in cmd and "--pc 0" in cmd
at_backend = "--backend 0" in cmd
lines = stdout_str.split('\n')
at_lines = [x for x in lines if "CUDECOMP:" in x]
at_results = []
if at_lines:
for i, line in enumerate(at_lines):
if "grid:" in line and not "SELECTED" in line:
grid = line.split(', ')[0].split(': ')[-1].strip()
backend = line.split(', ')[1].split(': ')[-1].strip()
tmin, tmax, tavg, tstd = [float(x) for x in at_lines[i+1].split(': ')[-1].split('/')]
at_results.append(f"{grid},{backend},{tmin},{tmax},{tavg},{tstd}")
if at_results:
at_results_str = ";".join(at_results)
else:
at_results_str = ""
results_lines = stdout_str.split("Result Summary:")[-1].split('\n')
for line in results_lines:
line = line.strip()
if "FFT size" in line:
nx, ny, nz = [int(x) for x in line.split(': ')[1].split('x')]
elif "FFT mode" in line:
fft_mode = line.split(': ')[1]
elif "Precision" in line:
precision = line.split(': ')[1]
elif "Process grid" in line:
pr, pc = [int(x) for x in line.split(': ')[1].split('x')]
elif "Comm backend" in line:
backend = line.split(': ')[1]
elif "Axis contiguous" in line:
acx, acy, acz = [int(x) for x in line.split(': ')[1].split(' ')]
elif "Out of place" in line:
out_of_place = True if "true" in line.split(': ')[1] else False
elif "Managed memory" in line:
managed = True if "true" in line.split(': ')[1] else False
elif "Time" in line:
tmin, tmax, tavg, tstd = [float(x) for x in line.split(': ')[1].split('/')]
elif "Throughput" in line:
gfmin, gfmax, gfavg, gfstd = [float(x) for x in line.split(': ')[1].split('/')]
with open(csvfile, 'a') as f:
f.write(f"{nx},{ny},{nz},{fft_mode},{precision},{pr},{pc},{backend},"
f"{acx},{acy},{acz},{out_of_place},{managed},"
f"{tmin},{tmax},{tavg},{tstd},{gfmin},{gfmax},{gfavg},{gfstd},"
f"{at_grid},{at_backend},\"{at_results_str}\"\n")
def run_test(cmd, args):
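    """Run one benchmark command, print PASSED/FAILED, optionally log to CSV; returns True on success."""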
print(f"command: {cmd}", sep="")
cmd_fields = cmd.split()
failed = False
try:
status = subprocess.run(cmd_fields, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
timeout=300, check=True)
except subprocess.TimeoutExpired as ex:
print(f" FAILED (timeout)")
print(f"Failing output:\n{ex.stdout.decode('utf-8')}")
failed = True
except subprocess.CalledProcessError as ex:
print(f" FAILED")
print(f"Failing output:\n{ex.stdout.decode('utf-8')}")
failed = True
else:
print(" PASSED")
if args.csvfile:
add_csv_entry(args.csvfile, cmd, status.stdout.decode('utf-8'))
if failed:
if args.exit_on_failure:
print("Stopping tests...")
sys.exit(1)
return False
if args.verbose:
print(f"Passing output:\n{status.stdout.decode('utf-8')}")
return True
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--launcher_cmd', type=str, required=True, help='parallel launch command')
parser.add_argument('--ngpu', type=int, required=True, help='number of gpus')
parser.add_argument('--gx', type=int, required=True, help='fft dimension in X')
parser.add_argument('--gy', type=int, required=True, help='fft dimension in Y')
parser.add_argument('--gz', type=int, required=True, help='fft dimension in Z')
parser.add_argument('--csvfile', type=str, default='', required=False, help='csv file to dump results (will append if file exists)')
parser.add_argument('--verbose', action='store_true', required=False, help='flag to enable full run output')
parser.add_argument('--exit_on_failure', action='store_true', required=False, help='flag to control whether script exits on case failure')
    parser.add_argument('config_name', type=str, help='configuration name from benchmark_config.yaml')
args = parser.parse_args()
config = load_yaml_config("benchmark_config.yaml", args.config_name)
cmds = generate_command_lines(config, args)
setup_env(config, args)
print(f"Running {len(cmds)} tests...")
t0 = time.time()
failed_cmds = []
    for i, c in enumerate(cmds):
status = run_test(c, args)
if not status:
failed_cmds.append(c)
if (i+1) % 10 == 0:
t1 = time.time()
print(f"Completed {i+1}/{len(cmds)} tests, running time {t1-t0} s")
print(f"Completed all tests, running time {time.time() - t0} s")
if len(failed_cmds) == 0:
print("Passed all tests.")
return 0
else:
print(f"Failed {len(failed_cmds)} / {len(cmds)} tests. Failing commands:")
for c in failed_cmds:
print(f"\t{c}")
return -1
if __name__ == "__main__":
    sys.exit(main())  # propagate the pass/fail exit code to the shell
# End of cuDecomp-main/benchmark/benchmark_runner.py
import argparse
import collections
import itertools
import math
import matplotlib.pyplot as plt
import numpy as np
import pandas
def get_cases(df):
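    """Collect the unique case parameter values (grid, precision, etc.) present in the dataframe."""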
cases = collections.OrderedDict()
cases["grid"] = list(set((x, y, z) for x, y, z in zip(df["nx"], df["ny"], df["nz"])))
cases["precision"] = df["precision"].unique()
cases["axis_contiguous"] = list(set((x, y, z) for x, y, z in zip(df["acx"], df["acy"], df["acz"])))
cases["out_of_place"] = df["out_of_place"].unique()
cases["managed"] = df["managed"].unique()
cases["fft_mode"] = df["fft_mode"].unique()
return cases
def get_case_df(df, grid, precision, axis_contiguous, out_of_place, managed, fft_mode):
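    """Return the rows of df matching one case, plus a human-readable case label."""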
cond = (df["nx"] == grid[0]) & (df["ny"] == grid[1]) & (df["nz"] == grid[2])
cond = (cond) & (df["precision"] == precision)
cond = (cond) & (df["acx"] == axis_contiguous[0]) & (df["acy"] == axis_contiguous[1]) & (df["acz"] == axis_contiguous[2])
cond = (cond) & (df["out_of_place"] == out_of_place)
cond = (cond) & (df["managed"] == managed)
cond = (cond) & (df["fft_mode"] == fft_mode)
return df[cond], f"{fft_mode}, {precision}, grid: {grid}, axis_contiguous: {axis_contiguous}, out_of_place: {out_of_place}, managed: {managed}"
def get_factors(n):
factors = []
for i in range(1, n + 1):
if i > math.sqrt(n):
break
if n % i == 0:
factors.append(i)
if (n // i != i):
factors.append(n // i)
return sorted(factors)
def plot_heatmap(ax, data, backends, factors, cmap_name='Greens', x_label=True, y_label=True,
mark_min_col=False, mark_min_row=False, mark_min_all=False, precision=0,
title=""):
ax.matshow(data, vmin=np.min(data), vmax=np.max(data), interpolation='none', cmap=plt.get_cmap(cmap_name))
ax.xaxis.set_ticks(np.arange(0,data.shape[1]))
ax.yaxis.set_ticks(np.arange(0,data.shape[0]))
ax.xaxis.set_ticks(np.arange(0,data.shape[1])-0.5, minor=True)
ax.yaxis.set_ticks(np.arange(0,data.shape[0])-0.5, minor=True)
ax.grid(which='minor', color='w', linestyle='-', linewidth=2)
if x_label:
ax.xaxis.set_ticklabels([f"{x} x {factors[-1] // x}" for x in factors], fontsize=12)
else:
ax.xaxis.set_ticklabels([])
if y_label:
ax.yaxis.set_ticklabels(backends, fontsize=12)
else:
ax.yaxis.set_ticklabels([])
    ax.tick_params(axis='both', which='both', length=0)
cm = plt.get_cmap('Greys_r')
for i in range(data.shape[0]):
for j in range(data.shape[1]):
#color = cm((data[i,j]-np.min(data))/(np.max(data) - np.min(data)))
color = cm(float((data[i,j]-np.min(data))/(np.max(data) - np.min(data)) > 0.5))
ndigits = int(np.log10(data[i,j])) + precision
ax.text(j-0.165*ndigits/2, i, f"{{:.{precision}f}}".format(data[i,j]), color=color, fontsize=12, fontweight='demi')
if (mark_min_col and data[i,j] == np.min(data[:,j])):
ax.plot(j-0.39, i+0.39, color=color, marker='v', markersize=10)
if (mark_min_row and data[i,j] == np.min(data[i,:])):
ax.plot(j+0.39, i-0.39, color=color, marker='>', markersize=10)
if (mark_min_all and data[i,j] == np.min(data[:,:])):
ax.plot(j-0.39, i-0.39, color=color, marker='*', markersize=10)
if title:
ax.set_title(title, y=-0.05)
def plot_case_heatmaps(df_in, case_label, outputfile):
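    """Plot measured FFT throughput and autotuner transpose trial times for one case.

    The left heatmap uses non-autotuned runs; the right one is rebuilt from the
    at_results column of fully autotuned runs. The figure is saved to outputfile.
    """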
backends = sorted(df_in["backend"].unique().tolist())
ngpu = df_in["pr"].iloc[0] * df_in["pc"].iloc[0]
factors = get_factors(ngpu)
heatmap_data = np.zeros([len(backends), len(factors)])
fig, axes = plt.subplots(1,2,figsize=(3*len(factors),8))
fig.suptitle(case_label, fontsize=14)
# Plot FFT GFLOPS heatmap
df = df_in[(~df_in["at_grid"]) & (~df_in["at_backend"])]
for b in backends:
df_backend = df[df["backend"] == b]
heatmap_i = backends.index(b)
for i, row in df_backend.iterrows():
pr = row["pr"]
pc = row["pc"]
gfavg = row["gfavg"]
heatmap_j = factors.index(pr)
if (heatmap_data[heatmap_i, heatmap_j] != 0):
print("Duplicate entry detected!")
heatmap_data[heatmap_i, heatmap_j] = gfavg
plot_heatmap(axes[0], heatmap_data, backends, factors,
title="FFT Performanace [GFLOP/s]")
# Plot full autotuned heatmap
heatmap_data[:] = 0
df = df_in[(df_in["at_grid"]) & (df_in["at_backend"])]
for i, row in df.iterrows():
at_results = row["at_results"].split(';')
for entry in at_results:
fields = entry.split(',')
pr, pc = [int(x) for x in fields[0].split('x')]
backend = fields[1]
tmin, tmax, tavg, tstd = [float(x) for x in fields[2:]]
heatmap_i = backends.index(backend)
heatmap_j = factors.index(pr)
if (heatmap_data[heatmap_i, heatmap_j] != 0):
print("Duplicate entry detected!")
heatmap_data[heatmap_i, heatmap_j] = tavg
plot_heatmap(axes[1], heatmap_data, backends, factors, cmap_name='Purples',
x_label=True, y_label=False, precision=2, mark_min_all=True,
mark_min_col=True, mark_min_row=True, title="Avg. Transpose Trial Time [ms]")
plt.tight_layout()
plt.savefig(outputfile)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--csvfile', type=str, required=True, help='csvfile containing benchmark run data')
parser.add_argument('--output_prefix', type=str, required=True, help='Output prefix for heatmap PNG files. Output is written to <output_prefix>_*.png, with one file per case found in csv.')
args = parser.parse_args()
df = pandas.read_csv(args.csvfile)
cases = get_cases(df)
for i, (grid, precision, axis_contiguous, out_of_place, managed, fft_mode) in enumerate(itertools.product(*cases.values())):
case_df, case_label = get_case_df(df, grid, precision, axis_contiguous, out_of_place, managed, fft_mode)
plot_case_heatmaps(case_df, case_label, f"{args.output_prefix}_{i}.png")
if __name__ == "__main__":
main()
# End of cuDecomp-main/benchmark/heatmap_scripts/plot_heatmaps.py
import argparse
import itertools
import math
import os
import subprocess
import sys
import time
import yaml
def get_factors(n):
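    """Return the sorted factors of n, collecting divisor pairs up to sqrt(n).

    Example: get_factors(12) -> [1, 2, 3, 4, 6, 12]
    """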
factors = []
for i in range(1, n + 1):
if i > math.sqrt(n):
break
if n % i == 0:
factors.append(i)
if (n // i != i):
factors.append(n // i)
return sorted(factors)
def load_yaml_config(yaml_file, config_name):
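    """Return the configuration block named config_name from a YAML file."""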
with open(yaml_file, 'r') as f:
config = yaml.safe_load(f)[config_name]
return config
def should_skip_case(arg_dict):
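    """Return True for redundant halo cases: a periodic flag with zero extent,
    or all halo extents zero."""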
skip = False
try:
# No need to test periodic flags if halo extent is zero
if arg_dict["hex"] == 0 and arg_dict["hpx"] == 1:
skip = True
if arg_dict["hey"] == 0 and arg_dict["hpy"] == 1:
skip = True
if arg_dict["hez"] == 0 and arg_dict["hpz"] == 1:
skip = True
# Skip cases with all halo extents as zero
if arg_dict["hex"] == 0 and arg_dict["hey"] == 0 and arg_dict["hez"] == 0:
skip = True
    except KeyError:
        # Configs without halo arguments have nothing to skip.
        pass
return skip
def generate_command_lines(config, args):
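    """Build the list of test command lines.

    Expands each dtype, (pr, pc) process-grid factorization, and configured
    argument combination, skipping redundant halo cases.
    """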
generic_cmd = f"{args.launcher_cmd}"
generic_cmd += " {0}"
generic_cmd += " --pr {1} --pc {2}"
cmds = []
for dtype in config['dtypes']:
prs = get_factors(args.ngpu)
pcs = [args.ngpu // x for x in prs]
if config['run_autotuning']:
prs = [0] + prs
pcs = [0] + pcs
            if 0 not in config['backend']:
config['backend'].append(0)
for pr, pc in zip(prs, pcs):
for vals in itertools.product(*[config[x] for x in config['args']]):
arg_dict = dict(zip(config['args'], vals))
cmd = generic_cmd.format(f"{config['executable_prefix']}_{dtype}", pr, pc)
cmd += " " + " ".join([f"--{x} {y}" for x, y in arg_dict.items()])
# Only run full (grid and backend) autotuning cases
if (config['run_autotuning']):
if ((pr == 0 and pc == 0) and arg_dict["backend"] != 0):
continue
elif ((pr != 0 and pc != 0) and arg_dict["backend"] == 0):
continue
# Check additional skip conditions
if should_skip_case(arg_dict): continue
extra_flags = []
extra_flags.append(['-m' if x else '' for x in config['managed_memory']])
extra_flags.append(['-o' if x else '' for x in config['out_of_place']])
for extras in itertools.product(*extra_flags):
cmds.append(f"{cmd} {' '.join(filter(lambda x: x != '', extras))}")
return cmds
def setup_env(config, args):
print("Setting environment variables...")
# Environment variables from config
for var, val in config['env_vars'].items():
os.environ[var] = f"{val}"
print(f"Set {var} = {val}")
def run_test(cmd, args):
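    """Run one test command and print PASSED/FAILED; returns True on success."""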
print(f"command: {cmd}", sep="")
cmd_fields = cmd.split()
failed = False
try:
status = subprocess.run(cmd_fields, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
timeout=300, check=True)
except subprocess.TimeoutExpired as ex:
print(f" FAILED (timeout)")
print(f"Failing output:\n{ex.stdout.decode('utf-8')}")
failed = True
except subprocess.CalledProcessError as ex:
print(f" FAILED")
print(f"Failing output:\n{ex.stdout.decode('utf-8')}")
failed = True
else:
print(" PASSED")
if failed:
if args.exit_on_failure:
print("Stopping tests...")
sys.exit(1)
return False
if args.verbose:
print(f"Passing output:\n{status.stdout.decode('utf-8')}")
return True
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--launcher_cmd', type=str, required=True, help='parallel launch command')
parser.add_argument('--ngpu', type=int, required=True, help='number of gpus')
parser.add_argument('--verbose', action='store_true', required=False, help='flag to enable full run output')
parser.add_argument('--exit_on_failure', action='store_true', required=False, help='flag to control whether script exits on case failure')
    parser.add_argument('config_name', type=str, help='configuration name from test_config.yaml')
args = parser.parse_args()
config = load_yaml_config("test_config.yaml", args.config_name)
cmds = generate_command_lines(config, args)
setup_env(config, args)
print(f"Running {len(cmds)} tests...")
t0 = time.time()
failed_cmds = []
    for i, c in enumerate(cmds):
status = run_test(c, args)
if not status:
failed_cmds.append(c)
if (i+1) % 10 == 0:
t1 = time.time()
print(f"Completed {i+1}/{len(cmds)} tests, running time {t1-t0} s")
print(f"Completed all tests, running time {time.time() - t0} s")
if len(failed_cmds) == 0:
print("Passed all tests.")
return 0
else:
print(f"Failed {len(failed_cmds)} / {len(cmds)} tests. Failing commands:")
for c in failed_cmds:
print(f"\t{c}")
return -1
if __name__ == "__main__":
    sys.exit(main())  # propagate the pass/fail exit code to the shell
# End of cuDecomp-main/tests/test_runner.py
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
import sphinx_rtd_theme
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'cuDecomp'
copyright = '2022, NVIDIA Corporation'
author = 'NVIDIA Corporation'
# The full version, including alpha/beta/rc tags
release = '2022'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'breathe',
'sphinx.ext.mathjax',
'sphinx_tabs.tabs',
'sphinxfortran.fortran_domain',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
#pygments_style = 'sphinx'
highlight_language = 'cpp'
def setup(app):
app.add_css_file('style.css')
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
html_theme_options = {
"collapse_navigation" : False,
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
breathe_projects = { "cuDecomp": "xml/" }
breathe_default_project = "cuDecomp"
# End of cuDecomp-main/docs/conf.py
"""
Author: Vinay Bagde
Modifier: Anurag Guda
Maintainer: Andrew Liu, Anurag Guda
Copyright (c) 2018 NVIDIA Corporation. All rights reserved.
"""
import sys
import os
from collections import OrderedDict
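# Maps a camera-count threshold to a (rows, columns) tiler layout;
# e.g. six cameras are shown on a 2x3 grid.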
tile_map = {1:(1,1), 2: (1,2), 4:(2,2), 6:(2,3), 8:(2,4), 10:(2,5), 12:(3,4), 15:(3,5), 18:(3,6)}
tile_map = OrderedDict(tile_map)
def read_camera_file():
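    """Read one camera URI per file under /etc/config and return the list."""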
camera_path = '/etc/config'
files = os.listdir(camera_path)
camera_ips = []
for file in files:
current_file = os.path.join(camera_path,file)
if os.path.isfile(current_file):
camera_file = open(current_file)
camera_ip = camera_file.readline()
camera_ip = camera_ip.strip("\n")
camera_file.close()
camera_ips.append(camera_ip)
return camera_ips
def main():
ips = read_camera_file()
print(ips)
print(len(ips))
n_rows = None
n_columns = None
for key,val in tile_map.items():
if len(ips) < key:
break
n_rows = val[0]
n_columns = val[1]
write_list = []
if len(ips) != 0:
with open(sys.argv[2]) as fp:
line = fp.readline()
while line:
pair = line.split("=")
if pair[0] == "rows":
pair[1] = str(n_rows)
elif pair[0] == "columns":
pair[1] = str(n_columns)
elif pair[0] == "batch-size":
pair[1] = str(len(ips))
output = line
if len(pair) > 1:
output = "=".join(pair)
output = output.replace('\n','')
write_list.append(output)
line = fp.readline()
fp.close()
else:
with open(sys.argv[2]) as fp:
line = fp.readline()
while line:
pair = line.split("=")
if pair[0] == "rows":
pair[1] = str("1")
elif pair[0] == "columns":
pair[1] = str("1")
elif pair[0] == "num-sources":
pair[1] = str("1")
elif pair[0] == "file-loop":
pair[1] = str("1")
elif pair[0] == "batch-size":
pair[1] = str("1")
output = line
if len(pair) > 1:
output = "=".join(pair)
output = output.replace('\n','')
write_list.append(output)
line = fp.readline()
fp.close()
if len(ips) != 0:
for index,ip in enumerate(ips):
write_list.append("\n")
write_list.append("[source{}]".format(index))
write_list.append("enable=1")
write_list.append("type=4")
write_list.append("uri={}".format(ip))
write_list.append("num-sources=1")
write_list.append("gpu-id=0")
write_list.append("cudadec-memtype=0")
write_list.append("\n")
write_list.append("[sink0]")
write_list.append("enable=1")
write_list.append("type=4")
write_list.append("container=1")
write_list.append("codec=1")
write_list.append("sync=0")
write_list.append("bitrate=2000000")
write_list.append("profile=0")
write_list.append("output-file=out.mp4")
write_list.append("source-id=0")
if len(ips) == 0:
write_list.append("\n")
write_list.append("[sink0]")
write_list.append("enable=1")
write_list.append("type=1")
write_list.append("sync=1")
write_list.append("codec=1")
write_list.append("bitrate=4000000")
write_list.append("rtsp-port=8554")
write_list.append("udp-port=5400")
write_list.append("source-id=0")
write_list.append("gpu-id=0")
write_list.append("nvbuf-memory-type=0")
write_list.append("\n")
write_list.append("[sink2]")
write_list.append("enable=1")
write_list.append("type=4")
write_list.append("container=1")
write_list.append("codec=1")
write_list.append("sync=1")
write_list.append("bitrate=2000000")
write_list.append("rtsp-port=8554")
write_list.append("udp-port=5400")
write_list.append("profile=0")
write_list.append("output-file=out.mp4")
write_list.append("source-id=0")
write_file = os.path.join(os.path.dirname(sys.argv[2]),'run.txt')
with open(write_file,"w") as file:
for line in write_list:
file.write(line)
file.write("\n")
file.close()
print(write_file)
os.system("{} -c {}".format(sys.argv[1],write_file))
if __name__ == '__main__':
main()
# End of fleet-command-master/azure-iot/video-analytics-demo/config/create_config_backup.py
"""
Author: Vinay Bagde
Modifier: Anurag Guda
Maintainer: Andrew Liu, Anurag Guda
Copyright (c) 2018 NVIDIA Corporation. All rights reserved.
"""
import sys
import os
from collections import OrderedDict
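# Maps a camera-count threshold to a (rows, columns) tiler layout;
# e.g. six cameras are shown on a 2x3 grid.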
tile_map = {1:(1,1), 2: (1,2), 4:(2,2), 6:(2,3), 8:(2,4), 10:(2,5), 12:(3,4), 15:(3,5), 18:(3,6)}
tile_map = OrderedDict(tile_map)
def read_camera_file():
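    """Read one camera URI per file under /etc/config and return the list."""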
camera_path = '/etc/config'
files = os.listdir(camera_path)
camera_ips = []
for file in files:
current_file = os.path.join(camera_path,file)
if os.path.isfile(current_file):
camera_file = open(current_file)
camera_ip = camera_file.readline()
camera_ip = camera_ip.strip("\n")
camera_file.close()
camera_ips.append(camera_ip)
return camera_ips
def main():
ips = read_camera_file()
print(ips)
print(len(ips))
n_rows = None
n_columns = None
for key,val in tile_map.items():
if len(ips) < key:
break
n_rows = val[0]
n_columns = val[1]
write_list = []
if len(ips) != 0:
with open(sys.argv[2]) as fp:
line = fp.readline()
while line:
pair = line.split("=")
if pair[0] == "rows":
pair[1] = str(n_rows)
elif pair[0] == "columns":
pair[1] = str(n_columns)
elif pair[0] == "batch-size":
pair[1] = str(len(ips))
output = line
if len(pair) > 1:
output = "=".join(pair)
output = output.replace('\n','')
write_list.append(output)
line = fp.readline()
fp.close()
else:
with open(sys.argv[2]) as fp:
line = fp.readline()
while line:
pair = line.split("=")
if pair[0] == "rows":
pair[1] = str("1")
elif pair[0] == "columns":
pair[1] = str("1")
elif pair[0] == "num-sources":
pair[1] = str("1")
elif pair[0] == "file-loop":
pair[1] = str("1")
elif pair[0] == "batch-size":
pair[1] = str("1")
output = line
if len(pair) > 1:
output = "=".join(pair)
output = output.replace('\n','')
write_list.append(output)
line = fp.readline()
fp.close()
gpulist = os.popen('nvidia-smi -L').read()
print(gpulist)
if 'T4' in gpulist:
if len(ips) != 0:
for index,ip in enumerate(ips):
write_list.append("\n")
write_list.append("[source{}]".format(index))
write_list.append("enable=1")
write_list.append("type=4")
write_list.append("uri={}".format(ip))
write_list.append("num-sources=1")
write_list.append("gpu-id=0")
write_list.append("cudadec-memtype=0")
write_list.append("\n")
write_list.append("[sink0]")
write_list.append("enable=1")
write_list.append("type=4")
write_list.append("container=1")
write_list.append("codec=1")
write_list.append("sync=0")
write_list.append("bitrate=2000000")
write_list.append("profile=0")
write_list.append("output-file=out.mp4")
write_list.append("source-id=0")
if len(ips) == 0:
write_list.append("\n")
write_list.append("[sink0]")
write_list.append("enable=1")
write_list.append("type=1")
write_list.append("sync=1")
write_list.append("codec=1")
write_list.append("bitrate=4000000")
write_list.append("rtsp-port=8554")
write_list.append("udp-port=5400")
write_list.append("source-id=0")
write_list.append("gpu-id=0")
write_list.append("nvbuf-memory-type=0")
write_list.append("\n")
write_list.append("[sink2]")
write_list.append("enable=1")
write_list.append("type=4")
write_list.append("container=1")
write_list.append("codec=1")
write_list.append("sync=1")
write_list.append("bitrate=2000000")
write_list.append("rtsp-port=8554")
write_list.append("udp-port=5400")
write_list.append("profile=0")
write_list.append("output-file=out.mp4")
write_list.append("source-id=0")
else:
if len(ips) != 0:
for index,ip in enumerate(ips):
write_list.append("\n")
write_list.append("[source{}]".format(index))
write_list.append("enable=1")
write_list.append("type=4")
write_list.append("uri={}".format(ip))
write_list.append("num-sources=1")
write_list.append("gpu-id=0")
write_list.append("cudadec-memtype=0")
write_list.append("\n")
write_list.append("[sink0]")
write_list.append("enable=1")
write_list.append("type=4")
write_list.append("container=1")
write_list.append("codec=1")
write_list.append("enc-type=1")
write_list.append("sync=0")
write_list.append("bitrate=2000000")
write_list.append("profile=0")
write_list.append("output-file=out.mp4")
write_list.append("source-id=0")
if len(ips) == 0:
write_list.append("\n")
write_list.append("[sink0]")
write_list.append("enable=1")
write_list.append("type=1")
write_list.append("sync=1")
write_list.append("codec=1")
write_list.append("enc-type=1")
write_list.append("bitrate=4000000")
write_list.append("rtsp-port=8554")
write_list.append("udp-port=5400")
write_list.append("source-id=0")
write_list.append("gpu-id=0")
write_list.append("nvbuf-memory-type=0")
write_list.append("\n")
write_list.append("[sink2]")
write_list.append("enable=1")
write_list.append("type=4")
write_list.append("container=1")
write_list.append("codec=1")
write_list.append("enc-type=1")
write_list.append("sync=1")
write_list.append("bitrate=2000000")
write_list.append("rtsp-port=8554")
write_list.append("udp-port=5400")
write_list.append("profile=0")
write_list.append("output-file=out.mp4")
write_list.append("source-id=0")
write_file = os.path.join(os.path.dirname(sys.argv[2]),'run.txt')
with open(write_file,"w") as file:
for line in write_list:
file.write(line)
file.write("\n")
file.close()
print(write_file)
os.system("{} -c {}".format(sys.argv[1],write_file))
if __name__ == '__main__':
main()
# End of fleet-command-master/azure-iot/video-analytics-demo/config/create_config.py
"""
Author: Vinay Bagde
Modifier: Anurag Guda
Maintainer: Andrew Liu, Anurag Guda
Copyright (c) 2018 NVIDIA Corporation. All rights reserved.
"""
import sys
import os
from collections import OrderedDict
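# Maps a camera-count threshold to a (rows, columns) tiler layout;
# e.g. six cameras are shown on a 2x3 grid.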
tile_map = {1:(1,1), 2: (1,2), 4:(2,2), 6:(2,3), 8:(2,4), 10:(2,5), 12:(3,4), 15:(3,5), 18:(3,6)}
tile_map = OrderedDict(tile_map)
def read_camera_file():
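    """Read one camera URI per file under /etc/config and return the list."""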
camera_path = '/etc/config'
files = os.listdir(camera_path)
camera_ips = []
for file in files:
current_file = os.path.join(camera_path,file)
if os.path.isfile(current_file):
camera_file = open(current_file)
camera_ip = camera_file.readline()
camera_ip = camera_ip.strip("\n")
camera_file.close()
camera_ips.append(camera_ip)
return camera_ips
def main():
ips = read_camera_file()
print(ips)
print(len(ips))
n_rows = None
n_columns = None
for key,val in tile_map.items():
if len(ips) < key:
break
n_rows = val[0]
n_columns = val[1]
write_list = []
if len(ips) != 0:
with open(sys.argv[2]) as fp:
line = fp.readline()
while line:
pair = line.split("=")
if pair[0] == "rows":
pair[1] = str(n_rows)
elif pair[0] == "columns":
pair[1] = str(n_columns)
elif pair[0] == "batch-size":
pair[1] = str(len(ips))
output = line
if len(pair) > 1:
output = "=".join(pair)
output = output.replace('\n','')
write_list.append(output)
line = fp.readline()
fp.close()
else:
with open(sys.argv[2]) as fp:
line = fp.readline()
while line:
pair = line.split("=")
if pair[0] == "rows":
pair[1] = str("1")
elif pair[0] == "columns":
pair[1] = str("1")
elif pair[0] == "num-sources":
pair[1] = str("1")
elif pair[0] == "file-loop":
pair[1] = str("1")
elif pair[0] == "batch-size":
pair[1] = str("1")
output = line
if len(pair) > 1:
output = "=".join(pair)
output = output.replace('\n','')
write_list.append(output)
line = fp.readline()
fp.close()
if len(ips) != 0:
for index,ip in enumerate(ips):
write_list.append("\n")
write_list.append("[source{}]".format(index))
write_list.append("enable=1")
write_list.append("type=4")
write_list.append("uri={}".format(ip))
write_list.append("num-sources=1")
write_list.append("gpu-id=0")
write_list.append("cudadec-memtype=0")
write_list.append("\n")
write_list.append("[sink0]")
write_list.append("enable=1")
write_list.append("type=4")
write_list.append("container=1")
write_list.append("codec=1")
write_list.append("enc-type=1")
write_list.append("sync=0")
write_list.append("bitrate=2000000")
write_list.append("profile=0")
write_list.append("output-file=out.mp4")
write_list.append("source-id=0")
if len(ips) == 0:
write_list.append("\n")
write_list.append("[sink0]")
write_list.append("enable=1")
write_list.append("type=1")
write_list.append("sync=1")
write_list.append("codec=1")
write_list.append("enc-type=1")
write_list.append("bitrate=4000000")
write_list.append("rtsp-port=8554")
write_list.append("udp-port=5400")
write_list.append("source-id=0")
write_list.append("gpu-id=0")
write_list.append("nvbuf-memory-type=0")
write_list.append("\n")
write_list.append("[sink2]")
write_list.append("enable=1")
write_list.append("type=4")
write_list.append("container=1")
write_list.append("codec=1")
write_list.append("enc-type=1")
write_list.append("sync=1")
write_list.append("bitrate=2000000")
write_list.append("rtsp-port=8554")
write_list.append("udp-port=5400")
write_list.append("profile=0")
write_list.append("output-file=out.mp4")
write_list.append("source-id=0")
write_file = os.path.join(os.path.dirname(sys.argv[2]),'run.txt')
with open(write_file,"w") as file:
for line in write_list:
file.write(line)
file.write("\n")
file.close()
print(write_file)
os.system("{} -c {}".format(sys.argv[1],write_file))
if __name__ == '__main__':
main()
# End of fleet-command-master/azure-iot/video-analytics-demo/config/create_config_a100.py
# Copyright (c) 2021 NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import asyncio
from azure.iot.device.aio import IoTHubDeviceClient
import time
import datetime
async def main():
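    """Send the host's current time and the tail of the DeepStream log to Azure IoT Hub."""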
    # Fetch the connection string from an environment variable
conn_str = os.getenv("IOTHUB_DEVICE_CONNECTION_STRING")
hostname = os.getenv("HOSTNAME")
# Create instance of the device client using the authentication provider
device_client = IoTHubDeviceClient.create_from_connection_string(conn_str)
# Connect the device client.
await device_client.connect()
# Send a single message
print("Sending message to {}".format(conn_str))
#tail = sh.tail("-f", "/home/deepstream.log", _iter=True)
tail = os.popen("cat /home/deepstream.log | tail -125f | grep -v -e '^$'")
#await device_client.send_message("This is a message that is being sent from {}".format(hostname))
await device_client.send_message("{} host current time is {}".format(hostname, str(datetime.datetime.now())))
await device_client.send_message(tail.read())
#await device_client.send_message("This is a message that is being sent from {}".format(hostname))
print("Message successfully sent!")
print()
# finally, disconnect
await device_client.disconnect()
if __name__ == "__main__":
while True:
time.sleep(3)
asyncio.run(main())
# End of fleet-command-master/azure-iot/azure-iot-read/azure-send-message-deviceto-cloud.py
# Copyright (c) 2021 NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
from azure.eventhub import TransportType
from azure.eventhub.aio import EventHubConsumerClient
import os
import logging
# Event Hub-compatible endpoint
# az iot hub show --query properties.eventHubEndpoints.events.endpoint --name {your IoT Hub name}
EVENTHUB_COMPATIBLE_ENDPOINT = "{your Event Hubs compatible endpoint}"
# Event Hub-compatible name
# az iot hub show --query properties.eventHubEndpoints.events.path --name {your IoT Hub name}
EVENTHUB_COMPATIBLE_PATH = "{your Event Hubs compatible name}"
# Primary key for the "service" policy to read messages
# az iot hub policy show --name service --query primaryKey --hub-name {your IoT Hub name}
IOTHUB_SAS_KEY = "{your service primary key}"
# If you have access to the Event Hub-compatible connection string from the Azure portal, then
# you can skip the Azure CLI commands above, and assign the connection string directly here.
#CONNECTION_STR = f'Endpoint={EVENTHUB_COMPATIBLE_ENDPOINT}/;SharedAccessKeyName=service;SharedAccessKey={IOTHUB_SAS_KEY};EntityPath={EVENTHUB_COMPATIBLE_PATH}'
CONNECTION_STR = os.getenv("IOTHUB_CONNECTION_STRING")
# Define callbacks to process events
async def on_event_batch(partition_context, events):
for event in events:
#print("Received event from partition: {}.".format(partition_context.partition_id))
print("Telemetry received: ", event.body_as_str())
#print("Properties (set by device): ", event.properties)
#print("System properties (set by IoT Hub): ", event.system_properties)
print()
await partition_context.update_checkpoint()
async def on_error(partition_context, error):
# Put your code here. partition_context can be None in the on_error callback.
if partition_context:
print("An exception: {} occurred during receiving from Partition: {}.".format(
partition_context.partition_id,
error
))
else:
print("An exception: {} occurred during the load balance process.".format(error))
async def main():
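    """Receive one short batch of IoT Hub events, then cancel and close the client."""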
client = EventHubConsumerClient.from_connection_string(
conn_str=CONNECTION_STR,
consumer_group="$default",
eventhub_name = os.getenv("IOTHUB_NAME")
#eventhub_name="egx-iot"
# transport_type=TransportType.AmqpOverWebsocket, # uncomment it if you want to use web socket
# http_proxy={ # uncomment if you want to use proxy
# 'proxy_hostname': '127.0.0.1', # proxy hostname.
# 'proxy_port': 3128, # proxy port.
# 'username': '<proxy user name>',
# 'password': '<proxy password>'
# }
)
try:
#print("try loop")
recv_task = asyncio.ensure_future(client.receive_batch(on_event_batch=on_event_batch, on_error=on_error))
#loop.run_until_complete(client.receive_batch(on_event_batch=on_event_batch, on_error=on_error))
await asyncio.sleep(3)
recv_task.cancel()
await client.close()
except KeyboardInterrupt:
print("Receiving has stopped.")
if __name__ == '__main__':
while True:
asyncio.run(main())
# End of fleet-command-master/azure-iot/azure-iot-read/read.py
"""
# Copyright (c) 2021 NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
import sys
import os
from collections import OrderedDict
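# Maps a camera-count threshold to a (rows, columns) tiler layout;
# e.g. six cameras are shown on a 2x3 grid.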
tile_map = {1:(1,1), 2: (1,2), 4:(2,2), 6:(2,3), 8:(2,4), 10:(2,5), 12:(3,4), 15:(3,5), 18:(3,6)}
tile_map = OrderedDict(tile_map)
def read_camera_file():
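    """Read one camera URI per file under /etc/config and return the list."""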
camera_path = '/etc/config'
files = os.listdir(camera_path)
camera_ips = []
for file in files:
current_file = os.path.join(camera_path,file)
if os.path.isfile(current_file):
camera_file = open(current_file)
camera_ip = camera_file.readline()
camera_ip = camera_ip.strip("\n")
camera_file.close()
camera_ips.append(camera_ip)
return camera_ips
def main():
ips = read_camera_file()
print(ips)
print(len(ips))
n_rows = None
n_columns = None
for key,val in tile_map.items():
if len(ips) < key:
break
n_rows = val[0]
n_columns = val[1]
write_list = []
if len(ips) != 0:
with open(sys.argv[2]) as fp:
line = fp.readline()
while line:
pair = line.split("=")
if pair[0] == "rows":
pair[1] = str(n_rows)
elif pair[0] == "columns":
pair[1] = str(n_columns)
elif pair[0] == "batch-size":
pair[1] = str(len(ips))
output = line
if len(pair) > 1:
output = "=".join(pair)
output = output.replace('\n','')
write_list.append(output)
line = fp.readline()
fp.close()
else:
with open(sys.argv[2]) as fp:
line = fp.readline()
while line:
pair = line.split("=")
if pair[0] == "rows":
pair[1] = str("1")
elif pair[0] == "columns":
pair[1] = str("1")
elif pair[0] == "num-sources":
pair[1] = str("1")
elif pair[0] == "file-loop":
pair[1] = str("1")
elif pair[0] == "batch-size":
pair[1] = str("1")
output = line
if len(pair) > 1:
output = "=".join(pair)
output = output.replace('\n','')
write_list.append(output)
line = fp.readline()
fp.close()
if len(ips) != 0:
for index,ip in enumerate(ips):
write_list.append("\n")
write_list.append("[source{}]".format(index))
write_list.append("enable=1")
write_list.append("type=4")
write_list.append("uri={}".format(ip))
write_list.append("num-sources=1")
write_list.append("gpu-id=0")
write_list.append("cudadec-memtype=0")
write_list.append("\n")
write_list.append("[sink0]")
write_list.append("enable=1")
write_list.append("type=4")
write_list.append("container=1")
write_list.append("codec=1")
write_list.append("sync=0")
write_list.append("bitrate=2000000")
write_list.append("profile=0")
write_list.append("output-file=out.mp4")
write_list.append("source-id=0")
if len(ips) == 0:
write_list.append("\n")
write_list.append("[sink0]")
write_list.append("enable=1")
write_list.append("type=1")
write_list.append("sync=1")
write_list.append("codec=1")
write_list.append("bitrate=4000000")
write_list.append("rtsp-port=8554")
write_list.append("udp-port=5400")
write_list.append("source-id=0")
write_list.append("gpu-id=0")
write_list.append("nvbuf-memory-type=0")
write_list.append("\n")
write_list.append("[sink2]")
write_list.append("enable=1")
write_list.append("type=4")
write_list.append("container=1")
write_list.append("codec=1")
write_list.append("sync=1")
write_list.append("bitrate=2000000")
write_list.append("rtsp-port=8554")
write_list.append("udp-port=5400")
write_list.append("profile=0")
write_list.append("output-file=out.mp4")
write_list.append("source-id=0")
write_file = os.path.join(os.path.dirname(sys.argv[2]),'run.txt')
with open(write_file,"w") as file:
for line in write_list:
file.write(line)
file.write("\n")
file.close()
print(write_file)
os.system("{} -c {}".format(sys.argv[1],write_file))
if __name__ == '__main__':
main()
# End of fleet-command-master/azure-iot/video-analytics-demo-old/config/create_config.py
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Setup script to build Data-Services."""
import os
import setuptools
from release.python.utils import utils
PACKAGE_LIST = [
"nvidia_tao_ds"
]
version_locals = utils.get_version_details()
setuptools_packages = []
for package_name in PACKAGE_LIST:
setuptools_packages.extend(utils.find_packages(package_name))
setuptools.setup(
name=version_locals['__package_name__'],
version=version_locals['__version__'],
description=version_locals['__description__'],
author='NVIDIA Corporation',
classifiers=[
'Environment :: Console',
'License :: Other/Proprietary License',
'Natural Language :: English',
'Operating System :: Linux',
'Programming Language :: Python :: 3.8',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
],
license=version_locals['__license__'],
keywords=version_locals['__keywords__'],
packages=setuptools_packages,
package_data={
'': ['*.py', "*.pyc", "*.yaml", "*.so", "*.pdf"]
},
include_package_data=True,
zip_safe=False,
entry_points={
'console_scripts': [
'augmentation=nvidia_tao_ds.augment.entrypoint.augment:main',
'auto_label=nvidia_tao_ds.auto_label.entrypoint.auto_label:main',
'annotations=nvidia_tao_ds.annotations.entrypoint.annotations:main',
'analytics=nvidia_tao_ds.data_analytics.entrypoint.analytics:main'
]
}
)
# End of tao_dataset_suite-main/setup.py
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Instantiate the Data-Services docker container for developers."""
import argparse
from distutils.version import LooseVersion
import json
import os
import subprocess
import sys
ROOT_DIR = os.getenv("NV_TAO_DS_TOP", os.path.dirname(os.path.dirname(os.getcwd())))
with open(os.path.join(ROOT_DIR, "docker/manifest.json"), "r") as m_file:
docker_config = json.load(m_file)
DOCKER_REGISTRY = docker_config["registry"]
DOCKER_REPOSITORY = docker_config["repository"]
DOCKER_TAG = docker_config["tag"]
DOCKER_COMMAND = "docker"
HOME_PATH = os.path.expanduser("~")
MOUNTS_PATH = os.path.join(HOME_PATH, ".tao_mounts.json")
def get_docker_mounts_from_file(mounts_file=MOUNTS_PATH):
"""Check for docker mounts in ~/.tao_mounts.json."""
if not os.path.exists(mounts_file):
return []
with open(mounts_file, 'r') as mfile:
data = json.load(mfile)
assert "Mounts" in list(data.keys()), "Invalid json file. Requires Mounts key."
return data["Mounts"]
def format_mounts(mount_points):
"""Format mount points to docker recognizable commands."""
formatted_mounts = []
# Traverse through mount points and add format them for the docker command.
for mount_point in mount_points:
assert "source" in list(mount_point.keys()), "destination" in list(mount_point.keys())
mount = "{}:{}".format(mount_point["source"], mount_point["destination"])
formatted_mounts.append(mount)
return formatted_mounts
def check_image_exists(docker_image):
"""Check if the image exists locally."""
check_command = '{} images | grep "\\<{}\\>" | grep "{}" >/dev/null 2>&1'.format(DOCKER_COMMAND, docker_image, DOCKER_TAG)
rc = subprocess.call(check_command, stdout=sys.stderr, shell=True)
return rc == 0
def pull_base_container(docker_image):
"""Pull the default base container."""
pull_command = "{} pull {}:{}".format(DOCKER_COMMAND, docker_image, DOCKER_TAG)
rc = subprocess.call(pull_command, stdout=sys.stderr, shell=True)
return rc == 0
def get_formatted_mounts(mount_file):
"""Simple function to get default mount points."""
default_mounts = get_docker_mounts_from_file(mount_file)
return format_mounts(default_mounts)
def check_mounts(formatted_mounts):
"""Check the formatted mount commands."""
    assert isinstance(formatted_mounts, list)
for mounts in formatted_mounts:
source_path = mounts.split(":")[0]
if not os.path.exists(source_path):
raise ValueError("Path doesn't exist: {}".format(source_path))
return True
def get_docker_gpus_prefix(gpus):
"""Get the docker command gpu's prefix."""
docker_version = (
subprocess.check_output(
["docker", "version", "--format={{ .Server.APIVersion }}"]
)
.strip()
.decode()
)
if LooseVersion(docker_version) >= LooseVersion("1.40"):
# You are using the latest version of docker using
# --gpus instead of the nvidia runtime.
gpu_string = "--gpus "
if gpus == "all":
gpu_string += "all"
else:
gpu_string += '\'\"device={}\"\''.format(gpus)
else:
# Stick to the older version of getting the gpu's using runtime=nvidia
gpu_string = "--runtime=nvidia -e NVIDIA_DRIVER_CAPABILITIES=all "
if gpus != "none":
gpu_string += "-e NVIDIA_VISIBLE_DEVICES={}".format(gpus)
return gpu_string
def create_base_docker():
"""Function to create the base docker."""
create_command = "bash {}/docker/build.sh --build".format(ROOT_DIR)
try:
subprocess.run(create_command, stdout=sys.stderr, shell=True, check=True)
except subprocess.CalledProcessError as e:
raise RuntimeError(f"Container build failed with error {e}")
def instantiate_dev_docker(gpus, mount_file,
mount_cli_list,
env_var_list,
command, ulimit=None,
shm_size="16G", run_as_user=False):
"""Instiate the docker container."""
docker_image = "{}/{}:{}".format(DOCKER_REGISTRY, DOCKER_REPOSITORY, DOCKER_TAG)
# Invoking the nvidia docker.
gpu_string = get_docker_gpus_prefix(gpus)
# Prefix for the run command.
run_command = "{} run -it --rm".format(DOCKER_COMMAND)
# get default mount points.
formatted_mounts = get_formatted_mounts(MOUNTS_PATH)
# get mounts from cli mount file.
formatted_mounts += get_formatted_mounts(mount_file)
if mount_cli_list is not None:
formatted_mounts.extend(mount_cli_list)
assert check_mounts(formatted_mounts), "Mounts don't exists, Please make sure the paths all exist."
mount_string = "-v {}:/workspace ".format(os.getenv("NV_TAO_DS_TOP", os.getcwd()))
# Defining env variables.
env_variables = "-e PYTHONPATH={}:$PYTHONPATH ".format("/workspace")
for env in env_var_list:
if "=" not in env:
print("invalid env variable definition. skipping this {}".format(env))
continue
env_variables += "-e {} ".format(env)
for path in formatted_mounts:
mount_string += "-v {} ".format(path)
# Setting shared memory.
shm_option = "--shm-size {}".format(shm_size)
# Setting ulimits for host
ulimit_options = ""
if ulimit is not None:
for param in ulimit:
ulimit_options += "--ulimit {} ".format(param)
user_option = ""
if run_as_user:
user_option = "--user {}:{}".format(os.getuid(), os.getgid())
working_directory = "/workspace"
working_dir_option = f"-w {working_directory}"
final_command = "{} {} {} {} {} {} {} {} {} {}".format(
run_command, gpu_string,
mount_string, env_variables,
shm_option, ulimit_options, user_option,
working_dir_option,
docker_image, " ".join(command)
)
print(final_command)
return subprocess.check_call(final_command, stdout=sys.stderr, shell=True)
def parse_cli_args(args=None):
"""Parse run container command line."""
parser = argparse.ArgumentParser(
prog="tao_ds",
description="Tool to run the Data-Services container.",
add_help=True)
parser.add_argument(
"--gpus",
default="all",
type=str,
help="Comma separated GPU indices to be exposed to the docker."
)
parser.add_argument(
"--volume",
action="append",
type=str,
default=[],
help="Volumes to bind."
)
parser.add_argument(
"--env",
action="append",
type=str,
default=[],
help="Environment variables to bind."
)
parser.add_argument(
"--mounts_file",
help="Path to the mounts file.",
default="",
type=str
)
parser.add_argument(
"--shm_size",
help="Shared memory size for docker",
default="16G",
type=str
)
parser.add_argument(
"--run_as_user",
help="Flag to run as user",
action="store_true",
default=False
)
parser.add_argument(
"--ulimit",
action='append',
help="Docker ulimits for the host machine."
)
args = vars(parser.parse_args(args))
return args
def main(cl_args=None):
"""Start docker container."""
if "--" in cl_args:
index = cl_args.index("--")
# Split args to the tao docker wrapper and the command to be run inside the docker.
ds_args = cl_args[:index]
command_args = cl_args[index + 1:]
else:
ds_args = cl_args
command_args = ["/bin/bash"]
args = parse_cli_args(ds_args)
docker_image = "{}/{}".format(DOCKER_REGISTRY, DOCKER_REPOSITORY)
if not check_image_exists(docker_image):
if not pull_base_container(docker_image):
print("The base container doesn't exist locally and the pull failed. Hence creating the base container")
create_base_docker()
try:
instantiate_dev_docker(
args["gpus"], args["mounts_file"],
args["volume"], args["env"],
command_args, args["ulimit"],
args["shm_size"], args["run_as_user"]
)
except subprocess.CalledProcessError:
# Do nothing - the errors are printed in entrypoint launch.
pass
if __name__ == "__main__":
main(sys.argv[1:])
# End of tao_dataset_suite-main/runner/tao_ds.py
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""KITTI visualization tool."""
from PIL import Image, ImageDraw
from glob import glob
import os
import argparse
def draw_one_image(image_file, kitti_file, output_dir):
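    """Draw the KITTI bounding boxes from kitti_file onto image_file and save
    the annotated image into output_dir."""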
img = Image.open(image_file)
draw = ImageDraw.Draw(img)
    with open(kitti_file, 'r') as f:
        for line in f:
            # Fields 4:8 of a KITTI label line are the bbox corners (left, top, right, bottom).
            po = [float(x) for x in line.split(' ')[4:8]]
            draw.rectangle(po, outline=(0, 0, 255, 200))
img.save(os.path.join(output_dir, os.path.basename(kitti_file)+'.jpg'))
def draw_dir(imagedir, labeldir, output_dir, ext='.png'):
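    """Annotate every image in imagedir that has a matching KITTI label in labeldir."""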
assert os.path.isdir(imagedir), "Image dir invalid."
assert os.path.isdir(labeldir), "Label dir invalid."
if not os.path.isdir(output_dir):
os.mkdir(output_dir)
for labelpath in sorted(glob(labeldir + '/*.txt')):
imgpath = os.path.join(imagedir, os.path.basename(labelpath)[:-4] + ext)
if os.path.isfile(imgpath):
# print imgpath
draw_one_image(imgpath, labelpath, output_dir)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="KITTI visualization tool.")
parser.add_argument('-i', '--imagedir',
help="Image Dir",
type=str,
required=True)
parser.add_argument('-l', '--labeldir',
help="Label Dir",
type=str,
required=True)
parser.add_argument('-o', '--outputdir',
help="Output Dir",
type=str,
required=True)
parser.add_argument('-e', '--extension',
help="File extension",
type=str,
default='.png')
args = parser.parse_args()
draw_dir(args.imagedir, args.labeldir, args.outputdir, args.extension)
# End of tao_dataset_suite-main/internal/visualize_kitti.py
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import json
import PIL
import shutil
import random
import argparse
import numpy as np
from PIL import Image
from tqdm import tqdm_notebook as tqdm
from joblib import parallel_backend
from joblib import Parallel, delayed
from split_data.utils.util import read_kitti_labels, save_kitti_labels
from pdb import set_trace as bp
from argparse import ArgumentParser
from PIL import Image
from distutils.dir_util import copy_tree
from itertools import chain,repeat
def copy_files(f,dest_dir,splits,split_count,images_path,labels_path):
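    """Copy one label file and its image into the split chosen by
    split_count % splits, skipping empty label files."""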
if ".txt" in f:
try:
dest_split_dir = dest_dir + "/%d/" % (split_count % splits)
image_dest_dir = dest_split_dir + "/images_final_hres"
label_dest_dir = dest_split_dir + "/labels_final_hres"
img_id,ext = f.split(".txt")
img = img_id + ".jpg"
seq_dir = images_path.split("/")[-3]
image_file = os.path.join(images_path,img)
label_file = os.path.join(labels_path,f)
dest_image_file = os.path.join(image_dest_dir,img)
dest_label_file = os.path.join(label_dest_dir,f)
            missing_count = 0
if not os.path.getsize(label_file) == 0:
shutil.copyfile(label_file,dest_label_file)
shutil.copyfile(image_file,dest_image_file)
else:
print(label_file)
except Exception as e:
print ("Not a right file")
print (f)
else:
print ("Not a text file")
print (f)
def sample_dataset(dataset,splits,dest_dir,percent):
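    """Sample (percent < 1) or replicate (percent >= 1) the label files under
    the dataset, log assignments to data_sources.csv, and copy the pairs in parallel."""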
dataset_file = str("data_sources.csv")
file_name = os.path.join(dest_dir,dataset_file)
images_path = dataset + "/images_final_hres/"
labels_path = dataset + "/labels_final_hres/"
if os.path.exists(labels_path):
data = os.listdir(labels_path)
if len(data) < 10:
print ("data source is wrong")
print (dataset)
images_path = dataset + "/images_final_960x544"
labels_path = dataset + "/labels_final_960x544/"
print (images_path)
print (labels_path)
data = os.listdir(labels_path)
print ("other folder works", len(data))
if percent < 1:
data_samples = round(percent*len(data))
print ("samples", data_samples)
data_list = random.sample(data,k=int(data_samples))
else:
data_list = list(chain.from_iterable(repeat(item, percent) for item in data))
file = open(file_name, 'w')
quick_count = 0
random.shuffle(data_list)
for item in data_list:
split_num = quick_count % splits
file.write("%s,%s,%d,%s\n" % (dest_dir,item,split_num,dataset))
quick_count +=1
file.close()
split_count = 0
with parallel_backend("threading", n_jobs=8):
Parallel(verbose=10)(delayed(copy_files)(f,dest_dir,splits,split_count,images_path,labels_path) for split_count,f in enumerate(data_list))
else:
print ("labels_dir does not exist")
def create_dir_if_not_exists(dir):
if os.path.isdir(dir) == False:
os.mkdir(dir)
def copy_dataset(project_folder,rotation_folder,item):
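    """Copy one image/label pair from project_folder into the rotation split named by item[2]."""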
image_id = item[1].split(".txt")[0]
src_image_path = project_folder + "/images_final_fres/" + image_id + ".jpg"
src_labels_path = project_folder + "/labels_final_fres/" + image_id + ".txt"
dest_image_folder = rotation_folder + str(item[2]) + "/images_final_fres/"
dest_labels_folder = rotation_folder + str(item[2]) + "/labels_final_fres/"
    create_dir_if_not_exists(dest_image_folder)
    create_dir_if_not_exists(dest_labels_folder)
dest_image_path = dest_image_folder + image_id + ".jpg"
dest_labels_path = dest_labels_folder + image_id + ".txt"
try:
shutil.copyfile(src_image_path,dest_image_path)
shutil.copyfile(src_labels_path,dest_labels_path)
except:
print (src_image_path)
def split(inp_dir, dest_dir, n=12, p=1):
create_dir_if_not_exists(dest_dir)
for i in range(n):
dest_split_dir = dest_dir + "/%d/" % (i)
create_dir_if_not_exists(dest_split_dir)
image_dest_dir = dest_split_dir + "/images_final_hres"
create_dir_if_not_exists(image_dest_dir)
label_dest_dir = dest_split_dir + "/labels_final_hres"
create_dir_if_not_exists(label_dest_dir)
sample_dataset(inp_dir,n,dest_dir,p)
def parse_command_line(args=None):
"""
Function to parse the command line arguments
Args:
args(list): list of arguments to be parsed.
"""
parser = argparse.ArgumentParser(
prog='split_data',
description='Split the dataset for performing augmentation'
)
parser.add_argument(
'-i',
'--input-directory',
required=True,
help='Snapshot data directory'
)
parser.add_argument(
'-o',
'--output-directory',
required=True,
help='Directory to output the data splits'
)
parser.add_argument(
'-n',
'--splits',
type=int,
required=True,
help='Number of splits'
)
return parser.parse_args(args)
def main(cl_args=None):
"""
Split dataset and perform augmentations.
Args:
cl_args(list): list of arguments.
"""
args = parse_command_line(args=cl_args)
try:
        split(args.input_directory, args.output_directory, args.splits)
except RuntimeError as e:
print("Data Split execution failed with error: {}".format(e))
exit(-1)
if __name__ == '__main__':
main() | tao_dataset_suite-main | internal/split_data.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import csv
import argparse
from time import sleep
from tqdm import tqdm
from tqdm import trange
label_map = {'baggage': ['handbag', 'suitcase', 'backpack'],
'person': ['person'],
'lp': ['lp', 'LPD'],
             'face': ['face']}
OMIT_LIST = ['crops']
def read_kitti_labels(label_file):
"Function wrapper to read kitti format labels txt file."
label_list = []
if not os.path.exists(label_file):
raise ValueError("Labelfile : {} does not exist".format(label_file))
with open(label_file, 'r') as lf:
for row in csv.reader(lf, delimiter=' '):
label_list.append(row)
return label_list
def filter_objects(child_objects, class_key):
"Extract object metadata of class name class key."
filtered_objects = []
for item in child_objects:
if item[0] in label_map[class_key]:
item[0] = class_key
# if class_key == 'lp':
# item.append("1ABC2345")
filtered_objects.append(item)
return filtered_objects
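# A minimal usage sketch (hypothetical rows): 'handbag' maps to the 'baggage'
# class key in label_map, so it is renamed and kept, while 'car' is dropped.
#   rows = [['handbag', '0.0', '0'], ['car', '0.0', '0']]
#   filter_objects(rows, 'baggage')  # -> [['baggage', '0.0', '0']]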
def save_kitti_labels(objects, output_file):
"Function wrapper to save kitti format bbox labels to txt file."
with open(output_file, 'w') as outfile:
outwrite=csv.writer(outfile, delimiter=' ')
for row in objects:
outwrite.writerow(row)
def main_kitti_concatenate(args):
"Function wrapper to concatenate kitti format labels from two different directories for same images to one file."
class_key = args.filter_class
parent_root = args.parent_dir
child_root = args.child_dir
output_root = args.output_dir
if not os.path.exists(parent_root):
raise ValueError("Parent label path: {} does not exist".format(parent_root))
if not os.path.exists(child_root):
raise ValueError("Child label path: {} does not exist".format(parent_root))
if not os.path.exists(output_root):
os.mkdir(output_root)
parent_label_list = [ item for item in sorted(os.listdir(parent_root)) if item.endswith('.txt') ]
for label in tqdm(parent_label_list):
child_label = os.path.join(child_root, label)
parent_label = os.path.join(parent_root, label)
parent_objects = read_kitti_labels(parent_label)
if os.path.exists(child_label):
child_objects = read_kitti_labels(child_label)
filtered_objects = filter_objects(child_objects, class_key)
            if filtered_objects is not None:
for elem in filtered_objects:
parent_objects.append(elem)
output_file = os.path.join(output_root, label)
save_kitti_labels(parent_objects, output_file)
def class_rename(input_dir, filter_class, new_classname, output_dir):
"Function wrapper to rename classes for objects in kitti format label file."
class_key = filter_class
parent_root = input_dir
output_dir = output_dir
new_name = new_classname
    lv = True
    if new_name is not None:
if not os.path.exists(parent_root):
raise ValueError("Input directory: {} does not exist".format(parent_root))
if not os.path.exists(output_dir):
os.mkdir(output_dir)
labels_list = [item for item in os.listdir(parent_root) if item.endswith('.txt')]
for i in trange(len(labels_list), desc='frame_list', leave=lv):
renamed_data = []
label = labels_list[i]
read_file = os.path.join(parent_root, label)
output_file = os.path.join(output_dir, label)
label_data = read_kitti_labels(read_file)
for item in label_data:
if item[0].lower() == class_key.lower():
item[0] = new_name
renamed_data.append(item)
save_kitti_labels(renamed_data, output_file)
def KPI_rename_wrapper(args):
"Function wrapper to rename label files for multiple sequences in a directory."
KPI_root = args.parent_dir
class_key = args.filter_class
output_root = KPI_root
new_name = args.new_name
KPI_directory_list = [os.path.join(KPI_root, item) for item in os.listdir(KPI_root) \
if os.path.isdir(os.path.join(KPI_root, item)) and \
item not in OMIT_LIST]
for i in trange(len(KPI_directory_list), desc='sequence_list', leave=True):
        sequence = KPI_directory_list[i]
input_dir = os.path.join(sequence, 'labels')
output_dir = os.path.join(sequence, 'labels_renamed')
class_rename(input_dir, class_key, new_name, output_dir)
def parse_command_line():
parser = argparse.ArgumentParser(description='Label concatenation tool')
parser.add_argument('-p',
'--parent_dir',
help='Parent directory of labels.',
default=None)
parser.add_argument('-ch',
'--child_dir',
help='Child directory of labels.',
default=None)
parser.add_argument('-o',
'--output_dir',
help='Output directory for concatenated labels',
default=None)
parser.add_argument('-fc',
'--filter_class',
help='Key to filter class in child directory',
default='None')
parser.add_argument('-nn',
'--new_name',
                        help='Name of the filtered class after renaming',
default='None')
arguments = parser.parse_args()
return arguments
if __name__=='__main__':
arguments = parse_command_line()
KPI_rename_wrapper(arguments) | tao_dataset_suite-main | internal/utils/util.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for auto labeling."""
| tao_dataset_suite-main | internal/utils/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing implementation of release packaging."""
| tao_dataset_suite-main | release/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Version string for the Data-Services"""
MAJOR = "5"
MINOR = "0.0"
PATCH = "0"
PRE_RELEASE = ''
# Getting the build number.
def get_build_info():
"""Get the build version number."""
# required since setup.py runs a version string and global imports aren't executed.
import os # noqa pylint: disable=import-outside-toplevel
build_file = "build.info"
if not os.path.exists(build_file):
raise FileNotFoundError("Build file doesn't exist.")
patch = 0
with open(build_file, 'r') as bfile:
patch = bfile.read().strip()
assert bfile.closed, "Build file wasn't closed properly."
return patch
try:
PATCH = get_build_info()
except FileNotFoundError:
pass
# Use the following formatting: (major, minor, patch, pre-release)
VERSION = (MAJOR, MINOR, PATCH, PRE_RELEASE)
# Version of the library.
__version__ = '.'.join(map(str, VERSION[:3])) + ''.join(VERSION[3:])
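# For example, with VERSION = ('5', '0.0', '0', ''), __version__ evaluates to
# '5.0.0'; a non-empty PRE_RELEASE such as '.dev0' would be appended verbatim.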
# Version of the file format.
__format_version__ = 2
# Other package info.
__package_name__ = "nvidia-tao-ds"
__description__ = "NVIDIA's package for Data-services for use with TAO Toolkit."
__keywords__ = "nvidia, tao, tf2"
__contact_names__ = "Yu Wang"
__contact_emails__ = "[email protected]"
__license__ = "Apache 2.0"
| tao_dataset_suite-main | release/python/version.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Packaging modules for Data-Services.""" | tao_dataset_suite-main | release/python/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing utility functions required for packaging Data-Services."""
| tao_dataset_suite-main | release/python/utils/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper utils for packaging."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import glob
import os
import setuptools
# Rename all .py files to .py_tmp temporarily.
ignore_list = ['__init__.py', '__version__.py']
LOCAL_DIR = os.path.dirname(os.path.abspath(__file__))
def up_directory(dir_path, n=1):
"""Go up n directories from dir_path."""
dir_up = dir_path
for _ in range(n):
dir_up = os.path.split(dir_up)[0]
return dir_up
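# A quick sketch of the helper (hypothetical path):
#   up_directory('/a/b/c/d', n=2)  # -> '/a/b'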
TOP_LEVEL_DIR = up_directory(LOCAL_DIR, 3)
def remove_prefix(dir_path):
"""Remove a certain prefix from path."""
max_path = 8
prefix = dir_path
while max_path > 0:
prefix = os.path.split(prefix)[0]
if prefix.endswith('ai_infra'):
return dir_path[len(prefix) + 1:]
max_path -= 1
return dir_path
def get_subdirs(path):
"""Get all subdirs of given path."""
dirs = os.walk(path)
return [remove_prefix(x[0]) for x in dirs]
def rename_py_files(path, ext, new_ext, ignore_files):
"""Rename all .ext files in a path to .new_ext except __init__ files."""
files = glob.glob(path + '/*' + ext)
for ignore_file in ignore_files:
files = [f for f in files if ignore_file not in f]
for filename in files:
os.rename(filename, filename.replace(ext, new_ext))
def get_version_details():
"""Simple function to get packages for setup.py."""
# Define env paths.
LAUNCHER_SDK_PATH = os.path.join(TOP_LEVEL_DIR, "release/python")
# Get current __version__.
version_locals = {}
with open(os.path.join(LAUNCHER_SDK_PATH, 'version.py')) as version_file:
exec(version_file.read(), {}, version_locals)
return version_locals
def cleanup():
"""Cleanup directories after the build process."""
req_subdirs = get_subdirs(TOP_LEVEL_DIR)
# Cleanup. Rename all .py_tmp files back to .py and delete pyc files
for dir_path in req_subdirs:
dir_path = os.path.join(TOP_LEVEL_DIR, dir_path)
# TODO: @vpraveen Think about removing python files before the final
# release.
rename_py_files(dir_path, '.py_tmp', '.py', ignore_list)
pyc_list = glob.glob(dir_path + '/*.pyc')
for pyc_file in pyc_list:
os.remove(pyc_file)
def find_packages(package_name):
"""List of packages.
Args:
package_name (str): Name of the package.
Returns:
packages (list): List of packages.
"""
packages = setuptools.find_packages(package_name)
packages = [f"{package_name}.{f}" for f in packages]
packages.append(package_name)
return packages
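# A minimal usage sketch (hypothetical layout): for a package 'nvidia_tao_ds'
# containing a subpackage 'core', this returns ['nvidia_tao_ds.core',
# 'nvidia_tao_ds'], suitable for setuptools.setup(packages=...).
#   packages = find_packages("nvidia_tao_ds")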
| tao_dataset_suite-main | release/python/utils/utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Package module for the Data services."""
| tao_dataset_suite-main | nvidia_tao_ds/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data Analytics module."""
| tao_dataset_suite-main | nvidia_tao_ds/data_analytics/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data analytics config."""
| tao_dataset_suite-main | nvidia_tao_ds/data_analytics/config/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Default config file."""
from dataclasses import dataclass, field
from omegaconf import MISSING
from typing import Optional, List
import multiprocessing
@dataclass
class DataConfig:
"""Dataset configuration template."""
input_format: str = MISSING
output_dir: str = MISSING
image_dir: str = MISSING
ann_path: str = MISSING
@dataclass
class GraphConfig:
"""Graph configuration template."""
height: int = 15
width: int = 15
show_all: bool = False
generate_summary_and_graph: bool = True
@dataclass
class WandbConfig:
"""Wandb configuration template."""
project: Optional[str] = None
entity: Optional[str] = None
save_code: bool = False
name: Optional[str] = None
notes: Optional[str] = None
tags: Optional[List] = None
visualize: bool = False
@dataclass
class ImageConfig:
"""Image configuration template."""
generate_image_with_bounding_box: bool = False
sample_size: int = 100
@dataclass
class ExperimentConfig:
"""Experiment configuration template."""
workers: int = multiprocessing.cpu_count()
    data: DataConfig = field(default_factory=DataConfig)
    image: ImageConfig = field(default_factory=ImageConfig)
    apply_correction: bool = False
    graph: GraphConfig = field(default_factory=GraphConfig)
    wandb: WandbConfig = field(default_factory=WandbConfig)
results_dir: Optional[str] = None
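# A minimal instantiation sketch, useful outside of Hydra (values hypothetical):
#   cfg = ExperimentConfig()
#   cfg.data.input_format = "KITTI"  # DataConfig fields are MISSING until set
#   cfg.workers = 4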
| tao_dataset_suite-main | nvidia_tao_ds/data_analytics/config/default_config.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities to handle coco operations."""
import pandas
from pycocotools.coco import COCO
import os
import json
import logging
logger = logging.getLogger(__name__)
def correct_data(coco_obj, output_dir):
"""
Correct the invalid coco annotations.
Correction criteria :
set bounding box values = 0 if their values are less than 0.
set x_max=img_width if x_max>img_width.
set y_max=img_height if y_max>img_height.
        swap inverted bounding box coordinates.
Args:
coco_obj (DataFormat): object of coco data format.
output_dir (str): output directory.
Return:
No explicit returns.
"""
coco = COCO(coco_obj.ann_file)
for annots in coco.anns.values():
bbox = annots['bbox']
bbox = [bbox[0], bbox[1], bbox[2] + bbox[0], bbox[3] + bbox[1]]
image_id = annots['image_id']
image_data = coco.loadImgs([image_id])[0]
height = image_data["height"]
width = image_data['width']
xmin, ymin, xmax, ymax = bbox[0], bbox[1], bbox[2], bbox[3]
xmin = max(xmin, 0)
xmax = max(xmax, 0)
ymin = max(ymin, 0)
ymax = max(ymax, 0)
if xmax < xmin:
xmin, xmax = xmax, xmin
if ymax < ymin:
ymin, ymax = ymax, ymin
xmax = min(xmax, width)
ymax = min(ymax, height)
annots['bbox'] = [xmin, ymin, xmax - xmin, ymax - ymin]
final_dict = {"annotations": list(coco.anns.values()),
"images": list(coco.imgs.values()),
"categories": list(coco.cats.values())}
# save the corrected coco annotation file.
basename = os.path.basename(coco_obj.ann_file)
save_path = os.path.join(output_dir, f"{basename}")
with open(save_path, "w", encoding='utf-8') as f:
json.dump(final_dict, f)
logger.info(f"Corrected coco file is available at {save_path}")
def get_image_data(coco_obj):
""" Get image width and height.
Args:
coco_obj (DataFormat): object of coco data format.
Returns:
        image_data(dict): Dictionary mapping image name to
            image width, height and path.
"""
coco = COCO(coco_obj.ann_file)
image_dir = coco_obj.image_dir
image_data = {}
for img in coco.imgs.values():
img_fname = img['file_name']
width = img['width']
height = img['height']
image_data[img_fname] = [width, height, os.path.join(image_dir, img_fname)]
return image_data
def create_image_dataframe(image_data):
""" Create image data frame.
Args:
image_data(Dict): image data dictionary.
Returns:
No explicit returns.
"""
image_df = pandas.DataFrame.from_dict(image_data, orient='index',
columns=['img_width', 'img_height', 'path'])
image_df['size'] = image_df['img_width'] * image_df['img_height']
return image_df
def is_valid(bbox, width, height):
""" Check if bbox coordinates are valid.
Args:
bbox(list): bbox coordinates.
width(float): image width.
height(float): image height.
Returns:
Bool: True if coordinates are valid else False.
        reason: list of reasons for invalidity.
"""
reason = []
xmin, ymin, xmax, ymax = bbox[0], bbox[1], bbox[2], bbox[3]
if xmin < 0 or ymin < 0 or xmax < 0 or ymax < 0:
reason.append("INVALID_OUT_OF_BOX_COORD")
if (xmax - xmin) == 0 or (ymax - ymin) == 0:
reason.append("INVALID_ZERO_COORD")
if xmax < xmin or ymax < ymin:
reason.append("INVALID_INVERTED_COORD")
if (xmax > width or ymax > height):
if "INVALID_OUT_OF_BOX_COORD" not in reason:
reason.append("INVALID_OUT_OF_BOX_COORD")
if len(reason) > 0:
return False, reason
return True, None
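# A quick sketch of the validity check (hypothetical boxes on a 100x100 image):
#   is_valid([10, 10, 50, 50], 100, 100)  # -> (True, None)
#   is_valid([60, 10, 50, 50], 100, 100)  # -> (False, ['INVALID_INVERTED_COORD'])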
def create_dataframe(coco_obj):
"""Create DataFrame from coco annotation file.
Args:
coco_obj (DataFormat): object of coco data format.
Returns:
valid_df (Pandas DataFrame): output valid dataframe of kitti data.
invalid_df (Pandas DataFrame): output invalid dataframe of kitti data.
"""
coco = COCO(coco_obj.ann_file)
valid_data_list = []
invalid_data_list = []
category_ids = coco.getCatIds()
categories = coco.loadCats(category_ids)
image_dir = ""
if coco_obj.image_dir:
image_dir = coco_obj.image_dir
category_id_dict = {}
for category in categories:
category_id_dict[category['id']] = category['name']
for annots in coco.anns.values():
bbox = annots['bbox']
bbox = [bbox[0], bbox[1], bbox[2] + bbox[0], bbox[3] + bbox[1]]
image_id = annots['image_id']
category_id = annots['category_id']
category_name = category_id_dict[category_id]
image_data = coco.loadImgs([image_id])[0]
image_name = image_data['file_name']
height = image_data["height"]
width = image_data['width']
bbox_area = (bbox[2] - bbox[0]) * (bbox[3] - bbox[1])
valid, reason = is_valid(bbox, width, height)
image_path = os.path.join(image_dir, image_name)
if valid:
valid_data_list.append([category_name, bbox[0], bbox[1], bbox[2], bbox[3], image_name,
width, height, bbox_area, image_path])
else:
out_of_box, zero_area, inverted_coord = 'False', 'False', 'False'
if "INVALID_OUT_OF_BOX_COORD" in reason:
out_of_box = 'True'
if "INVALID_ZERO_COORD" in reason:
zero_area = 'True'
if "INVALID_INVERTED_COORD" in reason:
inverted_coord = 'True'
invalid_data_list.append([category_name, bbox[0], bbox[1], bbox[2], bbox[3], image_name,
width, height, out_of_box, zero_area, inverted_coord, bbox_area, image_path])
valid_df = pandas.DataFrame(valid_data_list, columns=['type', 'bbox_xmin',
'bbox_ymin', 'bbox_xmax',
'bbox_ymax', 'image_name',
'img_width', 'img_height',
'bbox_area', 'img_path'])
invalid_df = pandas.DataFrame(invalid_data_list, columns=['type', 'bbox_xmin',
'bbox_ymin', 'bbox_xmax',
'bbox_ymax', 'image_name',
'img_width', 'img_height',
'out_of_box_coordinates',
'zero_area_bounding_box',
'inverted_coordinates',
'bbox_area', 'img_path'])
return valid_df, invalid_df
| tao_dataset_suite-main | nvidia_tao_ds/data_analytics/utils/coco.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Routines for connecting with Weights and Biases client."""
import logging
import os
import random
import wandb
from nvidia_tao_ds.core.mlops.wandb import (
check_wandb_logged_in,
initialize_wandb,
is_wandb_initialized
)
logger = logging.getLogger(__name__)
def login_and_initialize_wandb(wandb_config, output_dir):
""" Login and initialize wandb.
Args:
wandb_config (DictConfig): wandb config.
output_dir (str): output directory.
Return:
No explicit return
"""
logged_in = check_wandb_logged_in()
if not is_wandb_initialized():
initialize_wandb(output_dir, project=wandb_config.project if wandb_config.project else None,
entity=wandb_config.entity if wandb_config.entity else None,
save_code=wandb_config.save_code if wandb_config.save_code else None,
name=wandb_config.name if wandb_config.name else None,
notes=wandb_config.notes if wandb_config.notes else None,
tags=wandb_config.tags if wandb_config.tags else None,
wandb_logged_in=logged_in)
def create_barplot(data, title, name):
""" Create barplot in wandb.
Args:
data (Pandas dataframe): data to create barplot.
title (str): barplot title.
name (str): wandb plot log name.
Return:
No explicit return
"""
table = wandb.Table(data=data)
barplot = wandb.plot.bar(table, data.columns[0], data.columns[1], title=title)
wandb.log({name: barplot})
def create_table(data, name):
""" Create table in wandb.
Args:
data (Pandas dataframe): data to create table.
name (str): wandb table log name.
Return:
No explicit return
"""
table = wandb.Table(data=data)
wandb.log({name: table})
def generate_images_with_bounding_boxes(df, wandb_config, output_dir, image_sample_size):
"""
Generate images with Bounding boxes in wandb.
Args:
df (Pandas dataframe): valid dataframe used to draw images with bboxes.
wandb_config (dict): wandb config.
output_dir (str): output directory.
Return:
No explicit return
"""
table = wandb.Table(columns=['Name', 'Images'])
classes = df['type'].unique()
class_id_to_label = {}
class_label_to_id = {}
    for i, classname in enumerate(classes):
        class_id_to_label[i] = classname
        class_label_to_id[classname] = i
images = df['img_path'].unique()
if len(images) > image_sample_size:
        images = random.sample(list(images), image_sample_size)
for img in images:
temp_df = df.loc[df['img_path'].str.contains(img), :]
box_data = []
for _, box in temp_df.iterrows():
xmin = box["bbox_xmin"]
ymin = box["bbox_ymin"]
xmax = box["bbox_xmax"]
ymax = box["bbox_ymax"]
class_id = class_label_to_id[box["type"]]
box_data.append({"position": {
"minX": xmin,
"minY": ymin,
"maxX": xmax,
"maxY": ymax
},
"class_id": class_id,
"domain": "pixel"
})
box_img = wandb.Image(img, boxes={
"ground_truth": {"box_data": box_data,
"class_labels": class_id_to_label
}})
table.add_data(os.path.basename(img), box_img)
logger.info("It might take some time to log images in wandb.Please wait...")
wandb.log({"image_table": table})
# run.finish()
| tao_dataset_suite-main | nvidia_tao_ds/data_analytics/utils/wandb.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities to handle kitti operations."""
from concurrent.futures import ProcessPoolExecutor
from concurrent.futures import as_completed, wait
import glob
import os
import multiprocessing
import numpy as np
import pandas as pd
from PIL import Image
from nvidia_tao_ds.data_analytics.utils.constant import COMMON_FILE_NAMES
import logging
logger = logging.getLogger(__name__)
IMAGE_EXTENSIONS = [".png", ".jpg", ".jpeg"]
def list_files(kitti_obj):
""" List image and label files.
Args:
kitti_obj(DataFormat): object of kitti data format.
Returns:
No explicit returns.
"""
image_dir = kitti_obj.image_dir
label_dir = kitti_obj.label_dir
images = []
# List image files.
for ext in IMAGE_EXTENSIONS:
images.extend(
glob.glob(
os.path.join(image_dir, f'**/*{ext}'),
recursive=True
)
)
# List label files.
labels = glob.glob(os.path.join(label_dir, '**/*.txt'), recursive=True)
images = sorted(images)
labels = sorted(labels)
if len(images) == 0:
kitti_obj.image_paths = None
kitti_obj.label_paths = labels
return
    # Strip the directory prefix and extension (handles .png/.jpg/.jpeg)
    # so image and label names can be matched below.
    image_names = [x[x.rfind('/'):x.rfind('.')] for x in images]
    label_names = [x[x.rfind('/'):x.rfind('.')] for x in labels]
out_img = []
out_lbl = []
i = 0
j = 0
while i < len(image_names) and j < len(label_names):
if image_names[i] < label_names[j]:
i += 1
elif image_names[i] > label_names[j]:
j += 1
else:
out_img.append(images[i])
out_lbl.append(labels[j])
i += 1
j += 1
kitti_obj.image_paths = out_img
kitti_obj.label_paths = out_lbl
def create_image_dataframe(image_data):
""" Create image data frame.
Args:
image_data(Dict): image data dictionary.
Returns:
No explicit returns.
"""
image_df = pd.DataFrame.from_dict(image_data, orient='index',
columns=['img_width', 'img_height', 'path'])
image_df['size'] = image_df['img_width'] * image_df['img_height']
return image_df
def create_dataframe(valid_kitti_file_path, invalid_kitti_file_path, image_data):
"""Create DataFrame from kitti files.
Args:
        valid_kitti_file_path (list): Paths to the merged valid kitti files.
        invalid_kitti_file_path (str): Path to the invalid kitti data file.
        image_data(dict): Dictionary of image data corresponding
            to kitti data.(None if no images are given).
    Returns:
        valid_df (Pandas DataFrame): output dataframe of valid kitti data.
        invalid_df (Pandas DataFrame): output dataframe of invalid kitti data.
"""
dtype = {'type': str, 'truncated': np.dtype('float'), 'occluded': np.dtype('int32'),
'alpha': np.dtype('float'), 'bbox_xmin': np.dtype('float'),
'bbox_ymin': np.dtype('float'), 'bbox_xmax': np.dtype('float'),
'bbox_ymax': np.dtype('float'), 'dim_height': np.dtype('float'),
'dim_width': np.dtype('float'), 'dim_length': np.dtype('float'),
'loc_x': np.dtype('float'), 'loc_y': np.dtype('float'),
'loc_z': np.dtype('float'), 'rotation_y': np.dtype('float'),
'path': str}
name_list = ['type', 'truncated', 'occluded', 'alpha', 'bbox_xmin', 'bbox_ymin',
'bbox_xmax', 'bbox_ymax', 'dim_height', 'dim_width', 'dim_length',
'loc_x', 'loc_y', 'loc_z', 'rotation_y', 'path']
if image_data is not None:
name_list += ['img_width', 'img_height', 'img_path']
dtype['img_width'] = np.dtype('float')
dtype['img_height'] = np.dtype('float')
dtype['img_path'] = str
df_list = [pd.read_csv(filepath, sep=' ', names=name_list, dtype=dtype, index_col=False) for filepath in valid_kitti_file_path]
valid_df = pd.concat(df_list)
valid_df['bbox_area'] = (valid_df['bbox_xmax'] - valid_df['bbox_xmin']) * (valid_df['bbox_ymax'] - valid_df['bbox_ymin'])
dtype['out_of_box_coordinates'] = np.dtype('bool')
dtype['zero_area_bounding_box'] = np.dtype('bool')
dtype['inverted_coordinates'] = np.dtype('bool')
name_list += ['out_of_box_coordinates', 'zero_area_bounding_box', 'inverted_coordinates']
invalid_df = pd.read_csv(invalid_kitti_file_path, sep=' ', names=name_list, dtype=dtype, index_col=False)
invalid_df['bbox_area'] = (invalid_df['bbox_xmax'] - invalid_df['bbox_xmin']) * (invalid_df['bbox_ymax'] - invalid_df['bbox_ymin'])
return valid_df, invalid_df
def validate_kitti_row(kitti_row, img_height=None, img_width=None):
""" Validate kitti row.
Args:
kitti_row (str): kitti row.
img_height (int): corresponding image height.
img_width (int): corresponding image width.
Returns:
Bool,str: boolean status if valid or not , description
"""
columns = kitti_row.strip().split(' ')
# data is not in kitti format if kitti columns are 15 or 16
# data is invalid in case of out of bound bbox and inverted coordinates.
if len(columns) not in (15, 16):
return False, "NOT_KITTI"
try:
        truncated, occluded = float(columns[1]), int(columns[2])
x_min, y_min = float(columns[4]), float(columns[5])
x_max, y_max = float(columns[6]), float(columns[7])
except Exception:
return False, "NOT_KITTI"
return_status = True
invalid_reason_list = []
if x_min < 0 or x_max < 0 or y_min < 0 or y_max < 0:
return_status = False
invalid_reason_list.append("INVALID_OUT_OF_BOX_COORD")
if (x_max - x_min) == 0 or (y_max - y_min) == 0:
return_status = False
invalid_reason_list.append("INVALID_ZERO_COORD")
if x_max < x_min or y_max < y_min:
return_status = False
invalid_reason_list.append("INVALID_INVERTED_COORD")
if img_width and img_height and (x_max > img_width or y_max > img_height):
return_status = False
if "INVALID_OUT_OF_BOX_COORD" not in invalid_reason_list:
invalid_reason_list.append("INVALID_OUT_OF_BOX_COORD")
if not return_status:
return return_status, invalid_reason_list
if truncated < 0 or truncated > 1 or occluded not in (0, 1, 2, 3):
return False, "GOOD_TO_CORRECT"
return True, "VALID_DATA"
def validate_and_merge(filepaths, lock, i, not_kitti_file, invalid_kitti_data,
good_to_correct_kitti_data, image_data=None):
"""
Validate and merge kitti files in one file.
Args:
filepaths (list): List of kitti filepaths.
lock (object): Multiprocessing lock object.
i (int): Suffix for merged file to be created (thread_multi_{i}).
not_kitti_file (str): Txt file Path where details will be logged for
files that are not kitti or images.
invalid_kitti_data (str): File Path where invalid kitti rows will be
logged.
good_to_correct_kitti_data(str): File Path where good to correct kitti
rows will be logged.
image_data(dict): Dictionary of image data corresponding to kitti data.
(None if no images are given).
Returns:
mergedfilepath (str): Path of text file in which kitti files are
merged.
"""
filename = f"thread_multi_{str(i)}.txt"
merged_file_path = os.path.join(COMMON_FILE_NAMES['INTERMEDIATE_KITTI_FOLDER'],
filename)
not_kitti_files_list = set()
invalid_kitti_row = []
good_to_correct_kitti_row = []
with open(merged_file_path, 'w+', encoding='utf-8') as handle1:
for filepath in filepaths:
basename = os.path.basename(filepath).split(".")[0]
try:
with open(filepath, 'r', encoding='utf-8') as handle2:
content = ''
line_number = 0
for line in handle2:
if not line.strip():
continue
columns = line.strip().split(' ')
if image_data:
img_width, img_height, img_path = (image_data[basename][0],
image_data[basename][1], image_data[basename][2])
columns = columns[0:15] + [filepath, str(img_width),
str(img_height), img_path]
else:
columns = columns[0:15] + [filepath]
img_height = None
img_width = None
valid, desc = validate_kitti_row(line, img_height,
img_width)
if not valid:
if not isinstance(desc, list):
if desc == "NOT_KITTI":
not_kitti_files_list.add(filepath + " " +
str(line_number))
line_number += 1
elif desc == "GOOD_TO_CORRECT":
good_to_correct_kitti_row.append(" ".join(columns).lower())
else:
out_of_box, zero_area, inverted_coord = 'False', 'False', 'False'
if "INVALID_OUT_OF_BOX_COORD" in desc:
out_of_box = 'True'
if "INVALID_ZERO_COORD" in desc:
zero_area = 'True'
if "INVALID_INVERTED_COORD" in desc:
inverted_coord = 'True'
columns = columns + [out_of_box, zero_area, inverted_coord]
invalid_kitti_row.append(" ".join(columns).lower())
continue
content += " ".join(columns).lower() + "\n"
line_number += 1
handle1.write(content)
except Exception:
not_kitti_files_list.add(filepath)
lock.acquire()
with open(not_kitti_file, 'a+', encoding='utf-8') as handle:
handle.write("\n")
handle.write('\n'.join(not_kitti_files_list))
with open(invalid_kitti_data, 'a+', encoding='utf-8') as handle:
handle.write("\n")
handle.write('\n'.join(invalid_kitti_row))
with open(good_to_correct_kitti_data, 'a+', encoding='utf-8') as handle:
handle.write("\n")
handle.write('\n'.join(good_to_correct_kitti_row))
lock.release()
return merged_file_path
def validate_and_merge_kitti_files(files, output_dir, num_workers, image_data=None):
"""Validate and Merge Kitti Files using multiprocessing.
Args:
files (str): Unix path to the kitti files.
output_dir (str): Path to output directory.
num_workers (int): Number of workers.
image_data(dict): Dictionary of image data corresponding
to kitti data.(None if no images are given).
Returns:
mergedfilenames(list): List of all merged file paths.
"""
merged_file_names = []
workers = int(num_workers)
not_kitti_file = os.path.join(output_dir, 'not_kitti.txt')
if not os.path.exists(COMMON_FILE_NAMES['INTERMEDIATE_KITTI_FOLDER']):
os.makedirs(COMMON_FILE_NAMES['INTERMEDIATE_KITTI_FOLDER'])
invalid_kitti_data_file = os.path.join(COMMON_FILE_NAMES['INTERMEDIATE_KITTI_FOLDER'],
COMMON_FILE_NAMES['INVALID_KITTI'])
good_to_correct_kitti_data_file = os.path.join(COMMON_FILE_NAMES['INTERMEDIATE_KITTI_FOLDER'],
COMMON_FILE_NAMES['GOOD_TO_CORRECT_KITTI'])
with open(not_kitti_file, 'w+', encoding='utf-8'):
pass
with open(invalid_kitti_data_file, 'w+', encoding='utf-8'):
pass
with open(good_to_correct_kitti_data_file, 'w+', encoding='utf-8'):
pass
# create the process pool
with ProcessPoolExecutor(workers) as executor:
futures = []
m = multiprocessing.Manager()
lock = m.Lock()
if len(files) < workers:
chunksize = 1
else:
chunksize = round(len(files) / workers)
# split the merge operations into chunks
for i in range(0, len(files), chunksize):
# select a chunk of filenames
filepaths = files[i:(i + chunksize)]
# submit the task
future = executor.submit(validate_and_merge, filepaths, lock,
i, not_kitti_file, invalid_kitti_data_file,
good_to_correct_kitti_data_file, image_data)
futures.append(future)
# process all results
for future in as_completed(futures):
# open the file and load the data
filename = future.result()
merged_file_names.append(filename)
merged_file_names.append(good_to_correct_kitti_data_file)
return merged_file_names
def get_image_data(kitti_obj):
""" Get image width and height .
Args:
kitti_obj (DataFormat): object of kitti data format.
Returns:
image_data(dict): Dictionary of image name.
mapped to image width and height .
"""
filepaths = kitti_obj.image_paths
image_data = {}
for filepath in filepaths:
basename = os.path.basename(filepath).rsplit(".", 1)[0]
img = Image.open(filepath)
width, height = img.size
image_data[basename] = [width, height, filepath]
return image_data
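# A minimal usage sketch (hypothetical paths), assuming KittiData from
# nvidia_tao_ds.data_analytics.utils.data_format and that list_files()
# has populated kitti_obj.image_paths:
#   obj = KittiData(image_dir="/data/images", label_dir="/data/labels")
#   list_files(obj)
#   image_data = get_image_data(obj)  # {'000001': [width, height, path], ...}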
def correct_data(invalid_df):
"""
Correct the invalid kitti dataframe.
correction criteria :
set bounding box values = 0 if their values are less than 0.
set x_max=img_width if x_max>img_width.
set y_max=img_height if y_max>img_height.
        swap inverted bounding box coordinates.
Args:
invalid_df(Pandas Dataframe): invalid kitti dataframe.
Return:
valid kitti dataframe.
"""
if 'img_width' in invalid_df.columns and 'img_height' in invalid_df.columns:
invalid_df.loc[invalid_df['bbox_xmax'] > invalid_df['img_width'], 'bbox_xmax'] = invalid_df['img_width']
invalid_df.loc[invalid_df['bbox_ymax'] > invalid_df['img_height'], 'bbox_ymax'] = invalid_df['img_height']
invalid_df.loc[invalid_df['bbox_xmax'] < 0, 'bbox_xmax'] = 0
invalid_df.loc[invalid_df['bbox_xmin'] < 0, 'bbox_xmin'] = 0
invalid_df.loc[invalid_df['bbox_ymin'] < 0, 'bbox_ymin'] = 0
invalid_df.loc[invalid_df['bbox_ymax'] < 0, 'bbox_ymax'] = 0
temp_rows = invalid_df['bbox_xmax'] < invalid_df['bbox_xmin']
invalid_df.loc[temp_rows, ['bbox_xmax', 'bbox_xmin']] = (invalid_df.loc[temp_rows, ['bbox_xmin', 'bbox_xmax']].values)
temp_rows = invalid_df['bbox_ymax'] < invalid_df['bbox_ymin']
invalid_df.loc[temp_rows, ['bbox_ymax', 'bbox_ymin']] = (invalid_df.loc[temp_rows, ['bbox_ymin', 'bbox_ymax']].values)
return invalid_df
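# A quick sketch of the correction rules (hypothetical 100x100 frame):
# a box (-5, 10, 120, 50) is clipped to (0, 10, 100, 50), and an inverted
# pair such as (50, 10, 20, 40) is swapped to (20, 10, 50, 40).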
def write_to_csv(paths, kitti_folder, df):
"""
    Write dataframe rows back to kitti label files.
Args:
paths(List): path to csv files.
kitti_folder(str): path to folder to save kitti files.
df(Pandas dataframe): dataframe.
Return:
No explicit return.
"""
for filepath in paths:
temp_df = df.loc[df['path'] == filepath, :]
temp_df = temp_df.drop(['path', 'bbox_area', 'out_of_box_coordinates',
'zero_area_bounding_box', 'inverted_coordinates'], axis=1)
if 'img_height' in temp_df.columns:
temp_df = temp_df.drop(['img_height', 'img_width', 'img_path'], axis=1)
basename = os.path.basename(filepath)
kitti_path = os.path.join(kitti_folder, basename)
temp_df.to_csv(kitti_path, header=None, index=None, sep=' ', mode='w+')
def create_correct_kitti_files(df, corrected_df, output_dir, workers):
"""
Create corrected kitti files back from dataframe.
Args:
df(Pandas dataframe): valid dataframe used to create csv.
corrected_df(Pandas dataframe): valid dataframe used to get filenames to rewrite.
output_dir(str): output directory.
workers(int): number of workers for multiprocessing.
Return:
No explicit return.
"""
kitti_folder = os.path.join(output_dir, "corrected_kitti_files")
if not os.path.exists(kitti_folder):
os.makedirs(kitti_folder)
kitti_files = corrected_df['path'].unique()
logging.info(f"Total kitti files to be corrected - {len(kitti_files)}")
with ProcessPoolExecutor(workers) as executor:
futures = []
if len(kitti_files) < workers:
chunksize = 1
else:
chunksize = round(len(kitti_files) / workers)
# split the operations into chunks
for i in range(0, len(kitti_files), chunksize):
# select a chunk of filenames
filepaths = kitti_files[i:(i + chunksize)]
temp_df = df.loc[df['path'].isin(filepaths), :]
# submit the task
future = executor.submit(write_to_csv, filepaths, kitti_folder, temp_df)
futures.append(future)
wait(futures)
logger.info(f"Corrected kitti files are available at {kitti_folder}")
| tao_dataset_suite-main | nvidia_tao_ds/data_analytics/utils/kitti.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for TAO analyze and validate."""
| tao_dataset_suite-main | nvidia_tao_ds/data_analytics/utils/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities to handle different data formats."""
class DataFormat:
"""Class data format"""
pass
class CocoData(DataFormat):
"""COCO data format"""
def __init__(self, ann_file, image_dir=None) -> None:
"""Initialize a COCO object.
Args:
ann_file(str): path to annotation file.
image_dir(str): path to image dir.
"""
self.ann_file = ann_file
self.image_dir = image_dir
class KittiData(DataFormat):
"""KITTI data format"""
def __init__(self, image_dir=None, label_dir=None) -> None:
"""Initialize a KITTI object.
Args:
image_dir(str): path to image dir.
label_dir(str): path to label dir.
"""
self.image_dir = image_dir
self.label_dir = label_dir
self.image_paths = None
self.label_paths = None
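# A minimal construction sketch (hypothetical paths):
#   coco_data = CocoData("/data/annotations.json", image_dir="/data/images")
#   kitti_data = KittiData(image_dir="/data/images", label_dir="/data/labels")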
| tao_dataset_suite-main | nvidia_tao_ds/data_analytics/utils/data_format.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities to handle constants."""
COMMON_FILE_NAMES = {'INTERMEDIATE_KITTI_FOLDER': "intermediate_kitti_files",
'GOOD_TO_CORRECT_KITTI': "good_to_correct_kitti.txt",
'INVALID_KITTI': "invalid_kitti_data.txt"}
| tao_dataset_suite-main | nvidia_tao_ds/data_analytics/utils/constant.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities to handle image operations."""
import os
from concurrent.futures import ProcessPoolExecutor
from concurrent.futures import as_completed
import logging
import re
import random
from PIL import Image, ImageDraw
from tqdm import tqdm
def write_to_image(filepaths, output_image_folder, df, object_color_dict, dataformat=None):
"""
Draw Bounding boxes in images.
Args:
df(Pandas dataframe): valid dataframe used to draw bboxes.
filepaths(list): list of image files to draw.
output_image_folder(str): output directory.
        object_color_dict(dict): dictionary of object names to their unique color.
dataformat(str): input data format.
Return:
No explicit return.
"""
for filepath in filepaths:
basename = os.path.basename(filepath).split(".")[0]
# read image
im = Image.open(filepath)
if dataformat == "KITTI":
            # Escape file names so regex metacharacters don't break matching.
            pattern = '.*' + re.escape(basename) + '.txt'
            pattern = re.compile(pattern)
            temp_df = df.loc[df['path'].str.contains(pattern), :]
        else:
            pattern = re.escape(os.path.basename(filepath))
            pattern = re.compile(pattern)
temp_df = df.loc[df['image_name'].str.contains(pattern), :]
for _, row in temp_df.iterrows():
bbox = row['bbox_xmin'], row['bbox_ymin'], row['bbox_xmax'], row['bbox_ymax']
draw = ImageDraw.Draw(im)
obj_type = row['type']
color = object_color_dict[obj_type]
draw.rectangle((bbox[0], bbox[1], bbox[2], bbox[3]), fill=None, outline=color)
draw.text((bbox[0], bbox[1]), obj_type)
output_filepath = os.path.join(output_image_folder, os.path.basename(filepath))
im.save(output_filepath)
def assign_object_colors(objects_names):
"""
Assign color to each object.
Args:
        objects_names(List): list of object names.
    Return:
        color_dict(dict): dictionary of object names to their unique color.
"""
color_list = []
objects_names = list(objects_names)
for _ in range(0, len(objects_names)):
r = random.randint(0, 255)
g = random.randint(0, 255)
b = random.randint(0, 255)
while ((r, g, b) in color_list):
r = random.randint(0, 255)
g = random.randint(0, 255)
b = random.randint(0, 255)
color_list.append((r, g, b))
color_dict = {}
for i in range(0, len(objects_names)):
color_dict[objects_names[i]] = color_list[i]
return color_dict
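# A quick sketch (hypothetical classes): every class name is mapped to a
# unique random RGB tuple, e.g.
#   assign_object_colors(['car', 'person'])
#   # -> {'car': (12, 200, 33), 'person': (87, 5, 140)}  (values are random)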
def generate_images_with_bounding_boxes(df, image_data, output_dir, image_sample_size, workers, dataformat=None):
"""
Draw Bounding boxes in images.
Args:
df(Pandas dataframe): valid dataframe used to draw bboxes.
image_data(Dict): Dictionary to hold image data.
output_dir(str): output directory.
workers(int): number of workers for multiprocessing.
dataformat(str): input data format.
Return:
No explicit return
"""
output_image_folder = os.path.join(output_dir, "image_with_bounding_boxes")
if not os.path.exists(output_image_folder):
os.makedirs(output_image_folder)
all_image_paths = [i_data[2] for i_data in image_data.values()]
if len(all_image_paths) > image_sample_size:
        all_image_paths = random.sample(list(set(all_image_paths)), image_sample_size)
logging.info(f"Total image files- {len(all_image_paths)}")
object_color_dict = assign_object_colors(df['type'].unique())
if dataformat == "KITTI":
df = df.drop(["truncated", "occluded", "alpha", "rotation_y", "loc_x",
"loc_y", "loc_z", "dim_height", "dim_width", "dim_length"], axis=1)
tq = tqdm(total=len(all_image_paths), position=0, leave=True)
with ProcessPoolExecutor(workers) as executor:
futures = []
if len(all_image_paths) < workers:
chunksize = 1
else:
chunksize = round(len(all_image_paths) / workers)
# split the operations into chunks
for i in range(0, len(all_image_paths), chunksize):
# select a chunk of filenames
filepaths = all_image_paths[i:(i + chunksize)]
if dataformat == "COCO":
                patterns = [re.escape(os.path.basename(filepath)) for filepath in filepaths]
patterns = re.compile('|'.join(patterns))
temp_df = df.loc[df['image_name'].str.contains(patterns), :]
else:
                patterns = [re.escape(os.path.basename(filepath).split(".")[0] + ".txt") for filepath in filepaths]
patterns = re.compile('|'.join(patterns))
temp_df = df.loc[df['path'].str.contains(patterns), :]
# submit the task
future = executor.submit(write_to_image, filepaths, output_image_folder, temp_df, object_color_dict, dataformat)
futures.append(future)
for future in as_completed(futures):
tq.update(chunksize)
tq.close()
logging.info(f"Images with bounding boxes are available at {output_image_folder}")
| tao_dataset_suite-main | nvidia_tao_ds/data_analytics/utils/image.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data Analytics scripts."""
| tao_dataset_suite-main | nvidia_tao_ds/data_analytics/scripts/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Entrypoint script to run TAO validate."""
import logging
import os
import time
import sys
import pandas as pd
from nvidia_tao_ds.data_analytics.utils import kitti, coco, data_format
from nvidia_tao_ds.core.decorators import monitor_status
from nvidia_tao_ds.core.hydra.hydra_runner import hydra_runner
from nvidia_tao_ds.data_analytics.config.default_config import ExperimentConfig
from nvidia_tao_ds.data_analytics.utils.constant import COMMON_FILE_NAMES
logger = logging.getLogger(__name__)
def class_balance_info(df):
"""Print class balance summary.
Args:
df (Pandas Dataframe): Dataframe for valid kitti rows.
Return:
No explicit return.
"""
count_df = df['type'].value_counts(ascending=False, normalize=True).rename(
'count_num').reset_index()
count_df['count_num'] = count_df['count_num'] * 100
count_df = count_df.rename(columns={"index": "Object_tags", "count_num": "Percentage"})
logger.info("Review below table to find if data for object tags is imbalanced")
logger.info(count_df.to_string())
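# A sketch of the printed summary (hypothetical counts):
#   Object_tags  Percentage
#   car          60.0
#   person       40.0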
def validate_summary(valid_df, invalid_df, input_format):
    """Show validation summary.
    Args:
        valid_df (Pandas Dataframe): Valid annotation dataframe.
        invalid_df (Pandas Dataframe): Invalid annotation dataframe.
        input_format (str): Input data format.
Return:
No explicit returns.
"""
total_rows = len(valid_df) + len(invalid_df)
num_of_invalid_rows = len(invalid_df)
invalid_percentage = (num_of_invalid_rows / total_rows) * 100
invalid_percentage = round(invalid_percentage, 2)
logger.info(f"Number of total annotations : {total_rows}\
\nNumber of invalid annotations : {num_of_invalid_rows}")
if invalid_percentage > 5:
logger.warning(f"WARNING: Number of invalid annotations are {invalid_percentage}"
" of total annotations , It is advisable"
" to correct the data before training.")
else:
logger.info(f"Number of invalid annotations are {invalid_percentage}"
" of total annotations,")
if 'img_width' in invalid_df.columns:
oob_condition = ((invalid_df['bbox_xmin'] < 0) |
(invalid_df['bbox_ymin'] < 0) |
(invalid_df['bbox_ymax'] < 0) |
(invalid_df['bbox_xmax'] < 0) |
(invalid_df['bbox_xmax'] > invalid_df['img_width']) |
(invalid_df['bbox_ymax'] > invalid_df['img_height']))
else:
oob_condition = ((invalid_df['bbox_xmin'] < 0) |
(invalid_df['bbox_ymin'] < 0) |
(invalid_df['bbox_ymax'] < 0) |
(invalid_df['bbox_xmax'] < 0))
out_of_bound_count = len(invalid_df[oob_condition])
inverted_cord_count = len(invalid_df[(invalid_df['bbox_ymax'] > 0) &
(invalid_df['bbox_xmax'] > 0) &
(invalid_df['bbox_ymin'] > 0) &
(invalid_df['bbox_xmin'] > 0) &
((invalid_df['bbox_xmax'] < invalid_df['bbox_xmin']) |
(invalid_df['bbox_ymax'] < invalid_df['bbox_ymin']))])
out_of_bound_percentage = round((out_of_bound_count / total_rows) * 100, 2)
inverted_cord_percentage = round((inverted_cord_count / total_rows) * 100, 2)
logger.info("Number and Percent of annotations with out of bound "
f"coordinates {out_of_bound_count}, {out_of_bound_percentage}%")
logger.info("Number and Percent of annotations with inverted "
f"coordinates {inverted_cord_count}, {inverted_cord_percentage}%")
class_balance_info(valid_df)
@monitor_status(mode='KITTI validation')
def validate_dataset_kitti(config):
"""TAO KITTI dataset validate.
Args:
config (Hydra config): Config element of the analyze config.
"""
start_time = time.perf_counter()
kitti_obj = data_format.KittiData(config.data.image_dir, config.data.ann_path)
if not os.path.isdir(config.data.ann_path):
logger.info("Please provide path of kitti label directory in config data.ann_path.")
sys.exit(1)
kitti.list_files(kitti_obj)
if kitti_obj.image_paths is None:
logger.info("Image Directory not found.Processing only label files")
image_data = None
else:
# Get image data (image width and height)
image_data = kitti.get_image_data(kitti_obj)
if kitti_obj.label_paths is None:
logger.info("kitti files not Found. Exiting ")
sys.exit(1)
valid_kitti_filepaths = kitti.validate_and_merge_kitti_files(kitti_obj.label_paths,
config.data.output_dir,
config.workers,
image_data)
invalid_filepath = os.path.join(COMMON_FILE_NAMES['INTERMEDIATE_KITTI_FOLDER'],
COMMON_FILE_NAMES['INVALID_KITTI'])
# Dataframe creation for valid and invalid kitti data
valid_df, invalid_df = kitti.create_dataframe(valid_kitti_filepaths,
invalid_filepath,
image_data)
validate_summary(valid_df, invalid_df, config.data.input_format)
if config.apply_correction:
corrected_df = kitti.correct_data(invalid_df)
df = pd.concat([valid_df, corrected_df])
kitti.create_correct_kitti_files(df, corrected_df, config.data.output_dir, config.workers)
logger.debug(f"Total time taken : {time.perf_counter() - start_time}")
@monitor_status(mode='COCO validation')
def validate_dataset_coco(config):
"""TAO COCO dataset validate.
Args:
config (Hydra config): Config element of the analyze config.
"""
start_time = time.perf_counter()
if not os.path.isfile(config.data.ann_path):
logger.info("Please provide path of coco annotation file in config data.ann_path.")
sys.exit(1)
coco_obj = data_format.CocoData(config.data.ann_path, config.data.image_dir)
    # Dataframe creation for valid and invalid coco data
valid_df, invalid_df = coco.create_dataframe(coco_obj)
validate_summary(valid_df, invalid_df, config.data.input_format)
if config.apply_correction:
# correct the coco file and write into output_dir
coco.correct_data(coco_obj, config.data.output_dir)
logger.debug(f"Total time taken : {time.perf_counter() - start_time}")
spec_root = os.path.dirname(os.path.abspath(__file__))
@hydra_runner(
config_path=os.path.join(spec_root, "../experiment_specs"),
config_name="validate", schema=ExperimentConfig
)
def main(cfg: ExperimentConfig):
"""TAO Validate main wrapper function."""
try:
if not os.path.exists(cfg.data.output_dir):
os.makedirs(cfg.data.output_dir)
cfg.results_dir = cfg.results_dir or cfg.data.output_dir
if cfg.data.input_format == "COCO":
validate_dataset_coco(cfg)
elif cfg.data.input_format == "KITTI":
validate_dataset_kitti(cfg)
else:
logger.info(f"Data format {cfg.data.input_format} is not supported.")
sys.exit(1)
except KeyboardInterrupt:
logger.info("Aborting execution.")
sys.exit(1)
    except Exception as e:
        logger.info(f"Validate run failed with error: {e}")
        sys.exit(1)
if __name__ == '__main__':
main()
| tao_dataset_suite-main | nvidia_tao_ds/data_analytics/scripts/validate.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Entrypoint script to run TAO analyze."""
import pandas as pd
import os
import time
import logging
import numpy as np
import sys
import seaborn as sns
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
from matplotlib.font_manager import FontProperties
from nvidia_tao_ds.data_analytics.utils import kitti, data_format, coco, image, wandb
from nvidia_tao_ds.data_analytics.utils.constant import COMMON_FILE_NAMES
from nvidia_tao_ds.core.decorators import monitor_status
from nvidia_tao_ds.core.hydra.hydra_runner import hydra_runner
from nvidia_tao_ds.core.mlops.wandb import is_wandb_initialized
from nvidia_tao_ds.data_analytics.config.default_config import ExperimentConfig
logger = logging.getLogger(__name__)
def configure_subgraph(axn, xlabel=None, ylabel=None, xtickrotate=None,
ytickrotate=None, xticklabelformat=None,
yticklabelformat=None, xticks=None, yticks=None):
""" Configure graph properties.
Args:
        axn (matplotlib Axes): graph axis object,
        xlabel (str): Label for x axis,
        ylabel (str): Label for y axis,
        xtickrotate (tuple): (rotation, alignment) for x axis tick labels,
        ytickrotate (tuple): (rotation, alignment) for y axis tick labels,
        xticklabelformat (str): Format of x axis tick labels,
        yticklabelformat (str): Format of y axis tick labels,
        xticks (iterable): set x ticks,
        yticks (iterable): set y ticks
Return:
No explicit Return.
"""
if xlabel:
axn.set_xlabel(xlabel)
if ylabel:
axn.set_ylabel(ylabel)
if xticklabelformat:
axn.ticklabel_format(style=xticklabelformat, axis='x')
if yticklabelformat:
axn.ticklabel_format(style=yticklabelformat, axis='y')
if xticks:
axn.set_xticks(xticks)
if yticks:
axn.set_yticks(yticks)
if xtickrotate:
axn.tick_params(axis='x', labelrotation=xtickrotate[0])
if ytickrotate:
axn.tick_params(axis='y', labelrotation=ytickrotate[0])
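# Minimal usage sketch (assumed values): rotate the x tick labels of the
# current axis by 90 degrees and pin the y ticks to a fixed range.
#
#   import matplotlib.pyplot as plt
#   ax = plt.gca()
#   configure_subgraph(ax, xlabel="Object Name", ylabel="Count",
#                      xtickrotate=(90, "right"), yticks=range(0, 100, 10))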
def object_count_visualize(valid_df, output_dir, graph_attr, wandb_attr):
""" Create Graphs for Object Count.
Args:
valid_df (Pandas DataFrame): valid kitti dataframe
output_dir (str): Result Directory.
graph_attr (DictConfig): graph attributes(
height - to set graph height
width - to set graph width
show_all - to show all object on graph,
by default maximum 100 object will be shown.)
wandb_attr (DictConfig): wandb attributes
Return:
No explicit Return
"""
# per Object Count
count_df = valid_df['type'].value_counts(ascending=False).rename(
'count_num').reset_index()
figuresizes = (graph_attr.width, graph_attr.height)
show_all = True
if not graph_attr.show_all and len(count_df) > 100:
show_all = False
if not show_all:
graph_data = count_df.head(100)
else:
graph_data = count_df
# Create graph for object count
if wandb_attr.visualize:
graph_data = graph_data.rename(columns={'index': 'Object Name', 'count_num': 'Count'})
wandb.create_barplot(graph_data, "Object Name Vs Count", "object_count_chart1")
else:
pdf = PdfPages(os.path.join(output_dir, 'Object_count.pdf'))
fig = plt.figure(figsize=figuresizes)
if max(count_df['count_num']) - min(count_df['count_num']) > 10:
binrange = range(min(count_df['count_num']),
max(count_df['count_num']),
int((max(count_df['count_num']) - min(count_df['count_num'])) / 10))
else:
binrange = range(min(count_df['count_num']), max(count_df[
'count_num']))
ax = plt.gca()
sns.barplot(y='count_num', x='index', data=graph_data, width=0.2)
txt = "The bar plot below describes the count of each object"\
" available in valid kitti data."
plt.text(0.05, 0.95, txt, transform=fig.transFigure, size=10)
configure_subgraph(ax, xlabel="Object Name", ylabel="Count",
xtickrotate=(90, "right"), yticklabelformat="plain",
yticks=binrange)
pdf.savefig(fig)
plt.close()
# Create graph for object count(%)
count_sum = count_df["count_num"].sum()
count_df['percent'] = (count_df["count_num"] / count_sum) * 100
count_df['percent'] = count_df['percent'].round(decimals=2)
graph_data = count_df if show_all else count_df.head(100)
if wandb_attr.visualize:
graph_data = graph_data.drop("count_num", axis=1)
graph_data = graph_data.rename(columns={'index': 'Object Name', 'percent': 'Count(%)'})
wandb.create_barplot(graph_data, "Object Name Vs Count Percentage", "object_count_chart2")
else:
fig = plt.figure(figsize=figuresizes)
ax = plt.gca()
binrange = range(0, 100, 10)
sns.barplot(y='percent', x='index', data=graph_data, width=0.2)
txt = "The bar plot below describes the count percentage of each "\
"object available in valid kitti data ."
plt.text(0.05, 0.95, txt, transform=fig.transFigure, size=10)
configure_subgraph(ax, xlabel="Object Name", ylabel="Count (%)",
xtickrotate=(90, "right"), yticklabelformat="plain",
yticks=binrange)
pdf.savefig(fig)
plt.close()
# Create stats table for object count
count_df_desc = count_df['count_num'].describe()
count_stat = pd.DataFrame({'Value': count_df_desc})
count_stat = count_stat.reset_index()
count_stat = count_stat.rename(columns={'index': 'Statistic'})
if not wandb_attr.visualize:
fig = plt.figure(figsize=figuresizes)
fig.clf()
plt.axis('off')
txt = "The table below shows object count statistics"
plt.text(0.05, 0.90, txt, transform=fig.transFigure, size=10)
table = plt.table(cellText=count_stat.values, edges='closed',
loc='center', colLoc='right')
for coord, cell in table.get_celld().items():
if (coord[1] == 0):
font_property = FontProperties(weight='bold')
cell.set_text_props(fontproperties=font_property)
pdf.savefig(fig)
plt.close()
else:
wandb.create_table(count_stat, "Count statistics")
# Create summary table for object count per tag
if not wandb_attr.visualize:
fig = plt.figure(figsize=figuresizes)
fig.clf()
plt.axis('off')
txt = 'The table below shows object count per object.'
plt.text(0.05, 0.90, txt, transform=fig.transFigure, size=10)
count_df['percent'] = count_df['percent'].round(decimals=4)
graph_data = count_df if show_all else count_df.head(100)
table = plt.table(cellText=graph_data.values, edges='closed',
loc='center', colLoc='right',
colLabels=['Object Name', 'Count', 'Percentage'])
for coord, cell in table.get_celld().items():
if (coord[0] == 0):
font_property = FontProperties(weight='bold')
cell.set_text_props(fontproperties=font_property)
pdf.savefig(fig)
plt.close()
pdf.close()
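# Example call (hypothetical config objects): with wandb_attr.visualize set
# to False this writes Object_count.pdf into output_dir; otherwise the same
# frames are logged to W&B as bar plots and tables.
#
#   object_count_visualize(valid_df, "/results/graphs", cfg.graph, cfg.wandb)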
def occlusion_visualize(valid_df, output_dir, graph_attr, wandb_attr):
""" Create Graphs for Object Count.
Args:
valid_df (Pandas DataFrame): valid kitti dataframe
output_dir(str): Result Directory.
graph_attr (DictConfig): graph attributes(
height - to set graph height
width - to set graph width
show_all - to show all object on graph,
by default maximum 100 object will be shown.)
wandb_attr (DictConfig): wandb attributes
Return:
No explicit Return
"""
# Object Occlusion
occluded_df = valid_df.groupby(['occluded'])['type'].count().rename('occ_count').reset_index()
occluded_df['count_per'] = (occluded_df['occ_count'] / occluded_df[
'occ_count'].sum()) * 100
    occluded_df['count_per'] = occluded_df['count_per'].round(decimals=3)
# Occlusion per object
per_object_occluded_df = valid_df.groupby(['type', 'occluded'])[
'truncated'].count().reset_index()
figuresizes = (graph_attr.width, graph_attr.height)
show_all = True
if not graph_attr.show_all and len(per_object_occluded_df) > 100:
show_all = False
if not wandb_attr.visualize:
pdf = PdfPages(os.path.join(output_dir, 'Occlusion.pdf'))
# Create Occlusion Barplot
fig = plt.figure(figsize=figuresizes)
ax = plt.gca()
binrange = range(0, 100, 10)
gph = sns.barplot(occluded_df, y='count_per', x='occluded', width=0.2)
gph.bar_label(gph.containers[0])
configure_subgraph(ax, xlabel="Occlusion", ylabel="Count (%)",
yticks=binrange)
txt = "The bar plot below describes the count percentage of each "\
"occlusion level in valid kitti data .\n0: Fully visible, "\
"1: Partly occluded, 2: Largely occluded,"\
" 3: Unknown, Any other integer: Unknown occlusion tag"
plt.text(0.05, 0.95, txt, transform=fig.transFigure, size=10)
pdf.savefig(fig)
plt.close()
sumvalue = per_object_occluded_df["truncated"].sum()
# Draw a nested barplot for occlusion level per object Tag
per_object_occluded_df["truncated"] = (per_object_occluded_df["truncated"] / sumvalue) * 100
fig = plt.figure(figsize=figuresizes)
ax = plt.gca()
graph_data = per_object_occluded_df if show_all else per_object_occluded_df.head(100)
gph = sns.barplot(data=graph_data, x="type", y="truncated",
hue="occluded", errorbar="sd", palette="dark")
txt = "The bar plot below describes the count percentage of occlusion"\
" per object, available in valid kitti data .\n0: Fully "\
"Visible, 1: Partly occluded, 2: Largely occluded, 3: Unknown, "\
"Any other integer: Unknown occlusion tag"
plt.text(0.05, 0.95, txt, transform=fig.transFigure, size=10)
configure_subgraph(ax, xlabel="Object Name", ylabel="Count(%)",
xtickrotate=(90, "right"), yticklabelformat="plain",
yticks=binrange)
fig.legend(fontsize=5)
pdf.savefig(fig)
plt.close()
pdf.close()
else:
occluded_df['occluded'] = np.where((occluded_df['occluded'] < 0) | (occluded_df['occluded'] > 3),
"Unknown occlusion value", occluded_df['occluded'])
occluded_df['occluded'] = occluded_df['occluded'].replace(["0", "1", "2", "3"],
['Fully visible',
'Partly occluded',
'Largely occluded',
'Unknown'])
occluded_df = occluded_df.drop("occ_count", axis=1)
occluded_df = occluded_df.rename(columns={'occluded': 'Occlusion', 'count_per': 'Count(%)'})
wandb.create_barplot(occluded_df, "Occlusion Vs Count(%)", "Occlusion_chart1")
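# Sketch of the aggregation above on toy data (hypothetical frame):
#
#   valid_df = pd.DataFrame({'type': ['Car', 'Car', 'Van'],
#                            'occluded': [0, 1, 0]})
#   valid_df.groupby(['occluded'])['type'].count()
#   # occluded 0 -> 2 rows, occluded 1 -> 1 row; count_per becomes
#   # 66.667% and 33.333% after normalization and rounding.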
def bbox_area_visualize(valid_df, output_dir, graph_attr, wandb_attr):
""" Create Graphs for Object Bounding box area.
Args:
valid_df (Pandas DataFrame): valid kitti data dataframe.
output_dir(str): Result Directory.
graph_attr (DictConfig): graph attributes(
height - to set graph height
width - to set graph width
show_all - to show all object on graph,
by default maximum 100 object will be shown.)
wandb_attr (DictConfig): wandb attributes
Return:
No explicit Return
"""
figuresizes = (graph_attr.width, graph_attr.height)
area_mean = valid_df.groupby('type')['bbox_area']
area_mean = area_mean.describe()['mean'].reset_index()
if not graph_attr.show_all and len(area_mean) > 100:
graph_data = area_mean.head(100)
else:
graph_data = area_mean
area_stats = valid_df['bbox_area'].describe()
area_stat = pd.DataFrame({'Value': area_stats})
area_stat['Value'] = area_stat['Value'].round(decimals=4)
area_stat = area_stat.reset_index()
area_stat = area_stat.rename(columns={'index': 'Area Statistic'})
area_stat_per_type = valid_df.groupby('type')['bbox_area'].describe()
if not graph_attr.show_all and len(area_stat_per_type) > 100:
graph_data_per_type = area_stat_per_type.head(100)
else:
graph_data_per_type = area_stat_per_type
if not wandb_attr.visualize:
pdf = PdfPages(os.path.join(output_dir, 'Area.pdf'))
fig = plt.figure(figsize=figuresizes)
ax = plt.gca()
minv = min(area_mean['mean'])
maxv = max(area_mean['mean'])
if maxv - minv > 10:
binrange = range(round(minv), round(maxv), int((maxv - minv) // 10))
else:
binrange = range(round(minv), round(maxv))
sns.scatterplot(graph_data, x='type', y='mean')
configure_subgraph(ax, xlabel="Object Name", ylabel="mean bbox area ",
xtickrotate=(90, "right"), yticklabelformat="plain",
yticks=binrange)
txt = "The scatter plot below describes the mean bounding box size "\
"of different objects."
plt.text(0.05, 0.95, txt, transform=fig.transFigure, size=10)
pdf.savefig(fig)
plt.close()
fig = plt.figure(figsize=figuresizes)
fig.clf()
plt.axis('off')
txt = 'The table below shows bounding box area statistics.'
plt.text(0.05, 0.90, txt, transform=fig.transFigure, size=10)
table = plt.table(cellText=area_stat.values, edges='closed',
loc='center', colLoc='right',
colLabels=area_stat.columns)
for coord, cell in table.get_celld().items():
if (coord[0] == 0 or coord[1] == 0):
font_property = FontProperties(weight='bold')
cell.set_text_props(fontproperties=font_property)
pdf.savefig(fig)
plt.close()
fig = plt.figure(figsize=figuresizes)
fig.clf()
plt.axis('off')
txt = 'The table below shows per object bounding box area statistics.'
plt.text(0.05, 0.90, txt, transform=fig.transFigure, size=10)
table = plt.table(cellText=graph_data_per_type.values, edges='closed',
loc='center', colLoc='right',
colLabels=graph_data_per_type.columns,
rowLabels=graph_data_per_type.index)
for coord, cell in table.get_celld().items():
if (coord[0] == 0 or coord[1] == -1):
font_property = FontProperties(weight='bold')
cell.set_text_props(fontproperties=font_property)
pdf.savefig(fig)
plt.close()
pdf.close()
else:
graph_data = graph_data.rename(columns={'type': 'Object Name', 'mean': 'Mean bbox area'})
wandb.create_barplot(graph_data, "Object Name Vs Mean bbox area", "bbox_area_chart1")
wandb.create_table(area_stat, "Area statistics")
graph_data_per_type = graph_data_per_type.reset_index(level=0)
graph_data_per_type = graph_data_per_type.rename(columns={'type': 'Object Name'})
wandb.create_table(graph_data_per_type, "Area statistics per object.")
def truncation_visualize(valid_df, output_dir, graph_attr, wandb_attr):
""" Create Graphs for Object Truncation.
Args:
valid_df (Pandas DataFrame): valid kitti dataframe.
output_dir(str): Result Directory.
graph_attr (DictConfig): graph attributes(
height - to set graph height
width - to set graph width
show_all - to show all object on graph,
by default maximum 100 object will be shown.)
wandb_attr (DictConfig): wandb attributes
Return:
No explicit Return.
"""
# Truncation
valid_df['truncated'] = valid_df['truncated'].round(decimals=1)
truncated_df = valid_df['truncated']
truncated_df = truncated_df.value_counts(normalize=True)
truncated_df = truncated_df.rename('count_per').reset_index()
truncated_df_count = valid_df['truncated']
truncated_df_count = truncated_df_count.value_counts().rename('count')
truncated_df_count = truncated_df_count.reset_index()
truncated_df['index'] = truncated_df['index'] * 100
truncated_df['count_per'] = truncated_df['count_per'] * 100
truncated_df['count_per'] = truncated_df['count_per'].round(decimals=2)
figuresizes = (graph_attr.width, graph_attr.height)
if not wandb_attr.visualize:
pdf = PdfPages(os.path.join(output_dir, 'Truncation.pdf'))
fig = plt.figure(figsize=figuresizes)
ax = plt.gca()
c_min = min(truncated_df_count['count'])
c_max = max(truncated_df_count['count'])
if c_max - c_min > 10:
binrange = range(c_min, c_max, int((c_max - c_min) / 10))
else:
binrange = range(c_min, c_max)
gph = sns.barplot(data=truncated_df_count, y='count', x='index',
width=0.2)
gph.bar_label(gph.containers[0])
configure_subgraph(ax, xlabel="Trucation(%)", ylabel="Object Count",
yticks=binrange, yticklabelformat="plain")
txt = "The bar plot below describes the object count against "\
"truncation percentage."
plt.text(0.05, 0.95, txt, transform=fig.transFigure, size=10)
pdf.savefig(fig)
plt.close()
fig = plt.figure(figsize=figuresizes)
ax = plt.gca()
binrange = range(0, 100, 10)
gph = sns.barplot(data=truncated_df, y='count_per', x='index',
width=0.2)
gph.bar_label(gph.containers[0])
configure_subgraph(ax, xlabel="Trucation(%)", ylabel="Object Count(%)",
yticks=binrange)
txt = "The bar plot below describes the object count percentage "\
"against truncation percentage"
plt.text(0.05, 0.95, txt, transform=fig.transFigure, size=10)
pdf.savefig(fig)
plt.close()
pdf.close()
else:
truncated_df_count = truncated_df_count.rename(columns={'count': 'Count', 'index': 'Truncation(%)'})
wandb.create_barplot(truncated_df_count, "Truncation(%) Vs Count", "truncation_chart1")
def invalid_data_visualize(valid_df, invalid_df, output_dir, graph_attr, wandb_attr):
""" Create Graphs for Object Truncation.
Args:
valid_df (Pandas DataFrame): valid kitti data
invalid_df (Pandas DataFrame): invalid kitti data
output_dir(str): Result Directory.
graph_attr (DictConfig): graph attributes(
height - to set graph height
width - to set graph width
show_all - to show all object on graph,
by default maximum 100 object will be shown.)
wandb_attr (DictConfig): wandb attributes
Return:
No explicit Return
"""
count_df = valid_df['type']
count_df = count_df.value_counts().rename('count_num')
count_df = count_df.reset_index()
    # invalid object-tag kitti rows
invalid_count_df = invalid_df['type']
var = 'invalid_count_num'
invalid_count_df = invalid_count_df.value_counts().rename(var)
invalid_count_df = invalid_count_df.reset_index()
valid_invalid_count_df = count_df.merge(invalid_count_df, on='index',
how='outer')
cols = {"count_num": "valid_count", "invalid_count_num": "invalid_count"}
valid_invalid_count_df = valid_invalid_count_df.rename(columns=cols)
valid_invalid_count_df = valid_invalid_count_df.fillna(0)
xmin = invalid_df['bbox_xmin']
xmax = invalid_df['bbox_xmax']
ymin = invalid_df['bbox_ymin']
ymax = invalid_df['bbox_ymax']
if 'img_height' in invalid_df.columns:
oob_condition = ((xmin < 0) | (ymin < 0) | (ymax < 0) |
(xmax < 0) | (ymax > invalid_df['img_height']) |
(xmax > invalid_df['img_width']))
else:
oob_condition = ((xmin < 0) | (ymin < 0) | (ymax < 0) |
(xmax < 0))
out_of_bound_bbox = invalid_df[oob_condition]
inverted_cord = invalid_df[(ymax > 0) &
(xmax > 0) &
(ymin > 0) &
(xmin > 0) &
((xmax < xmin) |
(ymax < ymin))]
coord_df = pd.DataFrame({"Coordinates": ["valid", "inverted",
"out_of_bound"],
"count": [len(valid_df), len(inverted_cord),
len(out_of_bound_bbox)]})
figuresizes = (graph_attr.width, graph_attr.height)
if len(inverted_cord) > 0:
temp_df = inverted_cord['type']
var = "inverted_cord_count"
temp_df = temp_df.value_counts(ascending=True).rename(var)
temp_df = temp_df.reset_index()
if not graph_attr.show_all and len(temp_df) > 100:
graph_data_inverted = temp_df.head(100)
else:
graph_data_inverted = temp_df
if len(out_of_bound_bbox) > 0:
temp_df = out_of_bound_bbox['type']
var = "out_of_bound_bbox_count"
temp_df = temp_df.value_counts(ascending=True).rename(var)
temp_df = temp_df.reset_index()
if not graph_attr.show_all and len(temp_df) > 100:
graph_data_oob = temp_df.head(100)
else:
graph_data_oob = temp_df
if not wandb_attr.visualize:
pdf = PdfPages(os.path.join(output_dir, 'Invalid_data.pdf'))
fig = plt.figure(figsize=figuresizes)
ax = plt.gca()
gph = sns.barplot(data=coord_df, x="Coordinates", y="count",
errorbar="sd", palette="dark")
gph.bar_label(gph.containers[0])
txt = "The bar plot below describes the count of valid, "\
"inverted and out of bound coordinates."
plt.text(0.05, 0.95, txt, transform=fig.transFigure, size=10)
pdf.savefig(fig)
plt.close()
if len(inverted_cord) > 0:
fig = plt.figure(figsize=figuresizes)
ax = plt.gca()
gph = sns.barplot(y='inverted_cord_count', x='index', data=graph_data_inverted,
width=0.2)
gph.bar_label(gph.containers[0])
configure_subgraph(ax, xlabel="Object Name",
ylabel="inverted_cord_count",
xtickrotate=(90, "right"),
yticklabelformat="plain")
txt = "The bar plot below describes the inverted coordinates "\
"count per object."
plt.text(0.05, 0.95, txt, transform=fig.transFigure, size=10)
pdf.savefig(fig)
plt.close()
if len(out_of_bound_bbox) > 0:
fig = plt.figure(figsize=figuresizes)
ax = plt.gca()
gph = sns.barplot(y='out_of_bound_bbox_count', x='index',
data=graph_data_oob, width=0.2)
gph.bar_label(gph.containers[0])
configure_subgraph(ax, xlabel="Object Name",
ylabel="out of bound coordinates count",
xtickrotate=(90, "right"),
yticklabelformat="plain")
txt = "The bar plot below describes the out of bound coordinates "\
"count per object."
plt.text(0.05, 0.95, txt, transform=fig.transFigure, size=10)
pdf.savefig(fig)
plt.close()
pdf.close()
else:
wandb.create_barplot(coord_df, "Count Vs bbox Coordinates", "coordinates_chart1")
if len(inverted_cord) > 0:
graph_data_inverted = graph_data_inverted.rename(columns={'index': 'Object Name',
'inverted_cord_count': 'Inverted coordinates count'})
wandb.create_barplot(graph_data_inverted, "Object Name Vs Inverted coordinates count", "coordinates_chart2")
if len(out_of_bound_bbox) > 0:
graph_data_oob = graph_data_oob.rename(columns={'out_of_bound_bbox_count': 'out of bound coordinates count',
'index': 'Object Name'})
wandb.create_barplot(graph_data_oob, "Object Name Vs Out of bound coordinates count", "coordinates_chart3")
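# Toy sketch of the split above (hypothetical frame): negative coordinates
# land in out_of_bound_bbox, positive-but-swapped ones in inverted_cord.
#
#   invalid_df = pd.DataFrame({'type': ['Car', 'Van'],
#                              'bbox_xmin': [-5, 30], 'bbox_ymin': [0, 10],
#                              'bbox_xmax': [10, 20], 'bbox_ymax': [10, 20]})
#   # Row 0 (bbox_xmin < 0) is out of bound; row 1 (bbox_xmax 20 <
#   # bbox_xmin 30) is inverted.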
def image_visualize(image_df, output_dir, graph_attr, wandb_attr):
""" Create Graphs for Object Truncation.
Args:
image_df (Pandas DataFrame): Image Dataframe
output_dir(str): Result Directory.
graph_attr (DictConfig): graph attributes(
height - to set graph height
width - to set graph width
show_all - to show all object on graph,
by default maximum 100 object will be shown.)
wandb_attr (DictConfig): wandb attributes
Return:
No explicit Return
"""
# Image stats
size_df = image_df['size'].value_counts(ascending=True,
normalize=True).rename('count_per').reset_index()
size_df['count_per'] = size_df['count_per'] * 100
width_df = image_df['img_width']
width_df = width_df.value_counts(ascending=True, normalize=True)
width_df = width_df.rename('count_per').reset_index()
width_df['count_per'] = width_df['count_per'] * 100
height_df = image_df['img_height']
height_df = height_df.value_counts(ascending=True, normalize=True)
height_df = height_df.rename('count_per').reset_index()
height_df['count_per'] = height_df['count_per'] * 100
height_df['count_per'] = height_df['count_per'].round(decimals=3)
width_df['count_per'] = width_df['count_per'].round(decimals=3)
size_df['count_per'] = size_df['count_per'].round(decimals=3)
image_stat = image_df.describe()
figuresizes = (graph_attr.width, graph_attr.height)
if not wandb_attr.visualize:
pdf = PdfPages(os.path.join(output_dir, 'Image.pdf'))
fig = plt.figure(figsize=figuresizes)
ax = plt.gca()
binrange = range(0, 100, 10)
gph = sns.barplot(size_df, y='count_per', x='index', width=0.2)
gph.bar_label(gph.containers[0])
configure_subgraph(ax, xlabel="image size", ylabel="Count Percentage",
yticks=binrange)
txt = "The bar plot below decribes the count(%) of different image "\
"sizes"
plt.text(0.05, 0.95, txt, transform=fig.transFigure, size=10)
pdf.savefig(fig)
plt.close()
fig = plt.figure(figsize=figuresizes)
ax = plt.gca()
binrange = range(0, 100, 10)
gph = sns.barplot(width_df, y='count_per', x='index', width=0.2)
gph.bar_label(gph.containers[0])
configure_subgraph(ax, xlabel="image width",
ylabel="Count Percentage", yticks=binrange)
txt = "The bar plot below decribes the count(%) of different image "\
"widths"
plt.text(0.05, 0.95, txt, transform=fig.transFigure, size=10)
pdf.savefig(fig)
plt.close()
fig = plt.figure(figsize=figuresizes)
ax = plt.gca()
binrange = range(0, 100, 10)
gph = sns.barplot(height_df, y='count_per', x='index', width=0.2)
gph.bar_label(gph.containers[0])
configure_subgraph(ax, xlabel="image height",
ylabel="Count Percentage",
yticks=binrange)
txt = "The bar plot below decribes the count(%) of different image "\
"heights"
plt.text(0.05, 0.95, txt, transform=fig.transFigure, size=10)
pdf.savefig(fig)
plt.close()
fig = plt.figure(figsize=figuresizes)
fig.clf()
plt.axis('off')
txt = 'The table below shows image statistics.'
plt.text(0.05, 0.90, txt, transform=fig.transFigure, size=10)
table = plt.table(cellText=image_stat.values, edges='closed',
loc='center', colLoc='right',
colLabels=image_stat.columns,
rowLabels=image_stat.index)
for coord, cell in table.get_celld().items():
if (coord[0] == 0 or coord[1] == -1):
font_property = FontProperties(weight='bold')
cell.set_text_props(fontproperties=font_property)
pdf.savefig(fig)
plt.close()
pdf.close()
else:
size_df = size_df.rename(columns={'count_per': 'Count(%)', 'index': 'image area'})
wandb.create_barplot(size_df, "image area vs count(%)", "image_chart1")
width_df = width_df.rename(columns={'count_per': 'Count(%)', 'index': 'image width'})
wandb.create_barplot(width_df, "image width vs count(%)", "image_chart2")
height_df = height_df.rename(columns={'count_per': 'Count(%)', 'index': 'image height'})
wandb.create_barplot(height_df, "image height vs count(%)", "image_chart3")
image_stat = image_stat.reset_index(level=0)
image_stat = image_stat.rename(columns={'index': 'Stat'})
wandb.create_table(image_stat, "Image Statistics")
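# Expected input sketch (assumed columns): image_df carries one row per
# image with 'img_width', 'img_height' and 'size' (width * height), e.g.
#
#   image_df = pd.DataFrame({'img_width': [1242], 'img_height': [375],
#                            'size': [465750]})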
def create_csv(valid_df, invalid_df, output_dir):
""" Create Graph and summary.
Args:
valid_df(Pandas Dataframe): Valid kitti dataframe
invalid_df(Pandas Dataframe): invalid kitti dataframe
output_dir(str): result Dir
Return:
No explicit returns.
"""
csv_folder = os.path.join(output_dir, 'csv')
if not os.path.exists(csv_folder):
os.makedirs(csv_folder)
invalid_file = os.path.join(csv_folder, 'invalid_data.csv')
valid_file = os.path.join(csv_folder, 'valid_data.csv')
valid_df = valid_df.drop('bbox_area', axis=1)
invalid_df = invalid_df.drop('bbox_area', axis=1)
valid_df.to_csv(valid_file, columns=valid_df.columns, index=False)
invalid_df.to_csv(invalid_file, columns=invalid_df.columns, index=False)
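# Resulting layout (illustrative):
#   <output_dir>/csv/valid_data.csv    - valid rows, bbox_area column dropped
#   <output_dir>/csv/invalid_data.csv  - invalid rows, bbox_area column dropped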
def summary_and_graph(valid_df, invalid_df, image_df, output_dir, data_format, graph_attr, wandb_attr):
""" Create Graph and summary.
Args:
valid_df (Pandas Dataframe): Valid kitti dataframe
invalid_df (Pandas Dataframe): invalid kitti dataframe
image_df (Pandas Dataframe): image dataframe
output_dir (str): result Dir
data_format (str): input data format.
graph_attr (DictConfig): graph attributes(
height - to set graph height
width - to set graph width
show_all - to show all object on graph,
by default maximum 100 object will be shown.)
wandb_attr (DictConfig): wandb attributes
Return:
No explicit returns.
"""
# Create CSV for valid and invalid data
create_csv(valid_df, invalid_df, output_dir)
# Create visualizations
output_dir = os.path.join(output_dir, 'graphs')
if not os.path.exists(output_dir):
os.makedirs(output_dir)
object_count_visualize(valid_df, output_dir, graph_attr, wandb_attr)
bbox_area_visualize(valid_df, output_dir, graph_attr, wandb_attr)
if image_df is not None:
image_visualize(image_df, output_dir, graph_attr, wandb_attr)
if data_format == "KITTI":
occlusion_visualize(valid_df, output_dir, graph_attr, wandb_attr)
truncation_visualize(valid_df, output_dir, graph_attr, wandb_attr)
invalid_data_visualize(valid_df, invalid_df, output_dir, graph_attr, wandb_attr)
def visualize_on_wandb(config, valid_df, invalid_df, image_df, len_image_data):
""" Visualize on wandb.
Args:
config (Hydra config): Config element of the analyze config.
valid_df (Pandas Dataframe): Valid kitti dataframe
invalid_df (Pandas Dataframe): invalid kitti dataframe
image_df (Pandas Dataframe): image dataframe
len_image_data (int): len of image data dict.
Return:
No explicit returns.
"""
wandb.login_and_initialize_wandb(config.wandb, config.data.output_dir)
if config.graph.generate_summary_and_graph:
summary_and_graph(valid_df, invalid_df, image_df, config.data.output_dir,
config.data.input_format, config.graph, config.wandb)
if not is_wandb_initialized():
logger.info("Not able to login or initialize wandb.Exiting..")
sys.exit(1)
if config.image.generate_image_with_bounding_box:
if len_image_data == 0:
logging.info("Skipping visualizing images with Bounding boxes.Please provide correct path in data.image_dir .")
else:
wandb.generate_images_with_bounding_boxes(valid_df, config.wandb, config.data.output_dir, config.image.sample_size)
def visualize_on_desktop(config, valid_df, invalid_df, image_df, image_data):
""" Visualize and save locally.
Args:
config (Hydra config): Config element of the analyze config.
valid_df (Pandas Dataframe): Valid kitti dataframe
invalid_df (Pandas Dataframe): invalid kitti dataframe
image_df (Pandas Dataframe): image dataframe
image_data (Dict): Dict containing image info (image_width,
image_height, image_path)
Return:
No explicit returns.
"""
if config.graph.generate_summary_and_graph:
summary_and_graph(valid_df, invalid_df, image_df, config.data.output_dir,
config.data.input_format, config.graph, config.wandb)
logging.info(f"Created Graphs inside {config.data.output_dir} folder")
# Generate Images with bounding boxes
if config.image.generate_image_with_bounding_box:
if len(image_data) == 0:
logging.info("Skipping visualizing images with Bounding boxes.Please provide correct path in data.image_dir .")
else:
logger.info("Generating images with bounding boxes and labels.")
image.generate_images_with_bounding_boxes(valid_df, image_data, config.data.output_dir,
config.image.sample_size, config.workers, config.data.input_format)
@monitor_status(mode='KITTI analysis')
def analyze_dataset_kitti(config):
"""Tao kitti analysis.
Args:
config (Hydra config): Config element of the analyze config.
"""
start_time = time.perf_counter()
kitti_obj = data_format.KittiData(config.data.image_dir, config.data.ann_path)
if not os.path.isdir(config.data.ann_path):
logger.info("Please provide path of kitti label directory in config data.ann_path.")
sys.exit(1)
kitti.list_files(kitti_obj)
if kitti_obj.image_paths is None:
logger.info("Image Directory not found.Processing only label files")
image_data = None
image_df = None
else:
# Get image data (image width and height)
image_data = kitti.get_image_data(kitti_obj)
image_df = kitti.create_image_dataframe(image_data)
# Validate and create big merged kitti files
valid_kitti_filepaths = kitti.validate_and_merge_kitti_files(kitti_obj.label_paths,
config.data.output_dir,
config.workers,
image_data)
invalid_filepath = os.path.join(COMMON_FILE_NAMES['INTERMEDIATE_KITTI_FOLDER'],
COMMON_FILE_NAMES['INVALID_KITTI'])
# Dataframe creation for valid and invalid kitti data
valid_df, invalid_df = kitti.create_dataframe(valid_kitti_filepaths,
invalid_filepath,
image_data)
    if config.wandb.visualize:
        visualize_on_wandb(config, valid_df, invalid_df, image_df,
                           len(image_data) if image_data else 0)
else:
visualize_on_desktop(config, valid_df, invalid_df, image_df, image_data)
logger.debug(f"Total time taken : {time.perf_counter() - start_time}")
@monitor_status(mode='COCO analysis')
def analyze_dataset_coco(config):
"""Tao coco analysis.
Args:
config (Hydra config): Config element of the analyze config.
"""
start = time.perf_counter()
if not os.path.isfile(config.data.ann_path):
logger.info("Please provide path of coco annotation file in config data.ann_path.")
sys.exit(1)
coco_obj = data_format.CocoData(config.data.ann_path, config.data.image_dir)
image_data = coco.get_image_data(coco_obj)
image_df = None
if image_data:
image_df = coco.create_image_dataframe(image_data)
    # Dataframe creation for valid and invalid coco data
valid_df, invalid_df = coco.create_dataframe(coco_obj)
if config.wandb.visualize:
visualize_on_wandb(config, valid_df, invalid_df, image_df, len(image_data))
else:
visualize_on_desktop(config, valid_df, invalid_df, image_df, image_data)
logger.debug(f"Total time taken : {time.perf_counter() - start}")
spec_root = os.path.dirname(os.path.abspath(__file__))
@hydra_runner(
config_path=os.path.join(spec_root, "../experiment_specs"),
config_name="analyze", schema=ExperimentConfig
)
def main(cfg: ExperimentConfig):
"""TAO Analyze main wrapper function."""
try:
if not os.path.exists(cfg.data.output_dir):
os.makedirs(cfg.data.output_dir)
cfg.results_dir = cfg.results_dir or cfg.data.output_dir
if cfg.data.input_format == "COCO":
analyze_dataset_coco(cfg)
elif cfg.data.input_format == "KITTI":
analyze_dataset_kitti(cfg)
else:
logger.info(f"Data format {cfg.data.input_format} is not supported.")
sys.exit(1)
except KeyboardInterrupt:
logger.info("Interrupting dataset analysis.")
sys.exit(1)
    except Exception as e:
        logger.info(f"Analysis run failed with error: {e}")
        sys.exit(1)
sys.exit(1)
if __name__ == '__main__':
main()
| tao_dataset_suite-main | nvidia_tao_ds/data_analytics/scripts/analyze.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data analytics entrypoint"""
| tao_dataset_suite-main | nvidia_tao_ds/data_analytics/entrypoint/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Define entrypoint to run tasks for analytics."""
import argparse
from nvidia_tao_ds.data_analytics import scripts
from nvidia_tao_ds.core.entrypoint.entrypoint import get_subtasks, launch
def main():
"""Main entrypoint wrapper."""
# Create parser for a given task.
parser = argparse.ArgumentParser(
"analytics",
add_help=True,
description="Data analytics entrypoint"
)
# Build list of subtasks by inspecting the scripts package.
subtasks = get_subtasks(scripts)
# Parse the arguments and launch the subtask.
launch(
parser, subtasks, task="analytics"
)
if __name__ == '__main__':
main()
| tao_dataset_suite-main | nvidia_tao_ds/data_analytics/entrypoint/analytics.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data-Services core module"""
| tao_dataset_suite-main | nvidia_tao_ds/core/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common decorators used in TAO Toolkit."""
from functools import wraps
import os
from nvidia_tao_ds.augment.utils.distributed_utils import MPI_is_distributed, MPI_local_rank
import nvidia_tao_ds.core.logging.logging as status_logging
from nvidia_tao_ds.core.mlops.wandb import alert
def monitor_status(name='Data-services', mode='analyze'):
"""Status monitoring decorator."""
def inner(runner):
@wraps(runner)
def _func(cfg, **kwargs):
try:
if MPI_is_distributed():
is_master = MPI_local_rank() == 0
else:
is_master = True
except ValueError:
is_master = True
# set up status logger
if not os.path.exists(cfg.results_dir) and is_master:
os.makedirs(cfg.results_dir)
status_file = os.path.join(cfg.results_dir, "status.json")
status_logging.set_status_logger(
status_logging.StatusLogger(
filename=status_file,
is_master=is_master,
verbosity=1,
append=True
)
)
s_logger = status_logging.get_status_logger()
try:
s_logger.write(
status_level=status_logging.Status.STARTED,
message=f"Starting {name} {mode}."
)
alert(
title=f'{mode.capitalize()} started',
text=f'{mode.capitalize()} {name} has started',
level=0,
is_master=is_master
)
runner(cfg, **kwargs)
s_logger.write(
status_level=status_logging.Status.SUCCESS,
message=f"{mode.capitalize()} finished successfully."
)
except (KeyboardInterrupt, SystemError):
status_logging.get_status_logger().write(
message=f"{mode.capitalize()} was interrupted",
verbosity_level=status_logging.Verbosity.INFO,
status_level=status_logging.Status.FAILURE
)
alert(
title=f'{mode.capitalize()} stopped',
text=f'{mode.capitalize()} was interrupted',
level=1,
is_master=is_master
)
except Exception as e:
status_logging.get_status_logger().write(
message=str(e),
status_level=status_logging.Status.FAILURE
)
alert(
title=f'{mode.capitalize()} failed',
text=str(e),
level=2,
is_master=is_master
)
raise e
return _func
return inner
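# Usage sketch (hypothetical task): wrapping an entrypoint logs STARTED,
# SUCCESS and FAILURE events to <results_dir>/status.json and mirrors them
# as W&B alerts.
#
#   @monitor_status(mode='demo run')
#   def run(cfg):
#       ...  # cfg must expose results_dir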
| tao_dataset_suite-main | nvidia_tao_ds/core/decorators.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Routines for connecting with Weights and Biases client."""
from datetime import datetime
import logging
import wandb
from wandb import AlertLevel
import os
DEFAULT_WANDB_CONFIG = "~/.netrc"
logger = logging.getLogger(__name__)
_WANDB_INITIALIZED = False
def alert(title, text, duration=300, level=0, is_master=True):
"""Send alert."""
alert_levels = {
0: AlertLevel.INFO,
1: AlertLevel.WARN,
2: AlertLevel.ERROR
}
if is_wandb_initialized() and is_master:
wandb.alert(
title=title,
text=text,
level=alert_levels[level],
wait_duration=duration
)
def is_wandb_initialized():
"""Check if wandb has been initialized."""
global _WANDB_INITIALIZED # pylint: disable=W0602,W0603
return _WANDB_INITIALIZED
def check_wandb_logged_in():
"""Check if weights and biases have been logged in."""
wandb_logged_in = False
try:
wandb_api_key = os.getenv("WANDB_API_KEY", None)
if wandb_api_key is not None or os.path.exists(os.path.expanduser(DEFAULT_WANDB_CONFIG)):
wandb_logged_in = wandb.login(key=wandb_api_key)
return wandb_logged_in
except wandb.errors.UsageError:
logger.warning("WandB wasn't logged in.")
return False
def initialize_wandb(output_dir,
project="TAO Data Analytics",
entity=None,
save_code=False,
name=None,
notes=None,
tags=None,
wandb_logged_in=False,
):
"""Function to initialize wandb client with the weights and biases server.
If wandb initialization fails, then the function just catches the exception
and prints an error log with the reason as to why wandb.init() failed.
Args:
output_dir (str): Output directory of the experiment.
project (str): Name of the project to sync data with.
        entity (str): Name of the wandb entity.
save_code (bool): save the main script or notebook to W&B
notes (str): One line description about the wandb job.
tags (list(str)): List of tags about the job.
name (str): Name of the task running.
wandb_logged_in (bool): Boolean flag to check if wandb was logged in.
Returns:
No explicit return.
"""
logger.info("Initializing wandb.")
try:
assert wandb_logged_in, (
"WandB client wasn't logged in. Please make sure to set "
"the WANDB_API_KEY env variable or run `wandb login` in "
"over the CLI and copy the ~/.netrc file to the container."
)
start_time = datetime.now()
time_string = start_time.strftime("%d/%y/%m_%H:%M:%S")
if name is None:
name = "run"
wandb_name = f"{name}_{time_string}"
wandb.init(
project=project,
entity=entity,
save_code=save_code,
name=wandb_name,
notes=notes,
tags=tags,
dir=output_dir
)
global _WANDB_INITIALIZED # pylint: disable=W0602,W0603
_WANDB_INITIALIZED = True
except Exception as e:
logger.warning("Wandb logging failed with error %s", e)
| tao_dataset_suite-main | nvidia_tao_ds/core/mlops/wandb.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MLOps for data-services."""
| tao_dataset_suite-main | nvidia_tao_ds/core/mlops/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common entrypoint module to TAO Data Services.""" | tao_dataset_suite-main | nvidia_tao_ds/core/entrypoint/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Define routines required for the entrypoint."""
import importlib
import os
import pkgutil
import subprocess
import sys
from time import time
from nvidia_tao_ds.core.telemetry.nvml_utils import get_device_details
from nvidia_tao_ds.core.telemetry.telemetry import send_telemetry_data
def get_subtasks(package):
"""Get supported subtasks for a given task.
    This function lists out the tasks in the .scripts folder.
Returns:
subtasks (dict): Dictionary of files.
"""
module_path = package.__path__
modules = {}
# Collect modules dynamically.
for _, task, is_package in pkgutil.walk_packages(module_path):
if is_package:
continue
module_name = package.__name__ + '.' + task
module_details = {
"module_name": module_name,
"runner_path": os.path.abspath(importlib.import_module(module_name).__file__),
}
modules[task] = module_details
return modules
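# Example return shape (hypothetical package with analyze.py/validate.py):
#
#   {'analyze':  {'module_name': 'pkg.scripts.analyze',
#                 'runner_path': '/.../scripts/analyze.py'},
#    'validate': {'module_name': 'pkg.scripts.validate',
#                 'runner_path': '/.../scripts/validate.py'}}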
def launch(parser, subtasks, multigpu_support=['generate'], task="tao_ds"):
"""CLI function that executes subtasks.
Args:
parser: Created parser object for a given task.
subtasks: list of subtasks for a given task.
"""
# Subtasks for a given model.
parser.add_argument(
'subtask', default='train', choices=subtasks.keys(), help="Subtask for a given task/model.",
)
# Add standard TAO arguments.
parser.add_argument(
"-r",
"--results_dir",
help="Path to a folder where the experiment outputs should be written. (DEFAULT: ./)",
required=False,
)
parser.add_argument(
"-e",
"--experiment_spec_file",
help="Path to the experiment spec file.",
required=True)
parser.add_argument(
"-g",
"--gpus",
help="Number of GPUs or gpu index to use.",
type=str,
default=None
)
# Parse the arguments.
args, unknown_args = parser.parse_known_args()
process_passed = True
script_args = ""
# Check for whether the experiment spec file exists.
if not os.path.exists(args.experiment_spec_file):
raise FileNotFoundError(
f"Experiment spec file wasn't found at {args.experiment_spec_file}"
)
path, name = os.path.split(args.experiment_spec_file)
if path != "":
script_args += f" --config-path {os.path.realpath(path)}"
script_args += f" --config-name {name}"
if args.results_dir:
script_args += " results_dir=" + args.results_dir
if args.gpus and args.subtask in multigpu_support:
try:
script_args += f" gpu_ids=[{','.join([str(i) for i in range(int(args.gpus))])}]"
except ValueError:
script_args += f" gpu_ids={args.gpus}"
script = subtasks[args.subtask]["runner_path"]
# Pass unknown args to call
unknown_args_as_str = " ".join(unknown_args)
# Create a system call.
call = "python " + script + script_args + " " + unknown_args_as_str
start = time()
try:
# Run the script.
subprocess.check_call(call, shell=True, stdout=sys.stdout, stderr=sys.stdout)
except (KeyboardInterrupt, SystemExit) as e:
print("Command was interrupted due to ", e)
process_passed = True
except subprocess.CalledProcessError as e:
if e.output is not None:
print(e.output)
process_passed = False
end = time()
time_lapsed = int(end - start)
try:
gpu_data = list()
for device in get_device_details():
gpu_data.append(device.get_config())
print("Sending telemetry data.")
send_telemetry_data(
task,
args.subtask,
gpu_data,
num_gpus=args.gpus,
time_lapsed=time_lapsed,
pass_status=process_passed
)
except Exception as e:
print("Telemetry data couldn't be sent, but the command ran successfully.")
print(f"[Error]: {e}")
if not process_passed:
print("Execution status: FAIL")
        sys.exit(1)  # returning non zero return code from the process.
print("Execution status: PASS") | tao_dataset_suite-main | nvidia_tao_ds/core/entrypoint/entrypoint.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities using the NVML library for GPU devices."""
import json
import pynvml
BRAND_NAMES = {
pynvml.NVML_BRAND_UNKNOWN: "Unknown",
pynvml.NVML_BRAND_QUADRO: "Quadro",
pynvml.NVML_BRAND_TESLA: "Tesla",
pynvml.NVML_BRAND_NVS: "NVS",
pynvml.NVML_BRAND_GRID: "Grid",
pynvml.NVML_BRAND_TITAN: "Titan",
pynvml.NVML_BRAND_GEFORCE: "GeForce",
pynvml.NVML_BRAND_NVIDIA_VAPPS: "NVIDIA Virtual Applications",
pynvml.NVML_BRAND_NVIDIA_VPC: "NVIDIA Virtual PC",
pynvml.NVML_BRAND_NVIDIA_VCS: "NVIDIA Virtual Compute Server",
pynvml.NVML_BRAND_NVIDIA_VWS: "NVIDIA RTX Virtual Workstation",
pynvml.NVML_BRAND_NVIDIA_VGAMING: "NVIDIA Cloud Gaming",
pynvml.NVML_BRAND_QUADRO_RTX: "Quadro RTX",
pynvml.NVML_BRAND_NVIDIA_RTX: "NVIDIA RTX",
pynvml.NVML_BRAND_NVIDIA: "NVIDIA",
pynvml.NVML_BRAND_GEFORCE_RTX: "GeForce RTX",
pynvml.NVML_BRAND_TITAN_RTX: "TITAN RTX",
}
class GPUDevice:
"""Data structure to represent a GPU device."""
def __init__(self, pci_bus_id,
device_name,
device_brand,
memory,
cuda_compute_capability):
"""Data structure representing a GPU device.
Args:
pci_bus_id (hex): PCI bus ID of the GPU.
device_name (str): Name of the device GPU.
            device_brand (int): Brand of the GPU.
            memory (int): Total device memory in bytes.
            cuda_compute_capability (tuple): CUDA compute capability (major, minor).
"""
self.name = device_name
self.pci_bus_id = pci_bus_id
if device_brand in BRAND_NAMES.keys():
self.brand = BRAND_NAMES[device_brand]
else:
self.brand = None
self.defined = True
self.memory = memory
self.cuda_compute_capability = cuda_compute_capability
def get_config(self):
"""Get json config of the device.
Returns
device_dict (dict): Dictionary containing data about the device.
"""
assert self.defined, "Device wasn't defined."
config_dict = {}
config_dict["name"] = self.name.decode().replace(" ", "-")
config_dict["pci_bus_id"] = self.pci_bus_id
config_dict["brand"] = self.brand
config_dict["memory"] = self.memory
config_dict["cuda_compute_capability"] = self.cuda_compute_capability
return config_dict
def __str__(self):
"""Generate a printable representation of the device."""
config = self.get_config()
data_string = json.dumps(config, indent=2)
return data_string
def pynvml_context(fn):
"""Simple decorator to setup python nvml context.
Args:
f: Function pointer.
Returns:
output of f.
"""
def _fn_wrapper(*args, **kwargs):
"""Wrapper setting up nvml context."""
try:
pynvml.nvmlInit()
return fn(*args, **kwargs)
finally:
pynvml.nvmlShutdown()
return _fn_wrapper
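# Usage sketch: any function decorated below runs between nvmlInit() and
# nvmlShutdown(), so NVML handles stay valid for its whole body.
#
#   @pynvml_context
#   def get_driver_version():  # hypothetical helper
#       return pynvml.nvmlSystemGetDriverVersion()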
@pynvml_context
def get_number_gpus_available():
"""Get the number of GPU's attached to the machine.
Returns:
num_gpus (int): Number of GPUs in the machine.
"""
num_gpus = pynvml.nvmlDeviceGetCount()
return num_gpus
@pynvml_context
def get_device_details():
"""Get details about each device.
Returns:
device_list (list): List of GPUDevice objects.
"""
num_gpus = pynvml.nvmlDeviceGetCount()
device_list = []
    assert num_gpus > 0, "At least 1 GPU is required for TAO Toolkit to run."
for idx in range(num_gpus):
handle = pynvml.nvmlDeviceGetHandleByIndex(idx)
pci_info = pynvml.nvmlDeviceGetPciInfo(handle)
device_name = pynvml.nvmlDeviceGetName(handle)
brand_name = pynvml.nvmlDeviceGetBrand(handle)
memory = pynvml.nvmlDeviceGetMemoryInfo(handle)
cuda_compute_capability = pynvml.nvmlDeviceGetCudaComputeCapability(handle)
device_list.append(
GPUDevice(
pci_info.busId,
device_name,
brand_name,
memory.total,
cuda_compute_capability
)
)
return device_list
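# Illustrative output for a single-GPU box (values are made up, abridged):
#
#   for device in get_device_details():
#       print(device)
#   # {"name": "NVIDIA-A100-SXM4-40GB", "brand": "NVIDIA", "memory": ...}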
| tao_dataset_suite-main | nvidia_tao_ds/core/telemetry/nvml_utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Initial module containing implementation for TAO Telemetry."""
| tao_dataset_suite-main | nvidia_tao_ds/core/telemetry/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilties to send data to the TAO Toolkit Telemetry Remote Service."""
import os
import shutil
import subprocess
import sys
import tarfile
import tempfile
import urllib
import requests
import urllib3
TELEMETRY_TIMEOUT = int(os.getenv("TELEMETRY_TIMEOUT", "30"))
def get_url_from_variable(variable, default=None):
"""Get the Telemetry Server URL."""
url = os.getenv(variable, default)
return url
def url_exists(url):
"""Check if a URL exists.
Args:
url (str): String to be verified as a URL.
Returns:
        valid (bool): True/False
"""
url_request = urllib.request.Request(url)
url_request.get_method = lambda: 'HEAD'
try:
urllib.request.urlopen(url_request) # noqa pylint: disable=R1732
return True
except urllib.request.URLError:
return False
def get_certificates():
"""Download the cacert.pem file and return the path.
Returns:
path (str): UNIX path to the certificates.
"""
certificates_url = get_url_from_variable("TAO_CERTIFICATES_URL")
if not url_exists(certificates_url):
raise urllib.request.URLError("Url for the certificates not found.")
tmp_dir = tempfile.mkdtemp()
download_command = f"wget {certificates_url} -P {tmp_dir} --quiet"
try:
subprocess.check_call(
download_command, shell=True, stdout=sys.stdout
)
except Exception as exc:
raise urllib.request.URLError("Download certificates.tar.gz failed.") from exc
tarfile_path = os.path.join(tmp_dir, "certificates.tar.gz")
assert tarfile.is_tarfile(tarfile_path), (
"The downloaded file isn't a tar file."
)
with tarfile.open(name=tarfile_path, mode="r:gz") as tar_file:
filenames = tar_file.getnames()
for memfile in filenames:
member = tar_file.getmember(memfile)
tar_file.extract(member, tmp_dir)
file_list = [item for item in os.listdir(tmp_dir) if item.endswith(".pem")]
assert file_list, (
f"Didn't get pem files. Directory contents {file_list}"
)
return tmp_dir
def send_telemetry_data(network, action, gpu_data, num_gpus=1, time_lapsed=None, pass_status=False):
"""Wrapper to send TAO telemetry data.
Args:
network (str): Name of the network being run.
action (str): Subtask of the network called.
gpu_data (dict): Dictionary containing data about the GPU's in the machine.
num_gpus (int): Number of GPUs used in the job.
time_lapsed (int): Time lapsed.
pass_status (bool): Job passed or failed.
Returns:
No explicit returns.
"""
urllib3.disable_warnings(urllib3.exceptions.SubjectAltNameWarning)
if os.getenv('TELEMETRY_OPT_OUT', "no").lower() in ["no", "false", "0"]:
url = get_url_from_variable("TAO_TELEMETRY_SERVER")
data = {
"version": os.getenv("TAO_TOOLKIT_VERSION", "4.0.0"),
"action": action,
"network": network,
"gpu": [device["name"] for device in gpu_data[:num_gpus]],
"success": pass_status
}
if time_lapsed is not None:
data["time_lapsed"] = time_lapsed
certificate_dir = get_certificates()
cert = ('client-cert.pem', 'client-key.pem')
requests.post(
url,
json=data,
cert=tuple([os.path.join(certificate_dir, item) for item in cert]), # noqa pylint: disable=R1728
timeout=TELEMETRY_TIMEOUT
)
shutil.rmtree(certificate_dir)
| tao_dataset_suite-main | nvidia_tao_ds/core/telemetry/telemetry.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Logger class for data-services."""
from abc import abstractmethod
import atexit
from datetime import datetime
import json
import logging
import os
logger = logging.getLogger(__name__)
class Verbosity():
"""Verbosity levels."""
DISABLE = 0
DEBUG = 10
INFO = 20
WARNING = 30
ERROR = 40
CRITICAL = 50
# Defining a log level to name dictionary.
log_level_to_name = {
Verbosity.DISABLE: "DISABLE",
Verbosity.DEBUG: 'DEBUG',
Verbosity.INFO: 'INFO',
Verbosity.WARNING: 'WARNING',
Verbosity.ERROR: 'ERROR',
Verbosity.CRITICAL: 'CRITICAL'
}
class Status():
"""Status levels."""
SUCCESS = 0
FAILURE = 1
STARTED = 2
RUNNING = 3
SKIPPED = 4
status_level_to_name = {
Status.SUCCESS: 'SUCCESS',
Status.FAILURE: 'FAILURE',
Status.STARTED: 'STARTED',
Status.RUNNING: 'RUNNING',
Status.SKIPPED: 'SKIPPED'
}
class BaseLogger(object):
"""File logger class."""
def __init__(self, is_master=False, verbosity=Verbosity.DISABLE):
"""Base logger class."""
self.is_master = is_master
self.verbosity = verbosity
self.categorical = {}
self.graphical = {}
self.kpi = {}
@property
def date(self):
"""Get date from the status."""
date_time = datetime.now()
date_object = date_time.date()
return "{}/{}/{}".format( # noqa pylint: disable=C0209
date_object.month,
date_object.day,
date_object.year
)
@property
    def time(self):
        """Get time from the status."""
date_time = datetime.now()
time_object = date_time.time()
return "{}:{}:{}".format( # noqa pylint: disable=C0209
time_object.hour,
time_object.minute,
time_object.second
)
@property
def categorical(self):
"""Categorical data to be logged."""
return self._categorical
@categorical.setter
def categorical(self, value: dict):
"""Set categorical data to be logged."""
self._categorical = value
@property
def graphical(self):
"""Graphical data to be logged."""
return self._graphical
@graphical.setter
def graphical(self, value: dict):
"""Set graphical data to be logged."""
self._graphical = value
@property
    def kpi(self):
        """KPI data to be logged."""
return self._kpi
@kpi.setter
def kpi(self, value: dict):
"""Set KPI data."""
self._kpi = value
def flush(self):
"""Flush the logger."""
pass
def format_data(self, data: dict):
"""Format the data."""
if isinstance(data, dict):
data_string = []
for key, value in data.items():
                formatted = self.format_data(value) if isinstance(value, dict) else value
                data_string.append(f"{key}: {formatted}")
return ", ".join(data_string)
def log(self, level, string):
"""Log the data string."""
if level >= self.verbosity:
logging.log(level, string)
@abstractmethod
def write(self, data=None,
status_level=Status.RUNNING,
verbosity_level=Verbosity.INFO,
message=None):
"""Write data out to the log file."""
if self.verbosity > Verbosity.DISABLE:
if not data:
data = {}
# Define generic data.
data["date"] = self.date
data["time"] = self.time
data["status"] = status_level_to_name.get(status_level, "RUNNING")
data["verbosity"] = log_level_to_name.get(verbosity_level, "INFO")
if message:
data["message"] = message
logging.log(verbosity_level, message)
if self.categorical:
data["categorical"] = self.categorical
if self.graphical:
data["graphical"] = self.graphical
if self.kpi:
data["kpi"] = self.kpi
data_string = self.format_data(data)
if self.is_master:
self.log(verbosity_level, data_string)
self.flush()
class StatusLogger(BaseLogger):
"""Simple logger to save the status file."""
def __init__(self, filename=None,
is_master=False,
verbosity=Verbosity.INFO,
append=True):
"""Logger to write out the status."""
super().__init__(is_master=is_master, verbosity=verbosity)
self.log_path = os.path.realpath(filename)
if is_master:
if os.path.exists(self.log_path):
logger.info("Log file already exists at %s", self.log_path)
self.l_file = open(self.log_path, "a" if append else "w", encoding='utf-8') # noqa pylint: disable=R1732
atexit.register(self.l_file.close)
def log(self, level, string):
"""Log the data string."""
if level >= self.verbosity:
self.l_file.write(string + "\n")
def flush(self):
"""Flush contents of the log file."""
if self.is_master:
self.l_file.flush()
@staticmethod
def format_data(data):
"""Format the dictionary data."""
if not isinstance(data, dict):
raise TypeError(f"Data must be a dictionary and not type {type(data)}.")
data_string = json.dumps(data)
return data_string
# Define the logger here so it's static.
_STATUS_LOGGER = BaseLogger()
def set_status_logger(status_logger):
"""Set the status logger.
Args:
status_logger: An instance of the logger class.
"""
global _STATUS_LOGGER # pylint: disable=W0603
_STATUS_LOGGER = status_logger
def get_status_logger():
"""Get the status logger."""
global _STATUS_LOGGER # pylint: disable=W0602,W0603
return _STATUS_LOGGER
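# --- Hedged usage sketch (not part of the original module) ---
# Minimal wiring of the status logger; the file path below is hypothetical.
#
#   set_status_logger(StatusLogger(filename="/results/status.json", is_master=True))
#   s_logger = get_status_logger()
#   s_logger.write(message="Job started.", status_level=Status.STARTED)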
| tao_dataset_suite-main | nvidia_tao_ds/core/logging/logging.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Logger for data-services."""
| tao_dataset_suite-main | nvidia_tao_ds/core/logging/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| tao_dataset_suite-main | nvidia_tao_ds/core/hydra/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility class to work with hydra config files."""
import functools
import os
import sys
from typing import Any, Callable, Optional
from hydra._internal.utils import _run_hydra, get_args_parser
from hydra.core.config_store import ConfigStore
from hydra.types import TaskFunction
from omegaconf import DictConfig
def hydra_runner(
config_path: Optional[str] = ".", config_name: Optional[str] = None, schema: Optional[Any] = None
) -> Callable[[TaskFunction], Any]:
"""
Decorator used for passing the Config paths to main function.
Optionally registers a schema used for validation/providing default values.
Args:
config_path: Optional path that will be added to config search directory.
NOTE: The default value of `config_path` has changed between Hydra 1.0 and Hydra 1.1+.
Please refer to https://hydra.cc/docs/next/upgrades/1.0_to_1.1/changes_to_hydra_main_config_path/
for details.
config_name: Pathname of the config file.
schema: Structured config type representing the schema used for validation/providing default values.
"""
def decorator(task_function: TaskFunction) -> Callable[[], None]:
@functools.wraps(task_function)
def wrapper(cfg_passthrough: Optional[DictConfig] = None) -> Any:
            # Check if a config was passed.
if cfg_passthrough is not None:
return task_function(cfg_passthrough)
args = get_args_parser()
# Parse arguments in order to retrieve overrides
parsed_args = args.parse_args()
# Get overriding args in dot string format
overrides = parsed_args.overrides # type: list
# Disable the creation of .hydra subdir
# https://hydra.cc/docs/tutorials/basic/running_your_app/working_directory
overrides.append("hydra.output_subdir=null")
# Hydra logging outputs only to stdout (no log file).
# https://hydra.cc/docs/configure_hydra/logging
overrides.append("hydra/job_logging=stdout")
# Set run.dir ONLY for ExpManager "compatibility" - to be removed.
overrides.append("hydra.run.dir=.")
# Check if user set the schema.
if schema is not None:
# Create config store.
cs = ConfigStore.instance()
# Get the correct ConfigStore "path name" to "inject" the schema.
if parsed_args.config_name is not None:
path, name = os.path.split(parsed_args.config_name)
# Make sure the path is not set - as this will disable validation scheme.
if path != '':
sys.stderr.write(
f"ERROR Cannot set config file path using `--config-name` when "
"using schema. Please set path using `--config-path` and file name using "
"`--config-name` separately.\n"
)
sys.exit(1)
else:
name = config_name
# Register the configuration as a node under the name in the group.
cs.store(name=name, node=schema) # group=group,
# Wrap a callable object with name `parse_args`
# This is to mimic the ArgParser.parse_args() API.
def parse_args(self, args=None, namespace=None):
return parsed_args
# Overwriting the default definition of parse_args
# function in argparse.Namespace.
parsed_args.parse_args = parse_args
            # no return value from run_hydra() as it may sometimes actually run the task_function
# multiple times (--multirun)
# argparse_wrapper = _argparse_wrapper(args)
argparse_wrapper = parsed_args
_run_hydra(
args=argparse_wrapper,
args_parser=args,
task_function=task_function,
config_path=config_path,
config_name=config_name,
)
return wrapper
return decorator
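# --- Hedged usage sketch (not part of the original module) ---
# How the decorator is typically applied; the config path/name and schema
# below are assumptions for illustration.
#
#   @hydra_runner(config_path="specs", config_name="experiment", schema=ExperimentConfig)
#   def main(cfg: DictConfig) -> None:
#       print(cfg)
#
#   if __name__ == "__main__":
#       main()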
| tao_dataset_suite-main | nvidia_tao_ds/core/hydra/hydra_runner.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Auto label module"""
| tao_dataset_suite-main | nvidia_tao_ds/auto_label/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MAL config module."""
| tao_dataset_suite-main | nvidia_tao_ds/auto_label/config/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Default config file."""
from typing import List, Optional
from dataclasses import dataclass, field
from omegaconf import MISSING
@dataclass
class InferConfig:
"""Inference configuration template."""
ann_path: str = MISSING
img_dir: str = MISSING
label_dump_path: str = MISSING
batch_size: int = 3
load_mask: bool = False
@dataclass
class EvalConfig:
"""Evaluation configuration template."""
batch_size: int = 3
use_mixed_model_test: bool = False
use_teacher_test: bool = False
comp_clustering: bool = False
use_flip_test: bool = False
@dataclass
class DataConfig:
"""Data configuration template."""
type: str = 'coco'
crop_size: int = 512
train_ann_path: str = ''
train_img_dir: str = ''
val_ann_path: str = ''
val_img_dir: str = ''
min_obj_size: float = 2048
max_obj_size: float = 1e10
num_workers_per_gpu: int = 2
load_mask: bool = True
@dataclass
class ModelConfig:
"""Model configuration template."""
arch: str = 'vit-mae-base/16'
frozen_stages: List[int] = field(default_factory=lambda: [-1])
mask_head_num_convs: int = 4
mask_head_hidden_channel: int = 256
mask_head_out_channel: int = 256
teacher_momentum: float = 0.996
not_adjust_scale: bool = False
mask_scale_ratio_pre: int = 1
mask_scale_ratio: float = 2.0
vit_dpr: float = 0
@dataclass
class TrainConfig:
"""Train configuration template."""
seed: int = 1
num_epochs: int = 10
save_every_k_epoch: int = 1
val_interval: int = 1
batch_size: int = 3
accum_grad_batches: int = 1
use_amp: bool = True
# optim
optim_type: str = 'adamw'
optim_momentum: float = 0.9
lr: float = 0.000001
min_lr: float = 0
min_lr_rate: float = 0.2
num_wave: float = 1
wd: float = 0.0005
optim_eps: float = 1e-8
optim_betas: List[float] = field(default_factory=lambda: [0.9, 0.9])
warmup_epochs: int = 1
margin_rate: List[float] = field(default_factory=lambda: [0, 1.2])
test_margin_rate: List[float] = field(default_factory=lambda: [0.6, 0.6])
mask_thres: List[float] = field(default_factory=lambda: [0.1])
# loss
loss_mil_weight: float = 4
loss_crf_weight: float = 0.5
# crf
crf_zeta: float = 0.1
crf_kernel_size: int = 3
crf_num_iter: int = 100
loss_crf_step: int = 4000
loss_mil_step: int = 1000
crf_size_ratio: int = 1
crf_value_high_thres: float = 0.9
crf_value_low_thres: float = 0.1
@dataclass
class ExperimentConfig:
"""Experiment configuration template."""
gpu_ids: List[int] = field(default_factory=lambda: [])
strategy: str = 'ddp_sharded'
num_nodes: int = 1
checkpoint: Optional[str] = None
dataset: DataConfig = DataConfig()
train: TrainConfig = TrainConfig()
model: ModelConfig = ModelConfig()
inference: InferConfig = InferConfig()
evaluate: EvalConfig = EvalConfig()
results_dir: str = MISSING
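# --- Hedged usage sketch (not part of the original module) ---
# Instantiating the structured config with OmegaConf; the override values
# below are illustrative only.
#
#   from omegaconf import OmegaConf
#   cfg = OmegaConf.structured(ExperimentConfig)
#   cfg.results_dir = "/results"                    # MISSING fields must be set
#   cfg.dataset.train_ann_path = "annotations.json"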
| tao_dataset_suite-main | nvidia_tao_ds/auto_label/config/default_config.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# This work is made available under the Nvidia Source Code License-NC.
# To view a copy of this license, visit
# https://github.com/NVlabs/MAL/blob/main/LICENSE
"""COCO dataset."""
from nvidia_tao_ds.auto_label.datasets.voc import InstSegVOC, BoxLabelVOC, InstSegVOCwithBoxInput
class BoxLabelCOCO(BoxLabelVOC):
"""Dataset to load COCO box labels."""
def get_category_mapping(self):
"""Category mapping."""
categories = self.coco.dataset['categories']
self.cat_mapping = {cat['id']: idx + 1 for idx, cat in enumerate(categories)}
class InstSegCOCO(InstSegVOC):
"""Dataset to load COCO instance segmentation labels."""
def get_category_mapping(self):
"""Category mapping."""
categories = self.coco.dataset['categories']
self.cat_mapping = {cat['id']: idx + 1 for idx, cat in enumerate(categories)}
class InstSegCOCOwithBoxInput(InstSegVOCwithBoxInput):
"""Dataset to load COCO labels with only box input."""
def get_category_mapping(self):
"""Category mapping."""
categories = self.coco.dataset['categories']
self.cat_mapping = {cat['id']: idx + 1 for idx, cat in enumerate(categories)}
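# --- Hedged worked example (not part of the original module) ---
# The mapping above compacts arbitrary COCO category ids into a contiguous
# 1-based range, e.g. categories with ids [1, 3, 7] -> {1: 1, 3: 2, 7: 3}.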
| tao_dataset_suite-main | nvidia_tao_ds/auto_label/datasets/coco.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MAL data module."""
| tao_dataset_suite-main | nvidia_tao_ds/auto_label/datasets/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# This work is made available under the Nvidia Source Code License-NC.
# To view a copy of this license, visit
# https://github.com/NVlabs/MAL/blob/main/LICENSE
"""Data augmentation."""
import collections
from copy import deepcopy
from PIL import ImageFilter, ImageOps, Image
import numpy as np
import random
import torch
from torch.utils.data._utils.collate import default_collate
from torchvision import transforms
from nvidia_tao_ds.auto_label.datasets.voc import DataWrapper
def custom_crop_image(img, box):
    """Crop the image to the target box without introducing padding.
    Implementation Details:
        If the target box goes beyond one of the borderlines,
        the function fills the out-of-bound region with content
        from the opposite side of the image (wrap-around) instead
        of padding it with synthetic pixels.
    Examples:
        For an image of HxW, cropping with the box
        [W-10, H-10, W+10, H+10]
        (top-left corner (W-10, H-10), bottom-right corner (W+10, H+10))
        fills the 10-pixel overhang past the right/bottom borders
        with pixels from the left/top edge of the image.
    Motivation:
        Since the CRF algorithm uses the original pixels
        for generating pseudo-labels, every pixel matters here.
        Synthetic padding pixels (mean color of ImageNet)
        do severe damage to the refined masks.
    """
    # box [x0, y0, x1, y1] [top left x, top left y, bottom right x, bottom right y]
ret_shape = list(img.shape)
ret_shape[:2] = box[3] - box[1], box[2] - box[0]
h, w = img.shape[:2]
ret_img = np.zeros(ret_shape)
# top left
if box[0] < 0 and box[1] < 0:
ret_img[:-box[1], :-box[0]] = img[box[1]:, box[0]:]
# middle top
if (box[0] < w and box[2] > 0) and box[1] < 0:
ret_img[:-box[1], max(-box[0], 0): min(w, box[2]) - box[0]] = img[box[1]:, max(0, box[0]):min(w, box[2])]
# top right
if box[2] > w and box[1] < 0:
ret_img[:-box[1], -(box[2] - w):] = img[box[1]:, :box[2] - w]
# middle left
if box[0] < 0 and (box[1] < h and box[3] > 0):
ret_img[max(0, -box[1]): min(h, box[3]) - box[1], :-box[0]] = img[max(0, box[1]):min(h, box[3]), box[0]:]
# middle right
if box[2] > w and (box[1] < h and box[3] > 0):
ret_img[max(0, -box[1]): min(h, box[3]) - box[1], -(box[2] - w):] = img[max(0, box[1]):min(h, box[3]), :(box[2] - w)]
# bottom left
if box[0] < 0 and box[3] > h:
ret_img[-(box[3] - h):, :-box[0]] = img[:box[3] - h, box[0]:]
# middle bottom
if (box[0] < w and box[2] > 0) and box[3] > h:
ret_img[-(box[3] - h):, max(-box[0], 0): min(w, box[2]) - box[0]] = img[:box[3] - h, max(0, box[0]):min(w, box[2])]
# bottom right
if box[2] > w and box[3] > h:
ret_img[-(box[3] - h):, -(box[2] - w):] = img[:(box[3] - h), :(box[2] - w)]
# middle
ret_img[max(0, -box[1]): min(h, box[3]) - box[1], max(0, -box[0]): min(w, box[2]) - box[0]] = \
img[max(box[1], 0): min(h, box[3]), max(box[0], 0): min(w, box[2])]
return ret_img
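# --- Hedged worked example (not part of the original module) ---
# With a 4x4 image and box [-1, -1, 2, 2] (top-left corner one pixel beyond
# both borders), the out-of-bound row/column is filled from the opposite side
# of the image rather than with synthetic padding:
#
#   img = np.arange(16).reshape(4, 4, 1)
#   crop = custom_crop_image(img, [-1, -1, 2, 2])   # shape (3, 3, 1)
#   # crop[0, 0] == img[3, 3] (wrap-around); crop[1:, 1:] == img[:2, :2]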
def custom_collate_fn(batch):
"""Puts each data field into a tensor with outer dimension batch size"""
elem = batch[0]
elem_type = type(elem)
if isinstance(elem, collections.abc.Mapping):
try:
return elem_type({key: custom_collate_fn([d[key] for d in batch]) for key in elem})
except TypeError:
# The mapping type may not support `__init__(iterable)`.
return {key: custom_collate_fn([d[key] for d in batch]) for key in elem}
if isinstance(elem, DataWrapper):
return batch
return default_collate(batch)
class RandomCropV2:
"""RandomCropV2."""
def __init__(self, max_size=512, margin_rate=[0.05, 0.15],
mean=(0.485, 0.456, 0.406), random=True,
crop_fields=['image', 'mask']):
"""Init."""
self._max_size = max_size
self._margin_rate = np.array(margin_rate)
self._mean = np.array(mean) * 255
self._random = random
self._crop_fields = crop_fields
def _expand_box(self, box, margins):
ctr = (box[2] + box[0]) / 2, (box[3] + box[1]) / 2
box = ctr[0] - (ctr[0] - box[0]) * (1 + margins[0]), \
ctr[1] - (ctr[1] - box[1]) * (1 + margins[1]), \
ctr[0] + (box[2] - ctr[0]) * (1 + margins[2]) + 1, \
ctr[1] + (box[3] - ctr[1]) * (1 + margins[3]) + 1
return box
def __call__(self, data):
"""Call."""
# obtain more info
img = np.array(data['image'])
box = np.array(data['bbox'])
h, w = img.shape[0], img.shape[1]
if self._random:
margins = np.random.rand(4) * (self._margin_rate[1] - self._margin_rate[0]) + self._margin_rate[0]
gates = np.random.rand(2)
gates = np.array([gates[0], gates[1], 1 - gates[0], 1 - gates[1]])
margins = margins * gates
extbox = self._expand_box(box, margins)
            extbox = np.array([np.floor(extbox[0]), np.floor(extbox[1]), np.ceil(extbox[2]), np.ceil(extbox[3])]).astype(int)
ext_h, ext_w = extbox[3] - extbox[1], extbox[2] - extbox[0]
else:
margins = np.ones(4) * self._margin_rate[0] * 0.5
extbox = self._expand_box(box, margins)
            extbox = np.array([np.floor(extbox[0]), np.floor(extbox[1]), np.ceil(extbox[2]), np.ceil(extbox[3])]).astype(int)
ext_h, ext_w = extbox[3] - extbox[1], extbox[2] - extbox[0]
# extended box size
data['ext_h'], data['ext_w'] = ext_h, ext_w
# crop image
if 'image' in self._crop_fields:
ret_img = custom_crop_image(img, extbox)
ret_img = Image.fromarray(ret_img.astype(np.uint8)).resize((self._max_size, self._max_size))
data['image'] = ret_img
# crop mask
if 'mask' in self._crop_fields and 'mask' in data.keys():
mask = np.array(data['mask'])
ret_mask = custom_crop_image(mask, extbox)
ret_mask = Image.fromarray(ret_mask.astype(np.uint8)).resize((self._max_size, self._max_size))
ret_mask = np.array(ret_mask)
data['mask'] = ret_mask
# crop box mask (during test)
if 'boxmask' in self._crop_fields:
boxmask = data['boxmask']
ret_boxmask = np.zeros((ext_h, ext_w))
ret_boxmask[max(0 - extbox[1], 0):ext_h + min(0, h - extbox[3]), max(0 - extbox[0], 0):ext_w + min(0, w - extbox[2])] = \
boxmask[max(extbox[1], 0):min(extbox[3], h), max(extbox[0], 0):min(extbox[2], w)]
ret_boxmask = np.array(Image.fromarray(ret_boxmask.astype(np.uint8)).resize((self._max_size, self._max_size)))
data['boxmask'] = ret_boxmask
data['ext_boxes'] = extbox
data['margins'] = margins
return data
class RandomCropV3(RandomCropV2):
"""RandomCropV3."""
def __call__(self, data):
"""Call."""
# obtain more info
img = np.array(data['image'])
box = np.array(data['bbox'])
h, w = img.shape[0], img.shape[1]
if self._random:
margins = np.random.rand(4) * (self._margin_rate[1] - self._margin_rate[0]) + self._margin_rate[0]
gates = np.random.rand(2)
gates = np.array([gates[0], gates[1], 1 - gates[0], 1 - gates[1]])
margins = margins * gates
extbox = self._expand_box(box, margins)
            extbox = np.array([np.floor(extbox[0]), np.floor(extbox[1]), np.ceil(extbox[2]), np.ceil(extbox[3])]).astype(int)
ext_h, ext_w = extbox[3] - extbox[1], extbox[2] - extbox[0]
else:
margins = np.ones(4) * self._margin_rate[0] * 0.5
extbox = self._expand_box(box, margins)
            extbox = np.array([np.floor(extbox[0]), np.floor(extbox[1]), np.ceil(extbox[2]), np.ceil(extbox[3])]).astype(int)
ext_h, ext_w = extbox[3] - extbox[1], extbox[2] - extbox[0]
# extended box size
data['ext_h'], data['ext_w'] = ext_h, ext_w
# crop image
if 'image' in self._crop_fields:
ret_img = custom_crop_image(img, extbox)
ret_img = Image.fromarray(ret_img.astype(np.uint8)).resize((self._max_size, self._max_size))
data['image'] = ret_img
# crop mask
if 'mask' in self._crop_fields:
mask = np.array(data['mask'])
ret_mask = custom_crop_image(mask, extbox)
ret_mask = Image.fromarray(ret_mask.astype(np.uint8)).resize((self._max_size, self._max_size))
ret_mask = np.array(ret_mask)
data['mask'] = ret_mask
# crop box mask (during test)
if 'boxmask' in self._crop_fields:
boxmask = data['boxmask']
ret_boxmask = np.zeros((ext_h, ext_w))
ret_boxmask[max(0 - extbox[1], 0):ext_h + min(0, h - extbox[3]), max(0 - extbox[0], 0):ext_w + min(0, w - extbox[2])] = \
boxmask[max(extbox[1], 0):min(extbox[3], h), max(extbox[0], 0):min(extbox[2], w)]
ret_boxmask = np.array(Image.fromarray(ret_boxmask.astype(np.uint8)).resize((self._max_size, self._max_size)))
data['boxmask'] = ret_boxmask
data['ext_boxes'] = extbox
data['margins'] = margins
return data
class RandomFlip:
"""Random Flip."""
def __init__(self, p=0.5):
"""Init."""
self.p = p
def __call__(self, x):
"""Call."""
if 'aug_images' in x.keys():
x['flip_records'] = []
for idx in range(len(x['aug_images'])):
x['flip_records'].append([])
for jdx in range(len(x['aug_images'][idx])):
if float(torch.rand(1)) > self.p:
x['aug_images'][idx][jdx] = ImageOps.mirror(x['aug_images'][idx][jdx])
x['flip_records'][idx].append(1)
else:
x['flip_records'][idx].append(0)
elif 'image' in x.keys():
if float(torch.rand(1)) > self.p:
x['flip_records'] = 1
x['image'] = ImageOps.mirror(x['image'])
x['mask'] = x['mask'][:, ::-1]
else:
x['flip_records'] = 0
else:
raise NotImplementedError
return x
class Normalize(transforms.Normalize):
"""Normalize image in dictionary."""
def forward(self, tensor):
"""Forward."""
if 'image' in tensor.keys():
tensor['image'] = super().forward(tensor['image'])
if 'timage' in tensor.keys():
tensor['timage'] = super().forward(tensor['timage'])
elif 'aug_images' in tensor.keys():
for idx in range(len(tensor['aug_images'])):
for jdx in range(len(tensor['aug_images'][idx])):
tensor['aug_images'][idx][jdx] = super().forward(tensor['aug_images'][idx][jdx])
else:
raise NotImplementedError
return tensor
class Denormalize:
"""Denormalize image."""
def __init__(self, mean, std, inplace=False):
"""Init."""
self._mean = mean
self._std = std
self._inplace = inplace
def __call__(self, img):
"""Call."""
img = (img * self._std + self._mean) * 255
return img
class ToTensor(transforms.ToTensor):
    """Dictionary data to Tensor."""
def __call__(self, data):
"""Call."""
if 'image' in data.keys():
if isinstance(data['image'], (list, tuple)):
img_list = []
for img in data['image']:
img_list.append(super().__call__(img))
data['image'] = torch.cat(img_list)
else:
data['image'] = super().__call__(data['image'])
if 'flip_records' in data.keys():
data['flip_records'] = torch.tensor([data['flip_records']])
elif 'aug_images' in data.keys():
for idx in range(len(data['aug_images'])):
for jdx in range(len(data['aug_images'][idx])):
data['aug_images'][idx][jdx] = super().__call__(data['aug_images'][idx][jdx])
data['aug_ranges'][idx][jdx] = torch.tensor(data['aug_ranges'][idx][jdx])
if 'flip_records' in data.keys():
data['flip_records'][idx] = torch.tensor(data['flip_records'][idx])
else:
raise NotImplementedError
if 'timage' in data.keys():
if isinstance(data['timage'], (list, tuple)):
img_list = []
for img in data['timage']:
img_list.append(super().__call__(img))
data['timage'] = torch.cat(img_list)
else:
data['timage'] = super().__call__(data['timage'])
if 'mask' in data.keys():
if isinstance(data['mask'], (list, tuple)):
mask_list = []
for mask in data['mask']:
mask_list.append(torch.tensor(mask, dtype=torch.float)[None, ...])
data['mask'] = torch.cat(mask_list)
else:
data['mask'] = torch.tensor(data['mask'], dtype=torch.float)[None, ...]
if 'boxmask' in data.keys():
if isinstance(data['boxmask'], (list, tuple)):
mask_list = []
for mask in data['boxmask']:
mask_list.append(torch.tensor(mask, dtype=torch.float)[None, ...])
data['boxmask'] = torch.cat(mask_list)
else:
data['boxmask'] = torch.tensor(data['boxmask'], dtype=torch.float)[None, ...]
if 'ann' in data.keys():
data['ann'] = torch.tensor(data['ann'])
return data
class ColorJitter(transforms.ColorJitter):
"""Color Jitter."""
def single_forward(self, img):
"""Single forward."""
if isinstance(img, list):
return [self.single_forward(_img) for _img in img]
return super().forward(img)
def forward(self, img):
"""Forward."""
if 'image' in img.keys():
img['image'] = self.single_forward(img['image'])
elif 'aug_images' in img.keys():
for idx in range(len(img['aug_images'])):
for jdx in range(len(img['aug_images'][idx])):
img['aug_images'][idx][jdx] = super().forward(img['aug_images'][idx][jdx])
else:
raise NotImplementedError
return img
class RandomGrayscale(transforms.RandomGrayscale):
"""Random Grayscale."""
def single_forward(self, img):
"""Single forward."""
if isinstance(img, list):
return [self.single_forward(_img) for _img in img]
return super().forward(img)
def forward(self, img):
"""Forward."""
if 'image' in img.keys():
img['image'] = self.single_forward(img['image'])
elif 'aug_images' in img.keys():
for idx in range(len(img['aug_images'])):
for jdx in range(len(img['aug_images'][idx])):
img['aug_images'][idx][jdx] = super().forward(img['aug_images'][idx][jdx])
else:
raise NotImplementedError
return img
class GaussianBlur(object):
"""Apply Gaussian Blur to the PIL image."""
def __init__(self, p=0.5, radius_min=0.1, radius_max=2.):
"""Init."""
self.prob = p
self.radius_min = radius_min
self.radius_max = radius_max
def single_forward(self, img):
"""Single forward."""
if isinstance(img, list):
return [self.single_forward(img_) for img_ in img]
do_it = random.random() <= self.prob
if not do_it:
return img
return img.filter(
ImageFilter.GaussianBlur(
radius=random.uniform(self.radius_min, self.radius_max)
)
)
def __call__(self, data):
"""Call."""
if 'image' in data.keys():
data['image'] = self.single_forward(data['image'])
elif 'aug_images' in data.keys():
for idx in range(len(data['aug_images'])):
for jdx in range(len(data['aug_images'][idx])):
data['aug_images'][idx][jdx] = self.single_forward(data['aug_images'][idx][jdx])
else:
raise NotImplementedError
return data
class DropAllExcept:
"""Drop all except keys to keep."""
def __init__(self, keep_keys):
"""Init."""
self.keep_keys = keep_keys
def __call__(self, data):
"""Call."""
data_keys = list(data.keys())
for key in data_keys:
if key not in self.keep_keys:
del data[key]
return data
class ChangeNames:
"""Change names."""
def __init__(self, kv_dic):
"""Init."""
self.kv_dic = kv_dic
def __call__(self, data):
"""Call."""
data_keys = list(data.keys())
for key, value in self.kv_dic.items():
if key in data_keys:
data[value] = data[key]
del data[key]
return data
class Solarization:
"""Apply Solarization to the PIL image."""
def __init__(self, p):
"""Init."""
self.p = p
def single_forward(self, img):
"""Single forward."""
if isinstance(img, list):
return [self.single_forward(img_) for img_ in img]
if random.random() < self.p:
return ImageOps.solarize(img)
return img
def __call__(self, data):
"""Call."""
if 'image' in data.keys():
data['image'] = self.single_forward(data['image'])
elif 'aug_images' in data.keys():
for idx in range(len(data['aug_images'])):
for jdx in range(len(data['aug_images'][idx])):
data['aug_images'][idx][jdx] = self.single_forward(data['aug_images'][idx][jdx])
else:
raise NotImplementedError
return data
class ImageSizeAlignment:
"""Image Size Alignment."""
def __init__(self, max_size, mean, random_offset=False):
"""Init."""
self._max_size = max_size
self._mean = (np.array(mean) * 255).astype(np.uint8)
self._random_offset = random_offset
def __call__(self, data):
"""Call."""
assert 'image' in data.keys()
padded_image = np.ones((self._max_size, self._max_size, 3), dtype=np.uint8) * self._mean
image = np.array(data['image'])
h, w = image.shape[0], image.shape[1]
if self._random_offset:
offy, offx = torch.randint(0, self._max_size - h + 1, (1,)), torch.randint(0, self._max_size - w + 1, (1,))
else:
offy, offx = 0, 0
padded_image[offy:offy + h, offx:offx + w] = image
data['image'] = Image.fromarray(padded_image)
if 'mask' in data.keys():
padded_mask = np.ones((self._max_size, self._max_size))
padded_mask[offy:offy + h, offx:offx + w] = np.array(data['mask'])
data['mask'] = Image.fromarray(padded_mask)
return data
class RandomScale:
"""Random Scale."""
def __init__(self, min_size, max_size, mean=(0.485, 0.456, 0.406)):
"""Init."""
self._min_size = min_size
self._max_size = max_size
self._mean = mean
def __call__(self, data):
"""Call."""
if 'image' in data.keys():
for i in range(len(data['image'])):
img = np.array(data['image'][i])
                h, w = img.shape[:2]
mask = data['mask'][i]
rw, rh = torch.randint(self._min_size, self._max_size + 1, (2, ))
offw, offh = torch.randint(0, w - rw + 1, (1, )), torch.randint(0, h - rh + 1, (1, ))
ret_img = (np.ones(img.shape) * np.array(self._mean)).astype(img.dtype)
ret_mask = np.zeros(mask.shape, img.dtype)
img = np.array(Image.fromarray(img).resize((rw, rh)))
mask = np.array(Image.fromarray(mask).resize((rw, rh)))
ret_img[offh: offh + rh, offw: offw + rw] = img
ret_mask[offh: offh + rh, offw: offw + rw] = mask
data['image'][i] = Image.fromarray(ret_img)
data['mask'][i] = ret_mask
else:
raise NotImplementedError
return data
class SplitAndMerge:
"""Split and Merge."""
def __init__(self, branch1, branch2):
"""Init."""
self.branch1 = branch1
self.branch2 = branch2
def __call__(self, data):
"""Call."""
data_clone = deepcopy(data)
data1 = self.branch1(data_clone)
data_clone = deepcopy(data)
data2 = self.branch2(data_clone)
data1.update(data2)
return data1
data_aug_pipelines = {
'test': lambda cfg: transforms.Compose([
RandomCropV2(cfg.dataset.crop_size,
margin_rate=cfg.train.test_margin_rate,
random=False,
crop_fields=['image', 'boxmask', 'mask']),
ToTensor(),
Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
]),
'train': lambda cfg: transforms.Compose([
RandomCropV3(cfg.dataset.crop_size, margin_rate=cfg.train.margin_rate),
RandomFlip(0.5),
SplitAndMerge(
transforms.Compose([
transforms.RandomApply(
[ColorJitter(brightness=0.4, contrast=0.4, saturation=0.2, hue=0.1)],
p=0.5
),
RandomGrayscale(p=0.2),
transforms.RandomApply([GaussianBlur(1.0)], p=0.5)
]),
transforms.Compose([
DropAllExcept(['image']),
ChangeNames({'image': 'timage'})
])
),
ToTensor(),
Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
]),
}
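# --- Hedged usage sketch (not part of the original module) ---
# Building the pipelines from a Hydra config; `cfg` is assumed to carry the
# fields referenced above (dataset.crop_size, train.margin_rate, ...).
#
#   train_tf = data_aug_pipelines['train'](cfg)
#   test_tf = data_aug_pipelines['test'](cfg)
#   sample = train_tf(sample)  # sample: dict with 'image', 'mask', 'bbox', ...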
| tao_dataset_suite-main | nvidia_tao_ds/auto_label/datasets/data_aug.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# This work is made available under the Nvidia Source Code License-NC.
# To view a copy of this license, visit
# https://github.com/NVlabs/MAL/blob/main/LICENSE
"""Custom LightningDataModule for MAL."""
import logging
import pytorch_lightning as pl
from torch.utils.data import DataLoader
from nvidia_tao_ds.auto_label.datasets.coco import BoxLabelCOCO, InstSegCOCO, InstSegCOCOwithBoxInput
from nvidia_tao_ds.auto_label.datasets.data_aug import data_aug_pipelines, custom_collate_fn
logger = logging.getLogger(__name__)
class WSISDataModule(pl.LightningDataModule):
"""Weakly supervised instance segmentation data module."""
def __init__(self,
num_workers,
load_train=False,
load_val=False,
cfg=None):
"""Initialize train/val dataset and dataloader."""
super().__init__()
self.cfg = cfg
self.num_workers = num_workers
self.train_transform = data_aug_pipelines['train'](cfg)
self.test_transform = data_aug_pipelines['test'](cfg)
assert self.cfg.dataset.type == 'coco', 'only COCO format is supported.'
self._train_data_loader = None
self._val_data_loader = None
self.box_inputs = None
if load_train:
logger.info("Loading train set...")
dataset = BoxLabelCOCO(
self.cfg.dataset.train_ann_path,
self.cfg.dataset.train_img_dir,
min_obj_size=self.cfg.dataset.min_obj_size,
max_obj_size=self.cfg.dataset.max_obj_size,
transform=self.train_transform, cfg=cfg)
data_loader = DataLoader(
dataset, batch_size=self.cfg.train.batch_size, shuffle=True,
num_workers=self.num_workers)
self._train_data_loader = data_loader
logger.info("Train set is loaded successfully.")
if load_val:
logger.info("Loading validation set...")
build_dataset = InstSegCOCOwithBoxInput if self.box_inputs else InstSegCOCO
dataset = build_dataset(
self.cfg.dataset.val_ann_path,
self.cfg.dataset.val_img_dir,
min_obj_size=0,
max_obj_size=1e9,
load_mask=self.cfg.dataset.load_mask,
transform=self.test_transform,
box_inputs=self.box_inputs
)
data_loader = DataLoader(
dataset, collate_fn=custom_collate_fn,
batch_size=self.cfg.train.batch_size, num_workers=self.num_workers)
self._val_data_loader = data_loader
logger.info("Validation set is loaded successfully.")
def train_dataloader(self):
"""Set train dataloader."""
return self._train_data_loader
def val_dataloader(self):
"""Set val dataloader."""
return self._val_data_loader
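# --- Hedged usage sketch (not part of the original module) ---
# Instantiating the data module for training; `cfg` is a Hydra config built
# from ExperimentConfig, and `trainer`/`model` are assumed to exist.
#
#   dm = WSISDataModule(num_workers=cfg.dataset.num_workers_per_gpu,
#                       load_train=True, load_val=True, cfg=cfg)
#   trainer.fit(model, dm)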
| tao_dataset_suite-main | nvidia_tao_ds/auto_label/datasets/pl_data_module.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# This work is made available under the Nvidia Source Code License-NC.
# To view a copy of this license, visit
# https://github.com/NVlabs/MAL/blob/main/LICENSE
"""VOC dataset."""
import os
import logging
import json
import numpy as np
from PIL import Image
from pycocotools.coco import COCO
import pycocotools.mask as maskUtils
from torch.utils.data import Dataset
logger = logging.getLogger(__name__)
class DataWrapper:
"""Simple data wrapper."""
def __init__(self, data):
"""Init."""
self.data = data
class BoxLabelVOC(Dataset):
"""Base class for loading COCO format labels."""
def __init__(self, ann_path, img_data_dir,
min_obj_size=0, max_obj_size=1e10,
transform=None, cfg=None,
**kwargs):
"""Initialize dataset.
Args:
ann_path (str): annotation file in json format
img_data_dir (str): raw image directory
min_obj_size (float): min object size
max_obj_size (float): max object size
transform (transform.Compose): data augmentation methods
cfg (Hydra config): Hydra configurations
"""
self.cfg = cfg
self.ann_path = ann_path
self.img_data_dir = img_data_dir
self.min_obj_size = min_obj_size
self.max_obj_size = max_obj_size
self.transform = transform
self.coco = COCO(ann_path)
self._filter_imgs()
self.get_category_mapping()
def get_category_mapping(self):
"""Map category index in json to 1 based index."""
self.cat_mapping = dict([i, i] for i in range(1, 21))
def _filter_imgs(self):
"""Filter out bboxes based on area and H/W range."""
anns = self.coco.dataset['annotations']
filtered_anns = []
for ann in anns:
# query image info
image_info = self.coco.loadImgs(ann['image_id'])[0]
# check if bbox is out of bound
is_correct_bbox = ann['bbox'][0] >= 0 and ann['bbox'][1] >= 0 and \
(ann['bbox'][0] + ann['bbox'][2]) <= image_info['width'] and \
(ann['bbox'][1] + ann['bbox'][3]) <= image_info['height']
area = ann['bbox'][2] * ann['bbox'][3]
# check if bbox area is within range
is_correct_area = self.max_obj_size > area > self.min_obj_size
# additionally, check bbox w/h > 2
if is_correct_bbox and is_correct_area and ann['bbox'][2] > 2 and ann['bbox'][3] > 2:
filtered_anns.append(ann)
        num_filtered = len(anns) - len(filtered_anns)
        self.coco.dataset['annotations'] = filtered_anns
if num_filtered > 0:
print("***********************************")
print(f"WARNING: {num_filtered} bboxes were filtered out.")
print("***********************************")
def __len__(self):
"""Total number of bboxes."""
return len(self.coco.getAnnIds())
def __getitem__(self, idx):
"""Per item."""
ann = self.coco.dataset['annotations'][idx]
img_info = self.coco.loadImgs(ann['image_id'])[0]
h, w, file_name = img_info['height'], img_info['width'], img_info['file_name']
img = self.get_image(file_name)
# box mask
mask = np.zeros((h, w))
bbox = ann['bbox']
x0, y0, x1, y1 = int(bbox[0]), int(bbox[1]), int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3])
mask[y0:y1 + 1, x0:x1 + 1] = 1
data = {
'image': img, 'mask': mask,
'height': h, 'width': w,
'category_id': ann['category_id'],
'bbox': np.array([bbox[0], bbox[1], bbox[0] + bbox[2], bbox[1] + bbox[3]], dtype=np.float32),
'compact_category_id': self.cat_mapping[int(ann['category_id'])],
'id': ann['id']
}
if self.transform is not None:
data = self.transform(data)
return data
def get_image(self, file_name):
"""Load image.
Args:
file_name (str): relative path to an image file.
Return:
image (PIL image): loaded image
"""
image = Image.open(os.path.join(self.img_data_dir, file_name)).convert('RGB')
return image
class InstSegVOC(BoxLabelVOC):
"""Class for loading COCO format labels with instance segmentation masks."""
def __init__(self, *args, load_mask=True, **kwargs):
"""Init."""
super().__init__(*args, **kwargs)
self.load_mask = load_mask
if load_mask:
for ann in self.coco.dataset['annotations']:
if not ann.get('segmentation', None):
raise ValueError(
"Please check your annotation file, "
"as not all annotations contain segmentation info. "
"Or set load_mask to False.")
self.get_category_mapping()
def __getitem__(self, idx):
"""Per item."""
ann = self.coco.dataset['annotations'][idx]
img_info = self.coco.loadImgs(ann['image_id'])[0]
h, w, file_name = img_info['height'], img_info['width'], img_info['file_name']
img = self.get_image(file_name)
# box mask
boxmask = np.zeros((h, w))
bbox = ann['bbox']
x0, y0, x1, y1 = int(bbox[0]), int(bbox[1]), int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3])
boxmask[y0:y1 + 1, x0:x1 + 1] = 1
data = {'image': img, 'boxmask': boxmask,
'height': h, 'width': w,
'category_id': ann['category_id'],
'bbox': np.array(
[bbox[0], bbox[1], bbox[0] + bbox[2], bbox[1] + bbox[3]], dtype=np.float32),
'compact_category_id': self.cat_mapping[int(ann['category_id'])],
'id': ann['id'],
'image_id': ann['image_id']}
if self.load_mask:
# mask = np.ascontiguousarray(
# maskUtils.decode(maskUtils.frPyObjects(ann['segmentation'], h, w)))
# polygons
if isinstance(ann['segmentation'], list):
rles = maskUtils.frPyObjects(ann['segmentation'], h, w)
rle = maskUtils.merge(rles)
elif 'counts' in ann['segmentation']:
# e.g. {'counts': [6, 1, 40, 4, 5, 4, 5, 4, 21], 'size': [9, 10]}
if isinstance(ann['segmentation']['counts'], list):
rle = maskUtils.frPyObjects(ann['segmentation'], h, w)
else:
rle = ann['segmentation']
else:
raise ValueError('Please check the segmentation format.')
mask = np.ascontiguousarray(maskUtils.decode(rle))
if len(mask.shape) > 2:
mask = mask.transpose((2, 0, 1)).sum(0) > 0
mask = mask.astype(np.uint8)
data['gtmask'] = DataWrapper(mask)
data['mask'] = mask
if self.transform is not None:
data = self.transform(data)
return data
class InstSegVOCwithBoxInput(InstSegVOC):
"""Class for loading bbox inputs with instance segmentation masks."""
def __init__(self,
ann_path,
img_data_dir,
min_obj_size=0,
max_obj_size=1e10,
transform=None,
load_mask=True,
box_inputs=None):
"""Init."""
self.load_mask = load_mask
self.ann_path = ann_path
self.img_data_dir = img_data_dir
self.min_obj_size = min_obj_size
self.max_obj_size = max_obj_size
self.transform = transform
self.coco = COCO(ann_path)
self._filter_imgs()
self.get_category_mapping()
with open(box_inputs, "r", encoding='utf-8') as f:
self.val_coco = json.load(f)
def __len__(self):
"""Number of samples."""
return len(self.val_coco)
def __getitem__(self, idx):
"""Per item."""
ann = self.val_coco[idx]
img_info = self.coco.loadImgs(ann['image_id'])[0]
h, w, file_name = img_info['height'], img_info['width'], img_info['file_name']
img = self.get_image(file_name)
# box mask
boxmask = np.zeros((h, w))
bbox = np.array(ann['bbox'])
x0, y0, x1, y1 = int(bbox[0]), int(bbox[1]), int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3])
boxmask[y0:y1 + 1, x0:x1 + 1] = 1
if 'id' not in ann.keys():
_id = hash(str(ann['image_id']) + ' ' + str(x0) + ' ' + str(x1) + ' ' + str(y0) + ' ' + str(y1))
else:
_id = ann['id']
data = {'image': img, 'boxmask': boxmask,
'height': h, 'width': w,
'category_id': ann['category_id'],
'bbox': np.array(
[bbox[0], bbox[1], bbox[0] + bbox[2], bbox[1] + bbox[3]], dtype=np.float32),
'compact_category_id': self.cat_mapping[int(ann['category_id'])],
'id': _id,
'image_id': ann['image_id'],
'score': ann['score']}
if self.load_mask:
mask = np.ascontiguousarray(maskUtils.decode(ann['segmentation']))
if len(mask.shape) > 2:
mask = mask.transpose((2, 0, 1)).sum(0) > 0
mask = mask.astype(np.uint8)
data['gtmask'] = DataWrapper(mask)
data['mask'] = mask
if self.transform is not None:
data = self.transform(data)
return data
| tao_dataset_suite-main | nvidia_tao_ds/auto_label/datasets/voc.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MAL utils module."""
| tao_dataset_suite-main | nvidia_tao_ds/auto_label/utils/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utils for configuration."""
def update_config(cfg):
"""Update Hydra config."""
# mask threshold
if len(cfg.train.mask_thres) == 1:
        # repeat the same threshold three times so that
        # objects at all scales share the same threshold
cfg.train.mask_thres = [cfg.train.mask_thres[0] for _ in range(3)]
assert len(cfg.train.mask_thres) == 3
# frozen_stages
if len(cfg.model.frozen_stages) == 1:
cfg.model.frozen_stages = [0, cfg.model.frozen_stages[0]]
assert len(cfg.model.frozen_stages) == 2
assert len(cfg.train.margin_rate) == 2
return cfg
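# --- Hedged worked example (not part of the original module) ---
# A single mask threshold is broadcast to the three object-scale buckets
# (small/medium/large):
#
#   cfg.train.mask_thres = [0.1]
#   cfg = update_config(cfg)   # -> cfg.train.mask_thres == [0.1, 0.1, 0.1]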
| tao_dataset_suite-main | nvidia_tao_ds/auto_label/utils/config_utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""LR scheduler utils."""
| tao_dataset_suite-main | nvidia_tao_ds/auto_label/utils/lr_schedulers/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# This work is made available under the Nvidia Source Code License-NC.
# To view a copy of this license, visit
# https://github.com/NVlabs/MAL/blob/main/LICENSE
"""Cosine learning rate scheduler."""
import math
def adjust_learning_rate(optimizer, epoch, cfg):
"""Decay the learning rate with half-cycle cosine after warmup."""
if epoch < cfg.train.warmup_epochs:
lr = cfg.train.lr * (epoch / cfg.train.warmup_epochs)
else:
lr = cfg.train.min_lr + (cfg.train.lr - cfg.train.min_lr) * 0.5 * \
(1. + math.cos(
math.pi * (epoch - cfg.train.warmup_epochs) /
(cfg.train.num_epochs - cfg.train.warmup_epochs) * cfg.train.num_wave))
for param_group in optimizer.param_groups:
if "lr_scale" in param_group:
param_group["lr"] = lr * param_group["lr_scale"]
else:
param_group["lr"] = lr
return lr
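# --- Hedged usage sketch (not part of the original module) ---
# Typically called with a fractional epoch inside the training loop;
# `optimizer`, `loader`, `epoch`, and `cfg` are assumed to exist.
#
#   for step, batch in enumerate(loader):
#       frac_epoch = epoch + step / len(loader)
#       lr = adjust_learning_rate(optimizer, frac_epoch, cfg)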
| tao_dataset_suite-main | nvidia_tao_ds/auto_label/utils/lr_schedulers/cosine_lr.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Optimizer utils."""
| tao_dataset_suite-main | nvidia_tao_ds/auto_label/utils/optimizers/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# This work is made available under the Nvidia Source Code License-NC.
# To view a copy of this license, visit
# https://github.com/NVlabs/MAL/blob/main/LICENSE
"""AdamW optimizer with step."""
from torch.optim import AdamW
class AdamWwStep(AdamW):
"""AdamW optimizer with step."""
def __init__(self, *args, **kwargs):
"""Init."""
super().__init__(*args, **kwargs)
for param_group in self.param_groups:
param_group['step'] = 0
param_group['epoch'] = 0
def step(self, closure=None):
"""Step."""
super().step(closure)
for param_group in self.param_groups:
param_group['step'] = param_group['step'] + 1
def next_epoch(self):
"""Next epoch."""
for param_group in self.param_groups:
param_group['epoch'] += 1
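# --- Hedged usage sketch (not part of the original module) ---
# The per-group 'step'/'epoch' counters expose schedule state to LR helpers;
# the hyperparameter values below are illustrative.
#
#   optim = AdamWwStep(model.parameters(), lr=1e-6, weight_decay=5e-4)
#   optim.step()        # each param_group['step'] -> 1
#   optim.next_epoch()  # each param_group['epoch'] -> 1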
| tao_dataset_suite-main | nvidia_tao_ds/auto_label/utils/optimizers/adamw.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MAL model module."""
| tao_dataset_suite-main | nvidia_tao_ds/auto_label/models/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# This work is made available under the Nvidia Source Code License-NC.
# To view a copy of this license, visit
# https://github.com/NVlabs/MAL/blob/main/LICENSE
"""MAL model."""
import itertools
import json
import os
import cv2
import numpy as np
from pycocotools.coco import COCO
from pycocotools.mask import encode
from pycocotools.cocoeval import COCOeval
from mmcv.cnn import ConvModule
import torchmetrics
import pytorch_lightning as pl
from fairscale.nn import auto_wrap
import torch
from torch import nn
import torch.nn.functional as F
import torch.distributed as dist
from nvidia_tao_ds.auto_label.datasets.data_aug import Denormalize
from nvidia_tao_ds.auto_label.models import vit_builder
from nvidia_tao_ds.auto_label.utils.optimizers.adamw import AdamWwStep
from nvidia_tao_ds.auto_label.utils.lr_schedulers.cosine_lr import adjust_learning_rate
class MeanField(nn.Module):
"""Mean Field approximation to refine mask."""
def __init__(self, cfg=None):
"""Init."""
super().__init__()
self.kernel_size = cfg.train.crf_kernel_size
assert self.kernel_size % 2 == 1
self.zeta = cfg.train.crf_zeta
self.num_iter = cfg.train.crf_num_iter
self.high_thres = cfg.train.crf_value_high_thres
self.low_thres = cfg.train.crf_value_low_thres
self.cfg = cfg
def trunc(self, seg):
"""Clamp mask values by crf_value_(low/high)_thres."""
seg = torch.clamp(seg, min=self.low_thres, max=self.high_thres)
return seg
@torch.no_grad()
def forward(self, feature_map, seg, targets=None):
"""Forward pass with num_iter."""
feature_map = feature_map.float()
kernel_size = self.kernel_size
B, H, W = seg.shape
C = feature_map.shape[1]
self.unfold = torch.nn.Unfold(kernel_size, stride=1, padding=self.kernel_size // 2)
# feature_map [B, C, H, W]
feature_map = feature_map + 10
# unfold_feature_map [B, C, kernel_size ** 2, H*W]
unfold_feature_map = self.unfold(feature_map).reshape(B, C, kernel_size**2, H * W)
# B, kernel_size**2, H*W
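        # Gaussian affinity between each pixel and its kernel_size**2 neighbors:
        # k_ij = exp(-||f_i - f_j||^2 / (2 * zeta^2)); the +10 shift above
        # plausibly keeps zero-padded borders from aliasing real feature values.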
kernel = torch.exp(-(((unfold_feature_map - feature_map.reshape(B, C, 1, H * W)) ** 2) / (2 * self.zeta ** 2)).sum(1))
if targets is not None:
t = targets.reshape(B, H, W)
seg = seg * t
else:
t = None
seg = self.trunc(seg)
for it in range(self.num_iter):
seg = self.single_forward(seg, kernel, t, B, H, W, it)
return (seg > 0.5).float()
def single_forward(self, x, kernel, targets, B, H, W, it):
"""Forward pass."""
x = x[:, None]
# x [B 2 H W]
B, _, H, W = x.shape
x = torch.cat([1 - x, x], 1)
kernel_size = self.kernel_size
# unfold_x [B, 2, kernel_size**2, H * W]
# kernel [B, kennel_size**2, H * W]
unfold_x = self.unfold(-torch.log(x)).reshape(B, 2, kernel_size ** 2, H * W)
# aggre, x [B, 2, H * W]
aggre = (unfold_x * kernel[:, None]).sum(2)
aggre = torch.exp(-aggre)
if targets is not None:
aggre[:, 1:] = aggre[:, 1:] * targets.reshape(B, 1, H * W)
out = aggre
out = out / (1e-6 + out.sum(1, keepdim=True))
out = self.trunc(out)
return out[:, 1].reshape(B, H, W)
class MaskHead(nn.Module):
"""Mask Head."""
def __init__(self, in_channels=2048, cfg=None):
"""Init."""
super().__init__()
self.num_convs = cfg.model.mask_head_num_convs
self.in_channels = in_channels
self.mask_head_hidden_channel = cfg.model.mask_head_hidden_channel
self.mask_head_out_channel = cfg.model.mask_head_out_channel
self.mask_scale_ratio = cfg.model.mask_scale_ratio
self.convs = nn.ModuleList()
for i in range(self.num_convs):
in_channels = self.in_channels if i == 0 else self.mask_head_hidden_channel
out_channels = self.mask_head_hidden_channel if i < self.num_convs - 1 else self.mask_head_out_channel
self.convs.append(ConvModule(in_channels, out_channels, 3, padding=1))
def forward(self, x):
"""Forward pass."""
for idx, conv in enumerate(self.convs):
if idx == 3:
h, w = x.shape[2:]
th, tw = int(h * self.mask_scale_ratio), int(w * self.mask_scale_ratio)
x = F.interpolate(x, (th, tw), mode='bilinear', align_corners=False)
x = conv(x)
return x
class RoIHead(nn.Module):
"""RoI Head."""
def __init__(self, in_channels=2048, cfg=None):
"""Init."""
super().__init__()
self.mlp1 = nn.Linear(in_channels, cfg.model.mask_head_out_channel)
self.relu = nn.ReLU()
self.mlp2 = nn.Linear(cfg.model.mask_head_out_channel, cfg.model.mask_head_out_channel)
def forward(self, x, boxmask=None):
"""Forward pass."""
x = x.mean((2, 3))
x = self.mlp2(self.relu(self.mlp1(x)))
return x
class MALStudentNetwork(pl.LightningModule):
"""MAL student model."""
def __init__(self, in_channels=2048, cfg=None):
"""Init."""
super().__init__()
self.cfg = cfg
self.backbone = vit_builder.build_model(cfg=cfg)
# Load pretrained weights
if cfg.checkpoint:
print('Loading pretrained weights.....')
state_dict = torch.load(cfg.checkpoint)
if 'state_dict' in state_dict.keys():
state_dict = state_dict['state_dict']
if 'model' in state_dict.keys():
state_dict = state_dict['model']
self.backbone.load_state_dict(state_dict, strict=False)
# K head
self.roi_head = RoIHead(in_channels, cfg=cfg)
# V head
self.mask_head = MaskHead(in_channels, cfg=cfg)
# make student sharded on multiple gpus
self.configure_sharded_model()
def configure_sharded_model(self):
"""Sharded backbone."""
self.backbone = auto_wrap(self.backbone)
def forward(self, x, boxmask, bboxes):
"""Forward pass."""
if self.cfg.train.use_amp:
x = x.half()
feat = self.backbone.base_forward(x)
spatial_feat_ori = self.backbone.get_spatial_feat(feat)
h, w = spatial_feat_ori.shape[2:]
mask_scale_ratio_pre = int(self.cfg.model.mask_scale_ratio_pre)
if not self.cfg.model.not_adjust_scale:
spatial_feat_list = []
masking_list = []
areas = (bboxes[:, 3] - bboxes[:, 1]) * (bboxes[:, 2] - bboxes[:, 0])
for idx, (scale_low, scale_high) in enumerate([(0, 32**2), (32**2, 96**2), (96**2, 1e5**2)]):
masking = (areas < scale_high) * (areas > scale_low)
if masking.sum() > 0:
spatial_feat = F.interpolate(
spatial_feat_ori[masking],
size=(int(h * 2**(idx - 1)), int(w * 2**(idx - 1))),
mode='bilinear', align_corners=False)
boxmask = None
else:
spatial_feat = None
boxmask = None
spatial_feat_list.append(spatial_feat)
masking_list.append(masking)
roi_feat = self.roi_head(spatial_feat_ori)
n, maxh, maxw = roi_feat.shape[0], h * 4, w * 4
seg_all = torch.zeros(n, 1, maxh, maxw).to(roi_feat)
for idx, (spatial_feat, masking) in enumerate(zip(spatial_feat_list, masking_list)):
if masking.sum() > 0:
mn = masking.sum()
mh, mw = int(h * mask_scale_ratio_pre * 2**(idx - 1)), int(w * mask_scale_ratio_pre * 2**(idx - 1))
seg_feat = self.mask_head(spatial_feat)
c = seg_feat.shape[1]
masked_roi_feat = roi_feat[masking]
seg = (masked_roi_feat[:, None, :] @ seg_feat.reshape(mn, c, mh * mw * 4)).reshape(mn, 1, mh * 2, mw * 2)
seg = F.interpolate(seg, size=(maxh, maxw), mode='bilinear', align_corners=False)
seg_all[masking] = seg
ret_vals = {'feat': feat, 'seg': seg_all, 'spatial_feat': spatial_feat_ori, 'masking_list': masking_list}
else:
spatial_feat = F.interpolate(
spatial_feat_ori, size=(int(h * mask_scale_ratio_pre), int(w * mask_scale_ratio_pre)),
mode='bilinear', align_corners=False)
boxmask = F.interpolate(boxmask, size=spatial_feat.shape[2:], mode='bilinear', align_corners=False)
seg_feat = self.mask_head(spatial_feat)
roi_feat = self.roi_head(spatial_feat_ori, boxmask)
n, c, h, w = seg_feat.shape
seg = (roi_feat[:, None, :] @ seg_feat.reshape(n, c, h * w)).reshape(n, 1, h, w)
seg = F.interpolate(seg, (h * 4, w * 4), mode='bilinear', align_corners=False)
ret_vals = {'feat': feat, 'seg': seg, 'spatial_feat': spatial_feat_ori}
return ret_vals
class MALTeacherNetwork(MALStudentNetwork):
"""MAL teacher model."""
def __init__(self, in_channels, cfg=None):
"""Init."""
super().__init__(in_channels, cfg=cfg)
self.eval()
self.momentum = cfg.model.teacher_momentum
@torch.no_grad()
def update(self, student):
"""Update EMA teacher model."""
for param_student, param_teacher in zip(student.parameters(), self.parameters()):
param_teacher.data = param_teacher.data * self.momentum + param_student.data * (1 - self.momentum)
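# Illustrative sketch (not part of the original MAL code): the EMA rule used by
# MALTeacherNetwork.update, shown on plain tensors. The momentum value below is
# an assumption for demonstration; the real value comes from cfg.model.teacher_momentum.
def _ema_update_sketch():
    """Apply one exponential-moving-average step to a toy teacher parameter."""
    momentum = 0.996  # assumed typical EMA momentum
    teacher = torch.zeros(3)
    student = torch.ones(3)
    # Same update as MALTeacherNetwork.update, applied per parameter tensor.
    teacher = teacher * momentum + student * (1 - momentum)
    return teacher  # tensor([0.0040, 0.0040, 0.0040])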
class MIoUMetrics(torchmetrics.Metric):
"""MIoU Metrics."""
def __init__(self, dist_sync_on_step=True, num_classes=20):
"""Init."""
super().__init__(dist_sync_on_step=dist_sync_on_step)
self.add_state("cnt", default=torch.zeros(num_classes), dist_reduce_fx="sum")
self.add_state("total", default=torch.zeros(num_classes), dist_reduce_fx="sum")
def update(self, label, iou):
"""Update."""
self.cnt[label - 1] += 1
self.total[label - 1] += iou
def update_with_ious(self, labels, ious):
"""Update with IOUs."""
for iou, label in zip(ious, labels):
self.cnt[label - 1] += 1
self.total[label - 1] += float(iou)
return ious
def cal_intersection(self, seg, gt):
"""Calcuate mask intersection."""
B = seg.shape[0]
inter_cnt = (seg * gt).reshape(B, -1).sum(1)
return inter_cnt
def cal_union(self, seg, gt, inter_cnt=None):
"""Calculate mask union."""
B = seg.shape[0]
if inter_cnt is None:
inter_cnt = self.cal_intersection(seg, gt)
union_cnt = seg.reshape(B, -1).sum(1) + gt.reshape(B, -1).sum(1) - inter_cnt
return union_cnt
def cal_iou(self, seg, gt):
"""Calculate mask IOU."""
inter_cnt = self.cal_intersection(seg, gt)
union_cnt = self.cal_union(seg, gt, inter_cnt)
return 1.0 * inter_cnt / (union_cnt + 1e-6)
def compute(self):
"""Compute mIOU."""
mIoUs = self.total / (1e-6 + self.cnt)
mIoU = mIoUs.sum() / (self.cnt > 0).sum()
return mIoU
def compute_with_ids(self, ids=None):
"""Compute mIOU with IDs."""
if ids is not None:
total = self.total[torch.tensor(np.array(ids)).long()]
cnt = self.cnt[torch.tensor(np.array(ids)).long()]
else:
total = self.total
cnt = self.cnt
mIoUs = total / (1e-6 + cnt)
mIoU = mIoUs.sum() / (cnt > 0).sum()
return mIoU
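# Illustrative sketch (added for clarity, unused by MAL): feeding per-instance
# IoUs into MIoUMetrics and reading back the class-mean IoU in a single process.
def _miou_metrics_sketch():
    """Accumulate IoUs for two classes and compute the mean of per-class means."""
    metric = MIoUMetrics(dist_sync_on_step=False, num_classes=3)
    metric.update(1, 0.9)  # class 1, IoU 0.9
    metric.update(1, 0.7)  # class 1, IoU 0.7
    metric.update(2, 0.5)  # class 2, IoU 0.5
    return metric.compute()  # ((0.9 + 0.7) / 2 + 0.5) / 2 = 0.65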
class MAL(pl.LightningModule):
"""Base MAL model."""
def __init__(self, cfg=None, num_iter_per_epoch=None, categories=None):
"""Init."""
super().__init__()
# loss term hyper parameters
self.num_convs = cfg.model.mask_head_num_convs
self.loss_mil_weight = cfg.train.loss_mil_weight
self.loss_crf_weight = cfg.train.loss_crf_weight
self.loss_crf_step = cfg.train.loss_crf_step
self.cfg = cfg
self.mask_thres = cfg.train.mask_thres
self.num_classes = len(categories) + 1
self.mIoUMetric = MIoUMetrics(num_classes=self.num_classes)
self.areaMIoUMetrics = nn.ModuleList([MIoUMetrics(num_classes=self.num_classes) for _ in range(3)])
if self.cfg.evaluate.comp_clustering:
self.clusteringScoreMetrics = torchmetrics.MeanMetric()
backbone_type = cfg.model.arch # TODO(@yuw): arch options?
self.categories = categories
if 'tiny' in backbone_type.lower():
in_channel = 192
if 'small' in backbone_type.lower():
in_channel = 384
elif 'base' in backbone_type.lower():
in_channel = 768
elif 'large' in backbone_type.lower():
in_channel = 1024
elif 'huge' in backbone_type.lower():
in_channel = 1280
elif 'fan' in backbone_type.lower():
in_channel = 448
self.mean_field = MeanField(cfg=self.cfg)
self.student = MALStudentNetwork(in_channel, cfg=cfg)
self.teacher = MALTeacherNetwork(in_channel, cfg=cfg)
self.denormalize = Denormalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
self._optim_type = cfg.train.optim_type
self._lr = cfg.train.lr
self._wd = cfg.train.wd
self._momentum = cfg.train.optim_momentum
if num_iter_per_epoch is not None:
self._num_iter_per_epoch = num_iter_per_epoch // len(self.cfg.gpu_ids)
self.cfg = cfg
self.vis_cnt = 0
self.local_step = 0
# Enable manual optimization
self.automatic_optimization = False
def configure_optimizers(self):
"""Configure optimizers."""
optimizer = AdamWwStep(
self.parameters(), eps=self.cfg.train.optim_eps,
betas=self.cfg.train.optim_betas,
lr=self._lr, weight_decay=self._wd)
return optimizer
def crf_loss(self, img, seg, tseg, boxmask):
"""CRF loss."""
refined_mask = self.mean_field(img, tseg, targets=boxmask)
return self.dice_loss(seg, refined_mask).mean(), refined_mask
def dice_loss(self, pred, target):
"""DICE loss.
replace cross-entropy like loss in the original paper:
(https://papers.nips.cc/paper/2019/file/e6e713296627dff6475085cc6a224464-Paper.pdf).
Args:
pred (torch.Tensor): [B, embed_dim]
target (torch.Tensor): [B, embed_dim]
Return:
loss (torch.Tensor): [B]
"""
pred = pred.contiguous().view(pred.size()[0], -1).float()
target = target.contiguous().view(target.size()[0], -1).float()
a = torch.sum(pred * target, 1)
b = torch.sum(pred * pred, 1) + 0.001
c = torch.sum(target * target, 1) + 0.001
d = (2 * a) / (b + c)
return 1 - d
def mil_loss(self, pred, target):
"""Multi-instance loss.
Args:
pred (torch.Tensor): size of [batch_size, 128, 128], where 128 is input_size // 4
target (torch.Tensor): size of [batch_size, 128, 128], where 128 is input_size // 4
Return:
loss (torch.Tensor): size of [batch_size]
"""
row_labels = target.max(1)[0]
column_labels = target.max(2)[0]
row_input = pred.max(1)[0]
column_input = pred.max(2)[0]
loss = self.dice_loss(column_input, column_labels)
loss += self.dice_loss(row_input, row_labels)
return loss
def training_step(self, x):
"""training step."""
optimizer = self.optimizers()
loss = {}
image = x['image']
local_step = self.local_step
self.local_step += 1
if 'timage' in x.keys():
timage = x['timage']
else:
timage = image
student_output = self.student(image, x['mask'], x['bbox'])
teacher_output = self.teacher(timage, x['mask'], x['bbox'])
B, oh, ow = student_output['seg'].shape[0], student_output['seg'].shape[2], student_output['seg'].shape[3]
mask = F.interpolate(x['mask'], size=(oh, ow), mode='bilinear', align_corners=False).reshape(-1, oh, ow)
if 'image' in x:
student_seg_sigmoid = torch.sigmoid(student_output['seg'])[:, 0].float()
teacher_seg_sigmoid = torch.sigmoid(teacher_output['seg'])[:, 0].float()
# Multiple instance learning Loss
loss_mil = self.mil_loss(student_seg_sigmoid, mask)
# Warmup loss weight for multiple instance learning loss
if self.current_epoch > 0:
step_mil_loss_weight = 1
else:
step_mil_loss_weight = min(1, 1. * local_step / self.cfg.train.loss_mil_step)
loss_mil *= step_mil_loss_weight
loss_mil = loss_mil.sum() / (loss_mil.numel() + 1e-4) * self.loss_mil_weight
loss.update({'mil': loss_mil})
# Tensorboard logs
self.log("train/loss_mil", loss_mil, on_step=True, on_epoch=False, prog_bar=True, sync_dist=True)
# Conditional Random Fields Loss
th, tw = oh * self.cfg.train.crf_size_ratio, ow * self.cfg.train.crf_size_ratio
# resize image
scaled_img = F.interpolate(image, size=(th, tw), mode='bilinear', align_corners=False).reshape(B, -1, th, tw)
# resize student segmentation
scaled_stu_seg = F.interpolate(student_seg_sigmoid[None, ...], size=(th, tw), mode='bilinear', align_corners=False).reshape(B, th, tw)
# resize teacher segmentation
scaled_tea_seg = F.interpolate(teacher_seg_sigmoid[None, ...], size=(th, tw), mode='bilinear', align_corners=False).reshape(B, th, tw)
# resize mask
scaled_mask = F.interpolate(x['mask'], size=(th, tw), mode='bilinear', align_corners=False).reshape(B, th, tw)
# loss_crf, pseudo_label
loss_crf, _ = self.crf_loss(scaled_img, scaled_stu_seg, (scaled_stu_seg + scaled_tea_seg) / 2, scaled_mask)
if self.current_epoch > 0:
step_crf_loss_weight = 1
else:
step_crf_loss_weight = min(1. * local_step / self.loss_crf_step, 1.)
loss_crf *= self.loss_crf_weight * step_crf_loss_weight
loss.update({'crf': loss_crf})
self.log("train/loss_crf", loss_crf, on_step=True, on_epoch=False, prog_bar=True, sync_dist=True)
else:
raise NotImplementedError
total_loss = sum(loss.values())
self.log("train/loss", total_loss, on_step=True, on_epoch=False, prog_bar=True, sync_dist=True)
self.log("lr", optimizer.param_groups[0]['lr'], on_step=True, on_epoch=False, prog_bar=True, sync_dist=True)
self.log("train/bs", image.shape[0], on_step=False, on_epoch=True, prog_bar=False, sync_dist=True)
optimizer.zero_grad()
self.manual_backward(total_loss)
optimizer.step()
if self._optim_type == 'adamw':
adjust_learning_rate(optimizer, 1. * local_step / self._num_iter_per_epoch + self.current_epoch, self.cfg)
self.teacher.update(self.student)
def training_epoch_end(self, outputs):
"""On training epoch end."""
self.local_step = 0
def validation_step(self, batch, batch_idx, return_mask=False):
"""Validation step."""
if self.cfg.dataset.load_mask:
imgs, gt_masks, masks, labels, ids, boxmasks, boxes, ext_boxes, ext_hs, ext_ws =\
batch['image'], batch['gtmask'], batch['mask'], batch['compact_category_id'], \
batch['id'], batch['boxmask'], batch['bbox'], batch['ext_boxes'], batch['ext_h'], batch['ext_w']
else:
imgs, gt_masks, masks, labels, ids, boxmasks, boxes, ext_boxes, ext_hs, ext_ws =\
batch['image'], batch['boxmask'], batch['boxmask'], batch['compact_category_id'], \
batch['id'], batch['boxmask'], batch['bbox'], batch['ext_boxes'], batch['ext_h'], batch['ext_w']
_, _, H, W = imgs.shape # B, C, H, W
denormalized_images = self.denormalize(imgs.cpu().numpy().transpose(0, 2, 3, 1)).astype(np.uint8)
labels = labels.cpu().numpy()
if self.cfg.evaluate.use_mixed_model_test:
s_outputs = self.student(imgs, batch['boxmask'], batch['bbox'])
t_outputs = self.teacher(imgs, batch['boxmask'], batch['bbox'])
segs = (s_outputs['seg'] + t_outputs['seg']) / 2
else:
if self.cfg.evaluate.use_teacher_test:
outputs = self.teacher(imgs, batch['boxmask'], batch['bbox'])
else:
outputs = self.student(imgs, batch['boxmask'], batch['bbox'])
segs = outputs['seg']
if self.cfg.evaluate.use_flip_test:
if self.cfg.evaluate.use_mixed_model_test:
s_outputs = self.student(torch.flip(imgs, [3]), batch['boxmask'], batch['bbox'])
t_outputs = self.teacher(torch.flip(imgs, [3]), batch['boxmask'], batch['bbox'])
flipped_segs = torch.flip((s_outputs['seg'] + t_outputs['seg']) / 2, [3])
segs = (flipped_segs + segs) / 2
else:
if self.cfg.evaluate.use_teacher_test:
flip_outputs = self.teacher(torch.flip(imgs, [3]), batch['boxmask'], batch['bbox'])
else:
flip_outputs = self.student(torch.flip(imgs, [3]), batch['boxmask'], batch['bbox'])
segs = (segs + torch.flip(flip_outputs['seg'], [3])) / 2
segs = F.interpolate(segs, (H, W), align_corners=False, mode='bilinear')
segs = segs.sigmoid()
thres_list = [0, 32**2, 96 ** 2, 1e5**2]
segs = segs * boxmasks
areas = (boxes[:, 3] - boxes[:, 1]) * (boxes[:, 2] - boxes[:, 0])
binseg = segs.clone()
for idx, (lth, hth) in enumerate(zip(thres_list[:-1], thres_list[1:])):
obj_ids = ((lth < areas) * (areas <= hth)).cpu().numpy()
if obj_ids.sum() > 0:
binseg[obj_ids] = (binseg[obj_ids] > self.mask_thres[idx]).float()
tb_logger = self.logger.experiment
epoch_count = self.current_epoch
batch_ious = []
img_pred_masks = []
for idx, (img_h, img_w, ext_h, ext_w, ext_box, seg, gt_mask, area, label) in enumerate(zip(batch['height'], batch['width'], ext_hs, ext_ws, ext_boxes, segs, gt_masks, areas, labels)):
roi_pred_mask = F.interpolate(seg[None, ...], (ext_h, ext_w), mode='bilinear', align_corners=False)[0][0]
h, w = int(img_h), int(img_w)
img_pred_mask_shape = h, w
img_pred_mask = np.zeros(img_pred_mask_shape).astype(np.float32)  # np.float was removed in NumPy 1.24
img_pred_mask[max(ext_box[1], 0):min(ext_box[3], h), max(ext_box[0], 0):min(ext_box[2], w)] = \
roi_pred_mask[max(0 - ext_box[1], 0):ext_h + min(0, h - ext_box[3]), max(0 - ext_box[0], 0):ext_w + min(0, w - ext_box[2])].cpu().numpy()
for idx, (lth, hth) in enumerate(zip(thres_list[:-1], thres_list[1:])):
if lth < area <= hth:
img_pred_mask = (img_pred_mask > self.mask_thres[idx]).astype(np.float32)
img_pred_masks.append(img_pred_mask[None, ...])
if self.cfg.dataset.load_mask:
iou = self.mIoUMetric.cal_iou(img_pred_mask[np.newaxis, ...], gt_mask.data[np.newaxis, ...])
# overall mask IoU
self.mIoUMetric.update(int(label), iou[0])
batch_ious.extend(iou)
# Small/Medium/Large IoU
for jdx, (lth, hth) in enumerate(zip(thres_list[:-1], thres_list[1:])):
obj_ids = ((lth < area) * (area <= hth)).cpu().numpy()
if obj_ids.sum() > 0:
self.areaMIoUMetrics[jdx].update_with_ious(labels[obj_ids], iou[obj_ids])
# Tensorboard vis
if self.cfg.dataset.load_mask:
for idx, batch_iou, img, seg, label, gt_mask, mask, _, area in zip(ids, batch_ious, denormalized_images, segs, labels, gt_masks, masks, boxes, areas):
if area > 64**2 and batch_iou < 0.78 and self.vis_cnt <= 100:
seg = seg.cpu().numpy().astype(np.float32)[0]
mask = mask.data
seg = cv2.resize(seg, (W, H), interpolation=cv2.INTER_LINEAR)
seg = (seg * 255).astype(np.uint8)
seg = cv2.applyColorMap(seg, cv2.COLORMAP_JET)
tseg = cv2.applyColorMap((mask[0] > 0.5).cpu().numpy().astype(np.uint8) * 255, cv2.COLORMAP_JET)
vis = cv2.addWeighted(img, 0.5, seg, 0.5, 0)
tvis = cv2.addWeighted(img, 0.5, tseg, 0.5, 0)
tb_logger.add_image(f'val/vis_{int(idx)}', vis, epoch_count, dataformats="HWC")
tb_logger.add_image(f'valgt/vis_{int(idx)}', tvis, epoch_count, dataformats="HWC")
self.vis_cnt += 1
ret_dict = {}
if return_mask:
ret_dict['img_pred_masks'] = img_pred_masks
if self.cfg.dataset.load_mask:
ret_dict['ious'] = batch_ious
return ret_dict
def get_parameter_groups(self, print_fn=print):
"""Get parameter groups."""
groups = ([], [], [], [])
for name, value in self.named_parameters():
# pretrained weights
if 'backbone' in name:
if 'weight' in name:
# print_fn(f'pretrained weights : {name}')
groups[0].append(value)
else:
# print_fn(f'pretrained bias : {name}')
groups[1].append(value)
# scracthed weights
else:
if 'weight' in name:
if print_fn is not None:
print_fn(f'scratched weights : {name}')
groups[2].append(value)
else:
if print_fn is not None:
print_fn(f'scratched bias : {name}')
groups[3].append(value)
return groups
def validation_epoch_end(self, outputs):
"""On validation epoch end."""
mIoU = self.mIoUMetric.compute()
self.log("val/mIoU", mIoU, on_epoch=True, prog_bar=True, sync_dist=True)
if dist.get_rank() == 0:
print(f"val/mIoU: {mIoU}")
if "coco" in self.cfg.dataset.type:
# cat_kv = dict([(cat["name"], cat["id"]) for cat in self.categories])
if self.cfg.evaluate.comp_clustering:
clustering_score = self.clusteringScoreMetrics.compute()
self.log("val/cluster_score", clustering_score, on_epoch=True, prog_bar=True, sync_dist=True)
if dist.get_rank() == 0:
if self.cfg.evaluate.comp_clustering:
print("val/cluster_score", clustering_score)
else:
raise NotImplementedError
self.mIoUMetric.reset()
self.vis_cnt = 0
for i, name in zip(range(len(self.areaMIoUMetrics)), ["small", "medium", "large"]):
area_mIoU = self.areaMIoUMetrics[i].compute()
self.log(f"val/mIoU_{name}", area_mIoU, on_epoch=True, sync_dist=True)
if dist.get_rank() == 0:
print(f"val/mIoU_{name}: {area_mIoU}")
self.areaMIoUMetrics[i].reset()
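# Illustrative sketch (not part of the original training code): the soft-dice
# distance computed by MAL.dice_loss, reproduced on dummy tensors so the loss
# terms used above are easy to trace by hand.
def _dice_loss_sketch():
    """Compute 1 - dice for a half-overlapping prediction/target pair."""
    pred = torch.tensor([[1.0, 0.0, 1.0]])
    target = torch.tensor([[1.0, 1.0, 0.0]])
    a = torch.sum(pred * target, 1)   # intersection term: 1.0
    b = torch.sum(pred * pred, 1) + 0.001
    c = torch.sum(target * target, 1) + 0.001
    return 1 - (2 * a) / (b + c)      # ~= tensor([0.5002])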
class MALPseudoLabels(MAL):
"""MAL model for pseudo label generation."""
def __init__(self, *args, **kwargs):
"""Init."""
super().__init__(*args, **kwargs)
self.box_inputs = None
def validation_step(self, batch, batch_idx):
"""Validation step."""
pred_dict = super().validation_step(batch, batch_idx, return_mask=True)
pred_seg = pred_dict['img_pred_masks']
if self.cfg.dataset.load_mask:
ious = pred_dict['ious']
ret = []
cnt = 0
# t = time.time()
for seg, (x0, y0, x1, y1), idx, image_id, category_id in zip(pred_seg, batch['bbox'], batch['id'], batch.get('image_id', batch.get('video_id', None)), batch['category_id']):
encoded_mask = encode(np.asfortranarray(seg[0].astype(np.uint8)))
encoded_mask['counts'] = encoded_mask['counts'].decode('ascii')
labels = {
"bbox": [float(x0), float(y0), float(x1 - x0), float(y1 - y0)],
"id": int(idx),
"category_id": int(category_id),
"segmentation": encoded_mask,
"iscrowd": 0,
"area": float(x1 - x0) * float(y1 - y0),
"image_id": int(image_id)
}
if 'score' in batch.keys():
labels['score'] = float(batch['score'][cnt].cpu().numpy())
if self.cfg.dataset.load_mask:
labels['iou'] = float(ious[cnt])
cnt += 1
ret.append(labels)
if batch.get('ytvis_idx', None) is not None:
for ytvis_idx, labels in zip(batch['ytvis_idx'], ret):
labels['ytvis_idx'] = list(map(int, ytvis_idx))
return ret
def validation_epoch_end(self, outputs):
"""On validation epoch end."""
super().validation_epoch_end(outputs)
ret = list(itertools.chain.from_iterable(outputs))
if self.trainer.strategy.root_device.index > 0:
with open(f"{self.cfg.inference.label_dump_path}.part{self.trainer.strategy.root_device.index}", "w", encoding='utf-8') as f:
json.dump(ret, f)
torch.distributed.barrier()
else:
val_ann_path = self.cfg.inference.ann_path
with open(val_ann_path, "r", encoding='utf-8') as f:
anns = json.load(f)
torch.distributed.barrier()
for i in range(1, len(self.cfg.gpu_ids)):
with open(f"{self.cfg.inference.label_dump_path}.part{i}", "r", encoding='utf-8') as f:
obj = json.load(f)
ret.extend(obj)
os.remove(f"{self.cfg.inference.label_dump_path}.part{i}")
if ret[0].get('ytvis_idx', None) is None:
# for COCO format
_ret = []
_ret_set = set()
for ann in ret:
if ann['id'] not in _ret_set:
_ret_set.add(ann['id'])
_ret.append(ann)
anns['annotations'] = _ret
else:
# for YouTubeVIS format
for inst_ann in anns['annotations']:
len_video = len(inst_ann['bboxes'])
inst_ann['segmentations'] = [None for _ in range(len_video)]
for seg_ann in ret:
inst_idx, frame_idx = seg_ann['ytvis_idx']
anns['annotations'][inst_idx]['segmentations'][frame_idx] = seg_ann['segmentation']
with open(self.cfg.inference.label_dump_path, "w", encoding='utf-8') as f:
json.dump(anns, f)
if self.box_inputs is not None:
print("Start evaluating the results...")
cocoGt = COCO(self.cfg.val_ann_path)
cocoDt = cocoGt.loadRes(self.cfg.label_dump_path + ".result")
for iou_type in ['bbox', 'segm']:
cocoEval = COCOeval(cocoGt, cocoDt, iou_type)
cocoEval.evaluate()
cocoEval.accumulate()
cocoEval.summarize()
| tao_dataset_suite-main | nvidia_tao_ds/auto_label/models/mal.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# This work is made available under the Nvidia Source Code License-NC.
# To view a copy of this license, visit
# https://github.com/NVlabs/MAL/blob/main/LICENSE
"""Transformer (ViT and FAN) builder."""
from functools import partial
import torch
from torch import nn
from torch.utils import model_zoo
from nvidia_tao_ds.backbone.fan import (
fan_tiny_12_p16_224,
fan_small_12_p16_224,
fan_base_18_p16_224,
fan_large_24_p16_224,
fan_tiny_8_p4_hybrid,
fan_small_12_p4_hybrid,
fan_base_16_p4_hybrid,
fan_large_16_p4_hybrid
)
from nvidia_tao_ds.backbone.vision_transformer import VisionTransformer
fan_dict = {
"fan_tiny_12_p16_224": fan_tiny_12_p16_224,
"fan_small_12_p16_224": fan_small_12_p16_224,
"fan_base_18_p16_224": fan_base_18_p16_224,
"fan_large_24_p16_224": fan_large_24_p16_224,
"fan_tiny_8_p4_hybrid": fan_tiny_8_p4_hybrid,
"fan_small_12_p4_hybrid": fan_small_12_p4_hybrid,
"fan_base_16_p4_hybrid": fan_base_16_p4_hybrid,
"fan_large_16_p4_hybrid": fan_large_16_p4_hybrid
}
urls_dic = {
"vit-deit-tiny/16": "https://dl.fbaipublicfiles.com/deit/deit_tiny_patch16_224-a1311bcf.pth",
"vit-deit-small/16": "https://dl.fbaipublicfiles.com/deit/deit_small_patch16_224-cd65a155.pth",
"vit-deit-base/16": "https://dl.fbaipublicfiles.com/deit/deit_base_patch16_224-b5f2ef4d.pth",
"vit-deit-base-distilled/16": "https://dl.fbaipublicfiles.com/deit/deit_base_distilled_patch16_224-df68dfff.pth",
"vit-deit-iii-base-224/16": "https://dl.fbaipublicfiles.com/deit/deit_3_base_224_21k.pth",
"vit-mocov3-base/16": "https://dl.fbaipublicfiles.com/moco-v3/vit-b-300ep/vit-b-300ep.pth.tar",
"vit-mae-base/16": "https://dl.fbaipublicfiles.com/mae/pretrain/mae_pretrain_vit_base.pth",
'vit-mae-large/16': "https://dl.fbaipublicfiles.com/mae/pretrain/mae_pretrain_vit_large.pth",
'vit-mae-huge/14': 'https://dl.fbaipublicfiles.com/mae/pretrain/mae_pretrain_vit_huge.pth'
}
def get_vit(cfg=None, load_imagenet_weights=False):
"""Build ViT models."""
arch = cfg.model.arch
if '16' in arch:
patch_size = 16
elif '8' in arch:
patch_size = 8
elif '14' in arch:
patch_size = 14
else:
raise ValueError("Only 8/14/16 are supported.")
if 'tiny' in arch.lower():
embed_dim = 192
num_heads = 3
depth = 12
elif 'small' in arch.lower():  # elif, so 'tiny' archs do not fall through to the ValueError below
embed_dim = 384
num_heads = 6
depth = 12
elif 'base' in arch.lower():
embed_dim = 768
num_heads = 12
depth = 12
elif 'large' in arch.lower():
embed_dim = 1024
num_heads = 16
depth = 24
elif 'huge' in arch.lower():
embed_dim = 1280
num_heads = 16
depth = 32
else:
raise ValueError("Only tiny/small/base/large/huge are supported.")
model = VisionTransformer(
patch_size=patch_size, embed_dim=embed_dim, depth=depth,
num_heads=num_heads, mlp_ratio=4, qkv_bias=True, drop_path_rate=cfg.model.vit_dpr,
norm_layer=partial(nn.LayerNorm, eps=1e-6), frozen_stages=cfg.model.frozen_stages)
if load_imagenet_weights:
path = urls_dic[arch]
if path.startswith('http'):
state_dict = model_zoo.load_url(path)
else:
state_dict = torch.load(path)
if 'state_dict' in state_dict.keys():
state_dict = state_dict['state_dict']
if 'model' in state_dict.keys():
state_dict = state_dict['model']
model.load_state_dict(state_dict, strict=False)
return model
def get_fan(cfg, load_imagenet_weights=False):
"""Build FAN models."""
arch = cfg.model.arch
if arch in list(fan_dict.keys()):
return fan_dict[arch](pretrained=load_imagenet_weights)
raise ValueError(f"Only {list(fan_dict.keys())} are supported.")
def build_model(cfg):
"""Model builder."""
if 'vit' in cfg.model.arch:
backbone = get_vit(cfg, load_imagenet_weights=False)
elif 'fan' in cfg.model.arch:
backbone = get_fan(cfg, load_imagenet_weights=False)
else:
raise ValueError('Only vit and fan are supported.')
return backbone
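# Illustrative sketch: calling build_model with a minimal hand-rolled namespace
# instead of the full Hydra config. The field values below (vit_dpr, frozen_stages)
# are assumptions chosen for demonstration only.
def _build_model_sketch():
    """Construct a ViT-Base/16 backbone without ImageNet weights."""
    from types import SimpleNamespace
    cfg = SimpleNamespace(model=SimpleNamespace(
        arch='vit-mae-base/16',  # arch must name a patch size (8/14/16) and a scale keyword
        vit_dpr=0.0,             # drop-path rate (assumed)
        frozen_stages=-1))       # assumed to mean "freeze no stages"
    return build_model(cfg)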
| tao_dataset_suite-main | nvidia_tao_ds/auto_label/models/vit_builder.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# This work is made available under the Nvidia Source Code License-NC.
# To view a copy of this license, visit
# https://github.com/NVlabs/MAL/blob/main/LICENSE
"""MAL inference script."""
import os
import warnings
import torch
from pytorch_lightning import Trainer
from nvidia_tao_ds.auto_label.config.default_config import ExperimentConfig
from nvidia_tao_ds.auto_label.datasets.pl_data_module import WSISDataModule
from nvidia_tao_ds.auto_label.models.mal import MALPseudoLabels
from nvidia_tao_ds.auto_label.utils.config_utils import update_config
from nvidia_tao_ds.core.decorators import monitor_status
from nvidia_tao_ds.core.hydra.hydra_runner import hydra_runner
warnings.filterwarnings("ignore")
spec_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
@hydra_runner(
config_path=os.path.join(spec_root, "experiment_specs"),
config_name="generate", schema=ExperimentConfig
)
def main(cfg: ExperimentConfig) -> None:
"""Wrapper function for format conversion."""
run_inference(cfg=cfg)
@monitor_status(mode='Auto-label')
def run_inference(cfg: ExperimentConfig) -> None:
"""Run pseudo-label generation."""
cfg = update_config(cfg)
os.makedirs(cfg.results_dir, exist_ok=True)
# gpu indices
if len(cfg.gpu_ids) == 0:
cfg.gpu_ids = list(range(torch.cuda.device_count()))
cfg.train.lr = 0
cfg.train.min_lr = 0
num_workers = len(cfg.gpu_ids) * cfg.dataset.num_workers_per_gpu
# override validation path
cfg.dataset.val_ann_path = cfg.inference.ann_path
cfg.dataset.val_img_dir = cfg.inference.img_dir
cfg.dataset.load_mask = cfg.inference.load_mask
cfg.train.batch_size = cfg.inference.batch_size
cfg.evaluate.use_mixed_model_test = False
cfg.evaluate.use_teacher_test = False
cfg.evaluate.comp_clustering = False
cfg.evaluate.use_flip_test = False
data_loader = WSISDataModule(
num_workers=num_workers,
load_train=False,
load_val=True, cfg=cfg)
# Phase 2: Generating pseudo-labels
model = MALPseudoLabels(
cfg=cfg,
categories=data_loader._val_data_loader.dataset.coco.dataset['categories'])
trainer = Trainer(
gpus=cfg.gpu_ids,
strategy=cfg.strategy,
devices=1,
accelerator='gpu',
default_root_dir=cfg.results_dir,
precision=16,
check_val_every_n_epoch=1,
resume_from_checkpoint=cfg.checkpoint
)
trainer.validate(model, ckpt_path=cfg.checkpoint, dataloaders=data_loader.val_dataloader())
if __name__ == '__main__':
main()
| tao_dataset_suite-main | nvidia_tao_ds/auto_label/scripts/generate.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MAL scripts."""
| tao_dataset_suite-main | nvidia_tao_ds/auto_label/scripts/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Define entrypoint to run tasks for auto-label."""
import argparse
from nvidia_tao_ds.auto_label import scripts
from nvidia_tao_ds.core.entrypoint.entrypoint import get_subtasks, launch
def main():
"""Main entrypoint wrapper."""
# Create parser for a given task.
parser = argparse.ArgumentParser(
"auto_label",
add_help=True,
description="TAO Toolkit entrypoint for MAL"
)
# Build list of subtasks by inspecting the scripts package.
subtasks = get_subtasks(scripts)
# Parse the arguments and launch the subtask.
launch(
parser, subtasks, task="auto_label"
)
if __name__ == '__main__':
main()
| tao_dataset_suite-main | nvidia_tao_ds/auto_label/entrypoint/auto_label.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MAL entrypoint."""
| tao_dataset_suite-main | nvidia_tao_ds/auto_label/entrypoint/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Offline augmentation module."""
| tao_dataset_suite-main | nvidia_tao_ds/augment/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DALI pipeline runner."""
import os
import numpy as np
from multiprocessing import Pool
import pycocotools.mask as maskUtils
from PIL import Image
from tqdm import tqdm
from nvidia_tao_ds.augment.utils import kitti
from nvidia_tao_ds.augment.utils.helper import decode_str
def save_image(img, path):
"""Write PIL image to path."""
img.save(path)
def process_augmented_coco(image_id, image, boxes_per_image, masks_per_image,
coco, config):
"""Process augmented COCO data."""
ann_load = coco.loadAnns(coco.getAnnIds(imgIds=image_id))
img_info = coco.loadImgs(ids=image_id)[0]
img_name = img_info['file_name']
out_image_path = os.path.join(config.data.output_dataset, 'images', img_name)
ann_per_image = []
for j in range(len(ann_load)):
ann_load[j]['bbox'] = [float(x) for x in boxes_per_image[j]]
if config.data.include_masks:
# mask --> RLE
encoded_mask = maskUtils.encode(
np.asfortranarray(masks_per_image[..., j].astype(np.uint8)))
encoded_mask['counts'] = encoded_mask['counts'].decode('ascii')
ann_load[j]['segmentation'] = encoded_mask
ann_per_image.append(ann_load[j])
save_image(Image.fromarray(image), out_image_path)
return ann_per_image
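# Illustrative sketch of the mask -> RLE step performed above; the mask here is
# a tiny synthetic example, not real data.
def _rle_encode_sketch():
    """Encode a 4x4 binary mask exactly as process_augmented_coco does."""
    mask = np.zeros((4, 4), dtype=np.uint8)
    mask[1:3, 1:3] = 1
    rle = maskUtils.encode(np.asfortranarray(mask))
    rle['counts'] = rle['counts'].decode('ascii')  # make it JSON-serializable
    return rle  # {'size': [4, 4], 'counts': '...'}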
def process_augmented_kitti(image, boxes_per_image,
encoded_image_path,
encoded_label_path,
config):
"""Process augmented KITTI data."""
image_path = decode_str(encoded_image_path)
image_name = os.path.basename(image_path)
label_path = decode_str(encoded_label_path)
label_name = os.path.basename(label_path)
output_image_dir = os.path.join(config.data.output_dataset, 'images')
output_label_dir = os.path.join(config.data.output_dataset, 'labels')
# save augmented image
save_image(Image.fromarray(image), os.path.join(output_image_dir, image_name))
# dump kitti file with augmented labels
with open(os.path.join(output_label_dir, label_name), "w", encoding='utf-8') as f:
annotations = kitti.parse_label_file(label_path)
for j in range(boxes_per_image.shape[0]):
annotation = annotations[j]
annotation.box = boxes_per_image[j]
f.write(str(annotation))
f.write('\n')
class DALIPipeIter():
"""Dali pipe iterator."""
def __init__(self, pipe):
"""Initialization of the pipeline iterator.
Args:
pipe (dali.pipeline.Pipeline): Dali pipeline object.
Returns:
DALIPipeIter object class.
"""
self.pipe = pipe
def __iter__(self):
"""Return interator."""
return self
def __next__(self):
"""Next method for the DALI iterator.
This method runs the DALI pipeline and returns the
pipeline's outputs.
"""
return self.pipe.run()
def run(pipe, batch_size, data_callable, config):
"""Run pipeline."""
if config.data.dataset_type.lower() == 'coco':
results = []
ann_dump = []
img_id_set = set()
with Pool(batch_size) as pool:
with tqdm(total=data_callable.size) as pbar:
for images, boxes, img_ids, masks in DALIPipeIter(pipe):
images = images.as_cpu()
img_ids = img_ids.as_array().flatten()
for i, img_id in enumerate(img_ids):
img_id = int(img_id)
if img_id not in img_id_set:
img_id_set.add(img_id)
results.append(pool.apply_async(
process_augmented_coco,
(img_id,
images.at(i),
boxes.at(i),
masks.at(i),
data_callable.coco,
config)))
pbar.update(data_callable.samples_per_iter)
for r in results:
r.wait()
ann_batch = r.get()
for b in ann_batch:
ann_dump.append(b)
return ann_dump
if config.data.dataset_type.lower() == 'kitti':
with Pool(batch_size) as pool:
with tqdm(total=data_callable.size) as pbar:
for images, boxes, img_paths, lbl_paths in DALIPipeIter(pipe):
images = images.as_cpu()
results = []
for i in range(len(images)):
results.append(pool.apply_async(
process_augmented_kitti,
(images.at(i),
boxes.at(i),
img_paths.at(i),
lbl_paths.at(i),
config)))
for r in results:
r.wait()
pbar.update(data_callable.samples_per_iter)
return 0
raise ValueError("Only `kitti` and `coco` are supported in dataset_type.")
| tao_dataset_suite-main | nvidia_tao_ds/augment/pipeline/runner.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data pipeline module for TAO augment."""
| tao_dataset_suite-main | nvidia_tao_ds/augment/pipeline/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DALI pipeline."""
import logging
import numpy as np
from nvidia import dali
from nvidia.dali import fn, pipeline_def
import nvidia.dali.fn.transforms as tr
logger = logging.getLogger(__name__)
def random_pick(values):
"""Randomly pick from values or range."""
if len(values) == 0:
value = 0
elif len(values) == 1:
value = values[0]
elif len(values) == 2:
value = dali.fn.random.uniform(range=tuple(values))
else:
value = dali.fn.random.uniform(values=list(values))
return value
def spatial_transform(config, input_size, output_size):
"""Define a spatial transform pipe.
Args:
config (Hydra config): Spatial augmentation config element.
input_size (tuple): Size of the input image.
output_size (tuple): Size of the output image.
Return:
m (2x3): Spatial transformation matrix.
"""
# DALI cannot conditionally apply a transform, so we have to pass neutral parameters.
# Since we're only manipulating 2x3 matrix, it's not a problem.
angle = config.rotation.angle
angle = random_pick(angle)
if config.rotation.units == "radians":
angle *= 180 / np.pi
center = input_size / 2
shear_ratio_x = random_pick(config.shear.shear_ratio_x)
shear_ratio_y = random_pick(config.shear.shear_ratio_y)
shear = np.float32([shear_ratio_x, shear_ratio_y])
translate_x = random_pick(config.translation.translate_x)
translate_y = random_pick(config.translation.translate_y)
offset = np.float32(
[translate_x, translate_y]
)
flip = np.int32([config.flip.flip_horizontal, config.flip.flip_vertical])
m = tr.rotation(angle=angle, center=center)
# flip operator is scale with negative axis.
m = tr.scale(m, scale=1 - flip * 2.0, center=center)
m = tr.shear(m, shear=shear, center=center)
m = tr.translation(m, offset=offset)
m = tr.scale(m, scale=output_size / input_size)
return m
def apply_color_transform(images, config):
"""Apply color transform to the image.
Args:
images (list): Array of images.
config (Hydra config): Color configuration element.
Returns:
images (list): Batch of images.
"""
H = config.hue.hue_rotation_angle
H = -random_pick(H)
S = config.saturation.saturation_shift
S = random_pick(S)
C = 1.0 # TODO(@yuw): verify
C += random_pick(config.contrast.contrast)
center = random_pick(config.contrast.center)
B = random_pick(config.brightness.offset) / 255
# applying hue rotation
images = fn.hsv(
images,
hue=H,
saturation=S,
dtype=dali.types.FLOAT
)
# applying contrast adjustment.
images = fn.brightness_contrast(
images,
contrast=C,
contrast_center=center,
brightness_shift=B * C,
dtype=dali.types.UINT8)
return images
def transform_boxes(boxes, matrix, out_w, out_h):
"""Apply color transform to the image.
Args:
boxes (list): batch of boxes
matrix (np.array): transformation matrix
out_w (int): output width
out_h (int): output height
"""
# boxes' shape: num_boxes x 4
box_x0 = fn.slice(boxes, 0, 1, axes=[1])
box_y0 = fn.slice(boxes, 1, 1, axes=[1])
box_x1 = fn.slice(boxes, 2, 1, axes=[1])
box_y1 = fn.slice(boxes, 3, 1, axes=[1])
corners = fn.stack(
fn.cat(box_x0, box_y0, axis=1),
fn.cat(box_x1, box_y0, axis=1),
fn.cat(box_x0, box_y1, axis=1),
fn.cat(box_x1, box_y1, axis=1),
axis=1
)
# corners' shape: nboxes x 4 x 2
corners = fn.coord_transform(corners, MT=matrix)
lo = fn.reductions.min(corners, axes=1)
hi = fn.reductions.max(corners, axes=1)
# I really DO wish DALI had broadcasting :(
# hi, lo shape: nboxes x 2
lohi = fn.stack(lo, hi, axis=1)
# lohi shape: nboxes x 2 x 2
lohix = dali.math.clamp(fn.slice(lohi, 0, 1, axes=[2]), 0, out_w)
lohiy = dali.math.clamp(fn.slice(lohi, 1, 1, axes=[2]), 0, out_h)
lohi = fn.stack(lohix, lohiy, axis=2)
return fn.reshape(lohi, shape=[-1, 4])
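# Pure-numpy analogue of transform_boxes, added for clarity: warp the four box
# corners with a 2x3 affine matrix and re-derive the axis-aligned extents. The
# matrix below is a toy 90-degree rotation, not a value produced by the pipeline.
def _transform_boxes_numpy_sketch():
    """Mirror the corner-warp/min-max logic above without DALI."""
    m = np.float32([[0, -1, 4], [1, 0, 0]])      # maps (x, y) -> (4 - y, x)
    box = np.float32([1, 1, 3, 2])               # x0, y0, x1, y1
    corners = np.float32([[box[0], box[1]], [box[2], box[1]],
                          [box[0], box[3]], [box[2], box[3]]])
    warped = corners @ m[:, :2].T + m[:, 2]      # like fn.coord_transform(..., MT=m)
    lo, hi = warped.min(0), warped.max(0)
    return np.concatenate([lo, hi])              # array([2., 1., 3., 3.])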
def apply_blur(images, blur_config):
"""Apply blur operator on an image.
Args:
images (list): Batch of images.
blur_config (Hydra config): Config element for the blur operator.
Returns:
images (batch): Batch of images blurred.
"""
logger.debug("Applying Gaussian blur operator to the images.")
sigma = random_pick(blur_config.std) or None
size = random_pick(blur_config.size) or None
return fn.gaussian_blur(images, sigma=sigma, window_size=size)
def build_coco_pipeline(coco_callable, is_fixed_size, config,
batch_size=1, device_id=0):
"""Build DALI pipeline for COCO format dataset."""
@pipeline_def(batch_size=batch_size, num_threads=2, device_id=device_id,
py_num_workers=1, py_start_method='spawn', seed=config.random_seed)
def sharded_coco_pipeline(coco_callable, is_fixed_size, config):
logger.debug("Defining COCO pipeline.")
raw_files, boxes, img_ids, masks = fn.external_source(
source=coco_callable,
num_outputs=4, batch=False,
parallel=True,
dtype=[dali.types.UINT8, dali.types.FLOAT, dali.types.INT32, dali.types.UINT8])
shapes = fn.peek_image_shape(raw_files)
images = fn.image_decoder(raw_files, device="mixed")
in_h = fn.slice(shapes, 0, 1, axes=[0])
in_w = fn.slice(shapes, 1, 1, axes=[0])
if is_fixed_size:
out_w = config.data.output_image_width
out_h = config.data.output_image_height
out_size = [out_h, out_w]
else:
out_w = in_w
out_h = in_h
out_size = fn.cat(out_h, out_w)
mt = spatial_transform(
config.spatial_aug,
input_size=fn.cat(in_w, in_h),
output_size=dali.types.Constant(
[out_w, out_h]) if is_fixed_size else fn.cat(out_w, out_h)
)
images = fn.warp_affine(
images, matrix=mt,
size=out_size,
fill_value=0, inverse_map=False
)
if config.data.include_masks:
masks = fn.warp_affine(
masks, matrix=mt,
size=out_size,
fill_value=0, inverse_map=False
)
orig_boxes = boxes # noqa pylint: disable=W0612
boxes = transform_boxes(boxes, mt, out_w, out_h)
images = apply_color_transform(images, config.color_aug)
images = apply_blur(images, config.blur_aug)
return images, boxes, img_ids, masks
logger.debug("Building COCO pipeline.")
pipe = sharded_coco_pipeline(
coco_callable, is_fixed_size, config)
pipe.build()
return pipe
def build_kitti_pipeline(kitti_callable, is_fixed_size, config,
batch_size=1, device_id=0):
"""Build DALI pipeline for COCO format dataset."""
@pipeline_def(batch_size=batch_size, num_threads=2, device_id=device_id,
py_num_workers=1, py_start_method='spawn', seed=config.random_seed)
def sharded_kitti_pipeline(kitti_callable, is_fixed_size, config):
logger.debug("Defining KITTI pipeline.")
raw_files, boxes, img_paths, lbl_paths = fn.external_source(
source=kitti_callable,
num_outputs=4, batch=False,
parallel=True,
dtype=[dali.types.UINT8, dali.types.FLOAT, dali.types.UINT8, dali.types.UINT8])
shapes = fn.peek_image_shape(raw_files)
images = fn.image_decoder(raw_files, device="mixed")
in_h = fn.slice(shapes, 0, 1, axes=[0])
in_w = fn.slice(shapes, 1, 1, axes=[0])
if is_fixed_size:
out_w = config.data.output_image_width
out_h = config.data.output_image_height
out_size = [out_h, out_w]
else:
out_w = in_w
out_h = in_h
out_size = fn.cat(out_h, out_w)
mt = spatial_transform(
config.spatial_aug,
input_size=fn.cat(in_w, in_h),
output_size=dali.types.Constant(
[out_w, out_h]) if is_fixed_size else fn.cat(out_w, out_h)
)
images = fn.warp_affine(
images, matrix=mt,
size=out_size,
fill_value=0, inverse_map=False
)
orig_boxes = boxes # noqa pylint: disable=W0612
boxes = transform_boxes(boxes, mt, out_w, out_h)
images = apply_color_transform(images, config.color_aug)
images = apply_blur(images, config.blur_aug)
return images, boxes, img_paths, lbl_paths
logger.debug("Building KITTI pipeline.")
pipe = sharded_kitti_pipeline(
kitti_callable, is_fixed_size, config)
pipe.build()
return pipe
| tao_dataset_suite-main | nvidia_tao_ds/augment/pipeline/sharded_pipeline.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Default configs for TAO augment."""
| tao_dataset_suite-main | nvidia_tao_ds/augment/config/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Default config file."""
from typing import List, Optional, Union
from dataclasses import dataclass, field
from omegaconf import MISSING
@dataclass
class RotationAIConfig:
"""Rotation configuration template."""
enabled: bool = False
gt_cache: str = ''
@dataclass
class RotationConfig:
"""Rotation configuration template."""
angle: List[float] = field(default_factory=lambda: [0])
units: str = "degrees"
refine_box: RotationAIConfig = RotationAIConfig()
@dataclass
class ShearConfig:
"""Rotation configuration template."""
shear_ratio_x: List[float] = field(default_factory=lambda: [0])
shear_ratio_y: List[float] = field(default_factory=lambda: [0])
@dataclass
class FlipConfig:
"""Flip configuration template."""
flip_horizontal: bool = False
flip_vertical: bool = False
@dataclass
class TranslationConfig:
"""Translation configuration template."""
translate_x: List[int] = field(default_factory=lambda: [0])
translate_y: List[int] = field(default_factory=lambda: [0])
@dataclass
class HueConfig:
"""Hue configuration template."""
hue_rotation_angle: List[float] = field(default_factory=lambda: [0])
@dataclass
class SaturationConfig:
"""Saturation configuration template."""
saturation_shift: List[float] = field(default_factory=lambda: [1])
@dataclass
class ContrastConfig:
"""Contrast configuration template."""
contrast: List[float] = field(default_factory=lambda: [0])
center: List[float] = field(default_factory=lambda: [127])
@dataclass
class BrightnessConfig:
"""Contrast configuration template."""
offset: List[float] = field(default_factory=lambda: [0])
@dataclass
class SpatialAugmentationConfig:
"""Spatial augmentation configuration template."""
rotation: RotationConfig = RotationConfig()
shear: ShearConfig = ShearConfig()
flip: FlipConfig = FlipConfig()
translation: TranslationConfig = TranslationConfig()
@dataclass
class ColorAugmentationConfig:
"""Color augmentation configuration template."""
hue: HueConfig = HueConfig()
saturation: SaturationConfig = SaturationConfig()
contrast: ContrastConfig = ContrastConfig()
brightness: BrightnessConfig = BrightnessConfig()
@dataclass
class KernelFilterConfig:
"""Blur configuration template."""
std: List[float] = field(default_factory=lambda: [0.1])
size: List[float] = field(default_factory=lambda: [3])
@dataclass
class DataConfig:
"""Dataset configuration template."""
dataset_type: str = 'coco'
output_image_width: Union[int, None] = None
output_image_height: Union[int, None] = None
image_dir: str = MISSING
ann_path: str = MISSING
output_dataset: str = MISSING
batch_size: int = 8
include_masks: bool = False
@dataclass
class AugmentConfig:
"""Experiment configuration template."""
random_seed: int = 42
num_gpus: int = 1
gpu_ids: List[int] = field(default_factory=lambda: [0])
data: DataConfig = DataConfig()
color_aug: ColorAugmentationConfig = ColorAugmentationConfig()
spatial_aug: SpatialAugmentationConfig = SpatialAugmentationConfig()
blur_aug: KernelFilterConfig = KernelFilterConfig()
results_dir: Optional[str] = None
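# Illustrative sketch: materializing the schema above with OmegaConf. The paths
# assigned here are placeholders; MISSING fields must be filled before use.
def _augment_config_sketch():
    """Build an AugmentConfig instance and override its required fields."""
    from omegaconf import OmegaConf
    cfg = OmegaConf.structured(AugmentConfig)
    cfg.data.image_dir = '/data/images'
    cfg.data.ann_path = '/data/labels.json'
    cfg.data.output_dataset = '/data/output'
    return cfg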
| tao_dataset_suite-main | nvidia_tao_ds/augment/config/default_config.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities to handle kitti annotations."""
from pathlib import Path
import glob
import os
IMAGE_EXTENSIONS = [".png", ".jpg", ".jpeg"]
def list_files(image_dir, label_dir):
"""List images and labels file in the dataset directory.
Args:
root_dir (str): Unix path to the dataset root.
Returns:
output_image(str), output_label (str): List of output images and output labels
"""
images = []
# List image files.
for ext in IMAGE_EXTENSIONS:
images.extend(
glob.glob(
os.path.join(image_dir, f'*{ext}'),
)
)
# List label files.
labels = glob.glob(os.path.join(label_dir, '*.txt'))
image_names = list(map(lambda x: Path(x).stem, images))
label_names = list(map(lambda x: Path(x).stem, labels))
image_dict = dict(zip(image_names, images))
label_dict = dict(zip(label_names, labels))
common = set(image_names).intersection(set(label_names))
return zip(*[(image_dict[i], label_dict[i]) for i in common])
class Annotation:
"""Label annotation object corresponding to a single line in the kitti object."""
def __init__(self, *args):
"""Initialize a kitti label object.
Args:
args[list]: List of kitti labels.
"""
self.category = args[0]
self.truncation = float(args[1])
self.occlusion = int(args[2])
self.observation_angle = float(args[3])
self.box = [float(x) for x in args[4:8]]
hwlxyz = [float(x) for x in args[8:14]]
self.world_bbox = hwlxyz[3:6] + hwlxyz[0:3]
self.world_bbox_rot_y = float(args[14])
def __str__(self):
"""String representation of annotation object."""
world_box_str = "{3:.2f} {4:.2f} {5:.2f} {0:.2f} {1:.2f} {2:.2f}".format(*self.world_bbox) # noqa pylint: disable=C0209
box_str = "{:.2f} {:.2f} {:.2f} {:.2f}".format(*self.box) # noqa pylint: disable=C0209
return "{0} {1:.2f} {2} {3:.2f} {4} {5} {6:.2f}".format( # noqa pylint: disable=C0209
self.category, self.truncation, self.occlusion, self.observation_angle,
box_str, world_box_str, self.world_bbox_rot_y)
def parse_label_file(label_file):
"""Parse a label file.
Args:
label_file (str): Unix path to the kitti label file.
Returns:
annotations (list): List of parsed kitti labels.
"""
with open(label_file, "r", encoding='utf-8') as f:
lines = f.readlines()
annotations = []
for line in lines:
if line[-1] == '\n':
line = line[:-1]
tokens = line.split(" ")
annotations.append(Annotation(*tokens))
return annotations
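# Illustrative sketch of the 15-field KITTI line format handled above; the
# label values are a typical example, not taken from a real dataset.
def _parse_annotation_sketch():
    """Round-trip one KITTI label line through Annotation."""
    line = "Car 0.00 0 -1.58 587.01 173.33 614.12 200.12 1.65 1.67 3.64 -0.65 1.71 46.70 -1.59"
    ann = Annotation(*line.split(" "))
    return ann.category, ann.box  # ('Car', [587.01, 173.33, 614.12, 200.12])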
| tao_dataset_suite-main | nvidia_tao_ds/augment/utils/kitti.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for TAO augment."""
from nvidia_tao_ds.augment.dataloader.coco_callable import CocoInputCallable
from nvidia_tao_ds.augment.dataloader.kitti_callable import KittiInputCallable
from nvidia_tao_ds.augment.pipeline.sharded_pipeline import (
build_coco_pipeline,
build_kitti_pipeline,
)
callable_dict = {
'kitti': KittiInputCallable,
'coco': CocoInputCallable
}
pipeline_dict = {
'kitti': build_kitti_pipeline,
'coco': build_coco_pipeline
}
| tao_dataset_suite-main | nvidia_tao_ds/augment/utils/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for distributed execution."""
import os
import sys
__all__ = ["MPI_local_rank", "MPI_rank", "MPI_size", "MPI_rank_and_size", "MPI_is_distributed"]
def MPI_is_distributed():
"""Return a boolean whether a distributed training/inference runtime is being used."""
return all(var in os.environ for var in ["OMPI_COMM_WORLD_RANK", "OMPI_COMM_WORLD_SIZE"])
def MPI_local_rank():
"""Local rank."""
if "OMPI_COMM_WORLD_LOCAL_RANK" in os.environ:
return int(os.environ.get("OMPI_COMM_WORLD_LOCAL_RANK"))
return 0
def MPI_rank():
"""MPI rank."""
return MPI_rank_and_size()[0]
def MPI_size():
"""MPI size."""
return MPI_rank_and_size()[1]
def MPI_rank_and_size():
"""MPI rank and size."""
if "tensorflow" in sys.modules:
return mpi_env_MPI_rank_and_size()
return 0, 1
# Source: https://github.com/horovod/horovod/blob/c3626e/test/common.py#L25
def mpi_env_MPI_rank_and_size():
"""Get MPI rank and size from environment variables and return them as a tuple of integers.
Most MPI implementations have an `mpirun` or `mpiexec` command that will
run an MPI executable and set up all communication necessary between the
different processors. As part of that set up, they will set environment
variables that contain the rank and size of the MPI_COMM_WORLD
communicator. We can read those environment variables from Python in order
to ensure that `hvd.rank()` and `hvd.size()` return the expected values.
Since MPI is just a standard, not an implementation, implementations
typically choose their own environment variable names. This function tries
to support several different implementation, but really it only needs to
support whatever implementation we want to use for the TensorFlow test
suite.
If this is not running under MPI, then defaults of rank zero and size one
are returned. (This is appropriate because when you call MPI_Init in an
application not started with mpirun, it will create a new independent
communicator with only one process in it.)
Source: https://github.com/horovod/horovod/blob/c3626e/test/common.py#L25
"""
rank_env = 'PMI_RANK OMPI_COMM_WORLD_RANK'.split()
size_env = 'PMI_SIZE OMPI_COMM_WORLD_SIZE'.split()
for rank_var, size_var in zip(rank_env, size_env):
rank = os.environ.get(rank_var)
size = os.environ.get(size_var)
if rank is not None and size is not None:
return int(rank), int(size)
# Default to rank zero and size one if there are no environment variables
return 0, 1
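# Tiny illustration of the fallback behaviour documented above: without mpirun
# (no PMI/OMPI environment variables set), rank defaults to 0 and size to 1.
def _rank_and_size_sketch():
    """Return (0, 1) when run in a plain, non-MPI interpreter."""
    return mpi_env_MPI_rank_and_size()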
| tao_dataset_suite-main | nvidia_tao_ds/augment/utils/distributed_utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities functions for file handling."""
import numpy as np
def load_file(path):
"""Get a numpy array from a loaded file.
Args:
path (str): Unix path to the file.
Returns:
np.array of loaded file.
"""
return np.fromfile(path, dtype=np.uint8)
def file_fetcher(paths, batch_size):
"""Fetcher for a batch of file.
Args:
path (list): List of Unix paths to files.
batch_size (int): Number of files per batch.
Return:
f (function pointer): Function pointer to the batch.
"""
def f(i):
start = batch_size * i
end = min(len(paths), start + batch_size)
if end <= start:
raise StopIteration()
batch = paths[start:end]
if len(batch) < batch_size:
# pad with last sample
batch += [batch[-1]] * (batch_size - len(batch))
return [load_file(path) for path in batch]
return f
def box_fetcher(labels, batch_size):
"""Fetcher for a batch of file.
Args:
labels (list): List of kitti read annotation objects.
batch_size (int): Number of files per batch.
Return:
f (function pointer): Function pointer to the batch.
"""
def f(i):
start = batch_size * i
end = min(len(labels), start + batch_size)
if end <= start:
raise StopIteration()
batch = []
for j in range(start, end):
boxes = []
for annotation in labels[j]:
boxes.append(annotation.box)
batch.append(np.float32(boxes))
if len(batch) < batch_size:
# pad with last sample
batch += [batch[-1]] * (batch_size - len(batch))
return batch
return f
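# Illustrative sketch of box_fetcher on in-memory data; _Ann is a hypothetical
# stand-in for kitti.Annotation exposing only the .box attribute the fetcher reads.
def _box_fetcher_sketch():
    """Pull batch 0 from a fetcher built over one two-box sample."""
    class _Ann:
        def __init__(self, box):
            self.box = box
    labels = [[_Ann([0.0, 0.0, 10.0, 10.0]), _Ann([5.0, 5.0, 8.0, 9.0])]]
    fetch = box_fetcher(labels, batch_size=1)
    return fetch(0)  # [float32 array of shape (2, 4)]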
| tao_dataset_suite-main | nvidia_tao_ds/augment/utils/file_handlers.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper functions."""
import logging
def encode_str(s):
"""Encode str to array."""
return [ord(e) for e in s]
def decode_str(arr):
"""Decode array to str."""
return ''.join([chr(e) for e in arr])
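# Illustrative round-trip (added for clarity): encode_str and decode_str are
# exact inverses for any unicode string.
def _codec_sketch():
    """Encode a file name to code points and decode it back."""
    arr = encode_str('000001.png')  # [48, 48, 48, 48, 48, 49, 46, 112, 110, 103]
    return decode_str(arr)          # '000001.png'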
class ColorFormatter(logging.Formatter):
"""Color formatter."""
grey = "\x1b[38;20m"
yellow = "\x1b[33;20m"
red = "\x1b[31;20m"
bold_red = "\x1b[31;1m"
reset = "\x1b[0m"
fmt = "%(asctime)s - %(levelname)s - %(message)s (%(filename)s:%(lineno)d)"
FORMATS = {
logging.DEBUG: grey + fmt + reset,
logging.INFO: grey + fmt + reset,
logging.WARNING: yellow + fmt + reset,
logging.ERROR: red + fmt + reset,
logging.CRITICAL: bold_red + fmt + reset
}
def format(self, record):
"""format."""
log_fmt = self.FORMATS.get(record.levelno)
formatter = logging.Formatter(log_fmt)
return formatter.format(record)
def config_logger(logger):
"""Add color format to logger."""
# create console handler with a higher log level
ch = logging.StreamHandler()
ch.setFormatter(ColorFormatter())
logger.addHandler(ch)
| tao_dataset_suite-main | nvidia_tao_ds/augment/utils/helper.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Entrypoint script to run TAO augmentation."""
from copy import deepcopy
import glob
import logging
import json
import os
import sys
import time
from nvidia_tao_ds.annotations.kitti_to_coco import convert_kitti_to_coco
from nvidia_tao_ds.annotations.coco_to_kitti import convert_coco_to_kitti
from nvidia_tao_ds.augment.config.default_config import AugmentConfig
from nvidia_tao_ds.augment.pipeline import runner
from nvidia_tao_ds.augment.utils import callable_dict, pipeline_dict
from nvidia_tao_ds.augment.utils.helper import config_logger
from nvidia_tao_ds.augment.utils.distributed_utils import MPI_local_rank
from nvidia_tao_ds.core.decorators import monitor_status
from nvidia_tao_ds.core.hydra.hydra_runner import hydra_runner
logger = logging.getLogger(__name__)
config_logger(logger)
@monitor_status(mode='Augment')
def run_augment(config):
"""TAO augmentation pipeline for OD datasets.
Args:
config (Hydra config): Config element of the augmentation config.
"""
logger.info("Data augmentation started.")
start_time = time.time()
num_gpus = max(1, config.num_gpus)
if num_gpus != len(config.gpu_ids):
config.gpu_ids = list(range(num_gpus))
logger.warning(f"Number of GPUs ({num_gpus}) doesn't match the length of GPU indices.")
logger.warning(f"Default GPU indices ({config.gpu_ids}) will be used.")
image_dir = config.data.image_dir
ann_path = config.data.ann_path
batch_size = config.data.batch_size
is_fixed_size = config.data.output_image_width and config.data.output_image_height
dataset_type = config.data.dataset_type.lower()
output_dir = config.data.output_dataset
output_image_dir = os.path.join(config.data.output_dataset, 'images')
output_label_dir = os.path.join(config.data.output_dataset, 'labels')
if MPI_local_rank() == 0:
os.makedirs(output_dir, exist_ok=True)
os.makedirs(output_image_dir, exist_ok=True)
os.makedirs(output_label_dir, exist_ok=True)
# prepare dataloader and DALI pipeline
data_callable = callable_dict[dataset_type](
image_dir, ann_path, batch_size,
include_masks=config.data.include_masks,
shard_id=MPI_local_rank(), num_shards=len(config.gpu_ids))
pipe = pipeline_dict[dataset_type](
data_callable,
is_fixed_size,
config,
batch_size,
device_id=MPI_local_rank()
)
ann_dump = runner.run(pipe, batch_size, data_callable, config)
# extra process for COCO dump
if config.data.dataset_type.lower() == 'coco' and ann_dump:
out_label_path = os.path.join(output_label_dir, "output.json")
tmp_label_path = out_label_path + f'.part{MPI_local_rank()}'
with open(tmp_label_path, "w", encoding='utf-8') as f:
json.dump(ann_dump, f)
from mpi4py import MPI # noqa pylint: disable=C0415
MPI.COMM_WORLD.Barrier() # noqa pylint: disable=I1101
if MPI_local_rank() == 0:
tmp_files = glob.glob(out_label_path + '.part*')
ann_final = []
for tmp_i in tmp_files:
with open(tmp_i, "r", encoding='utf-8') as g:
ann_i = json.load(g)
ann_final.extend(ann_i)
# write final json
coco_output = deepcopy(data_callable.coco.dataset)
coco_output['annotations'] = ann_final
            # update image size if output dims were specified
if is_fixed_size:
for image_info in coco_output['images']:
image_info['height'] = config.data.output_image_height
image_info['width'] = config.data.output_image_width
with open(out_label_path, "w", encoding='utf-8') as o:
json.dump(coco_output, o)
# remove tmp files
for tmp_i in tmp_files:
if os.path.exists(tmp_i):
os.remove(tmp_i)
if MPI_local_rank() == 0:
time_elapsed = time.time() - start_time
logger.info(f"Data augmentation finished in {time_elapsed:.2f}s.")
def check_gt_cache(cfg, is_kitti=False, gt_cache=None):
"""Generate COCO cache file."""
if not os.path.exists(gt_cache):
root = os.path.abspath(cfg.data.output_dataset)
if MPI_local_rank() == 0:
logger.info(f"Mask cache file ({gt_cache}) is not found.")
if is_kitti:
project_name = os.path.basename(root)
tmp_ann_file = os.path.join(root, 'coco', f'{project_name}.json')
if not os.path.isfile(tmp_ann_file):
# convert kitti to coco
logger.info("Converting KITTI labels into COCO format...")
convert_kitti_to_coco(
cfg.data.image_dir,
cfg.data.ann_path,
os.path.join(root, 'coco'),
name=project_name)
logger.info("COCO format conversion completed.")
else:
tmp_ann_file = cfg.data.ann_path
# generate masks
logger.error(f"You need to generate pseudo-masks for `{tmp_ann_file}` with TAO auto-label tool first.")
sys.exit()
spec_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
@hydra_runner(
config_path=os.path.join(spec_root, "experiment_specs"),
config_name="kitti", schema=AugmentConfig
)
def main(cfg: AugmentConfig):
"""TAO Augment main wrapper function."""
try:
is_kitti = cfg.data.dataset_type.lower() == 'kitti'
refine_box_enabled = cfg.spatial_aug.rotation.refine_box.enabled
if refine_box_enabled:
gt_cache = cfg.spatial_aug.rotation.refine_box.gt_cache
check_gt_cache(cfg, is_kitti, gt_cache)
# Update Hydra config
cfg.data.dataset_type = 'coco'
cfg.data.ann_path = gt_cache
cfg.data.include_masks = True
from mpi4py import MPI # noqa pylint: disable=C0415
MPI.COMM_WORLD.Barrier() # noqa pylint: disable=I1101
        # run augmentation
cfg.results_dir = cfg.results_dir or cfg.data.output_dataset
run_augment(cfg)
if is_kitti and refine_box_enabled and MPI_local_rank() == 0:
logger.info("Converting COCO json into KITTI format...")
# convert coco to kitti
convert_coco_to_kitti(
os.path.join(cfg.data.output_dataset, 'labels', 'output.json'),
output_dir=os.path.join(cfg.data.output_dataset, 'labels'),
refine_box=refine_box_enabled
)
logger.info("KITTI conversion is complete.")
except KeyboardInterrupt:
logger.info("Interrupting augmentation.")
sys.exit()
except RuntimeError as e:
logger.info(f"Augmentation run failed with error: {e}")
sys.exit()
if __name__ == '__main__':
main()
| tao_dataset_suite-main | nvidia_tao_ds/augment/scripts/generate.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Offline augmentation entrypoint."""
| tao_dataset_suite-main | nvidia_tao_ds/augment/scripts/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Augmentation entrypoint."""
| tao_dataset_suite-main | nvidia_tao_ds/augment/entrypoint/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Define entrypoint to run tasks for augmentation."""
import argparse
import importlib
import os
import pkgutil
import subprocess
import sys
from time import time
from nvidia_tao_ds.augment import scripts
from nvidia_tao_ds.core.telemetry.nvml_utils import get_device_details
from nvidia_tao_ds.core.telemetry.telemetry import send_telemetry_data
def get_subtasks(package):
"""Get supported subtasks for a given task.
    This function lists out the tasks in the .scripts folder.
Returns:
subtasks (dict): Dictionary of files.
"""
module_path = package.__path__
modules = {}
# Collect modules dynamically.
for _, task, is_package in pkgutil.walk_packages(module_path):
if is_package:
continue
module_name = package.__name__ + '.' + task
module_details = {
"module_name": module_name,
"runner_path": os.path.abspath(importlib.import_module(module_name).__file__),
}
modules[task] = module_details
return modules
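# Hedged usage sketch (not part of the original entrypoint): enumerate the
# subtasks discovered from the scripts package and where each one lives.
def _demo_list_subtasks():
    """Print each discovered subtask and the script that implements it."""
    for name, details in get_subtasks(scripts).items():
        print(f"{name}: {details['runner_path']}")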
def launch(parser, subtasks, multigpu_support=['generate'], task="tao_ds"):
"""CLI function that executes subtasks.
Args:
parser: Created parser object for a given task.
        subtasks: Dictionary of subtasks for a given task.
"""
# Subtasks for a given model.
parser.add_argument(
'subtask', default='generate', choices=subtasks.keys(), help="Subtask for augmentation.",
)
# Add standard TAO arguments.
parser.add_argument(
"-e",
"--experiment_spec_file",
help="Path to the experiment spec file.",
required=True)
parser.add_argument(
"--gpu_ids",
help="GPU index to use.",
type=str,
default=None
)
parser.add_argument(
"--num_gpus",
help="Number of GPUs to use.",
type=int,
default=1
)
parser.add_argument(
"-o",
"--output_specs_dir",
help="Path to a target folder where experiment spec files will be downloaded.",
default=None
)
parser.add_argument(
"--mpirun_arg",
type=str,
default="-x NCCL_IB_HCA=mlx5_4,mlx5_6,mlx5_8,mlx5_10 -x NCCL_SOCKET_IFNAME=^lo,docker",
help="Arguments for the mpirun command to run multi-node."
)
parser.add_argument(
"--launch_cuda_blocking",
action="store_true",
default=False,
help="Debug flag to add CUDA_LAUNCH_BLOCKING=1 to the command calls."
)
# Parse the arguments.
args, unknown_args = parser.parse_known_args()
script_args = ""
# Process spec file for all commands except the one for getting spec files ;)
if args.subtask not in ["download_specs", "pitch_stats"]:
# Make sure the user provides spec file.
if args.experiment_spec_file is None:
print(f"ERROR: The subtask `{args.subtask}` requires the following argument: -e/--experiment_spec_file")
sys.exit()
# Make sure the file exists!
if not os.path.exists(args.experiment_spec_file):
print(f"ERROR: The indicated experiment spec file `{args.experiment_spec_file}` doesn't exist!")
sys.exit()
# Split spec file_path into config path and config name.
path, name = os.path.split(args.experiment_spec_file)
if path != '':
script_args += " --config-path " + os.path.realpath(path)
script_args += " --config-name " + name
# Find relevant module and pass args.
mpi_command = ""
gpu_ids = args.gpu_ids
num_gpus = args.num_gpus
if gpu_ids is None:
gpu_ids = range(num_gpus)
else:
gpu_ids = eval(args.gpu_ids)
num_gpus = len(gpu_ids)
launch_cuda_blocking = args.launch_cuda_blocking
assert num_gpus > 0, "At least 1 GPU required to run any task."
if num_gpus > 1:
if args.subtask not in multigpu_support:
raise NotImplementedError(
f"This {args['subtask']} doesn't support multiGPU. Please set --num_gpus 1"
)
mpi_command = f'mpirun -np {num_gpus} --oversubscribe --bind-to none --allow-run-as-root -mca pml ob1 -mca btl ^openib'
if args.subtask in multigpu_support:
if not args.gpu_ids:
script_args += f" gpu_ids=[{','.join([str(i) for i in range(num_gpus)])}]"
script_args += f" num_gpus={num_gpus}"
else:
script_args += f" gpu_ids=[{','.join([str(i) for i in (eval(args.gpu_ids))])}]"
script_args += f" num_gpus={len(gpu_ids)}"
script = subtasks[args.subtask]["runner_path"]
# Pass unknown args to call
unknown_args_as_str = " ".join(unknown_args)
task_command = f"python {script} {script_args} {unknown_args_as_str}"
env_variables = ""
env_variables += set_gpu_info_single_node(num_gpus, gpu_ids)
if launch_cuda_blocking:
task_command = f"CUDA_LAUNCH_BLOCKING=1 {task_command}"
run_command = f"{mpi_command} bash -c \'{env_variables} {task_command}\'"
start = time()
process_passed = True
try:
subprocess.check_call(
run_command,
shell=True,
stdout=sys.stdout,
stderr=sys.stderr
)
except (KeyboardInterrupt, SystemExit):
print("Command was interrupted.")
except subprocess.CalledProcessError as e:
if e.output is not None:
print(f"TAO Toolkit task: {args['subtask']} failed with error:\n{e.output}")
process_passed = False
end = time()
time_lapsed = int(end - start)
try:
gpu_data = []
for device in get_device_details():
gpu_data.append(device.get_config())
print("Sending telemetry data.")
send_telemetry_data(
task,
args.subtask,
gpu_data,
num_gpus=num_gpus,
time_lapsed=time_lapsed,
pass_status=process_passed
)
    except Exception as e:
        print("Telemetry data couldn't be sent, but the command ran successfully.")
        print(f"[Error]: {e}")
if not process_passed:
print("Execution status: FAIL")
sys.exit(1) # returning non zero return code from the process.
print("Execution status: PASS")
def check_valid_gpus(num_gpus, gpu_ids):
"""Check if the number of GPU's called and IDs are valid.
This function scans the machine using the nvidia-smi routine to find the
number of GPU's and matches the id's and num_gpu's accordingly.
Once validated, it finally also sets the CUDA_VISIBLE_DEVICES env variable.
Args:
        num_gpus (int): Number of GPUs allotted by the user for the job.
gpu_ids (list(int)): List of GPU indices used by the user.
Returns:
No explicit returns
"""
# Ensure the gpu_ids are all different, and sorted
gpu_ids = sorted(list(set(gpu_ids)))
assert num_gpus > 0, "At least 1 GPU required to run any task."
num_gpus_available = str(subprocess.check_output(["nvidia-smi", "-L"])).count("UUID")
max_id = max(gpu_ids)
assert min(gpu_ids) >= 0, (
"GPU ids cannot be negative."
)
    assert len(gpu_ids) == num_gpus, (
        f"The number of GPU indices ({gpu_ids}) must match the number of GPUs"
        f" ({num_gpus}) requested."
    )
    assert max_id < num_gpus_available and num_gpus <= num_gpus_available, (
        f"Requested GPU ids (max {max_id}) and GPU count ({num_gpus}) must fit within"
        f" the {num_gpus_available} GPUs available on this machine."
    )
cuda_visible_devices = ",".join([str(idx) for idx in gpu_ids])
os.environ['CUDA_VISIBLE_DEVICES'] = cuda_visible_devices
def set_gpu_info_single_node(num_gpus, gpu_ids):
"""Set gpu environment variable for single node."""
check_valid_gpus(num_gpus, gpu_ids)
env_variable = ""
visible_devices = os.getenv("CUDA_VISIBLE_DEVICES", None)
if visible_devices is not None:
env_variable = f" CUDA_VISIBLE_DEVICES={visible_devices}"
return env_variable
def main():
"""Main entrypoint wrapper."""
# Create parser for a given task.
parser = argparse.ArgumentParser(
"augmentation",
add_help=True,
description="TAO Toolkit entrypoint for MAL"
)
# Build list of subtasks by inspecting the scripts package.
subtasks = get_subtasks(scripts)
# Parse the arguments and launch the subtask.
launch(
parser, subtasks, task="augment"
)
if __name__ == '__main__':
main()
| tao_dataset_suite-main | nvidia_tao_ds/augment/entrypoint/augment.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dataloader module for TAO augment."""
| tao_dataset_suite-main | nvidia_tao_ds/augment/dataloader/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""kitti loader."""
import os
import numpy as np
from pycocotools import mask
from pycocotools.coco import COCO
from nvidia_tao_ds.augment.utils.file_handlers import load_file
class CocoInputCallable:
"""KITTI loader for DALI pipeline."""
def __init__(self, image_dir, annotation_path, batch_size,
include_masks=False, shard_id=0, num_shards=1):
"""Init."""
self.image_dir = image_dir
self.coco = COCO(annotation_path)
self.include_masks = include_masks
self.batch_size = batch_size
self.shard_id = shard_id
self.num_shards = num_shards
self.samples_per_iter = batch_size * num_shards
self.size = (self.samples_per_iter - 1 + len(self.coco.dataset['images'])) // self.samples_per_iter * self.samples_per_iter
self.shard_size = self.size // num_shards
self.shard_offset = self.shard_size * shard_id
self.full_iterations = self.shard_size // batch_size
def __call__(self, sample_info):
"""Call."""
if sample_info.iteration >= self.full_iterations:
# Indicate end of the epoch
raise StopIteration
sample_idx = sample_info.idx_in_epoch + self.shard_offset
sample_idx = min(sample_idx, len(self.coco.dataset['images']) - 1)
image = self.coco.dataset['images'][sample_idx]
image_path = os.path.join(self.image_dir, image['file_name'])
image_id = image['id']
ann_ids = self.coco.getAnnIds(imgIds=image_id)
boxes, masks = self._get_boxes(ann_ids, image['height'], image['width'])
encoded_img = load_file(image_path)
if self.include_masks:
masks = np.transpose(masks, (1, 2, 0))
return encoded_img, boxes, np.array([image_id], dtype=np.int32), np.uint8(masks)
def _get_boxes(self, ann_ids, image_height, image_width):
boxes = []
masks = []
for ann_id in ann_ids:
ann = self.coco.loadAnns(ann_id)[0]
boxes.append(ann['bbox'])
if self.include_masks:
if 'segmentation' not in ann:
raise ValueError(
f"segmentation groundtruth is missing in object: {ann}.")
                # polygon (e.g. [[289.74,443.39,302.29,445.32, ...], [1,2,3,4]])
if isinstance(ann['segmentation'], list):
rles = mask.frPyObjects(ann['segmentation'],
image_height, image_width)
rle = mask.merge(rles)
elif 'counts' in ann['segmentation']:
# e.g. {'counts': [6, 1, 40, 4, 5, 4, 5, 4, 21], 'size': [9, 10]}
if isinstance(ann['segmentation']['counts'], list):
rle = mask.frPyObjects(ann['segmentation'],
image_height, image_width)
else:
rle = ann['segmentation']
else:
raise ValueError('Please check the segmentation format.')
binary_mask = mask.decode(rle)
masks.append(binary_mask)
else:
masks.append(np.zeros((1, 1)))
return np.float32(boxes), np.uint8(masks)
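# Hedged integration sketch (not part of the original module): how a sample
# callable like this is typically fed to DALI's fn.external_source. The
# pipeline below is illustrative and only loosely mirrors the real augment
# pipeline; the paths are placeholders.
def _demo_coco_dali_pipeline(image_dir, annotation_path):
    """Build and run a toy DALI pipeline around CocoInputCallable."""
    from nvidia.dali import pipeline_def, fn  # noqa pylint: disable=C0415
    source = CocoInputCallable(image_dir, annotation_path, batch_size=4)

    @pipeline_def(batch_size=4, num_threads=2, device_id=0)
    def coco_pipeline():
        # Four outputs match the tuple returned by CocoInputCallable.__call__
        encoded, boxes, image_id, masks = fn.external_source(
            source=source, num_outputs=4, batch=False)
        images = fn.decoders.image(encoded, device="mixed")
        return images, boxes, image_id, masks

    pipe = coco_pipeline()
    pipe.build()
    return pipe.run()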
| tao_dataset_suite-main | nvidia_tao_ds/augment/dataloader/coco_callable.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""kitti loader."""
import os
import numpy as np
from nvidia_tao_ds.augment.utils import kitti
from nvidia_tao_ds.augment.utils.file_handlers import load_file
from nvidia_tao_ds.augment.utils.helper import encode_str
class KittiInputCallable:
"""KITTI loader for DALI pipeline."""
def __init__(self, image_dir, label_dir, batch_size,
include_masks=False, shard_id=0, num_shards=1):
"""Init."""
assert os.path.isdir(label_dir)
self.include_masks = include_masks
self.image_paths, self.label_paths = kitti.list_files(image_dir, label_dir)
assert len(self.image_paths) == len(self.label_paths)
self.labels = [kitti.parse_label_file(lbl) for lbl in self.label_paths]
self.batch_size = batch_size
self.shard_id = shard_id
self.num_shards = num_shards
self.samples_per_iter = batch_size * num_shards
self.size = (self.samples_per_iter - 1 + len(self.image_paths)) // self.samples_per_iter * self.samples_per_iter
self.shard_size = self.size // num_shards
self.shard_offset = self.shard_size * shard_id
self.full_iterations = self.shard_size // batch_size
def __call__(self, sample_info):
"""Call."""
if sample_info.iteration >= self.full_iterations:
# Indicate end of the epoch
raise StopIteration
sample_idx = sample_info.idx_in_epoch + self.shard_offset
sample_idx = min(sample_idx, len(self.image_paths) - 1)
image_path = self.image_paths[sample_idx]
label_path = self.label_paths[sample_idx]
label = self.labels[sample_idx]
boxes = []
for annotation in label:
boxes.append(annotation.box)
boxes = np.float32(boxes)
encoded_img = load_file(image_path)
return encoded_img, boxes, np.uint8(encode_str(image_path)), np.uint8(encode_str(label_path))
| tao_dataset_suite-main | nvidia_tao_ds/augment/dataloader/kitti_callable.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert COCO annotations to KITTI format"""
import os
import numpy as np
from pycocotools.coco import COCO
from pycocotools import mask
def find_corners_2d(np_array):
    """Find the bounding corners of the foreground in a 2D binary mask.
    Returns (x_min, y_min, x_max, y_max), where x indexes columns and y rows.
    """
    rows, cols = np.where(np_array == np.amax(np_array))
    y1 = np.min(rows)
    y2 = np.max(rows)
    x1 = np.min(cols)
    x2 = np.max(cols)
    return x1, y1, x2, y2
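# Hedged sketch: corner extraction on a toy mask. A foreground block covering
# rows 1-2 and columns 2-4 yields (x_min, y_min, x_max, y_max) = (2, 1, 4, 2).
def _demo_find_corners():
    """Recover the tight box around the foreground of a binary mask."""
    toy_mask = np.zeros((5, 6), dtype=np.uint8)
    toy_mask[1:3, 2:5] = 1
    assert tuple(find_corners_2d(toy_mask)) == (2, 1, 4, 2)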
def convert_coco_to_kitti(annotations_file, output_dir, refine_box=False):
"""Function to convert COCO annotations to KITTI format.
Args:
annotations_file (string): Path to the COCO annotation file.
output_dir (string): Directory to output the KITTI files
"""
if not os.path.isfile(annotations_file):
print("Annotation file doenot exsit. Please check the path")
if not os.path.exists(output_dir):
os.makedirs(output_dir)
coco = COCO(annotations_file)
category_ids = coco.getCatIds()
categories = coco.loadCats(category_ids)
category_id_dict = {}
for category in categories:
category_id_dict[category['id']] = category['name']
    for img_id, img_info in coco.imgs.items():
        annotation_ids = coco.getAnnIds(imgIds=[img_id], catIds=category_ids)
        if len(annotation_ids) > 0:
            img_fname = img_info['file_name']
            label_fname = os.path.splitext(img_fname)[0]
with open(os.path.join(output_dir, f'{label_fname}.txt'), 'w', encoding='utf-8') as label_file:
annotations = coco.loadAnns(annotation_ids)
for annotation in annotations:
bbox = annotation['bbox']
bbox = [bbox[0], bbox[1], bbox[2] + bbox[0], bbox[3] + bbox[1]]
if refine_box and annotation.get('segmentation', None):
rle = annotation['segmentation']
binary_mask = mask.decode(rle)
bbox = find_corners_2d(binary_mask)
bbox = [str(b) for b in bbox]
                    catname = category_id_dict[annotation['category_id']]
                    out_str = catname.replace(" ", "") + ' ' + ' '.join(['0'] * 3) + ' ' + ' '.join(bbox) + ' ' + ' '.join(['0'] * 7) + '\n'
                    label_file.write(out_str)
| tao_dataset_suite-main | nvidia_tao_ds/annotations/coco_to_kitti.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert KITTI labels to COCO format"""
from collections import OrderedDict
import os
from pathlib import Path
import pandas as pd
import numpy as np
import ujson
import yaml
from tqdm.auto import tqdm
import csv
import cv2
def read_kitti_labels(label_file):
"""
Utility function to read a KITTI label file
Args:
label_file (string): Label file path
Returns:
label_list (list): List of labels
"""
label_list = []
if not os.path.exists(label_file):
raise ValueError(f"Labelfile : {label_file} does not exist")
    with open(label_file, 'r', encoding='utf-8') as lf:
        for row in csv.reader(lf, delimiter=' '):
            label_list.append(row)
    return label_list
def check_bbox_coordinates(coord, img_h, img_w):
    """
    Utility function to validate and clip bounding box coordinates
    Args:
        coord (tuple): Bounding box coordinates in KITTI format (x1, y1, x2, y2)
        img_h (int): Image height
        img_w (int): Image width
    Returns:
        Clipped bounding box coordinates, or None if the clipped box is degenerate
    """
x1, y1, x2, y2 = coord
x1 = min(max(x1, 0), img_w)
x2 = min(max(x2, 0), img_w)
y1 = min(max(y1, 0), img_h)
y2 = min(max(y2, 0), img_h)
if x2 > x1 and y2 > y1:
return [x1, y1, x2, y2]
return None
def convert_xyxy_to_xywh(coord):
    """
    Utility function to convert bounding box coordinates from KITTI (xyxy) to COCO (xywh) format
    Args:
        coord (tuple): Bounding box coordinates in KITTI format (x1, y1, x2, y2)
    Returns:
        Bounding box coordinates in COCO format [x, y, w, h]
    """
x1, y1, x2, y2 = coord
w, h = x2 - x1, y2 - y1
return [x1, y1, w, h]
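# Hedged sketch: the two helpers composed, mirroring how the converter later
# in this module clips each KITTI box before emitting COCO xywh.
def _demo_bbox_helpers():
    """Clip an out-of-bounds box, then convert it to COCO format."""
    clipped = check_bbox_coordinates([-3.0, 10.0, 50.0, 40.0], img_h=100, img_w=200)
    assert clipped == [0, 10.0, 50.0, 40.0]
    assert convert_xyxy_to_xywh(clipped) == [0, 10.0, 50.0, 30.0]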
def get_categories(cat_map):
"""
Function to convert the category map to COCO annotation format
Args:
cat_map (dictionary): Category map
Returns:
categories_list (list): COCO annotation format of the category map
"""
categories_list = []
for i, class_name in enumerate(cat_map):
category = {
'id': i + 1,
'name': class_name
}
categories_list.append(category)
return categories_list
def construct_category_map(label_dir, mapping=None):
"""
Function to create a category map for the given dataset
Args:
label_dir (str): Label directory
mapping (str): Mapping file
Returns:
cat_map (dictionary): Category mapping
"""
cat_map = OrderedDict()
if mapping is not None:
with open(mapping, "r", encoding='utf-8') as f:
try:
cat_map_list = yaml.safe_load(f)
except yaml.YAMLError as e:
print(e)
for i in cat_map_list:
k, v = list(i.items())[0]
cat_map[k] = v
else:
for img in os.listdir(label_dir):
labels = read_kitti_labels(os.path.join(label_dir, f"{img[:-4]}.txt"))
df = pd.DataFrame(labels)
for _, row_p in df.iterrows():
if row_p[0] not in cat_map:
cat_map[row_p[0]] = [row_p[0]]
return cat_map
def convert_kitti_to_coco(img_dir, label_dir, output_dir, mapping=None, name=None):
"""Function to convert KITTI annotations to COCO format.
Args:
img_dir (string): Directory containing the images.
label_dir (string): Directory containing the KITTI labels
output_dir (string): Directory to output the COCO annotation file
"""
annot_list, img_list, skipped_list = [], [], []
img_id, obj_id = 0, 0
img_dir = str(Path(img_dir))
label_dir = str(Path(label_dir))
project = name or img_dir.split('/')[-2]
if not os.path.exists(output_dir):
os.makedirs(output_dir)
cat_map = construct_category_map(label_dir, mapping)
categories = get_categories(cat_map)
labels2cat = {label: k for k, v in cat_map.items() for label in v}
cat2index = {c['name']: c['id'] for c in categories}
print("category to id mapping:")
print("*********************")
print(cat2index)
print("*********************")
for img in tqdm(os.listdir(img_dir)):
if str(Path(img).suffix).lower() not in [".jpg", ".png", ".jpeg"]:
continue
labels = os.path.join(label_dir, f"{img[:-4]}.txt")
img_shape = cv2.imread(os.path.join(img_dir, img)).shape
height, width = img_shape[0], img_shape[1]
# update image list
img_id += 1
img_dict = {
"file_name": img,
"scene_id": project,
"height": height,
"width": width,
"id": img_id
}
img_list.append(img_dict)
# process labels
bboxes = read_kitti_labels(labels)
df = pd.DataFrame(bboxes)
df = df.drop_duplicates()
# update annotation list
include_image = False
for _, row_p in df.iterrows():
mapped = labels2cat.get(row_p[0], None)
if not mapped:
continue
bbox = np.array(row_p[4:8])
bbox = bbox.astype(float)
coord = check_bbox_coordinates(bbox.tolist(), height, width)
if not coord:
continue
include_image = True
coord = convert_xyxy_to_xywh(coord)
area = coord[2] * coord[3]
obj_id += 1
annot_dict = {
"bbox": coord,
"image_id": img_id,
"scene_id": project,
"iscrowd": 0,
"area": area,
"category_id": cat2index[mapped],
"id": obj_id
}
annot_list.append(annot_dict)
if not include_image:
img_skipped = img_list.pop()
skipped_list.append(img_skipped['file_name'])
final_dict = {
"annotations": annot_list,
"images": img_list,
"categories": categories
}
save_path = os.path.join(output_dir, f"{project}.json")
with open(save_path, "w", encoding='utf-8') as f:
ujson.dump(final_dict, f)
if skipped_list:
with open(os.path.join(output_dir, 'skipped_files.txt'), 'w', encoding='utf-8') as g:
g.write('\n'.join(str(fname) for fname in skipped_list))
| tao_dataset_suite-main | nvidia_tao_ds/annotations/kitti_to_coco.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data format conversion module"""
| tao_dataset_suite-main | nvidia_tao_ds/annotations/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data conversion config module."""
| tao_dataset_suite-main | nvidia_tao_ds/annotations/config/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Default config file."""
from dataclasses import dataclass
from omegaconf import MISSING
from typing import Optional
@dataclass
class DataConfig:
"""Dataset configuration template."""
input_format: str = "KITTI"
output_format: str = "COCO"
output_dir: str = MISSING
@dataclass
class KITTIConfig:
"""Dataset configuration template."""
image_dir: str = MISSING
label_dir: str = MISSING
project: Optional[str] = None
mapping: Optional[str] = None
@dataclass
class COCOConfig:
"""Dataset configuration template."""
ann_file: str = MISSING
@dataclass
class ExperimentConfig:
"""Experiment configuration template."""
data: DataConfig = DataConfig()
kitti: KITTIConfig = KITTIConfig()
coco: COCOConfig = COCOConfig()
results_dir: Optional[str] = None
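# Hedged usage sketch (not part of the schema): materialize the structured
# config with OmegaConf, roughly what the hydra runner does under the hood.
# The override values below are placeholders.
def _demo_experiment_config():
    """Merge the schema with a minimal set of overrides."""
    from omegaconf import OmegaConf  # noqa pylint: disable=C0415
    schema = OmegaConf.structured(ExperimentConfig)
    overrides = OmegaConf.create(
        {"data": {"output_dir": "/tmp/out"}, "coco": {"ann_file": "/tmp/ann.json"}}
    )
    return OmegaConf.merge(schema, overrides)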
| tao_dataset_suite-main | nvidia_tao_ds/annotations/config/default_config.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Entrypoint script to run TAO data format conversion."""
import os
import sys
from nvidia_tao_ds.annotations.coco_to_kitti import convert_coco_to_kitti
from nvidia_tao_ds.annotations.config.default_config import ExperimentConfig
from nvidia_tao_ds.annotations.kitti_to_coco import convert_kitti_to_coco
from nvidia_tao_ds.core.decorators import monitor_status
from nvidia_tao_ds.core.hydra.hydra_runner import hydra_runner
@hydra_runner(
config_path=os.path.join(os.path.dirname(os.path.abspath(__file__)), "../experiment_specs"),
config_name="annotations", schema=ExperimentConfig
)
def main(cfg: ExperimentConfig) -> None:
"""Wrapper function for format conversion."""
cfg.results_dir = cfg.results_dir or cfg.data.output_dir
run_conversion(cfg=cfg)
@monitor_status(mode='Annotation conversion')
def run_conversion(cfg: ExperimentConfig):
"""TAO annotation convert wrapper."""
try:
if cfg.data.input_format == "KITTI" and cfg.data.output_format == "COCO":
convert_kitti_to_coco(
cfg.kitti.image_dir,
cfg.kitti.label_dir,
cfg.data.output_dir,
cfg.kitti.mapping,
cfg.kitti.project)
elif cfg.data.input_format == "COCO" and cfg.data.output_format == "KITTI":
convert_coco_to_kitti(
cfg.coco.ann_file,
cfg.data.output_dir)
else:
print("Unsupported format")
except KeyboardInterrupt as e:
print(f"Interrupting data conversion with error: {e}")
sys.exit()
except RuntimeError as e:
print(f"Data conversion run failed with error: {e}")
sys.exit()
if __name__ == '__main__':
main()
| tao_dataset_suite-main | nvidia_tao_ds/annotations/scripts/convert.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data format conversion scripts"""
| tao_dataset_suite-main | nvidia_tao_ds/annotations/scripts/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data format conversion entrypoint"""
| tao_dataset_suite-main | nvidia_tao_ds/annotations/entrypoint/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Define entrypoint to run tasks for annotation."""
import argparse
from nvidia_tao_ds.core.entrypoint.entrypoint import get_subtasks, launch
from nvidia_tao_ds.annotations import scripts
def main():
"""Main entrypoint wrapper."""
# Create parser for a given task.
parser = argparse.ArgumentParser(
"annotation",
add_help=True,
description="Annotation entrypoint"
)
# Build list of subtasks by inspecting the scripts package.
subtasks = get_subtasks(scripts)
# Parse the arguments and launch the subtask.
launch(
parser, subtasks, task="annotation"
)
if __name__ == '__main__':
main()
| tao_dataset_suite-main | nvidia_tao_ds/annotations/entrypoint/annotations.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# This work is made available under the Nvidia Source Code License-NC.
# To view a copy of this license, visit
# https://github.com/NVlabs/FAN/blob/main/LICENSE
""" ConvNeXt
Paper: `A ConvNet for the 2020s` - https://arxiv.org/pdf/2201.03545.pdf
Original code and weights from https://github.com/facebookresearch/ConvNeXt, original copyright below
Modifications and additions for timm hacked together by / Copyright 2022, Ross Wightman
"""
from collections import OrderedDict
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.models.fx_features import register_notrace_module
from timm.models.helpers import named_apply, build_model_with_cfg
from timm.models.layers import trunc_normal_, ClassifierHead, SelectAdaptivePool2d, DropPath, Mlp
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
'crop_pct': 0.875, 'interpolation': 'bicubic',
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'stem.0', 'classifier': 'head.fc',
**kwargs
}
default_cfgs = dict(
convnext_tiny=_cfg(url="https://dl.fbaipublicfiles.com/convnext/convnext_tiny_1k_224_ema.pth"),
convnext_small=_cfg(url="https://dl.fbaipublicfiles.com/convnext/convnext_small_1k_224_ema.pth"),
convnext_base=_cfg(url="https://dl.fbaipublicfiles.com/convnext/convnext_base_1k_224_ema.pth"),
convnext_large=_cfg(url="https://dl.fbaipublicfiles.com/convnext/convnext_large_1k_224_ema.pth"),
convnext_tiny_hnf=_cfg(url=''),
convnext_base_in22k=_cfg(
# url="https://dl.fbaipublicfiles.com/convnext/convnext_base_22k_224.pth", num_classes=80),
url="pretrained/convnext_base_22k_224.pth", num_classes=80),
convnext_large_in22k=_cfg(
url="https://dl.fbaipublicfiles.com/convnext/convnext_large_22k_224.pth", num_classes=80),
convnext_xlarge_in22k=_cfg(
url="https://dl.fbaipublicfiles.com/convnext/convnext_xlarge_22k_224.pth", num_classes=80),
)
def _is_contiguous(tensor: torch.Tensor) -> bool:
"""Check if the tensor is continguous for torch jit script purpose"""
# jit is oh so lovely :/
# if torch.jit.is_tracing():
# return True
if torch.jit.is_scripting():
return tensor.is_contiguous()
return tensor.is_contiguous(memory_format=torch.contiguous_format)
class ConvMlp(nn.Module):
""" MLP using 1x1 convs that keeps spatial dims"""
def __init__(
self, in_features, hidden_features=None, out_features=None, act_layer=nn.ReLU, norm_layer=None, drop=0.):
"""Initialize the ConvMlp Class.
Args:
in_features: number of input features
            hidden_features: number of hidden features
out_features: number of output features
act_layer: activation layer class to be used
norm_layer: normalization layer class to be used
drop: dropout probability
"""
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Conv2d(in_features, hidden_features, kernel_size=1, bias=True)
self.norm = norm_layer(hidden_features) if norm_layer else nn.Identity()
self.act = act_layer()
self.fc2 = nn.Conv2d(hidden_features, out_features, kernel_size=1, bias=True)
self.drop = nn.Dropout(drop)
def forward(self, x):
"""Forward function"""
x = self.fc1(x)
x = self.norm(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
return x
@register_notrace_module
class LayerNorm2d(nn.LayerNorm):
""" LayerNorm for channels_first tensors with 2d spatial dimensions (ie N, C, H, W)."""
def __init__(self, normalized_shape, eps=1e-6):
"""Initialize the Layernorm2d class.
Args:
normalized_shape: shape to be normalized to
eps: epsilon value for numerically stability
"""
super().__init__(normalized_shape, eps=eps)
def forward(self, x) -> torch.Tensor:
"""Forward function."""
if _is_contiguous(x):
return F.layer_norm(
x.permute(0, 2, 3, 1), self.normalized_shape, self.weight, self.bias, self.eps).permute(0, 3, 1, 2)
s, u = torch.var_mean(x, dim=1, keepdim=True)
x = (x - u) * torch.rsqrt(s + self.eps)
x = x * self.weight[:, None, None] + self.bias[:, None, None]
return x
class ConvNeXtBlock(nn.Module):
""" ConvNeXt Block
There are two equivalent implementations:
(1) DwConv -> LayerNorm (channels_first) -> 1x1 Conv -> GELU -> 1x1 Conv; all in (N, C, H, W)
(2) DwConv -> Permute to (N, H, W, C); LayerNorm (channels_last) -> Linear -> GELU -> Linear; Permute back
Unlike the official impl, this one allows choice of 1 or 2, 1x1 conv can be faster with appropriate
choice of LayerNorm impl, however as model size increases the tradeoffs appear to change and nn.Linear
is a better choice. This was observed with PyTorch 1.10 on 3090 GPU, it could change over time & w/ different HW.
"""
def __init__(self, dim, drop_path=0., ls_init_value=1e-6, conv_mlp=True, mlp_ratio=4, norm_layer=None):
"""Initialize ConvNext Block.
Args:
dim (int): Number of input channels.
drop_path (float): Stochastic depth rate. Default: 0.0
ls_init_value (float): Init value for Layer Scale. Default: 1e-6.
"""
super().__init__()
if not norm_layer:
norm_layer = partial(LayerNorm2d, eps=1e-6) if conv_mlp else partial(nn.LayerNorm, eps=1e-6)
mlp_layer = ConvMlp if conv_mlp else Mlp
self.use_conv_mlp = conv_mlp
self.conv_dw = nn.Conv2d(dim, dim, kernel_size=7, padding=3, groups=dim) # depthwise conv
self.norm = norm_layer(dim)
self.mlp = mlp_layer(dim, int(mlp_ratio * dim), act_layer=nn.GELU)
self.gamma = nn.Parameter(ls_init_value * torch.ones(dim)) if ls_init_value > 0 else None
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
def forward(self, x):
"""Forward function."""
shortcut = x
x = self.conv_dw(x)
if self.use_conv_mlp:
x = self.norm(x)
x = self.mlp(x)
else:
x = x.permute(0, 2, 3, 1)
x = self.norm(x)
x = self.mlp(x)
x = x.permute(0, 3, 1, 2)
if self.gamma is not None:
x = x.mul(self.gamma.reshape(1, -1, 1, 1))
x = self.drop_path(x) + shortcut
return x
class ConvNeXtStage(nn.Module):
"""ConvNeXt Stage."""
def __init__(
self, in_chs, out_chs, stride=2, depth=2, dp_rates=None, ls_init_value=1.0, conv_mlp=True,
norm_layer=None, cl_norm_layer=None, no_downsample=False):
"""Initialize ConvNext Stage.
Args:
in_chs (int): Number of input channels.
out_chs (int): Number of output channels.
"""
super().__init__()
if in_chs != out_chs or stride > 1:
self.downsample = nn.Sequential(
norm_layer(in_chs),
nn.Conv2d(in_chs, out_chs, kernel_size=stride, stride=stride if not no_downsample else 1),
)
else:
self.downsample = nn.Identity()
dp_rates = dp_rates or [0.] * depth
self.blocks = nn.Sequential(*[ConvNeXtBlock(
dim=out_chs, drop_path=dp_rates[j], ls_init_value=ls_init_value, conv_mlp=conv_mlp,
norm_layer=norm_layer if conv_mlp else cl_norm_layer)
for j in range(depth)]
)
def forward(self, x):
"""Forward function."""
x = self.downsample(x)
x = self.blocks(x)
return x
class ConvNeXt(nn.Module):
"""A PyTorch impl of : `A ConvNet for the 2020s` - https://arxiv.org/pdf/2201.03545.pdf"""
def __init__(
self, in_chans=3, num_classes=1000, global_pool='avg', output_stride=32, patch_size=4,
depths=(3, 3, 9, 3), dims=(96, 192, 384, 768), ls_init_value=1e-6, conv_mlp=True, use_head=True,
head_init_scale=1., head_norm_first=False, norm_layer=None, drop_rate=0., drop_path_rate=0.,
remove_last_downsample=False
):
""" Initialize the ConvNext Class
Args:
in_chans (int): Number of input image channels. Default: 3
num_classes (int): Number of classes for classification head. Default: 1000
depths (tuple(int)): Number of blocks at each stage. Default: [3, 3, 9, 3]
dims (tuple(int)): Feature dimension at each stage. Default: [96, 192, 384, 768]
drop_rate (float): Head dropout rate
drop_path_rate (float): Stochastic depth rate. Default: 0.
ls_init_value (float): Init value for Layer Scale. Default: 1e-6.
head_init_scale (float): Init scaling value for classifier weights and biases. Default: 1.
"""
super().__init__()
assert output_stride == 32
if norm_layer is None:
norm_layer = partial(LayerNorm2d, eps=1e-6)
cl_norm_layer = norm_layer if conv_mlp else partial(nn.LayerNorm, eps=1e-6)
else:
            assert conv_mlp, \
                'If a norm_layer is specified, conv MLP must be used so all norms expect rank-4, channels-first input'
cl_norm_layer = norm_layer
self.num_classes = num_classes
self.drop_rate = drop_rate
self.feature_info = []
# NOTE: this stem is a minimal form of ViT PatchEmbed, as used in SwinTransformer w/ patch_size = 4
self.stem = nn.Sequential(
nn.Conv2d(in_chans, dims[0], kernel_size=patch_size, stride=patch_size),
norm_layer(dims[0])
)
self.stages = nn.Sequential()
dp_rates = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)]
curr_stride = patch_size
prev_chs = dims[0]
stages = []
# 4 feature resolution stages, each consisting of multiple residual blocks
for i in range(len(depths)):
stride = 2 if i > 0 else 1
# FIXME support dilation / output_stride
curr_stride *= stride
out_chs = dims[i]
no_downsample = remove_last_downsample and (i == len(depths) - 1)
stages.append(ConvNeXtStage(
prev_chs, out_chs, stride=stride,
depth=depths[i], dp_rates=dp_rates[i], ls_init_value=ls_init_value, conv_mlp=conv_mlp,
norm_layer=norm_layer, cl_norm_layer=cl_norm_layer, no_downsample=no_downsample)
)
prev_chs = out_chs
# NOTE feature_info use currently assumes stage 0 == stride 1, rest are stride 2
self.feature_info += [dict(num_chs=prev_chs, reduction=curr_stride, module=f'stages.{i}')]
self.stages = nn.Sequential(*stages)
self.num_features = prev_chs
if head_norm_first:
# norm -> global pool -> fc ordering, like most other nets (not compat with FB weights)
self.norm_pre = norm_layer(self.num_features) # final norm layer, before pooling
if use_head:
self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=drop_rate)
else:
# pool -> norm -> fc, the default ConvNeXt ordering (pretrained FB weights)
self.norm_pre = nn.Identity()
if use_head:
self.head = nn.Sequential(OrderedDict([
('global_pool', SelectAdaptivePool2d(pool_type=global_pool)),
('norm', norm_layer(self.num_features)),
('flatten', nn.Flatten(1) if global_pool else nn.Identity()),
('drop', nn.Dropout(self.drop_rate)),
('fc', nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity())
]))
named_apply(partial(_init_weights, head_init_scale=head_init_scale), self)
def get_classifier(self):
"""Returns classifier of ConvNeXt"""
return self.head.fc
def reset_classifier(self, num_classes=0, global_pool='avg'):
"""Redefine the classification head"""
if isinstance(self.head, ClassifierHead):
# norm -> global pool -> fc
self.head = ClassifierHead(
self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate)
else:
# pool -> norm -> fc
self.head = nn.Sequential(OrderedDict([
('global_pool', SelectAdaptivePool2d(pool_type=global_pool)),
('norm', self.head.norm),
('flatten', nn.Flatten(1) if global_pool else nn.Identity()),
('drop', nn.Dropout(self.drop_rate)),
('fc', nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity())
]))
    def forward_features(self, x, return_feat=False):
        """Extract features, optionally returning the per-stage outputs."""
        x = self.stem(x)
        out_list = []
        for stage in self.stages:
            x = stage(x)
            out_list.append(x)
        x = self.norm_pre(x)
        if return_feat:
            return x, out_list
        return x
def forward(self, x):
"""Forward function"""
x = self.forward_features(x)
x = self.head(x)
return x
def _init_weights(module, name=None, head_init_scale=1.0):
"""Initialize weights"""
if isinstance(module, nn.Conv2d):
trunc_normal_(module.weight, std=.02)
nn.init.constant_(module.bias, 0)
elif isinstance(module, nn.Linear):
trunc_normal_(module.weight, std=.02)
nn.init.constant_(module.bias, 0)
if name and 'head.' in name:
module.weight.data.mul_(head_init_scale)
module.bias.data.mul_(head_init_scale)
def checkpoint_filter_fn(state_dict, model):
""" Remap FB checkpoints -> timm """
if 'model' in state_dict:
state_dict = state_dict['model']
out_dict = {}
import re
for k, v in state_dict.items():
k = k.replace('downsample_layers.0.', 'stem.')
k = re.sub(r'stages.([0-9]+).([0-9]+)', r'stages.\1.blocks.\2', k)
k = re.sub(r'downsample_layers.([0-9]+).([0-9]+)', r'stages.\1.downsample.\2', k)
k = k.replace('dwconv', 'conv_dw')
k = k.replace('pwconv', 'mlp.fc')
k = k.replace('head.', 'head.fc.')
if k in model.state_dict().keys():
if k.startswith('norm.'):
k = k.replace('norm', 'head.norm')
if v.ndim == 2 and 'head' not in k:
model_shape = model.state_dict()[k].shape
v = v.reshape(model_shape)
out_dict[k] = v
return out_dict
def _create_hybrid_backbone(variant='convnext_base_in22k', pretrained=False, **kwargs):
"""Create ConvNeXt hybrid backbone for FAN"""
model = build_model_with_cfg(
ConvNeXt, variant, pretrained,
pretrained_filter_fn=checkpoint_filter_fn,
feature_cfg=dict(out_indices=(0, 1, 2, 3), flatten_sequential=True),
**kwargs)
return model
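# Hedged usage sketch (illustrative sizes, not a pretrained configuration):
# run a small ConvNeXt and inspect the per-stage feature maps, which is how
# the FAN hybrid backbone consumes this module.
def _demo_convnext_features():
    """Forward a dummy batch and collect the four stage outputs."""
    model = ConvNeXt(depths=(2, 2, 2, 2), dims=(32, 64, 128, 256), num_classes=10)
    dummy = torch.randn(1, 3, 64, 64)
    final, per_stage = model.forward_features(dummy, return_feat=True)
    # Stem stride 4, then x2 per stage -> spatial sizes 16, 8, 4, 2
    return final.shape, [f.shape for f in per_stage]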
| tao_dataset_suite-main | nvidia_tao_ds/backbone/convnext_utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# This work is made available under the Nvidia Source Code License-NC.
# To view a copy of this license, visit
# https://github.com/NVlabs/FAN/blob/main/LICENSE
""" Swin Transformer
A PyTorch impl of : `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows`
- https://arxiv.org/pdf/2103.14030
Code/weights from https://github.com/microsoft/Swin-Transformer, original copyright/license info below
Modifications and additions for timm hacked together by / Copyright 2021, Ross Wightman
"""
# --------------------------------------------------------
# Swin Transformer
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ze Liu
#
# Modified by: Daquan Zhou
# --------------------------------------------------------
import math
from copy import deepcopy
from typing import Optional
import torch
import torch.nn as nn
import torch.utils.checkpoint as checkpoint
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.models.fx_features import register_notrace_function
from timm.models.helpers import build_model_with_cfg
from timm.models.layers import PatchEmbed, Mlp
from timm.models.layers import _assert
from timm.models.vision_transformer import checkpoint_filter_fn, init_weights_vit_timm
from timm.models.layers import DropPath, trunc_normal_, to_2tuple
def overlay_external_default_cfg(default_cfg, kwargs):
"""Overlay 'external_default_cfg' in kwargs on top of default_cfg arg."""
external_default_cfg = kwargs.pop('external_default_cfg', None)
if external_default_cfg:
default_cfg.pop('url', None) # url should come from external cfg
default_cfg.pop('hf_hub', None) # hf hub id should come from external cfg
default_cfg.update(external_default_cfg)
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None,
'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True,
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'patch_embed.proj', 'classifier': 'head',
**kwargs
}
default_cfgs = {
# patch models (my experiments)
'swin_base_patch4_window12_384': _cfg(
url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window12_384_22kto1k.pth',
input_size=(3, 384, 384), crop_pct=1.0),
'swin_base_patch4_window7_224': _cfg(
url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window7_224_22kto1k.pth',
),
'swin_large_patch4_window12_384': _cfg(
url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window12_384_22kto1k.pth',
input_size=(3, 384, 384), crop_pct=1.0),
'swin_large_patch4_window7_224': _cfg(
url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window7_224_22kto1k.pth',
),
'swin_small_patch4_window7_224': _cfg(
url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_small_patch4_window7_224.pth',
),
'swin_tiny_patch4_window7_224': _cfg(
url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth',
),
'swin_base_patch4_window12_384_in22k': _cfg(
url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window12_384_22k.pth',
input_size=(3, 384, 384), crop_pct=1.0, num_classes=21841),
'swin_base_patch4_window7_224_in22k': _cfg(
url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window7_224_22k.pth',
num_classes=21841),
'swin_large_patch4_window12_384_in22k': _cfg(
url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window12_384_22k.pth',
input_size=(3, 384, 384), crop_pct=1.0, num_classes=21841),
'swin_large_patch4_window7_224_in22k': _cfg(
url='https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window7_224_22k.pth',
num_classes=21841),
}
def window_partition(x, window_size: int):
"""
Args:
x: (B, H, W, C)
window_size (int): window size
Returns:
windows: (num_windows*B, window_size, window_size, C)
"""
B, H, W, C = x.shape
x = x.view(B, H // window_size, window_size, W // window_size, window_size, C)
windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)
return windows
@register_notrace_function # reason: int argument is a Proxy
def window_reverse(windows, window_size: int, H: int, W: int):
"""
Args:
windows: (num_windows*B, window_size, window_size, C)
window_size (int): Window size
H (int): Height of image
W (int): Width of image
Returns:
x: (B, H, W, C)
"""
B = int(windows.shape[0] / (H * W / window_size / window_size))
x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1)
x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)
return x
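# Hedged sketch (not in the original file): window_partition and
# window_reverse are mutual inverses when H and W divide evenly by the
# window size.
def _demo_window_roundtrip():
    """Round-trip a (B, H, W, C) tensor through 4x4 windows."""
    x = torch.randn(2, 8, 8, 3)
    windows = window_partition(x, window_size=4)   # (num_windows * B, 4, 4, 3)
    restored = window_reverse(windows, 4, H=8, W=8)
    assert torch.equal(restored, x)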
class WindowAttention(nn.Module):
r""" Window based multi-head self attention (W-MSA) module with relative position bias.
It supports both of shifted and non-shifted window.
Args:
dim (int): Number of input channels.
window_size (tuple[int]): The height and width of the window.
num_heads (int): Number of attention heads.
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0
proj_drop (float, optional): Dropout ratio of output. Default: 0.0
"""
def __init__(self, dim, window_size, num_heads, qkv_bias=True, attn_drop=0., proj_drop=0.):
"""Initialize WindowAttention class"""
super().__init__()
self.dim = dim
self.window_size = window_size # Wh, Ww
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = head_dim ** -0.5
# define a parameter table of relative position bias
self.relative_position_bias_table = nn.Parameter(
torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads)) # 2*Wh-1 * 2*Ww-1, nH
# get pair-wise relative position index for each token inside the window
coords_h = torch.arange(self.window_size[0])
coords_w = torch.arange(self.window_size[1])
coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww
coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww
relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww
relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2
relative_coords[:, :, 0] += self.window_size[0] - 1 # shift to start from 0
relative_coords[:, :, 1] += self.window_size[1] - 1
relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww
self.register_buffer("relative_position_index", relative_position_index)
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
trunc_normal_(self.relative_position_bias_table, std=.02)
self.softmax = nn.Softmax(dim=-1)
def forward(self, x, mask: Optional[torch.Tensor] = None):
"""
Args:
x: input features with shape of (num_windows*B, N, C)
mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None
"""
B_, N, C = x.shape
qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
q, k, v = qkv.unbind(0) # make torchscript happy (cannot use tensor as tuple)
q = q * self.scale
attn = (q @ k.transpose(-2, -1))
relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view(
self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1) # Wh*Ww,Wh*Ww,nH
relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww
attn = attn + relative_position_bias.unsqueeze(0)
if mask is not None:
nW = mask.shape[0]
attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0)
attn = attn.view(-1, self.num_heads, N, N)
attn = self.softmax(attn)
else:
attn = self.softmax(attn)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B_, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
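# Minimal usage sketch (hypothetical sizes, not from the original source):
#   attn = WindowAttention(dim=96, window_size=(7, 7), num_heads=3)
#   out = attn(torch.randn(128, 49, 96))  # (num_windows*B, Wh*Ww, C) -> same shape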
class PositionalEncodingFourier(nn.Module):
"""
    Positional encoding relying on a Fourier kernel matching the one used in the "Attention Is All You Need" paper.
Based on the official XCiT code
- https://github.com/facebookresearch/xcit/blob/master/xcit.py
"""
def __init__(self, hidden_dim=32, dim=768, temperature=10000):
"""Initialize PositionalEncodingFourier class"""
super().__init__()
self.token_projection = nn.Conv2d(hidden_dim * 2, dim, kernel_size=1)
self.scale = 2 * math.pi
self.temperature = temperature
self.hidden_dim = hidden_dim
self.dim = dim
self.eps = 1e-6
def forward(self, B: int, H: int, W: int):
"""Forward function"""
device = self.token_projection.weight.device
y_embed = torch.arange(1, H + 1, dtype=torch.float32, device=device).unsqueeze(1).repeat(1, 1, W)
x_embed = torch.arange(1, W + 1, dtype=torch.float32, device=device).repeat(1, H, 1)
y_embed = y_embed / (y_embed[:, -1:, :] + self.eps) * self.scale
x_embed = x_embed / (x_embed[:, :, -1:] + self.eps) * self.scale
dim_t = torch.arange(self.hidden_dim, dtype=torch.float32, device=device)
dim_t = self.temperature ** (2 * torch.div(dim_t, 2, rounding_mode='floor') / self.hidden_dim)
pos_x = x_embed[:, :, :, None] / dim_t
pos_y = y_embed[:, :, :, None] / dim_t
pos_x = torch.stack([pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()], dim=4).flatten(3)
pos_y = torch.stack([pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()], dim=4).flatten(3)
pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)
pos = self.token_projection(pos)
return pos.repeat(B, 1, 1, 1) # (B, C, H, W)
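# The concatenated sin/cos features have 2 * hidden_dim channels, which
# token_projection maps to `dim`; the returned tensor has shape (B, dim, H, W).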
class FANMlp(nn.Module):
"""FANMlp"""
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0., linear=False):
"""Initialize FANMlp"""
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.dwconv = DWConv(hidden_features)
self.gamma = nn.Parameter(torch.ones(hidden_features), requires_grad=True)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
self.linear = linear
if self.linear:
self.relu = nn.ReLU(inplace=True)
self.apply(self._init_weights)
def _init_weights(self, m):
"""Initialize weights"""
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
elif isinstance(m, nn.Conv2d):
fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
fan_out //= m.groups
m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
if m.bias is not None:
m.bias.data.zero_()
def forward(self, x, H, W):
"""Forward function"""
x = self.fc1(x)
if self.linear:
x = self.relu(x)
x = self.drop(self.gamma * self.dwconv(x, H, W)) # + x
x = self.fc2(x)
x = self.drop(x)
return x
class DWConv(nn.Module):
"""Depth-wise convolution"""
def __init__(self, in_features, out_features=None, act_layer=nn.GELU, kernel_size=3):
"""Initialize DWConv class"""
super().__init__()
out_features = out_features or in_features
padding = kernel_size // 2
self.conv1 = torch.nn.Conv2d(
in_features, in_features, kernel_size=kernel_size, padding=padding, groups=in_features)
self.act = act_layer()
self.bn = nn.BatchNorm2d(in_features)
self.conv2 = torch.nn.Conv2d(
in_features, out_features, kernel_size=kernel_size, padding=padding, groups=out_features)
def forward(self, x, H: int, W: int):
"""Forward function"""
B, N, C = x.shape
x = x.permute(0, 2, 1).reshape(B, C, H, W)
x = self.conv1(x)
x = self.act(x)
x = self.bn(x)
x = self.conv2(x)
x = x.reshape(B, C, N).permute(0, 2, 1)
return x
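# Note: the final reshape back to (B, N, C) assumes N == H * W and that
# out_features == in_features (the default), since C is taken from the input.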
class ChannelProcessing(nn.Module):
"""Channel Processing"""
def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.,
sr_ratio=1, linear=False, drop_path=0.,
mlp_hidden_dim=None, act_layer=nn.GELU, drop=0., norm_layer=nn.LayerNorm, cha_sr_ratio=1, c_head_num=None):
"""Initialize ChannelProcessing class"""
super().__init__()
        assert dim % num_heads == 0, f"dim {dim} should be divisible by num_heads {num_heads}."
self.dim = dim
num_heads = c_head_num or num_heads
self.num_heads = num_heads
self.temperature = nn.Parameter(torch.ones(num_heads, 1, 1))
self.cha_sr_ratio = cha_sr_ratio if num_heads > 1 else 1
# config of mlp for v processing
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.mlp_v = FANMlp(in_features=dim // self.cha_sr_ratio, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop, linear=linear)
self.norm_v = norm_layer(dim // self.cha_sr_ratio)
self.q = nn.Linear(dim, dim, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.linear = linear
self.sr_ratio = sr_ratio
self.apply(self._init_weights)
def _init_weights(self, m):
"""Initialize weights"""
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
elif isinstance(m, nn.Conv2d):
fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
fan_out //= m.groups
m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
if m.bias is not None:
m.bias.data.zero_()
def _gen_attn(self, q, k):
"""Returns attention"""
q = q.softmax(-2).transpose(-1, -2)
_, _, N, _ = k.shape
k = torch.nn.functional.adaptive_avg_pool2d(k.softmax(-2), (N, 1))
        attn = torch.sigmoid(q @ k)
return attn * self.temperature
def forward(self, x, H, W, atten=None):
"""Forward functions """
B, N, C = x.shape
v = x.reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
q = self.q(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
k = x.reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
attn = self._gen_attn(q, k)
attn = self.attn_drop(attn)
Bv, Hd, Nv, Cv = v.shape
v = self.norm_v(self.mlp_v(v.transpose(1, 2).reshape(Bv, Nv, Hd * Cv), H, W)).reshape(Bv, Nv, Hd, Cv).transpose(1, 2)
repeat_time = N // attn.shape[-1]
attn = attn.repeat_interleave(repeat_time, dim=-1) if attn.shape[-1] > 1 else attn
x = (attn * v.transpose(-1, -2)).permute(0, 3, 1, 2).reshape(B, N, C)
return x
@torch.jit.ignore
def no_weight_decay(self):
"""layers to ignore for weight decay"""
return {'temperature'}
class SwinTransformerBlock(nn.Module):
r""" Swin Transformer Block."""
def __init__(self, dim, input_resolution, num_heads, window_size=7, shift_size=0,
mlp_ratio=4., qkv_bias=True, drop=0., attn_drop=0., drop_path=0., mlp_type=None,
act_layer=nn.GELU, norm_layer=nn.LayerNorm):
"""Initialize SwinTransformerBlock class
Args:
dim (int): Number of input channels.
            input_resolution (tuple[int]): Input resolution.
num_heads (int): Number of attention heads.
window_size (int): Window size.
shift_size (int): Shift size for SW-MSA.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
drop (float, optional): Dropout rate. Default: 0.0
attn_drop (float, optional): Attention dropout rate. Default: 0.0
drop_path (float, optional): Stochastic depth rate. Default: 0.0
act_layer (nn.Module, optional): Activation layer. Default: nn.GELU
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
"""
super().__init__()
self.dim = dim
self.input_resolution = input_resolution
self.num_heads = num_heads
self.window_size = window_size
self.shift_size = shift_size
self.mlp_ratio = mlp_ratio
if min(self.input_resolution) <= self.window_size:
# if window size is larger than input resolution, we don't partition windows
self.shift_size = 0
self.window_size = min(self.input_resolution)
        assert 0 <= self.shift_size < self.window_size, "shift_size must be in [0, window_size)"
self.norm1 = norm_layer(dim)
self.attn = WindowAttention(
dim, window_size=to_2tuple(self.window_size), num_heads=num_heads, qkv_bias=qkv_bias,
attn_drop=attn_drop, proj_drop=drop)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp_type = mlp_type
if mlp_type == 'Mlp':
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
else:
self.mlp = ChannelProcessing(dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop,
drop_path=drop_path, mlp_hidden_dim=mlp_hidden_dim)
if self.shift_size > 0:
# calculate attention mask for SW-MSA
H, W = self.input_resolution
img_mask = torch.zeros((1, H, W, 1)) # 1 H W 1
h_slices = (slice(0, -self.window_size),
slice(-self.window_size, -self.shift_size),
slice(-self.shift_size, None))
w_slices = (slice(0, -self.window_size),
slice(-self.window_size, -self.shift_size),
slice(-self.shift_size, None))
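            # The 3x3 grid of h/w slices labels 9 regions of the shifted image;
            # windows mixing different region labels get -100 entries below, which
            # blocks attention across the cyclic-shift seam.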
cnt = 0
for h in h_slices:
for w in w_slices:
img_mask[:, h, w, :] = cnt
cnt += 1
mask_windows = window_partition(img_mask, self.window_size) # nW, window_size, window_size, 1
mask_windows = mask_windows.view(-1, self.window_size * self.window_size)
attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))
else:
attn_mask = None
self.register_buffer("attn_mask", attn_mask)
def forward(self, x):
"""Forward function"""
H, W = self.input_resolution
B, L, C = x.shape
_assert(L == H * W, "input feature has wrong size")
shortcut = x
x = self.norm1(x)
x = x.view(B, H, W, C)
# cyclic shift
if self.shift_size > 0:
shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
else:
shifted_x = x
# partition windows
x_windows = window_partition(shifted_x, self.window_size) # nW*B, window_size, window_size, C
x_windows = x_windows.view(-1, self.window_size * self.window_size, C) # nW*B, window_size*window_size, C
# W-MSA/SW-MSA
attn_windows = self.attn(x_windows, mask=self.attn_mask) # nW*B, window_size*window_size, C
# merge windows
attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C)
shifted_x = window_reverse(attn_windows, self.window_size, H, W) # B H' W' C
# reverse cyclic shift
if self.shift_size > 0:
x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2))
else:
x = shifted_x
x = x.view(B, H * W, C)
# FFN
x = shortcut + self.drop_path(x)
if self.mlp_type == 'Mlp':
x = x + self.drop_path(self.mlp(self.norm2(x)))
else:
x = x + self.drop_path(self.mlp(self.norm2(x), H, W))
return x
class PatchMerging(nn.Module):
r""" Patch Merging Layer."""
def __init__(self, input_resolution, dim, norm_layer=nn.LayerNorm):
"""Initialize PatchMerging class
Args:
input_resolution (tuple[int]): Resolution of input feature.
dim (int): Number of input channels.
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
"""
super().__init__()
self.input_resolution = input_resolution
self.dim = dim
self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False)
self.norm = norm_layer(4 * dim)
def forward(self, x):
"""Forward function
Args:
x: B, H*W, C
"""
H, W = self.input_resolution
B, L, C = x.shape
_assert(L == H * W, "input feature has wrong size")
        _assert(H % 2 == 0 and W % 2 == 0, f"x size ({H}*{W}) is not even.")
x = x.view(B, H, W, C)
x0 = x[:, 0::2, 0::2, :] # B H/2 W/2 C
x1 = x[:, 1::2, 0::2, :] # B H/2 W/2 C
x2 = x[:, 0::2, 1::2, :] # B H/2 W/2 C
x3 = x[:, 1::2, 1::2, :] # B H/2 W/2 C
x = torch.cat([x0, x1, x2, x3], -1) # B H/2 W/2 4*C
x = x.view(B, -1, 4 * C) # B H/2*W/2 4*C
x = self.norm(x)
x = self.reduction(x)
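        # Net effect: token count drops 4x (H/2 * W/2) while channels double (4C -> 2C).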
return x
def extra_repr(self) -> str:
"""exptra representation"""
return f"input_resolution={self.input_resolution}, dim={self.dim}"
def flops(self):
"""returns FLOPs"""
H, W = self.input_resolution
flops = H * W * self.dim
flops += (H // 2) * (W // 2) * 4 * self.dim * 2 * self.dim
return flops
class BasicLayer(nn.Module):
""" A basic Swin Transformer layer for one stage."""
def __init__(self, dim, input_resolution, depth, num_heads, window_size,
mlp_ratio=4., qkv_bias=True, drop=0., attn_drop=0., mlp_type=None,
drop_path=0., norm_layer=nn.LayerNorm, downsample=None, use_checkpoint=False):
"""Initialize BasicLayer
Args:
dim (int): Number of input channels.
input_resolution (tuple[int]): Input resolution.
depth (int): Number of blocks.
num_heads (int): Number of attention heads.
window_size (int): Local window size.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
drop (float, optional): Dropout rate. Default: 0.0
attn_drop (float, optional): Attention dropout rate. Default: 0.0
drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None
use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
"""
super().__init__()
self.dim = dim
self.input_resolution = input_resolution
self.depth = depth
self.use_checkpoint = use_checkpoint
# build blocks
self.blocks = nn.ModuleList([
SwinTransformerBlock(
dim=dim, input_resolution=input_resolution, num_heads=num_heads, window_size=window_size,
shift_size=0 if (i % 2 == 0) else window_size // 2, mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias, drop=drop, attn_drop=attn_drop, mlp_type=mlp_type,
drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path, norm_layer=norm_layer)
for i in range(depth)])
# patch merging layer
if downsample is not None:
self.downsample = downsample(input_resolution, dim=dim, norm_layer=norm_layer)
else:
self.downsample = None
def forward(self, x):
"""Forward function"""
for blk in self.blocks:
if not torch.jit.is_scripting() and self.use_checkpoint:
x = checkpoint.checkpoint(blk, x)
else:
x = blk(x)
if self.downsample is not None:
x = self.downsample(x)
return x
def extra_repr(self) -> str:
"""exptra representation"""
return f"dim={self.dim}, input_resolution={self.input_resolution}, depth={self.depth}"
class SwinTransformer(nn.Module):
r""" Swin Transformer
    A PyTorch impl of: `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows` -
https://arxiv.org/pdf/2103.14030
"""
def __init__(self, img_size=224, patch_size=4, in_chans=3, num_classes=1000,
embed_dim=96, depths=(2, 2, 6, 2), num_heads=(3, 6, 12, 24),
window_size=7, mlp_ratio=4., qkv_bias=True,
drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1,
norm_layer=nn.LayerNorm, ape=False, patch_norm=True,
use_checkpoint=False, weight_init='', mlp_type='Mlp', **kwargs):
"""Initialize SwinTransformer class
Args:
img_size (int | tuple(int)): Input image size. Default 224
patch_size (int | tuple(int)): Patch size. Default: 4
in_chans (int): Number of input image channels. Default: 3
num_classes (int): Number of classes for classification head. Default: 1000
embed_dim (int): Patch embedding dimension. Default: 96
depths (tuple(int)): Depth of each Swin Transformer layer.
num_heads (tuple(int)): Number of attention heads in different layers.
window_size (int): Window size. Default: 7
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4
qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True
drop_rate (float): Dropout rate. Default: 0
attn_drop_rate (float): Attention dropout rate. Default: 0
drop_path_rate (float): Stochastic depth rate. Default: 0.1
norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.
ape (bool): If True, add absolute position embedding to the patch embedding. Default: False
patch_norm (bool): If True, add normalization after patch embedding. Default: True
use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False
"""
super().__init__()
self.num_classes = num_classes
self.num_layers = len(depths)
self.embed_dim = embed_dim
self.ape = ape
self.patch_norm = patch_norm
self.num_features = int(embed_dim * 2 ** (self.num_layers - 1))
self.mlp_ratio = mlp_ratio
# split image into non-overlapping patches
self.patch_embed = PatchEmbed(
img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim,
norm_layer=norm_layer if self.patch_norm else None)
self.patch_grid = self.patch_embed.grid_size
# absolute position embedding
if self.ape:
# self.absolute_pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim))
# trunc_normal_(self.absolute_pos_embed, std=.02)
self.absolute_pos_embed = PositionalEncodingFourier(dim=embed_dim)
else:
self.absolute_pos_embed = None
self.pos_drop = nn.Dropout(p=drop_rate)
# stochastic depth
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule
# build layers
layers = []
for i_layer in range(self.num_layers):
layers += [BasicLayer(
dim=int(embed_dim * 2 ** i_layer),
input_resolution=(self.patch_grid[0] // (2 ** i_layer), self.patch_grid[1] // (2 ** i_layer)),
depth=depths[i_layer],
mlp_type=mlp_type[i_layer],
num_heads=num_heads[i_layer],
window_size=window_size,
mlp_ratio=self.mlp_ratio,
qkv_bias=qkv_bias,
drop=drop_rate,
attn_drop=attn_drop_rate,
drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])],
norm_layer=norm_layer,
downsample=PatchMerging if (i_layer < self.num_layers - 1) else None,
use_checkpoint=use_checkpoint)
]
self.layers = nn.Sequential(*layers)
self.norm = norm_layer(self.num_features)
self.avgpool = nn.AdaptiveAvgPool1d(1)
self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
assert weight_init in ('jax', 'jax_nlhb', 'nlhb', '')
head_bias = -math.log(self.num_classes) if 'nlhb' in weight_init else 0.
if weight_init.startswith('jax'):
for n, m in self.named_modules():
init_weights_vit_timm(m, n, head_bias=head_bias, jax_impl=True) # pylint: disable=E1123
else:
self.apply(init_weights_vit_timm)
@torch.jit.ignore
def no_weight_decay(self):
"""layers to ignore for weight decay"""
return {'absolute_pos_embed'}
@torch.jit.ignore
def no_weight_decay_keywords(self):
"""keywords to ignore for weight decay"""
return {'relative_position_bias_table'}
def get_classifier(self):
"""Returns classifier"""
return self.head
def reset_classifier(self, num_classes, global_pool=''):
"""Redefine classifier of FAN"""
self.num_classes = num_classes
self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
def forward_features(self, x):
"""Extract features"""
x = self.patch_embed(x)
B, N, _ = x.shape
        H = W = int(math.sqrt(N))  # assumes a square token grid (N must be a perfect square)
if self.absolute_pos_embed is not None:
x = x + self.absolute_pos_embed(B, H, W).reshape(B, -1, x.shape[1]).permute(0, 2, 1)
x = self.pos_drop(x)
x = self.layers(x)
x = self.norm(x) # B L C
x = self.avgpool(x.transpose(1, 2)) # B C 1
x = torch.flatten(x, 1)
return x
def forward(self, x):
"""Forward functions"""
x = self.forward_features(x)
x = self.head(x)
return x
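# Minimal usage sketch (hypothetical configuration, not an official entry point).
# Note that BasicLayer receives mlp_type[i_layer], so mlp_type should be a
# per-stage sequence rather than the bare default string:
#   model = SwinTransformer(img_size=224, patch_size=4, embed_dim=96,
#                           depths=(2, 2, 6, 2), num_heads=(3, 6, 12, 24),
#                           mlp_type=('Mlp', 'Mlp', 'Mlp', 'Mlp'))
#   logits = model(torch.randn(1, 3, 224, 224))  # (1, 1000)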
def _create_fan_swin_transformer(variant, pretrained=False, default_cfg=None, **kwargs):
"""Create FAN Swin Transformer backbone"""
if default_cfg is None:
default_cfg = deepcopy(default_cfgs[variant])
overlay_external_default_cfg(default_cfg, kwargs)
default_num_classes = default_cfg['num_classes']
default_img_size = default_cfg['input_size'][-2:]
num_classes = kwargs.pop('num_classes', default_num_classes)
img_size = kwargs.pop('img_size', default_img_size)
if kwargs.get('features_only', None):
raise RuntimeError('features_only not implemented for Vision Transformer models.')
model = build_model_with_cfg(
SwinTransformer, variant, pretrained,
default_cfg=default_cfg,
img_size=img_size,
num_classes=num_classes,
pretrained_filter_fn=checkpoint_filter_fn,
**kwargs)
return model
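# Hypothetical usage sketch: variants are looked up in default_cfgs, e.g.
#   model = _create_fan_swin_transformer('swin_large_patch4_window7_224_in22k', pretrained=False)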
| tao_dataset_suite-main | nvidia_tao_ds/backbone/swin_utils.py |