# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base TensorRT engine calibrator."""
import logging
import os
import numpy as np
import pycuda.autoinit # noqa pylint: disable=unused-import
import pycuda.driver as cuda
import tensorrt as trt
logging.basicConfig(format='%(asctime)s [TAO Toolkit] [%(levelname)s] %(name)s %(lineno)d: %(message)s',
level="INFO")
logger = logging.getLogger(__name__)
class EngineCalibrator(trt.IInt8EntropyCalibrator2):
"""Implements the INT8 Entropy Calibrator2."""
def __init__(self, cache_file):
"""Init.
Args:
cache_file (str): The location of the cache file.
"""
super().__init__()
self.cache_file = cache_file
self.image_batcher = None
self.batch_allocation = None
self.batch_generator = None
def set_image_batcher(self, image_batcher):
"""Define the image batcher to use, if any.
If using only the cache file, an image batcher doesn't need to be defined.
Args:
image_batcher (obj): The ImageBatcher object
"""
self.image_batcher = image_batcher
size = int(np.dtype(self.image_batcher.dtype).itemsize * np.prod(self.image_batcher.shape))
self.batch_allocation = cuda.mem_alloc(size)
self.batch_generator = self.image_batcher.get_batch()
def get_batch_size(self):
"""Overrides from trt.IInt8EntropyCalibrator2.
Get the batch size to use for calibration.
Returns:
Batch size.
"""
if self.image_batcher:
return self.image_batcher.batch_size
return 1
def get_batch(self, names):
"""Overrides from trt.IInt8EntropyCalibrator2.
Get the next batch to use for calibration, as a list of device memory pointers.
Args:
names (list): The names of the inputs, if useful to define the order of inputs.
Returns:
A list of int-casted memory pointers.
"""
if not self.image_batcher:
return None
try:
batch, _, _ = next(self.batch_generator)
logger.info("Calibrating image %d / %d",
self.image_batcher.image_index, self.image_batcher.num_images)
cuda.memcpy_htod(self.batch_allocation, np.ascontiguousarray(batch))
return [int(self.batch_allocation)]
except StopIteration:
logger.info("Finished calibration batches")
return None
def read_calibration_cache(self):
"""Overrides from trt.IInt8EntropyCalibrator2.
Read the calibration cache file stored on disk, if it exists.
Returns:
The contents of the cache file, if any.
"""
if os.path.exists(self.cache_file):
with open(self.cache_file, "rb") as f:
logger.info("Using calibration cache file: %s", self.cache_file)
return f.read()
return None
def write_calibration_cache(self, cache):
"""Overrides from trt.IInt8EntropyCalibrator2.
Store the calibration cache to a file on disk.
Args:
cache: The contents of the calibration cache to store.
"""
with open(self.cache_file, "wb") as f:
logger.info("Writing calibration cache data to: %s", self.cache_file)
f.write(cache)
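# --- Usage sketch (illustrative, not part of the original module) ---
# Attaches the calibrator defined above to a TensorRT builder config and feeds it with the
# project's ImageBatcher (numpy and tensorrt are already imported at the top of this module).
# The image directory, calibration shape, batch size and cache path below are placeholders.
from nvidia_tao_deploy.utils.image_batcher import ImageBatcher

example_calibrator = EngineCalibrator("/tmp/cal.bin")
example_calibrator.set_image_batcher(
    ImageBatcher("/data/calib_images", (8, 3, 544, 960), np.float32,
                 max_num_images=100, exact_batches=True))
example_config = trt.Builder(trt.Logger(trt.Logger.INFO)).create_builder_config()
example_config.set_flag(trt.BuilderFlag.INT8)
example_config.int8_calibrator = example_calibrator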
| tao_deploy-main | nvidia_tao_deploy/engine/calibrator.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base TensorRT engine builder."""
from abc import ABC, abstractmethod
import json
import logging
import os
import struct
import tensorrt as trt
from nvidia_tao_deploy.engine.calibrator import EngineCalibrator
from nvidia_tao_deploy.utils.image_batcher import ImageBatcher
logging.basicConfig(format='%(asctime)s [TAO Toolkit] [%(levelname)s] %(name)s %(lineno)d: %(message)s',
level="INFO")
logger = logging.getLogger(__name__)
DEFAULT_MAX_WORKSPACE_SIZE = 8
DEFAULT_MAX_BATCH_SIZE = 1
DEFAULT_MIN_BATCH_SIZE = 1
DEFAULT_OPT_BATCH_SIZE = 1
NV_TENSORRT_MAJOR, NV_TENSORRT_MINOR, NV_TENSORRT_PATCH = [
int(item) for item
in trt.__version__.split(".")
][:3]
class EngineBuilder(ABC):
"""Parses an ONNX graph and builds a TensorRT engine from it."""
def __init__(self,
batch_size=None,
verbose=False,
max_batch_size=DEFAULT_MAX_BATCH_SIZE,
opt_batch_size=DEFAULT_OPT_BATCH_SIZE,
min_batch_size=DEFAULT_MIN_BATCH_SIZE,
workspace=DEFAULT_MAX_WORKSPACE_SIZE,
strict_type_constraints=False,
force_ptq=False,
is_qat=False):
"""Create a TensorRT engine.
Args:
batch_size (int): batch_size used for calibration
verbose (bool): If enabled, a higher verbosity level will be set on the TensorRT logger.
max_batch_size (int): Maximum batch size.
opt_batch_size (int): Optimal batch size.
min_batch_size (int): Minimum batch size.
workspace (int): Max memory workspace to allow, in GB.
strict_type_constraints (bool): Whether or not to apply strict type constraints for INT8 mode.
force_ptq (bool): Flag to force post-training quantization using TensorRT
for a QAT-trained model. This is required if the inference platform is
a Jetson with a DLA.
is_qat (bool): Whether or not the model is a QAT model.
"""
self.trt_logger = trt.Logger(trt.Logger.INFO)
if verbose:
self.trt_logger.min_severity = trt.Logger.Severity.VERBOSE
trt.init_libnvinfer_plugins(self.trt_logger, namespace="")
self.builder = trt.Builder(self.trt_logger)
self.config = self.builder.create_builder_config()
self.config.max_workspace_size = workspace * (2 ** 30)
self.batch_size = batch_size
self.max_batch_size, self.opt_batch_size, self.min_batch_size = max_batch_size, opt_batch_size, min_batch_size
self.network = None
self.parser = None
# Disable QAT regardless of is_qat flag if force_ptq is True
self._is_qat = is_qat if not force_ptq else False
self._strict_type = strict_type_constraints
self._trt_version_number = NV_TENSORRT_MAJOR * 1000 + NV_TENSORRT_MINOR * 100 + \
NV_TENSORRT_PATCH
# if self._trt_version_number < 8600:
# if self._trt_version_number >= 8500:
# logger.info("TRT version is lower than 8.6. Setting PreviewFeature.FASTER_DYNAMIC_SHAPES_0805 for better performance")
# faster_dynamic_shapes = True # Only supported from TRT 8.5+
# else:
# faster_dynamic_shapes = False
# self.config.set_preview_feature(trt.PreviewFeature.FASTER_DYNAMIC_SHAPES_0805, faster_dynamic_shapes)
@abstractmethod
def create_network(self, model_path):
"""Parse the ONNX or UFF graph and create the corresponding TensorRT network definition.
Args:
model_path (str): The path to the ONNX or UFF graph to load.
"""
pass
def set_calibrator(self,
inputs=None,
calib_cache=None,
calib_input=None,
calib_num_images=5000,
calib_batch_size=8,
calib_data_file=None,
image_mean=None):
"""Simple function to set an int8 calibrator. (Default is ImageBatcher based)
Args:
inputs (list): Inputs to the network
calib_input (str): The path to a directory holding the calibration images.
calib_cache (str): The path where to write the calibration cache to,
or if it already exists, load it from.
calib_num_images (int): The maximum number of images to use for calibration.
calib_batch_size (int): The batch size to use for the calibration process.
Returns:
No explicit returns.
"""
logger.info("Calibrating using ImageBatcher")
self.config.int8_calibrator = EngineCalibrator(calib_cache)
if not os.path.exists(calib_cache):
calib_shape = [calib_batch_size] + list(inputs[0].shape[1:])
calib_dtype = trt.nptype(inputs[0].dtype)
self.config.int8_calibrator.set_image_batcher(
ImageBatcher(calib_input, calib_shape, calib_dtype,
max_num_images=calib_num_images,
exact_batches=True))
def _logger_info_IBuilderConfig(self):
"""Print tensorrt.tensorrt.IBuilderConfig"""
if self.config:
logger.info("TensorRT engine build configurations:")
opt_prof = self.config.get_calibration_profile()
if opt_prof:
logger.info(' OptimizationProfile: ')
for index in range(self.network.num_inputs):
tensor = self.network.get_input(index)
if tensor.is_shape_tensor:
min_shape, opt_shape, max_shape = opt_prof.get_shape_input(tensor.name)
else:
min_shape, opt_shape, max_shape = opt_prof.get_shape(tensor.name)
logger.info(' \"%s\": %s, %s, %s', tensor.name, min_shape, opt_shape, max_shape)
logger.info(' ')
if self.config.get_flag(trt.BuilderFlag.FP16):
logger.info(' BuilderFlag.FP16')
if self.config.get_flag(trt.BuilderFlag.INT8):
logger.info(' BuilderFlag.INT8')
if self.config.get_flag(trt.BuilderFlag.DEBUG):
logger.info(' BuilderFlag.DEBUG')
if self.config.get_flag(trt.BuilderFlag.GPU_FALLBACK):
logger.info(' BuilderFlag.GPU_FALLBACK')
if self.config.get_flag(trt.BuilderFlag.STRICT_TYPES):
logger.info(' BuilderFlag.STRICT_TYPES')
if self.config.get_flag(trt.BuilderFlag.REFIT):
logger.info(' BuilderFlag.REFIT')
if self.config.get_flag(trt.BuilderFlag.DISABLE_TIMING_CACHE):
logger.info(' BuilderFlag.DISABLE_TIMING_CACHE')
if self.config.get_flag(trt.BuilderFlag.TF32):
logger.info(' BuilderFlag.TF32')
if self.config.get_flag(trt.BuilderFlag.SPARSE_WEIGHTS):
logger.info(' BuilderFlag.SPARSE_WEIGHTS')
if self.config.get_flag(trt.BuilderFlag.SAFETY_SCOPE):
logger.info(' BuilderFlag.SAFETY_SCOPE')
if self.config.get_flag(trt.BuilderFlag.OBEY_PRECISION_CONSTRAINTS):
logger.info(' BuilderFlag.OBEY_PRECISION_CONSTRAINTS')
if self.config.get_flag(trt.BuilderFlag.PREFER_PRECISION_CONSTRAINTS):
logger.info(' BuilderFlag.PREFER_PRECISION_CONSTRAINTS')
if self.config.get_flag(trt.BuilderFlag.DIRECT_IO):
logger.info(' BuilderFlag.DIRECT_IO')
if self.config.get_flag(trt.BuilderFlag.REJECT_EMPTY_ALGORITHMS):
logger.info(' BuilderFlag.REJECT_EMPTY_ALGORITHMS')
if self.config.get_flag(trt.BuilderFlag.ENABLE_TACTIC_HEURISTIC):
logger.info(' BuilderFlag.ENABLE_TACTIC_HEURISTIC')
logger.info(' ')
# Return int32 and thus cannot represent >2GB
logger.info(' Note: max representable value is 2,147,483,648 bytes or 2GB.')
pool_limit = self.config.get_memory_pool_limit(trt.MemoryPoolType.WORKSPACE)
logger.info(' MemoryPoolType.WORKSPACE = %d bytes', pool_limit)
pool_limit = self.config.get_memory_pool_limit(trt.MemoryPoolType.DLA_MANAGED_SRAM)
logger.info(' MemoryPoolType.DLA_MANAGED_SRAM = %d bytes', pool_limit)
pool_limit = self.config.get_memory_pool_limit(trt.MemoryPoolType.DLA_LOCAL_DRAM)
logger.info(' MemoryPoolType.DLA_LOCAL_DRAM = %d bytes', pool_limit)
pool_limit = self.config.get_memory_pool_limit(trt.MemoryPoolType.DLA_GLOBAL_DRAM)
logger.info(' MemoryPoolType.DLA_GLOBAL_DRAM = %d bytes', pool_limit)
logger.info(' ')
if self.config.get_preview_feature(trt.PreviewFeature.FASTER_DYNAMIC_SHAPES_0805):
logger.info(' PreviewFeature.FASTER_DYNAMIC_SHAPES_0805')
if self.config.get_preview_feature(trt.PreviewFeature.DISABLE_EXTERNAL_TACTIC_SOURCES_FOR_CORE_0805):
logger.info(' PreviewFeature.DISABLE_EXTERNAL_TACTIC_SOURCES_FOR_CORE_0805')
if self.config.get_quantization_flag(trt.QuantizationFlag.CALIBRATE_BEFORE_FUSION):
logger.info(' QuantizationFlag.CALIBRATE_BEFORE_FUSION')
tactic_sources = self.config.get_tactic_sources()
logger.info(' Tactic Sources = %d', tactic_sources)
def create_engine(self, engine_path, precision,
calib_input=None, calib_cache=None, calib_num_images=5000,
calib_batch_size=8, calib_data_file=None, calib_json_file=None):
"""Build the TensorRT engine and serialize it to disk.
Args:
engine_path (str): The path where to serialize the engine to.
precision (str): The datatype to use for the engine, either 'fp32', 'fp16' or 'int8'.
calib_input (str): The path to a directory holding the calibration images.
calib_cache (str): The path where to write the calibration cache to,
or if it already exists, load it from.
calib_num_images (int): The maximum number of images to use for calibration.
calib_batch_size (int): The batch size to use for the calibration process.
calib_json_file (str): The path to the JSON file containing the tensor scale dictionary for QAT
"""
engine_path = os.path.realpath(engine_path)
engine_dir = os.path.dirname(engine_path)
os.makedirs(engine_dir, exist_ok=True)
logger.debug("Building %s Engine in %s", precision, engine_path)
inputs = [self.network.get_input(i) for i in range(self.network.num_inputs)]
if self.batch_size is None:
self.batch_size = calib_batch_size
self.builder.max_batch_size = self.batch_size
# This should only be applied to ONNX models
if self.batch_size != calib_batch_size and self.batch_size > 0:
warning_msg = "For ONNX models with static batch size, " \
"calibration is done using the original batch size " \
f"of the ONNX model which is {self.batch_size}. " \
f"Overriding the provided calibration batch size {calib_batch_size}" \
f" to {self.batch_size}"
logger.warning(warning_msg)
calib_batch_size = self.batch_size
if self._is_qat and precision != "int8":
raise ValueError(f"QAT model only supports data_type int8 but {precision} was provided.")
if precision == "fp16":
if not self.builder.platform_has_fast_fp16:
logger.warning("FP16 is not supported natively on this platform/device")
else:
self.config.set_flag(trt.BuilderFlag.FP16)
elif precision == "int8":
if not self.builder.platform_has_fast_int8:
logger.warning("INT8 is not supported natively on this platform/device")
elif self._is_qat:
self.config.set_flag(trt.BuilderFlag.INT8)
if self.builder.platform_has_fast_fp16 and not self._strict_type:
# Also enable fp16, as some layers may be even more efficient in fp16 than int8
self.config.set_flag(trt.BuilderFlag.FP16)
else:
self.config.set_flag(trt.BuilderFlag.STRICT_TYPES)
logger.info("Calibrating using tensor scales for QAT model")
# Load from calib_json_file
self.calibration_cache_from_dict(calib_cache, calib_json_file)
# Set dynamic ranges of tensors using scales from QAT
self._set_tensor_dynamic_ranges(
network=self.network, tensor_scale_dict=self.tensor_scale_dict
)
else:
if self.builder.platform_has_fast_fp16 and not self._strict_type:
# Also enable fp16, as some layers may be even more efficient in fp16 than int8
self.config.set_flag(trt.BuilderFlag.FP16)
else:
self.config.set_flag(trt.BuilderFlag.STRICT_TYPES)
self.config.set_flag(trt.BuilderFlag.INT8)
# Set ImageBatcher based calibrator
self.set_calibrator(inputs=inputs,
calib_cache=calib_cache,
calib_input=calib_input,
calib_num_images=calib_num_images,
calib_batch_size=calib_batch_size,
calib_data_file=calib_data_file)
self._logger_info_IBuilderConfig()
with self.builder.build_engine(self.network, self.config) as engine, \
open(engine_path, "wb") as f:
logger.debug("Serializing engine to file: %s", engine_path)
f.write(engine.serialize())
def calibration_cache_from_dict(self, calibration_cache=None, calib_json=None):
"""Write calibration cache file for QAT model.
This function converts a tensor scale dictionary generated by processing
QAT models to TRT readable format. By default we set it as a
trt.IInt8EntropyCalibrator2 cache file.
Args:
calibration_cache (str): Path to output calibration cache file.
calib_json (str): Path to calibration json file containing scale value
Returns:
No explicit returns.
"""
if not os.path.exists(calib_json):
raise FileNotFoundError(f"Calibration JSON file is required for QAT \
but {calib_json} does not exist.")
with open(calib_json, "r", encoding="utf-8") as f:
self.tensor_scale_dict = json.load(f)["tensor_scales"]
if calibration_cache is not None:
cal_cache_str = f"TRT-{self._trt_version_number}-EntropyCalibration2\n"
assert not os.path.exists(calibration_cache), (
"A pre-existing cache file exists. Please delete this "
"file and re-run export."
)
# Converting float numbers to hex representation.
for tensor in self.tensor_scale_dict:
scaling_factor = self.tensor_scale_dict[tensor] / 127.0
cal_scale = hex(struct.unpack(
"i", struct.pack("f", scaling_factor))[0])
assert cal_scale.startswith(
"0x"), "Hex number expected to start with 0x."
cal_scale = cal_scale[2:]
cal_cache_str += tensor + ": " + cal_scale + "\n"
with open(calibration_cache, "w", encoding="utf-8") as f:
f.write(cal_cache_str)
def _set_tensor_dynamic_ranges(self, network, tensor_scale_dict):
"""Set the scaling factors obtained from quantization-aware training.
Args:
network: TensorRT network object.
tensor_scale_dict (dict): Dictionary mapping names to tensor scaling factors.
"""
tensors_found = []
for idx in range(network.num_inputs):
input_tensor = network.get_input(idx)
if input_tensor.name in tensor_scale_dict:
tensors_found.append(input_tensor.name)
cal_scale = tensor_scale_dict[input_tensor.name]
input_tensor.dynamic_range = (-cal_scale, cal_scale)
for layer in network:
found_all_outputs = True
for idx in range(layer.num_outputs):
output_tensor = layer.get_output(idx)
if output_tensor.name in tensor_scale_dict:
tensors_found.append(output_tensor.name)
cal_scale = tensor_scale_dict[output_tensor.name]
output_tensor.dynamic_range = (-cal_scale, cal_scale)
else:
found_all_outputs = False
if found_all_outputs:
layer.precision = trt.int8
tensors_in_dict = tensor_scale_dict.keys()
if set(tensors_in_dict) != set(tensors_found):
logger.info("Tensors in scale dictionary but not in network: %s",
set(tensors_in_dict) - set(tensors_found))
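# --- Standalone illustration (not part of the original module) ---
# calibration_cache_from_dict() above encodes each per-tensor scale (amax / 127) as the hex
# string of its float32 bit pattern, one "name: hexscale" line per tensor, under an
# "EntropyCalibration2" header (struct is already imported at the top of this module).
# The tensor name and amax value below are made up.
amax = 2.5
scale = amax / 127.0
raw_bits = struct.unpack("i", struct.pack("f", scale))[0]
cache_line = "my_tensor: " + hex(raw_bits)[2:]
print(cache_line)
# Decoding the hex string recovers the scale, up to float32 precision:
decoded = struct.unpack("f", struct.pack("i", int(hex(raw_bits)[2:], 16)))[0]
assert abs(decoded - scale) < 1e-6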
| tao_deploy-main | nvidia_tao_deploy/engine/builder.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Modulus export APIs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import h5py
import io
import logging
import numpy as np
"""Logger for data export APIs."""
logger = logging.getLogger(__name__)
class TensorFile(io.RawIOBase):
"""Class to read/write multiple tensors to a file.
The underlying implementation using an HDF5 database
to store data.
Note: this class does not support multiple writers to
the same file.
Args:
filename (str): path to file.
mode (str): mode to open file in.
r Readonly, file must exist
r+ Read/write, file must exist
w Create file, truncate if exists
w- Create file, fail if exists
a Read/write if exists, create otherwise (default)
enforce_same_shape (bool): whether to enforce that all tensors be the same shape.
"""
DEFAULT_ARRAY_KEY = "_tensorfile_array_key_"
GROUP_NAME_PREFIX = "_tensorfile_array_key_"
def __init__(
self, filename, mode="a", enforce_same_shape=True, *args, **kwargs
): # pylint: disable=W1113
"""Init routine."""
super().__init__(*args, **kwargs)
logger.debug("Opening %s with mode=%s", filename, mode)
self._enforce_same_shape = enforce_same_shape
self._mode = mode
# Open or create the HDF5 file.
self._db = h5py.File(filename, mode)
if "count" not in self._db.attrs:
self._db.attrs["count"] = 0
if "r" in mode:
self._cursor = 0
else:
self._cursor = self._db.attrs["count"]
def _get_group_name(cls, cursor):
"""Return the name of the H5 dataset to create, given a cursor index."""
return f"{cls.GROUP_NAME_PREFIX}_{cursor}"
def _write_data(self, group, data):
for key, value in data.items():
if isinstance(value, dict):
self._write_data(group.create_group(key), value)
elif isinstance(value, np.ndarray):
if self._enforce_same_shape:
if "shape" not in self._db.attrs:
self._db.attrs["shape"] = value.shape
else:
expected_shape = tuple(
self._db.attrs["shape"].tolist())
if expected_shape != value.shape:
raise ValueError(
f"Shape mismatch: {str(expected_shape)} v.s. {str(value.shape)}"
)
group.create_dataset(key, data=value, compression="gzip")
else:
raise ValueError(
"Only np.ndarray or dicts can be written into a TensorFile."
)
def close(self):
"""Close this file."""
self._db.close()
# For python3.
def __next__(self):
"""Return next element."""
if self._cursor < self._db.attrs["count"]:
return self.read()
raise StopIteration()
def _read_data(self, group):
if isinstance(group, h5py.Group):
data = {key: self._read_data(value)
for key, value in group.items()}
else:
data = group[()]
return data
def read(self):
"""Read from current cursor.
Return array assigned to current cursor, or ``None`` to indicate
the end of the file.
"""
if not self.readable():
raise IOError("Instance is not readable.")
group_name = self._get_group_name(self._cursor)
if group_name in self._db:
self._cursor += 1
group = self._db[group_name]
data = self._read_data(group)
if list(data.keys()) == [self.DEFAULT_ARRAY_KEY]:
# The only key in this group is the default key.
# Return the numpy array directly.
return data[self.DEFAULT_ARRAY_KEY]
return data
return None
def readable(self):
"""Return whether this instance is readable."""
return self._mode in ["r", "r+", "a"]
def seekable(self):
"""Return whether this instance is seekable."""
return True
def seek(self, n):
"""Move cursor."""
self._cursor = min(n, self._db.attrs["count"])
return self._cursor
def tell(self):
"""Return current cursor index."""
return self._cursor
def truncate(self, n):
"""Truncation is not supported."""
raise IOError("Truncate operation is not supported.")
def writable(self):
"""Return whether this instance is writable."""
return self._mode in ["r+", "w", "w-", "a"]
def write(self, data):
"""Write a Numpy array or a dictionary of numpy arrays into file."""
if not self.writable():
raise IOError("Instance is not writable.")
if isinstance(data, np.ndarray):
data = {self.DEFAULT_ARRAY_KEY: data}
group_name = self._get_group_name(self._cursor)
# Delete existing instance of datasets at this cursor position.
if group_name in self._db:
del self._db[group_name]
group = self._db.create_group(group_name)
self._write_data(group, data)
self._cursor += 1
if self._cursor > self._db.attrs["count"]:
self._db.attrs["count"] = self._cursor
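# --- Usage sketch (illustrative, not part of the original module) ---
# Writes two random batches to a TensorFile and reads them back; iteration stops once the
# stored "count" attribute is reached. The file path is a placeholder, and numpy is already
# imported at the top of this module.
with TensorFile("/tmp/example_batches.h5", "w") as example_file:
    for _ in range(2):
        example_file.write(np.random.sample((4, 3, 224, 224)).astype(np.float32))
with TensorFile("/tmp/example_batches.h5", "r") as example_file:
    for example_batch in example_file:
        print(example_batch.shape)  # (4, 3, 224, 224)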
| tao_deploy-main | nvidia_tao_deploy/engine/tensorfile.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TRT engine building base utilities."""
import logging
import numpy as np
from PIL import Image
from six.moves import xrange
from tqdm import tqdm
from nvidia_tao_deploy.engine.tensorfile import TensorFile
logging.basicConfig(format='%(asctime)s [TAO Toolkit] [%(levelname)s] %(name)s %(lineno)d: %(message)s',
level="INFO")
logger = logging.getLogger(__name__)
def generate_random_tensorfile(data_file_name, input_dims, n_batches=1, batch_size=1):
"""Generate a random tensorfile.
This function generates a random tensorfile containing n_batches of random np.arrays
of dimensions (batch_size,) + (input_dims).
Args:
data_file_name (str): Path to where the data tensorfile will be stored.
input_dims (tuple): Input blob dimensions in CHW order.
n_batches (int): Number of batches to save.
batch_size (int): Number of images per batch.
Returns:
No explicit returns.
"""
sample_shape = (batch_size, ) + tuple(input_dims)
with TensorFile(data_file_name, 'w') as f:
for i in tqdm(xrange(n_batches)):
logger.debug("Writing batch: %d", i)
dump_sample = np.random.sample(sample_shape)
f.write(dump_sample)
def prepare_chunk(image_ids, image_list,
image_width=480,
image_height=272,
channels=3,
scale=1.0,
means=None,
flip_channel=False,
batch_size=1):
"""Prepare a single batch of data to dump into a Tensorfile."""
dump_placeholder = np.zeros(
(batch_size, channels, image_height, image_width))
for i in xrange(len(image_ids)):
idx = image_ids[i]
im = Image.open(image_list[idx]).resize((image_width, image_height),
Image.LANCZOS)
if channels == 1:
logger.debug("Converting image from RGB to Grayscale")
if im.mode in ('RGBA', 'LA') or (im.mode == 'P' and 'transparency' in im.info):
bg_colour = (255, 255, 255)
# Need to convert to RGBA if LA format due to a bug in PIL
alpha = im.convert('RGBA').split()[-1]
# Create a new background image of our matte color.
# Must be RGBA because paste requires both images have the same format
bg = Image.new("RGBA", im.size, bg_colour + (255,))
bg.paste(im, mask=alpha)
im = im.convert('L')
dump_input = np.asarray(im).astype(np.float32)
dump_input = dump_input[:, :, np.newaxis]
elif channels == 3:
dump_input = np.asarray(im.convert('RGB')).astype(np.float32)
else:
raise NotImplementedError("Unsupported channel dimensions.")
# flip channel: RGB --> BGR
if flip_channel:
dump_input = dump_input[:, :, ::-1]
# means is a list of per-channel means, (H, W, C) - (C)
if means is not None:
dump_input -= np.array(means)
# (H, W, C) --> (C, H, W)
dump_input = dump_input.transpose(2, 0, 1) * scale
dump_placeholder[i, :, :, :] = dump_input
return dump_placeholder
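# --- Usage sketch (illustrative, not part of the original module) ---
# Dumps two random batches of shape (8, 3, 224, 224) into a tensorfile that can later be fed
# to a tensorfile-based INT8 calibrator. The output path is a placeholder.
generate_random_tensorfile("/tmp/calibration_data.tensorfile",
                           input_dims=(3, 224, 224), n_batches=2, batch_size=8)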
| tao_deploy-main | nvidia_tao_deploy/engine/utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base calibrator class for TensorRT INT8 Calibration."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import os
import numpy as np
import pycuda.autoinit # noqa pylint: disable=unused-import
import pycuda.driver as cuda
import tensorrt as trt
# Simple helper class for calibration.
from nvidia_tao_deploy.engine.tensorfile import TensorFile
logger = logging.getLogger(__name__)
class TensorfileCalibrator(trt.IInt8EntropyCalibrator2):
"""Calibrator class."""
def __init__(self, data_filename, cache_filename,
n_batches, batch_size,
*args, **kwargs):
"""Init routine.
This inherits from ``trt.IInt8EntropyCalibrator2``
to implement the calibration interface that TensorRT needs to
calibrate the INT8 quantization factors. The data source here is assumed
to be a TensorFile as defined in engine.tensorfile.TensorFile(), which
was pre-generated using the dataloader
Args:
data_filename (str): ``TensorFile`` data file to use.
cache_filename (str): name of calibration file to read/write to.
n_batches (int): number of batches to calibrate for.
batch_size (int): batch size to use for calibration data.
"""
super().__init__(*args, **kwargs)
self._data_source = None
self._cache_filename = cache_filename
self._batch_size = batch_size
self._n_batches = n_batches
self._batch_count = 0
self._data_mem = None
self.instantiate_data_source(data_filename)
def instantiate_data_source(self, data_filename):
"""Simple function to instantiate the data_source of the dataloader.
Args:
data_filename (str): The path to the data file.
Returns:
No explicit returns.
"""
if os.path.exists(data_filename):
self._data_source = TensorFile(data_filename, "r")
else:
logger.info(
"A valid data source wasn't provided to the calibrator. "
"The calibrator will attempt to read from a cache file if provided."
)
def get_data_from_source(self):
"""Simple function to get data from the defined data_source."""
batch = np.array(self._data_source.read())
if batch is not None:
# <@vpraveen>: Disabling pylint error check on line below
# because of a python3 linting error. To be reverted when
# pylint/issues/3139 gets fixed.
batch_size = batch.shape[0] # pylint: disable=E1136
if batch_size < self._batch_size:
raise ValueError(
f"Batch size yielded from data source {batch_size} < requested batch size "
"from calibrator {self._batch_size}"
)
batch = batch[:self._batch_size]
else:
raise ValueError(
"Batch wasn't yielded from the data source. You may have run "
"out of batches. Please set the num batches accordingly")
return batch
def get_batch_size(self):
"""Return batch size."""
return self._batch_size
def get_batch(self, names):
"""Return one batch.
Args:
names (list): list of memory bindings names.
"""
if self._batch_count < self._n_batches:
batch = self.get_data_from_source()
if batch is not None:
if self._data_mem is None:
# 4 bytes per float32.
self._data_mem = cuda.mem_alloc(batch.size * 4)
self._batch_count += 1
# Transfer input data to device.
cuda.memcpy_htod(self._data_mem, np.ascontiguousarray(
batch, dtype=np.float32))
return [int(self._data_mem)]
if self._data_mem is not None:
self._data_mem.free()
return None
def read_calibration_cache(self):
"""Read calibration from file."""
logger.debug("read_calibration_cache - no-op")
if os.path.isfile(self._cache_filename):
logger.warning("Calibration file exists at %s."
" Reading this cache.", self._cache_filename)
with open(self._cache_filename, "rb") as cal_file:
return cal_file.read()
return None
def write_calibration_cache(self, cache):
"""Write calibration to file.
Args:
cache (memoryview): buffer to read calibration data from.
"""
logger.info("Saving calibration cache (size %d) to %s",
len(cache), self._cache_filename)
with open(self._cache_filename, 'wb') as f:
f.write(cache)
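# --- Usage sketch (illustrative, not part of the original module) ---
# Pairs the calibrator defined above with a pre-generated tensorfile (see
# nvidia_tao_deploy.engine.utils.generate_random_tensorfile) and attaches it to a TensorRT
# builder config. Paths, batch count and batch size below are placeholders; tensorrt is
# already imported at the top of this module.
example_calibrator = TensorfileCalibrator(
    data_filename="/tmp/calibration_data.tensorfile",
    cache_filename="/tmp/cal.bin",
    n_batches=2,
    batch_size=8,
)
example_config = trt.Builder(trt.Logger(trt.Logger.INFO)).create_builder_config()
example_config.set_flag(trt.BuilderFlag.INT8)
example_config.int8_calibrator = example_calibrator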
| tao_deploy-main | nvidia_tao_deploy/engine/tensorfile_calibrator.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""COCO Loader."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from abc import ABC, abstractmethod
import os
import numpy as np
from pycocotools.coco import COCO
class COCOLoader(ABC):
"""Base Class for COCO dataloader"""
def __init__(self, val_json_file, shape, dtype, batch_size=1, data_format='channels_last', image_dir=None, eval_samples=None):
"""Init.
Args:
val_json_file (str): validation json file.
shape (list): shape of the network.
dtype (str): data type.
data_format (str): data format (default: channels_last).
image_dir (str): directory where images are located.
eval_samples (str): total number of samples to evaluate.
"""
self.image_dir = image_dir
self.coco = COCO(val_json_file)
self.image_ids = self.coco.getImgIds()
self.n_samples = eval_samples or len(self.image_ids)
self.batch_size = batch_size
self.n_batches = self.n_samples // self.batch_size
assert self.n_batches > 0, "empty image dir or batch size too large!"
self.load_classes()
if data_format == "channels_last":
self.height = shape[1]
self.width = shape[2]
else:
self.height = shape[2]
self.width = shape[3]
self.dtype = dtype
self.data_format = data_format
@abstractmethod
def preprocess_image(self, image_path):
"""The image preprocessor loads an image from disk and prepares it as needed for batching.
This may include padding, resizing, normalization, data type casting, and transposing.
"""
pass
def load_classes(self):
"""create class mapping."""
# load class names (name -> label)
categories = self.coco.loadCats(self.coco.getCatIds())
categories.sort(key=lambda x: x['id'])
self.classes = {}
self.coco_labels = {}
self.coco_labels_inverse = {}
for c in categories:
self.coco_labels[len(self.classes)] = c['id']
self.coco_labels_inverse[c['id']] = len(self.classes) + 1
self.classes[c['name']] = len(self.classes) + 1
def coco_label_to_label(self, coco_label):
"""coco label to label mapping."""
return self.coco_labels_inverse[coco_label]
def _load_gt_image(self, image_index):
"""Load image."""
image_info = self.coco.loadImgs(self.image_ids[image_index])[0]
if os.path.exists(image_info['file_name']):
# Read absolute path from the annotation
path = image_info['file_name']
else:
# Override the root directory to user provided image_dir
path = os.path.join(self.image_dir, image_info['file_name'])
if not os.path.exists(path):
raise FileNotFoundError(f"Image path {path} does not exist!")
return self.preprocess_image(path), image_info['id']
def _load_gt_label(self, image_index):
"""Load COCO labels.
Returns:
[class_idx, is_difficult, x_min, y_min, x_max, y_max]
where is_difficult is hardcoded to 0 in the current COCO GT labels.
"""
# get image info
image_info = self.coco.loadImgs(self.image_ids[image_index])[0]
h = image_info['height']
w = image_info['width']
# image_id = image_info['id']
# get ground truth annotations
annotations_ids = self.coco.getAnnIds(imgIds=self.image_ids[image_index], iscrowd=False)
annotations = {
'labels': np.empty((0,)),
'bboxes': np.empty((0, 4)),
'masks': [],
}
# some images appear to miss annotations (like image with id 257034)
if len(annotations_ids) == 0:
# return empty annotations
return np.empty((0, 6)), [self.height, self.width, h, w]
# parse annotations
coco_annotations = self.coco.loadAnns(annotations_ids)
for _, a in enumerate(coco_annotations):
# some annotations have basically no width / height, skip them
if a['bbox'][2] < 1 or a['bbox'][3] < 1:
continue
annotations['labels'] = np.concatenate(
[annotations['labels'], [self.coco_label_to_label(a['category_id'])]], axis=0)
annotations['bboxes'] = np.concatenate([annotations['bboxes'], [[
a['bbox'][1],
a['bbox'][0],
a['bbox'][1] + a['bbox'][3],
a['bbox'][0] + a['bbox'][2],
]]], axis=0)
labels = np.expand_dims(annotations['labels'], axis=-1)
return np.concatenate(
(annotations['bboxes'], np.full_like(labels, 0), np.full_like(labels, -1), labels),
axis=1), [self.height, self.width, h, w]
def _get_single_processed_item(self, idx):
"""Load and process single image and its label."""
gt_image_info, image_id = self._load_gt_image(idx)
gt_image, gt_scale = gt_image_info
gt_label = self._load_gt_label(idx)
return gt_image, gt_scale, image_id, gt_label
def __iter__(self):
"""Iterate."""
self.n = 0
return self
def __next__(self):
"""Load a full batch."""
images = []
labels = []
image_ids = []
scales = []
if self.n < self.n_batches:
for idx in range(self.n * self.batch_size,
(self.n + 1) * self.batch_size):
image, scale, image_id, label = self._get_single_processed_item(idx)
images.append(image)
labels.append(label)
image_ids.append(image_id)
scales.append(scale)
self.n += 1
return images, scales, image_ids, labels
raise StopIteration
def __len__(self):
"""Return length."""
return int(np.ceil(self.n_samples / self.batch_size))
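# --- Minimal concrete loader sketch (illustrative, not part of the original module) ---
# The only abstract method is preprocess_image(); as _load_gt_image() expects, it must return
# a (preprocessed_image, scale) pair. The resize and normalization below are an arbitrary
# example; numpy is already imported at the top of this module.
from PIL import Image


class SimpleCOCOLoader(COCOLoader):
    """Example COCO loader with a basic resize + [0, 1] normalization."""

    def preprocess_image(self, image_path):
        image = Image.open(image_path).convert("RGB")
        scale = (image.width / self.width, image.height / self.height)
        image = image.resize((self.width, self.height), Image.LANCZOS)
        array = np.asarray(image, dtype=self.dtype) / 255.0
        if self.data_format == "channels_first":
            array = array.transpose(2, 0, 1)  # HWC -> CHW
        return array, scale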
| tao_deploy-main | nvidia_tao_deploy/dataloader/coco.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""KITTI loader."""
from abc import ABC, abstractmethod
import logging
import os
import numpy as np
from PIL import Image
from nvidia_tao_deploy.cv.common.constants import VALID_IMAGE_EXTENSIONS
logging.basicConfig(format='%(asctime)s [%(levelname)s] %(name)s: %(message)s',
level="DEBUG")
logger = logging.getLogger(__name__)
class KITTILoader(ABC):
"""Base Class for KITTI dataloader"""
def __init__(self,
shape,
image_dirs,
label_dirs,
mapping_dict,
exclude_difficult=True,
batch_size=10,
is_inference=False,
image_mean=None,
data_format="channels_first",
image_depth=8,
dtype=None):
"""Init.
Args:
shape (list): list of input dimension that is either (c, h, w) or (h, w, c) format.
image_dirs (list): list of image directories.
label_dirs (list): list of label directories.
mapping_dict (dict): class mapping. e.g. {'Person': 'person', 'Crowd': 'person'}
exclude_difficult (bool): whether to include difficult samples.
batch_size (int): size of the batch.
is_inference (bool): If True, no labels will be returned
image_mean (list): image mean used for preprocessing.
data_format (str): Data format of the input. (Default: channels_first)
image_depth(int): Bit depth of images(8 or 16).
dtype (str): data type to cast to
"""
assert len(image_dirs) == len(label_dirs), "Mismatch in the length of image and label dirs!"
self.image_paths = []
self.label_paths = []
self.is_inference = is_inference
self.dtype = dtype
# mapping class to 1-based integer
self.mapping_dict = mapping_dict
classes = sorted({str(x).lower() for x in mapping_dict.values()})
self.classes = dict(zip(classes, range(1, len(classes) + 1)))
self.class_mapping = {key.lower(): self.classes[str(val.lower())]
for key, val in mapping_dict.items()}
# use numpy array to accelerate
self._add_source(image_dirs, label_dirs)
self.image_paths = np.array(self.image_paths)
self.label_paths = np.array(self.label_paths)
self.data_inds = np.arange(len(self.image_paths))
self.batch_size = batch_size
if data_format == "channels_first":
self.num_channels, self.height, self.width = shape
else:
self.height, self.width, self.num_channels = shape
self.image_depth = image_depth
self.exclude_difficult = exclude_difficult
self.image_mean = image_mean
self.n_samples = len(self.data_inds)
self.n_batches = int(len(self.image_paths) // self.batch_size)
assert self.n_batches > 0, "empty image dir or batch size too large!"
def _add_source(self, image_folders, label_folders):
"""Add Kitti sources."""
for image_folder, label_folder in zip(image_folders, label_folders):
img_paths = os.listdir(image_folder)
if not self.is_inference:
label_paths = set(os.listdir(label_folder))
else:
label_paths = []
for img_path in img_paths:
# Only add valid items to paths
filename, _ = os.path.splitext(img_path)
if img_path.lower().endswith(VALID_IMAGE_EXTENSIONS) and filename + '.txt' in label_paths:
self.image_paths.append(os.path.join(image_folder, img_path))
self.label_paths.append(os.path.join(label_folder, filename + '.txt'))
elif img_path.lower().endswith(VALID_IMAGE_EXTENSIONS) and self.is_inference:
self.image_paths.append(os.path.join(image_folder, img_path))
def __len__(self):
"""Get length of Sequence."""
return self.n_batches
def _load_gt_image(self, image_path):
"""Load GT image from file."""
img = Image.open(image_path)
if self.num_channels == 3:
img = img.convert('RGB') # Color Image
else:
if self.image_depth == 16:
img = img.convert('I') # PIL int32 mode for 16-bit images
else:
img = img.convert('L') # Grayscale Image
return img
def _load_gt_label(self, label_path):
"""Load Kitti labels.
Returns:
[class_idx, is_difficult, x_min, y_min, x_max, y_max]
"""
with open(label_path, 'r', encoding="utf-8") as f:
entries = f.read().strip().split('\n')
results = []
for entry in entries:
items = entry.strip().split()
if len(items) < 9:
continue
items[0] = items[0].lower()
if items[0] not in self.class_mapping:
continue
label = [self.class_mapping[items[0]], 1 if int(
items[2]) != 0 else 0, *items[4:8]]
results.append([float(x) for x in label])
return np.array(results).reshape(-1, 6)
def _filter_invalid_labels(self, labels):
"""filter out invalid labels.
Args:
labels: size (N, 6).
Returns:
labels: size (M, 6), filtered bboxes with clipped boxes.
"""
x_coords = labels[:, [-4, -2]]
x_coords = np.clip(x_coords, 0, self.width - 1)
labels[:, [-4, -2]] = x_coords
y_coords = labels[:, [-3, -1]]
y_coords = np.clip(y_coords, 0, self.height - 1)
labels[:, [-3, -1]] = y_coords
# exclude invalid boxes
x_cond = labels[:, -2] - labels[:, -4] > 1e-3
y_cond = labels[:, -1] - labels[:, -3] > 1e-3
return labels[x_cond & y_cond]
def _get_single_item_raw(self, idx):
"""Load single image and its label.
Returns:
image (PIL.image): image object in original resolution
label (np.array): [class_idx, is_difficult, x_min, y_min, x_max, y_max]
with normalized coordinates
"""
image = self._load_gt_image(self.image_paths[self.data_inds[idx]])
if self.is_inference:
label = np.zeros((1, 6)) # Random array to label
else:
label = self._load_gt_label(self.label_paths[self.data_inds[idx]])
return image, label
@abstractmethod
def preprocessing(self, image, label):
"""Perform preprocessing on image and label."""
pass
def _get_single_processed_item(self, idx):
"""Load and process single image and its label."""
image, label = self._get_single_item_raw(idx)
image, label = self.preprocessing(image, label)
return image, label
def _batch_post_processing(self, images, labels):
"""Post processing for a batch."""
images = np.array(images)
# For num_channels=1 case, we assume that additional dimension was created in preprocessing.
if self.num_channels == 3:
images = images[..., [2, 1, 0]] # RGB -> BGR
images = images.transpose(0, 3, 1, 2) # channels_last -> channels_first
if self.num_channels == 3:
if self.image_mean:
bb, gg, rr = self.image_mean
else:
bb, gg, rr = 103.939, 116.779, 123.68
# subtract imagenet mean
images -= np.array([[[[bb]], [[gg]], [[rr]]]])
else:
if self.image_mean:
bb = self.image_mean # grayscale only contains one value
elif self.image_depth == 8:
bb = 117.3786
elif self.image_depth == 16:
# 117.3786 * 256
bb = 30048.9216
else:
raise ValueError(
f"Unsupported image depth: {self.image_depth}, should be 8 or 16, "
"please check `augmentation_config.output_depth` in spec file"
)
# subtract imagenet mean
images -= np.array([[[bb]]])
# try to make labels a numpy array
is_make_array = True
x_shape = None
for x in labels:
if not isinstance(x, np.ndarray):
is_make_array = False
break
if x_shape is None:
x_shape = x.shape
elif x_shape != x.shape:
is_make_array = False
break
if is_make_array:
labels = np.array(labels)
return images, labels
def __iter__(self):
"""Iterate."""
self.n = 0
return self
def __next__(self):
"""Load a full batch."""
images = []
labels = []
if self.n < self.n_batches:
for idx in range(self.n * self.batch_size,
(self.n + 1) * self.batch_size):
image, label = self._get_single_processed_item(idx)
images.append(image)
labels.append(label)
self.n += 1
return self._batch_post_processing(images, labels)
raise StopIteration
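# --- Minimal concrete loader sketch (illustrative, not part of the original module) ---
# The abstract preprocessing() hook must return a channels_last (H, W, C) float image (as
# assumed by _batch_post_processing) together with the label array rescaled to the network
# input resolution. The resize and box-scaling logic below is an arbitrary example that relies
# on the numpy and PIL imports at the top of this module.
class SimpleKITTILoader(KITTILoader):
    """Example KITTI loader with a basic resize and box rescaling."""

    def preprocessing(self, image, label):
        w_scale = self.width / image.width
        h_scale = self.height / image.height
        image = image.resize((self.width, self.height), Image.LANCZOS)
        array = np.asarray(image, dtype=self.dtype or np.float32)
        if array.ndim == 2:  # grayscale image -> add a channel axis
            array = array[..., np.newaxis]
        label = label.copy()
        label[:, [-4, -2]] *= w_scale  # x_min, x_max
        label[:, [-3, -1]] *= h_scale  # y_min, y_max
        return array, self._filter_invalid_labels(label)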
| tao_deploy-main | nvidia_tao_deploy/dataloader/kitti.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TAO Deploy DataLoader"""
| tao_deploy-main | nvidia_tao_deploy/dataloader/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-speech-data-processor-main | __init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import hydra
from sdp.run_processors import run_processors
@hydra.main(version_base=None)
def main(cfg):
run_processors(cfg)
if __name__ == "__main__":
# hacking the arguments to always disable hydra's output
# TODO: maybe better to copy-paste hydra_runner from nemo if there are
# any problems with this approach
sys.argv.extend(
["hydra.run.dir=.", "hydra.output_subdir=null", "hydra/job_logging=none", "hydra/hydra_logging=none"]
)
main()
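# Illustrative invocation (the config path and overrides below are placeholders): since
# hydra.main() above is declared without a default config, the config must be supplied on
# the command line, e.g.
#   python main.py --config-path=dataset_configs/<lang>/<dataset> --config-name=config.yaml \
#       workspace_dir=<WORKSPACE_DIR>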
| NeMo-speech-data-processor-main | main.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Class which will process the data for Fisher Spanish and create an initial manifest.
The raw Fisher Spanish data is provided as .sph files with 2 channels (1 for each speaker).
The data needs to be converted to .wav and trimmed+segmented. This script will do the
required processing and produce the following tree:
<ROOT_DATA_DIR>
├── fisher_spa_LDC2010S01.tgz
├── LDC2010T04.tgz
├── downloaded
│ ├── fisher_spa
│ │ ├── data
│ │ │ └── speech
│ │ └── docs
│ └── fisher_spa_tr
│ ├── data
│ │ └── transcripts
│ └── docs
└── processed
├── manifests
└── wavs
├── original_duration
└── trimmed_and_segmented
"""
import glob
import json
import os
import subprocess
from pathlib import Path
from typing import List, Optional
from sox import Transformer
from tqdm import tqdm
from sdp.logging import logger
from sdp.processors.base_processor import BaseParallelProcessor, DataEntry
from sdp.utils.common import extract_archive
AUDIO_TGZ_FILE = "fisher_spa_LDC2010S01.tgz"
TRANSCRIPT_TGZ_FILE = "LDC2010T04.tgz"
class CreateInitialManifestFisherSpanish(BaseParallelProcessor):
"""
Class which will create an initial manifest from the initial Fisher Spanish
data files, which must be located in raw_data_dir.
Args:
raw_data_dir: path to where the initial data archive files are located. This will
also be where the new audio files are processed and where the manifests are saved.
path_to_sph2pipe: the path to the sph2pipe tool, which will be used to convert
the sph audio files to wav files.
"""
def __init__(self, raw_data_dir: str, path_to_sph2pipe: str, **kwargs):
super().__init__(**kwargs)
self.raw_data_dir = Path(raw_data_dir)
self.path_to_sph2pipe = path_to_sph2pipe
self.extracted_path = str(Path(self.raw_data_dir) / "extracted")
self.processed_path = str(Path(self.raw_data_dir) / "processed")
def prepare(self):
audio_archive_path = self.raw_data_dir / AUDIO_TGZ_FILE
transcript_archive_path = self.raw_data_dir / TRANSCRIPT_TGZ_FILE
if audio_archive_path.exists() and transcript_archive_path.exists():
extract_archive(audio_archive_path, self.extracted_path)
extract_archive(transcript_archive_path, self.extracted_path)
else:
raise RuntimeError(
"Did not find the expected raw data files. Expected to find "
f"2 files: {audio_archive_path} and {transcript_archive_path}"
)
# convert audio files from .sph to .wav
sph_src_dir = os.path.join(self.extracted_path, "fisher_spa/data/speech")
wav_tgt_dir = os.path.join(self.processed_path, "wavs/original_duration")
if not os.path.exists(wav_tgt_dir):
os.makedirs(wav_tgt_dir)
logger.info("Converting files from .sph to .wav")
sph_list = glob.glob(sph_src_dir + "/*.sph")
for sph_path in tqdm(sph_list):
file_id = os.path.basename(sph_path).split(".sph")[0]
wav_path = os.path.join(wav_tgt_dir, file_id + ".wav")
cmd = [self.path_to_sph2pipe, "-f", "wav", "-p", sph_path, wav_path]
subprocess.run(cmd)
logger.info("Finished converting files from .sph to .wav")
def read_manifest(self) -> List[tuple[str]]:
transcript_src_dir = os.path.join(self.extracted_path, "fisher_spa_tr/data/transcripts/")
logger.info(f"Attempting to read transcription files in dir {transcript_src_dir}")
dataset_entries = []
for transcript_file in tqdm(glob.glob(transcript_src_dir + "/*.tdf")):
with open(transcript_file, "r") as f_in:
f_in.readline() # skip column headings
f_in.readline() # skip comments with ;;
f_in.readline() # skip comments with ;;
for line_i, line in enumerate(f_in):
line = line.strip()
line = line.split("\t")
line = [line_i] + line
dataset_entries.append(tuple(line))
return dataset_entries
def process_dataset_entry(self, data_entry: tuple[str]):
wav_src_dir = os.path.join(self.processed_path, "wavs/original_duration")
wav_tgt_dir = os.path.join(self.processed_path, "wavs/trimmed_and_segmented")
manifest_dir = os.path.join(self.processed_path, "manifests/")
os.makedirs(wav_tgt_dir, exist_ok=True)
os.makedirs(manifest_dir, exist_ok=True)
(
line_i,
file_id,
channel,
start,
end,
speaker,
speaker_type,
speaker_dialect,
transcript,
section,
turn,
segment,
*other_info,
) = data_entry
file_id = file_id.split(".sph")[0]
src_wav_file = os.path.join(wav_src_dir, f"{file_id}.wav")
tgt_wav_file = os.path.join(
wav_tgt_dir,
f"{file_id}_line{line_i}_channel{channel}_{section}_{turn}_{segment}.wav",
)
if len(transcript) == 0:
logger.info(f"Empty transcript. Skipping trying to make wav file {tgt_wav_file}")
return []
if float(end) - float(start) < 0.2:
logger.info(f"start time: {start}, end time: {end}")
logger.info(f"=> (end time) - (start time) is too small. Skipping trying to make wav file {tgt_wav_file}")
return []
# make trimmed wave file
transformer = Transformer()
transformer.trim(float(start), float(end))
transformer.rate(samplerate=16000, quality="v")
# pick out 1 speaker and make mono
# Note that mapping in remix dictionary is
# (output channel):(input channel), with indexing starting from 1
transformer.remix({1: [int(channel) + 1]}, num_output_channels=1)
transformer.build(src_wav_file, tgt_wav_file)
entry = {}
entry["audio_filepath"] = tgt_wav_file
# get duration
duration = subprocess.check_output("soxi -D {0}".format(entry["audio_filepath"]), shell=True)
if float(duration) == 0:
logger.info(f"created wave file with duration zero: {tgt_wav_file}")
logger.info(f"=> will not add this file to manifest")
return []
entry["duration"] = float(duration)
entry["text"] = transcript
entry["channel"] = channel
entry["start"] = start
entry["end"] = end
entry["speaker"] = speaker
entry["speaker_type"] = speaker_type
entry["speaker_dialect"] = speaker_dialect
entry["section"] = section
entry["turn"] = turn
entry["segment"] = segment
entry["other_info"] = ",".join(other_info)
return [DataEntry(data=entry)]
| NeMo-speech-data-processor-main | dataset_configs/spanish_pc/fisher/unique_processors/create_initial_manifest_fisher_spanish.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import os
import re
from typing import List
import pandas as pd
from sdp.logging import logger
from sdp.processors.base_processor import BaseParallelProcessor, DataEntry
from sdp.utils.edit_spaces import add_start_end_spaces, remove_extra_spaces
class CleanRomanNumerals(BaseParallelProcessor):
def __init__(
self,
king_triggers,
queen_triggers,
ordinal_masc_triggers,
ordinal_fem_triggers,
cardinal_triggers,
numerals_data_path=None,
text_key: str = "text",
**kwargs,
):
super().__init__(**kwargs)
self.numerals_data_path = numerals_data_path
self.king_triggers = king_triggers
self.queen_triggers = queen_triggers
self.ordinal_masc_triggers = ordinal_masc_triggers
self.ordinal_fem_triggers = ordinal_fem_triggers
self.cardinal_triggers = cardinal_triggers
self.text_key = text_key
# read csv
current_dir = os.path.dirname(os.path.abspath(__file__))
csv_path = os.path.join(current_dir, "1-100_roman_numeral_table.csv")
df = pd.read_csv(csv_path, sep="\t", index_col=0)
self.roman_numeral_to_ordinal_masc = {}
for i, row in df.iterrows():
self.roman_numeral_to_ordinal_masc[row["roman"]] = row["ordinal_masc"].strip()
self.roman_numeral_to_ordinal_fem = {}
for i, row in df.iterrows():
self.roman_numeral_to_ordinal_fem[row["roman"]] = row["ordinal_fem"].strip()
self.roman_numeral_to_cardinal = {}
for i, row in df.iterrows():
self.roman_numeral_to_cardinal[row["roman"]] = row["cardinal"].strip()
self.roman_numeral_to_king = {}
for i, row in df.iterrows():
self.roman_numeral_to_king[row["roman"]] = row["king"].strip()
self.roman_numeral_to_queen = {}
for i, row in df.iterrows():
self.roman_numeral_to_queen[row["roman"]] = row["queen"].strip()
self.clean_roman_numerals_count = collections.defaultdict(int)
def process_dataset_entry(self, data_entry) -> List:
data_entry[self.text_key] = add_start_end_spaces(data_entry[self.text_key])
data_entry = self.clean_operation(data_entry, self.ordinal_masc_triggers, self.roman_numeral_to_ordinal_masc)
data_entry = self.clean_operation(data_entry, self.ordinal_fem_triggers, self.roman_numeral_to_ordinal_fem)
data_entry = self.clean_operation(data_entry, self.cardinal_triggers, self.roman_numeral_to_cardinal)
data_entry = self.clean_operation(data_entry, self.king_triggers, self.roman_numeral_to_king)
data_entry = self.clean_operation(data_entry, self.queen_triggers, self.roman_numeral_to_queen)
data_entry[self.text_key] = remove_extra_spaces(data_entry[self.text_key])
return [DataEntry(data=data_entry, metrics=self.clean_roman_numerals_count)]
def finalize(self, metrics):
total_counter = collections.defaultdict(int)
for counter in metrics:
for word, count in counter.items():
total_counter[word] += count
logger.info("Num of roman numeral substitutions")
total_counter_sorted = dict(
sorted(
total_counter.items(),
key=lambda x: x[1],
reverse=True,
)
)
for word, count in total_counter_sorted.items():
logger.info(f"{word} {count}")
super().finalize(metrics)
def clean_operation(self, data, triggers, roman_numeral_to_num_written):
for trigger in triggers:
trigger_match = re.search(
pattern=f"({trigger} \S*)\s",
string=data[self.text_key],
flags=re.IGNORECASE,
)
if trigger_match:
trigger_numeral = trigger_match.group(0).strip()
trigger, numeral = trigger_numeral.split(" ")
if numeral.lower() in roman_numeral_to_num_written:
number = roman_numeral_to_num_written[numeral.lower()]
if trigger[0].isupper():
# 'felipe iv' --> 'felipe cuarto'
# 'Felipe iv' --> 'Felipe Cuarto'
number = number.capitalize()
trigger_number = f"{trigger} {number}"
data[self.text_key] = data[self.text_key].replace(trigger_numeral, trigger_number)
self.clean_roman_numerals_count[trigger_numeral] += 1
return data
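# --- Standalone illustration (not part of the original module) ---
# Mirrors the trigger-based substitution performed by clean_operation() above: find
# "<trigger> <roman numeral>" followed by a space, look the numeral up in a mapping, and
# substitute the written-out form. The trigger, text and mapping below are made up; re is
# already imported at the top of this module.
example_mapping = {"iv": "cuarto"}
example_text = " el rey felipe iv de castilla "
match = re.search(r"(felipe \S*)\s", example_text, flags=re.IGNORECASE)
if match:
    trigger, numeral = match.group(0).strip().split(" ")
    if numeral.lower() in example_mapping:
        replacement = f"{trigger} {example_mapping[numeral.lower()]}"
        example_text = example_text.replace(match.group(0).strip(), replacement)
print(example_text)  # " el rey felipe cuarto de castilla "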
| NeMo-speech-data-processor-main | dataset_configs/spanish/mls/unique_processors/clean_roman_numerals.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sdp.processors import DropNonAlphabet
def test_empty_test_cases():
"""Testing that empty test cases don't raise an error."""
processor = DropNonAlphabet("123", output_manifest_file="tmp")
processor.test()
| NeMo-speech-data-processor-main | tests/test_modify_manifest.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from sdp.utils.edit_spaces import add_start_end_spaces, remove_extra_spaces
@pytest.mark.parametrize("input,expected_output", [("abc xyz abc xyz", "abc xyz abc xyz"), (" abc xyz ", "abc xyz")])
def test_remove_extra_spaces(input, expected_output):
assert remove_extra_spaces(input) == expected_output
@pytest.mark.parametrize("input,expected_output", [("abc", " abc "), ("abc xyz", " abc xyz ")])
def test_add_start_end_spaces(input, expected_output):
assert add_start_end_spaces(input) == expected_output
| NeMo-speech-data-processor-main | tests/test_utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
from pathlib import Path
import hydra
import pytest
from omegaconf import OmegaConf, open_dict
DATASET_CONFIGS_ROOT = Path(__file__).parents[1] / "dataset_configs"
def get_test_cases():
"""Returns paths to all configs that are checked in."""
for config_path in glob.glob(f"{DATASET_CONFIGS_ROOT}/**/*.yaml", recursive=True):
yield config_path
@pytest.mark.parametrize("config_path", get_test_cases())
def test_configs(config_path: str):
try:
# to be able to correctly read some of the configs
OmegaConf.register_new_resolver("subfield", lambda node, field: node[field])
OmegaConf.register_new_resolver("not", lambda x: not x)
OmegaConf.register_new_resolver("equal", lambda field, value: field == value)
except ValueError: # already registered
pass
cfg = OmegaConf.load(config_path)
cfg["data_split"] = "train" # in case it's required for tests
for processor_cfg in cfg.processors:
if "test_cases" in processor_cfg:
# clear input_manifest_file and output_manifest_file to make sure we don't get
# a MissingMandatoryValue error when we instantiate the processor
with open_dict(processor_cfg):
processor_cfg["output_manifest_file"] = None
processor_cfg["input_manifest_file"] = None
# in case should_run is specified, we need to remove it
processor_cfg.pop("should_run", None)
processor = hydra.utils.instantiate(processor_cfg)
processor.test()
| NeMo-speech-data-processor-main | tests/test_cfg_runtime_tests.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from sdp.processors.modify_manifest.data_to_data import (
InsIfASRInsertion,
SubIfASRSubstitution,
SubMakeLowercase,
SubRegex,
)
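# each test case is a tuple of
# (processor class, constructor kwargs, input manifest entry, expected output manifest entry)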
test_params_list = []
test_params_list.extend(
[
(
InsIfASRInsertion,
{"insert_words": [" nemo", "nemo ", " nemo "]},
{"text": "i love the toolkit", "pred_text": "i love the nemo toolkit"},
{"text": "i love the nemo toolkit", "pred_text": "i love the nemo toolkit"},
),
(
InsIfASRInsertion,
{"insert_words": [" nemo", "nemo ", " nemo "]},
{"text": "i love the toolkit", "pred_text": "i love the new nemo toolkit"},
{"text": "i love the toolkit", "pred_text": "i love the new nemo toolkit"},
),
]
)
test_params_list.extend(
[
(
SubIfASRSubstitution,
{"sub_words": {"nmo ": "nemo "}},
{"text": "i love the nmo toolkit", "pred_text": "i love the nemo toolkit"},
{"text": "i love the nemo toolkit", "pred_text": "i love the nemo toolkit"},
),
]
)
test_params_list.extend(
[
(
SubMakeLowercase,
{},
{"text": "Hello Привет 123"},
{"text": "hello привет 123"},
),
(
SubMakeLowercase,
{"text_key": "text_new"},
{"text_new": "Hello Привет 123"},
{"text_new": "hello привет 123"},
),
]
)
test_params_list.extend(
[
(
SubRegex,
{"regex_params_list": [{"pattern": "\s<.*>\s", "repl": " "}]},
{"text": "hello <cough> world"},
{"text": "hello world"},
),
]
)
@pytest.mark.parametrize("test_class,class_kwargs,test_input,expected_output", test_params_list, ids=str)
def test_data_to_data(test_class, class_kwargs, test_input, expected_output):
processor = test_class(**class_kwargs, output_manifest_file=None)
output = processor.process_dataset_entry(test_input)[0].data
assert output == expected_output
| NeMo-speech-data-processor-main | tests/test_data_to_data.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from sdp.processors.modify_manifest.data_to_dropbool import (
DropASRErrorBeginningEnd,
DropHighCER,
DropHighLowCharrate,
DropHighLowDuration,
DropHighLowWordrate,
DropHighWER,
DropIfNoneOfRegexMatch,
DropIfRegexMatch,
DropIfSubstringInInsertion,
DropLowWordMatchRate,
DropNonAlphabet,
)
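# each test case is a tuple of
# (processor class, constructor kwargs, input manifest entry, True if the entry should be dropped)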
test_params_list = []
test_params_list.extend(
[
(
DropHighLowCharrate,
{"high_charrate_threshold": 9.9, "low_charrate_threshold": 0},
{"text": "0123456789", "duration": 1},
True,
),
(
DropHighLowCharrate,
{"high_charrate_threshold": 99, "low_charrate_threshold": 10.1},
{"text": "0123456789", "duration": 1},
True,
),
(
DropHighLowCharrate,
{"high_charrate_threshold": 10.1, "low_charrate_threshold": 9.9},
{"text": "0123456789", "duration": 1},
False,
),
]
)
test_params_list.extend(
[
(
DropHighLowWordrate,
{"high_wordrate_threshold": 3.9, "low_wordrate_threshold": 0},
{"text": "11 22 33 44", "duration": 1},
True,
),
(
DropHighLowWordrate,
{"high_wordrate_threshold": 99, "low_wordrate_threshold": 4.1},
{"text": "11 22 33 44", "duration": 1},
True,
),
(
DropHighLowWordrate,
{"high_wordrate_threshold": 4.1, "low_wordrate_threshold": 3.9},
{"text": "11 22 33 44", "duration": 1},
False,
),
]
)
test_params_list.extend(
[
(
DropHighLowDuration,
{"high_duration_threshold": 3.9, "low_duration_threshold": 0},
{"duration": 4},
True,
),
(
DropHighLowDuration,
{"high_duration_threshold": 99, "low_duration_threshold": 4.1},
{"duration": 4},
True,
),
(
DropHighLowDuration,
{"high_duration_threshold": 4.1, "low_duration_threshold": 3.9},
{"duration": 4},
False,
),
]
)
test_params_list.extend(
[
(
DropNonAlphabet,
{"alphabet": " abc"},
{"text": "ab ba cab dac"},
True,
),
(
DropNonAlphabet,
{"alphabet": " abcd"},
{"text": "ab ba cab dac"},
False,
),
]
)
test_params_list.extend(
[
(
DropASRErrorBeginningEnd,
{"beginning_error_char_threshold": 0, "end_error_char_threshold": 2},
{"text": "2", "pred_text": "1 2 3"},
True,
),
(
DropASRErrorBeginningEnd,
{"beginning_error_char_threshold": 2, "end_error_char_threshold": 0},
{"text": "2", "pred_text": "1 2 3"},
True,
),
(
DropASRErrorBeginningEnd,
{"beginning_error_char_threshold": 2, "end_error_char_threshold": 2},
{"text": "2", "pred_text": "1 2 3"},
False,
),
(
DropASRErrorBeginningEnd,
{"beginning_error_char_threshold": 0, "end_error_char_threshold": 2},
{"text": "sentence with some text here", "pred_text": "sentence with some text her"},
False,
),
(
DropASRErrorBeginningEnd,
{"beginning_error_char_threshold": 0, "end_error_char_threshold": 2},
{
"text": "sentence with some text here but actually more text was spoken",
"pred_text": "sentence with some text her",
},
True,
),
]
)
test_params_list.extend(
[
(
DropHighCER,
{"cer_threshold": 9.9},
{"text": "0123456789", "pred_text": "012345678"},
True,
),
(
DropHighCER,
{"cer_threshold": 10.1},
{"text": "0123456789", "pred_text": "012345678"},
False,
),
]
)
test_params_list.extend(
[
(
DropHighWER,
{"wer_threshold": 0},
{"text": "11 22", "pred_text": "11 22"},
False,
),
(
DropHighWER,
{"wer_threshold": 50.1},
{"text": "11 22", "pred_text": "11 22 33"},
False,
),
(
DropHighWER,
{"wer_threshold": 49.9},
{"text": "11 22", "pred_text": "11 22 33"},
True,
),
]
)
test_params_list.extend(
[
(DropIfNoneOfRegexMatch, {"regex_patterns": ["keep this", "also this"]}, {"text": "I don't want this"}, True),
(
DropIfNoneOfRegexMatch,
{"regex_patterns": ["keep this", "also this"]},
{"text": "I want to keep this"},
False,
),
]
)
test_params_list.extend(
[
(DropIfRegexMatch, {"regex_patterns": ["incorrect_text"]}, {"text": "incorrect_text"}, True),
(
DropIfRegexMatch,
{"regex_patterns": ["001/002"], "text_key": "audio_filepath"},
{"audio_filepath": "001/002/003.wav"},
True,
),
]
)
test_params_list.extend(
[
(
DropLowWordMatchRate,
{"wmr_threshold": 50.1},
{"text": "hello world i'm nemo", "pred_text": "hello world"},
True,
),
(
DropLowWordMatchRate,
{"wmr_threshold": 49.9},
{"text": "hello world i'm nemo", "pred_text": "hello world"},
False,
),
]
)
test_params_list.extend(
[
(
DropIfSubstringInInsertion,
{"substrings_in_insertion": ["might "]},
{"text": "we miss certain words", "pred_text": "we might miss certain words"},
True,
),
(
DropIfSubstringInInsertion,
{"substrings_in_insertion": ["might "]},
{"text": "we may certain words", "pred_text": "we might miss certain words"},
False,
),
]
)
@pytest.mark.parametrize("test_class,class_kwargs,test_input,expected_output", test_params_list, ids=str)
def test_data_to_dropbool(test_class, class_kwargs, test_input, expected_output):
processor = test_class(**class_kwargs, output_manifest_file=None)
output = processor.process_dataset_entry(test_input)
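    # a dropped utterance is returned with data=None, so the last element of each
    # parametrized test case encodes whether the entry is expected to be dropped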
if output:
output = output[0].data
if expected_output:
assert output is None
else:
assert output == test_input
| NeMo-speech-data-processor-main | tests/test_data_to_dropbool.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import tarfile
from functools import partial
from pathlib import Path
import shutil
from typing import Callable
from unittest import mock
import pytest
from omegaconf import OmegaConf
import sdp.processors.datasets.coraal.create_initial_manifest as coraal_processor
from sdp.run_processors import run_processors
DATASET_CONFIGS_ROOT = Path(__file__).parents[1] / "dataset_configs"
def data_check_fn_mls(raw_data_dir: str, language: str) -> None:
"""Raises error if do not find expected data"""
expected_file = Path(raw_data_dir) / f"mls_{language}.tar.gz"
if not expected_file.exists():
raise ValueError(f"No such file {str(expected_file)}")
def data_check_fn_mcv(raw_data_dir: str, archive_file_stem: str) -> None:
"""Raises error if do not find expected data"""
expected_file = Path(raw_data_dir) / f"{archive_file_stem}.tar.gz"
if not expected_file.exists():
raise ValueError(f"No such file {str(expected_file)}")
def data_check_fn_voxpopuli(raw_data_dir: str) -> None:
"""Raises error if do not find expected data.
Will also extract the archive as initial processor expects extracted data.
"""
if (Path(raw_data_dir) / "transcribed_data").exists():
return
expected_file = Path(raw_data_dir) / "transcribed_data.tar.gz"
if not expected_file.exists():
raise ValueError(f"No such file {str(expected_file)}")
with tarfile.open(Path(raw_data_dir) / "transcribed_data.tar.gz", 'r:gz') as tar:
tar.extractall(path=raw_data_dir)
coraal_processor.get_coraal_url_list = mock.Mock(
return_value=[
'http://lingtools.uoregon.edu/coraal/les/2021.07/LES_metadata_2021.07.txt',
'http://lingtools.uoregon.edu/coraal/les/2021.07/LES_audio_part01_2021.07.tar.gz',
'http://lingtools.uoregon.edu/coraal/les/2021.07/LES_audio_part02_2021.07.tar.gz',
'http://lingtools.uoregon.edu/coraal/les/2021.07/LES_audio_part03_2021.07.tar.gz',
'http://lingtools.uoregon.edu/coraal/les/2021.07/LES_textfiles_2021.07.tar.gz',
]
)
def get_test_cases():
"""Returns paths, and data check fn for all configs that we want to test."""
return [
(f"{DATASET_CONFIGS_ROOT}/spanish/mls/config.yaml", partial(data_check_fn_mls, language="spanish")),
# above one is without p&c, but it's also important to check p&c version as it's substantially different
(f"{DATASET_CONFIGS_ROOT}/italian/mls/config.yaml", partial(data_check_fn_mls, language="italian")),
(
f"{DATASET_CONFIGS_ROOT}/spanish_pc/mcv12/config.yaml",
partial(data_check_fn_mcv, archive_file_stem="cv-corpus-12.0-2022-12-07-es"),
),
(f"{DATASET_CONFIGS_ROOT}/italian/voxpopuli/config.yaml", data_check_fn_voxpopuli),
# audio will be downloaded on the fly, so nothing to check here
(f"{DATASET_CONFIGS_ROOT}/english/slr83/config.yaml", lambda raw_data_dir: True),
# audio will be downloaded on the fly from a subset of files.
# No checks, but need to mock the url list function (done above)
(f"{DATASET_CONFIGS_ROOT}/english/coraal/config.yaml", lambda raw_data_dir: True),
]
def check_e2e_test_data() -> bool:
"""Checks if required environment variables are defined for e2e data.
Either TEST_DATA_ROOT needs to be defined or both AWS_SECRET_KEY
and AWS_ACCESS_KEY.
"""
if os.getenv("TEST_DATA_ROOT"):
return True
if os.getenv("AWS_SECRET_KEY") and os.getenv("AWS_ACCESS_KEY"):
return True
return False
def get_e2e_test_data_path() -> str:
"""Returns path to e2e test data (downloading from AWS if necessary).
In case of downloading from AWS, will create "test_data" folder in the
current folder and set TEST_DATA_ROOT automatically (used by the sdp code
to locate test data).
"""
test_data_root = os.getenv("TEST_DATA_ROOT")
if test_data_root: # assume it's present locally
return test_data_root
import boto3
s3_resource = boto3.resource(
"s3",
aws_access_key_id=os.getenv("AWS_ACCESS_KEY"),
aws_secret_access_key=os.getenv("AWS_SECRET_KEY"),
)
bucket = s3_resource.Bucket("sdp-test-data")
print("Downloading test data from s3")
for obj in bucket.objects.all():
if not os.path.exists(os.path.dirname(obj.key)):
os.makedirs(os.path.dirname(obj.key))
bucket.download_file(obj.key, obj.key)
print("Test data downloaded to 'test_data' folder.")
os.environ["TEST_DATA_ROOT"] = os.path.abspath("test_data")
return os.environ["TEST_DATA_ROOT"]
@pytest.mark.skipif(
not check_e2e_test_data(),
reason="Either TEST_DATA_ROOT needs to be defined or both AWS_SECRET_KEY "
"and AWS_ACCESS_KEY to run e2e config tests",
)
@pytest.mark.parametrize("config_path,data_check_fn", get_test_cases())
def test_configs(config_path: str, data_check_fn: Callable, tmp_path: str):
test_data_root = get_e2e_test_data_path()
# we expect DATASET_CONFIGS_ROOT and TEST_DATA_ROOT
# to have the same structure (e.g. <lang>/<dataset>)
rel_path_from_root = os.path.relpath(Path(config_path).parent, DATASET_CONFIGS_ROOT)
# run data_check_fn - it will raise error if the expected test data is not found
data_check_fn(raw_data_dir=str(Path(test_data_root) / rel_path_from_root))
reference_manifest = str(Path(test_data_root) / rel_path_from_root / "test_data_reference.json")
if not os.path.exists(reference_manifest):
raise ValueError(f"Did not find reference manifest {reference_manifest}")
cfg = OmegaConf.load(config_path)
assert "processors" in cfg
cfg["processors_to_run"] = "all"
cfg["workspace_dir"] = str(tmp_path)
cfg["final_manifest"] = str(tmp_path / "final_manifest.json")
cfg["data_split"] = "train"
cfg["processors"][0]["raw_data_dir"] = str(Path(test_data_root) / rel_path_from_root)
run_processors(cfg)
# additionally, let's test that final generated manifest matches the
# reference file (ignoring the file paths)
with open(reference_manifest, "rt", encoding="utf8") as reference_fin, open(
cfg["final_manifest"], "rt", encoding="utf8"
) as generated_fin:
# sorting to avoid mismatches because of randomness in utterances order
reference_lines = sorted(reference_fin.readlines())
generated_lines = sorted(generated_fin.readlines())
assert len(reference_lines) == len(generated_lines)
for reference_line, generated_line in zip(reference_lines, generated_lines):
reference_data = json.loads(reference_line)
generated_data = json.loads(generated_line)
reference_data.pop("audio_filepath")
generated_data.pop("audio_filepath")
assert reference_data == generated_data
# if CLEAN_UP_TMP_PATH is set to non-0 value, we will delete tmp_path
if os.getenv("CLEAN_UP_TMP_PATH", "0") != "0":
shutil.rmtree(tmp_path)
| NeMo-speech-data-processor-main | tests/test_cfg_end_to_end_tests.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-speech-data-processor-main | tests/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Will take the downloaded tar file and create a version with only X entries."""
import argparse
import os
import shutil
import tarfile
import tempfile
from pathlib import Path
if __name__ == "__main__":
parser = argparse.ArgumentParser("Preparing MLS test data")
parser.add_argument("--extracted_data_path", required=True, help="Path to the downloaded and extracted data.")
parser.add_argument(
"--language",
required=True,
help="The name of the language, used to determine output file name mls_{language}.tar.gz",
)
parser.add_argument("--num_entries", default=200, type=int, help="How many entries to keep (in each split)")
parser.add_argument("--test_data_folder", required=True, help="Where to place the prepared data")
args = parser.parse_args()
with tempfile.TemporaryDirectory() as tmpdir:
tmpdir_path = Path(tmpdir)
for split in ["train", "dev", "test"]:
os.makedirs(tmpdir_path / split / "audio")
transcript_path = Path(args.extracted_data_path) / split / "transcripts.txt"
with open(transcript_path, "rt", encoding="utf8") as fin, open(
tmpdir_path / split / "transcripts.txt", "wt", encoding="utf8"
) as fout:
for idx, line in enumerate(fin):
if idx == args.num_entries:
break
utt_id = line.split("\t", 1)[0]
src_flac_path = os.path.join(
args.extracted_data_path, split, "audio", *utt_id.split("_")[:2], utt_id + ".flac"
)
fout.write(line)
tgt_flac_dir = os.path.join(tmpdir_path, split, "audio", *utt_id.split("_")[:2])
os.makedirs(tgt_flac_dir, exist_ok=True)
shutil.copy(src_flac_path, os.path.join(tgt_flac_dir, utt_id + ".flac"))
os.makedirs(args.test_data_folder, exist_ok=True)
with tarfile.open(os.path.join(args.test_data_folder, f"mls_{args.language}.tar.gz"), "w:gz") as tar:
# has to be the same as what's before .tar.gz
tar.add(tmpdir, arcname=f"mls_{args.language}")
| NeMo-speech-data-processor-main | tests/prepare_test_data/prepare_mls_data.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Will take the downloaded tar file and create a version with only X entries."""
import argparse
import os
import shutil
import tarfile
import tempfile
from pathlib import Path
if __name__ == "__main__":
parser = argparse.ArgumentParser("Preparing VoxPopuli test data")
parser.add_argument("--data_path", required=True, help="Path to the processed data.")
parser.add_argument(
"--language_id",
required=True,
help="The id of the language",
)
parser.add_argument("--num_entries", default=200, type=int, help="How many entries to keep (in each split)")
parser.add_argument("--test_data_folder", required=True, help="Where to place the prepared data")
args = parser.parse_args()
with tempfile.TemporaryDirectory() as tmpdir:
tmpdir_path = Path(tmpdir)
os.makedirs(tmpdir_path / "transcribed_data" / args.language_id)
for split in ["train", "dev", "test"]:
transcript_path = Path(args.data_path) / "transcribed_data" / args.language_id / f"asr_{split}.tsv"
with open(transcript_path, "rt", encoding="utf8") as fin, open(
tmpdir_path / "transcribed_data" / args.language_id / f"asr_{split}.tsv", "wt", encoding="utf8"
) as fout:
for idx, line in enumerate(fin):
if idx == args.num_entries + 1:
break
fout.write(line)
if idx == 0: # skipping header
continue
utt_id, raw_text, norm_text, spk_id, _, gender, is_gold_transcript, accent = line.split("\t")
year = utt_id[:4]
src_audio_path = (
Path(args.data_path) / "transcribed_data" / args.language_id / year / (utt_id + ".ogg")
)
target_audio_dir = tmpdir_path / "transcribed_data" / args.language_id / year
os.makedirs(target_audio_dir, exist_ok=True)
shutil.copy(src_audio_path, target_audio_dir / (utt_id + ".ogg"))
# even though the voxpopuli processor expects untarred folder,
# we still tar it to save time on the download from s3
        with tarfile.open(os.path.join(args.test_data_folder, "transcribed_data.tar.gz"), "w:gz") as tar:
# has to be the same as what's before .tar.gz
tar.add(tmpdir_path / "transcribed_data", arcname=f"transcribed_data")
| NeMo-speech-data-processor-main | tests/prepare_test_data/prepare_voxpopuli_data.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Will take the downloaded tar file and create a version with only X entries."""
import argparse
import os
import shutil
import tarfile
import tempfile
from pathlib import Path
if __name__ == "__main__":
parser = argparse.ArgumentParser("Preparing MCV test data")
parser.add_argument("--extracted_data_path", required=True, help="Path to the downloaded and extracted data.")
parser.add_argument(
"--archive_file_stem",
required=True,
help="What the stem (ie without the 'tar.gz' bit) of the new archive file should be",
)
parser.add_argument("--num_entries", default=200, type=int, help="How many entries to keep (in each split)")
parser.add_argument("--test_data_folder", required=True, help="Where to place the prepared data")
args = parser.parse_args()
with tempfile.TemporaryDirectory() as tmpdir:
tmpdir_path = Path(tmpdir)
os.makedirs(tmpdir_path / "clips")
for split in ["train", "dev", "test"]:
transcript_path = Path(args.extracted_data_path) / f"{split}.tsv"
with open(transcript_path, "rt", encoding="utf8") as fin, open(
tmpdir_path / f"{split}.tsv", "wt", encoding="utf8"
) as fout:
fout.write(fin.readline()) # just copy over header line
for idx, line in enumerate(fin):
if idx == args.num_entries:
break
utt_id = line.split("\t")[1]
src_mp3_path = os.path.join(args.extracted_data_path, "clips", utt_id)
fout.write(line)
tgt_mp3_path = os.path.join(tmpdir_path, "clips", utt_id)
shutil.copy(src_mp3_path, tgt_mp3_path)
with tarfile.open(os.path.join(args.test_data_folder, f"{args.archive_file_stem}.tar.gz"), "w:gz") as tar:
# has to be the same as what's before .tar.gz
tar.add(tmpdir, arcname=args.archive_file_stem)
| NeMo-speech-data-processor-main | tests/prepare_test_data/prepare_mcv_data.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run this file to generate documentation for SDP config files.
Will parse all the yaml files and include any built-in documentation in
the expected format.
"""
import yaml
import os
from pathlib import Path
ROOT_LINK = "https://github.com/NVIDIA/NeMo-speech-data-processor/blob/main/dataset_configs"
# let's ignore some of the configs we don't (yet) want to be exposed in the documentation
IGNORE_CONFIGS = []
def gen_docs():
config_dir = str(Path(__file__).absolute().parents[1] / 'dataset_configs')
config_docs_dir = str(Path(__file__).parents[0] / 'src' / 'sdp' / 'config-docs')
for root, dirs, files in os.walk(config_dir):
# Create corresponding directories in the destination directory
for directory in dirs:
source_path = os.path.join(root, directory)
destination_path = source_path.replace(config_dir, config_docs_dir)
os.makedirs(destination_path, exist_ok=True)
# Copy files and change the file extensions
for file in files:
if file.endswith('.yaml'):
source_path = os.path.join(root, file)
config_path = source_path.replace(config_dir, '')[1:] # removing leading /
if config_path in IGNORE_CONFIGS:
continue
destination_path = source_path.replace(config_dir, config_docs_dir).replace('.yaml', '.rst')
with open(source_path, "rt", encoding="utf-8") as fin:
docs = yaml.safe_load(fin).get('documentation', "Documentation is not yet available.") + "\n\n"
link = f"Config link: `dataset_configs/{config_path} <{ROOT_LINK}/{config_path}>`_"
with open(destination_path, "wt", encoding="utf-8") as fout:
fout.write(docs + link)
if __name__ == '__main__':
gen_docs()
| NeMo-speech-data-processor-main | docs/gen_docs.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
import sys
import glob
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath("../../"))
templates_path = ["_templates"]
autodoc_mock_imports = []
_skipped_autodoc_mock_imports = []
for req_path in sorted(list(glob.glob("../../requirements/*.txt"))):
if "docs.txt" in req_path:
continue
req_file = os.path.abspath(os.path.expanduser(req_path))
with open(req_file, 'r') as f:
for line in f:
line = line.replace("\n", "")
req = re.search(r"([a-zA-Z0-9-_]*)", line)
if req:
req = req.group(1)
req = req.replace("-", "_")
if req not in autodoc_mock_imports:
if req in _skipped_autodoc_mock_imports:
print(f"Skipping req : `{req}` (lib {line})")
continue
if req:
autodoc_mock_imports.append(req)
print(f"Adding req : `{req}` to autodoc mock requirements (lib {line})")
else:
print(f"`{req}` already added to autodoc mock requirements (lib {line})")
#
# -- General configuration ------------------------------------------------
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.todo",
"sphinx.ext.coverage",
"sphinx.ext.mathjax",
"sphinx.ext.ifconfig",
"sphinx.ext.viewcode",
"sphinx.ext.napoleon",
"sphinx.ext.githubpages",
"sphinx.ext.inheritance_diagram",
"sphinx.ext.intersphinx",
"sphinx_copybutton",
"sphinxext.opengraph",
]
intersphinx_mapping = {
'pytorch': ('https://pytorch.org/docs/stable', None),
'pytorch-lightning': ('https://pytorch-lightning.readthedocs.io/en/latest/', None),
}
# Set default flags for all classes.
autodoc_default_options = {'members': None, 'undoc-members': None, 'show-inheritance': True}
locale_dirs = ['locale/'] # path is example but recommended.
gettext_compact = False # optional.
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = ".rst"
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "NVIDIA NeMo Speech Data Processor"
copyright = "© 2022-2023 NVIDIA Corporation & Affiliates. All rights reserved."
author = "NVIDIA CORPORATION"
version = '0.1.0'
release = '0.1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'en'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "default"
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "nemosdpdoc"
html_theme = "sphinx_book_theme"
html_title = 'NVIDIA NeMo SDP'
html_theme_options = {
'collapse_navigation': False,
'navigation_depth': 10,
'repository_url': 'https://github.com/NVIDIA/NeMo-speech-data-processor/',
'use_repository_button': True,
'show_navbar_depth': 1,
'show_toc_level': 10,
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_favicon = 'favicon.ico'
html_static_path = ['_static']
html_last_updated_fmt = ''
def setup(app):
app.add_css_file('css/custom.css')
app.add_js_file('js/pk_scripts.js')
# OpenGraph settings
ogp_site_url = 'https://nvidia.github.io/NeMo-speech-data-processor/'
ogp_image = 'https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/_static/nv_logo.png'
# MathJax CDN
mathjax_path = "https://cdn.jsdelivr.net/npm/[email protected]/es5/mml-chtml.min.js"
nitpick_ignore = [
('py:class', 'abc.ABC'),
('py:class', 'sdp.processors.base_processor.DataEntry'),
]
# nitpick_ignore_regex = [('py:class', '*')]
| NeMo-speech-data-processor-main | docs/src/conf.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
# overriding with the library specific logger, so that it's possible to
# customize in any downstream applications
logger = logging.getLogger("sdp")
| NeMo-speech-data-processor-main | sdp/logging.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-speech-data-processor-main | sdp/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import tempfile
import uuid
from typing import List
import hydra
from omegaconf import OmegaConf, open_dict
from sdp.logging import logger
# registering new resolvers to simplify config files
OmegaConf.register_new_resolver("subfield", lambda node, field: node[field])
OmegaConf.register_new_resolver("not", lambda x: not x)
OmegaConf.register_new_resolver("equal", lambda field, value: field == value)
# customizing logger
logger.setLevel(logging.INFO)
handler = logging.StreamHandler()
handler.setLevel(logging.INFO)
formatter = logging.Formatter(
'[SDP %(levelname)1.1s %(asctime)s %(module)s:%(lineno)d] %(message)s',
datefmt="%Y-%m-%d %H:%M:%S",
)
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.propagate = False
def select_subset(input_list: List, select_str: str) -> List:
"""This function parses a string and selects objects based on that.
The string is expected to be a valid representation of Python slice. The
only difference with using an actual slice is that we are always returning
a list, never a single element. See examples below for more details.
Examples::
>>> processors_to_run = [1, 2, 3, 4, 5]
>>> select_subset(processors_to_run, "3:") # to exclude first 3 objects
[4, 5]
>>> select_subset(processors_to_run, ":-1") # to select all but last
[1, 2, 3, 4]
>>> select_subset(processors_to_run, "2:5") # to select 3rd to 5th
[3, 4, 5]
>>> # note that unlike normal slice, we still return a list here
>>> select_subset(processors_to_run, "0") # to select only the first
[1]
>>> select_subset(processors_to_run, "-1") # to select only the last
[5]
Args:
input_list (list): input list to select objects from.
select_str (str): string representing Python slice.
Returns:
list: a subset of the input according to the ``select_str``
"""
if ":" not in select_str:
selected_objects = [input_list[int(select_str)]]
else:
slice_obj = slice(*map(lambda x: int(x.strip()) if x.strip() else None, select_str.split(":")))
selected_objects = input_list[slice_obj]
return selected_objects
def run_processors(cfg):
logger.info(f"Hydra config: {OmegaConf.to_yaml(cfg)}")
processors_to_run = cfg.get("processors_to_run", "all")
if processors_to_run == "all":
processors_to_run = ":"
selected_cfgs = select_subset(cfg.processors, processors_to_run)
# filtering out any processors that have should_run=False
processors_cfgs = []
for processor_cfg in selected_cfgs:
with open_dict(processor_cfg):
should_run = processor_cfg.pop("should_run", True)
if should_run:
processors_cfgs.append(processor_cfg)
logger.info(
"Specified to run the following processors: %s ",
[cfg["_target_"] for cfg in processors_cfgs],
)
processors = []
# let's build all processors first to automatically check
# for errors in parameters
with tempfile.TemporaryDirectory() as tmp_dir:
# special check for the first processor.
# In case user selected something that does not start from
# manifest creation we will try to infer the input from previous
# output file
if processors_cfgs[0] is not cfg.processors[0] and "input_manifest_file" not in processors_cfgs[0]:
# locating starting processor
for idx, processor in enumerate(cfg.processors):
if processor is processors_cfgs[0]: # we don't do a copy, so can just check object ids
if "output_manifest_file" in cfg.processors[idx - 1]:
with open_dict(processors_cfgs[0]):
processors_cfgs[0]["input_manifest_file"] = cfg.processors[idx - 1]["output_manifest_file"]
break
for idx, processor_cfg in enumerate(processors_cfgs):
logger.info('=> Building processor "%s"', processor_cfg["_target_"])
# we assume that each processor defines "output_manifest_file"
# and "input_manifest_file" keys, which can be optional. In case they
# are missing, we create tmp files here for them
if "output_manifest_file" not in processor_cfg:
tmp_file_path = os.path.join(tmp_dir, str(uuid.uuid4()))
with open_dict(processor_cfg):
processor_cfg["output_manifest_file"] = tmp_file_path
if idx != len(processors_cfgs) - 1 and "input_manifest_file" not in processors_cfgs[idx + 1]:
with open_dict(processors_cfgs[idx + 1]):
processors_cfgs[idx + 1]["input_manifest_file"] = processor_cfg["output_manifest_file"]
processor = hydra.utils.instantiate(processor_cfg)
# running runtime tests to fail right-away if something is not
# matching users expectations
processor.test()
processors.append(processor)
for processor in processors:
# TODO: add proper str method to all classes for good display
logger.info('=> Running processor "%s"', processor)
processor.process()
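# Illustrative usage sketch (config path and workspace_dir values are hypothetical):
# a thin entry point can load a YAML config with OmegaConf and hand it to run_processors, e.g.
#     cfg = OmegaConf.load("dataset_configs/spanish/mls/config.yaml")
#     cfg.workspace_dir = "/tmp/sdp_workspace"
#     run_processors(cfg)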
| NeMo-speech-data-processor-main | sdp/run_processors.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List
import diff_match_patch
from sdp.utils.edit_spaces import remove_extra_spaces
diff = diff_match_patch.diff_match_patch()
diff.Diff_Timeout = 0
def get_diff(orig_words: str, pred_words: str) -> List[tuple]:
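    # word-level diff trick: put every word on its own line so that diff_match_patch's
    # line-mode diff effectively compares word sequences; newlines are turned back into
    # spaces before returning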
orig_words = remove_extra_spaces(orig_words)
orig_words = orig_words.replace(" ", "\n") + "\n"
pred_words = remove_extra_spaces(pred_words)
pred_words = pred_words.replace(" ", "\n") + "\n"
orig_enc, pred_enc, enc = diff.diff_linesToChars(orig_words, pred_words)
diffs = diff.diff_main(orig_enc, pred_enc, False)
diff.diff_charsToLines(diffs, enc)
diffs_post = []
for d in diffs:
diffs_post.append((d[0], d[1].replace("\n", " ")))
return diffs_post
def get_diff_with_subs_grouped(orig_words: str, pred_words: str) -> List[tuple]:
"""
Function to produce a list of word-level diffs, but with the substitutions
grouped together.
e.g.
orig_words = "hello there nemo"
pred_words = "hello my name is nemo"
will give an output of:
[(0, 'hello '), ((-1, 'there '), (1, 'my name is ')), (0, 'nemo ')]
    (note how the 'there' and 'my name is' entries are grouped together in a tuple)
    This is to make it easier to find substitutions in the diffs, as
    diff_match_patch does not show substitutions clearly, only as a deletion followed by
an insertion.
Args:
orig_words: a string containing the ground truth.
pred_words: a string containing the text predicted by ASR.
Returns:
A list of tuples containing the word-level diffs between the ground truth
and ASR.
"""
diffs = get_diff(orig_words, pred_words)
diffs_group_subs = []
i = 0
while i < len(diffs):
        if i < len(diffs) - 1:  # if i == len(diffs) - 1, accessing diffs[i + 1] would raise an IndexError
if diffs[i][0] == -1 and diffs[i + 1][0] == 1:
diffs_group_subs.append((diffs[i], diffs[i + 1]))
i += 1 # skip extra diff entry so we don't append diffs[i+1] again
else:
diffs_group_subs.append(diffs[i])
else:
diffs_group_subs.append(diffs[i])
i += 1
return diffs_group_subs
| NeMo-speech-data-processor-main | sdp/utils/get_diff.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import difflib
import editdistance
sm = difflib.SequenceMatcher()
def get_cer(text, pred_text):
char_dist = editdistance.eval(text, pred_text)
num_chars = len(text)
cer = round(char_dist / num_chars * 100.0, 2)
return cer
def get_wer(text, pred_text):
text_words = text.split()
pred_text_words = pred_text.split()
word_dist = editdistance.eval(text_words, pred_text_words)
num_words = len(text_words)
wer = round(word_dist / num_words * 100.0, 2)
return wer
def get_charrate(text, duration):
num_chars = len(text)
charrate = round(num_chars / duration, 2)
return charrate
def get_wordrate(text, duration):
num_words = len(text.split())
wordrate = round(num_words / duration, 2)
return wordrate
def get_wmr(text, pred_text):
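    # word match rate: percentage of ground-truth words that appear, in order, in the
    # matching blocks difflib finds between the ground truth and the prediction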
orig = text.strip().split()
sm.set_seqs(orig, pred_text.strip().split())
    num_matches = sum(m[2] for m in sm.get_matching_blocks())  # total size of matching blocks
wmr = round(num_matches / len(orig) * 100.0, 2)
return wmr
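# quick sanity example (illustrative): get_wer("hello world", "hello there world") == 50.0,
# i.e. one word-level edit against a two-word reference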
| NeMo-speech-data-processor-main | sdp/utils/metrics_computation.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-speech-data-processor-main | sdp/utils/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def remove_extra_spaces(input_string):
"""
Removes extra spaces in between words and at the start and end
of the string.
e.g. "abc xyz abc xyz" --> "abc xyz abc xyz"
e.g. " abc xyz " --> "abc xyz"
"""
output_string = " ".join(input_string.split())
return output_string
def add_start_end_spaces(input_string):
"""
Adds spaces at the start and end of the input string.
This is useful for when we specify we are looking for a particular
word " <word> ". This will ensure we will find the word even
if it is at the beginning or end of the utterances (ie. there will
definitely be two spaces around the word).
e.g. "abc xyz" --> " abc xyz "
"""
# ensure no extra spaces
no_extra_spaces_string = remove_extra_spaces(input_string)
output_string = f" {no_extra_spaces_string} "
return output_string
| NeMo-speech-data-processor-main | sdp/utils/edit_spaces.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import tarfile
import urllib
import zipfile
import wget
from sdp.logging import logger
def download_file(source_url: str, target_directory: str, verbose: bool = True):
# make sure target_directory is an absolute path to avoid bugs when we change directories to download data later
target_directory = os.path.abspath(target_directory)
if verbose:
logger.info(f"Trying to download data from {source_url} and save it in this directory: {target_directory}")
filename = os.path.basename(urllib.parse.urlparse(source_url).path)
target_filepath = os.path.join(target_directory, filename)
if os.path.exists(target_filepath):
if verbose:
logger.info(f"Found file {target_filepath} => will not be attempting download from {source_url}")
else:
original_dir = os.getcwd() # record current working directory so can cd back to it
os.chdir(target_directory) # cd to target dir so that temporary download file will be saved in target dir
wget.download(source_url, target_directory)
# change back to original directory as the rest of the code may assume that we are in that directory
os.chdir(original_dir)
if verbose:
logger.info("Download completed")
return target_filepath
def extract_archive(archive_path: str, extract_path: str, force_extract: bool = False) -> str:
logger.info(f"Attempting to extract all contents from tar file {archive_path} and save in {extract_path}")
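    # when force_extract is set, extraction always happens and None is returned, since the
    # archive's top-level directory name is not inspected in that case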
if not force_extract:
if tarfile.is_tarfile(archive_path):
with tarfile.open(archive_path, "r") as archive:
archive_extracted_dir = archive.getnames()[0]
elif zipfile.is_zipfile(archive_path):
with zipfile.ZipFile(archive_path, "r") as archive:
archive_extracted_dir = archive.namelist()[0]
else:
raise RuntimeError(f"Unknown archive format: {archive_path}. We only support tar and zip archives.")
archive_contents_dir = os.path.join(extract_path, archive_extracted_dir)
if not force_extract and os.path.exists(archive_contents_dir):
logger.info(f"Directory {archive_contents_dir} already exists => will not attempt to extract file")
else:
if tarfile.is_tarfile(archive_path):
with tarfile.open(archive_path, "r") as archive:
archive.extractall(path=extract_path)
elif zipfile.is_zipfile(archive_path):
with zipfile.ZipFile(archive_path, "r") as archive:
archive.extractall(extract_path)
logger.info("Finished extracting")
if force_extract:
return None
return archive_contents_dir
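# Illustrative usage sketch (URL and paths are hypothetical):
#     os.makedirs("/tmp/raw_data", exist_ok=True)
#     archive_path = download_file("https://example.com/data.tar.gz", "/tmp/raw_data")
#     data_dir = extract_archive(archive_path, "/tmp/raw_data")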
| NeMo-speech-data-processor-main | sdp/utils/common.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# let's import all supported processors here to simplify target specification
from sdp.processors.datasets.coraal import (
CreateInitialManifestCORAAL,
TrainDevTestSplitCORAAL,
)
from sdp.processors.datasets.mcv.create_initial_manifest import CreateInitialManifestMCV
from sdp.processors.datasets.mls.create_initial_manifest import CreateInitialManifestMLS
from sdp.processors.datasets.mls.restore_pc import RestorePCForMLS
from sdp.processors.datasets.slr83.create_initial_manifest import (
CreateInitialManifestSLR83,
CustomDataSplitSLR83,
)
from sdp.processors.datasets.voxpopuli.create_initial_manifest import (
CreateInitialManifestVoxpopuli,
)
from sdp.processors.datasets.voxpopuli.normalize_from_non_pc_text import (
NormalizeFromNonPCTextVoxpopuli,
)
from sdp.processors.modify_manifest.common import (
AddConstantFields,
ChangeToRelativePath,
CombineSources,
DuplicateFields,
RenameFields,
SortManifest,
SplitOnFixedDuration,
KeepOnlySpecifiedFields,
)
from sdp.processors.modify_manifest.data_to_data import (
InsIfASRInsertion,
SubIfASRSubstitution,
SubMakeLowercase,
SubRegex,
)
from sdp.processors.modify_manifest.data_to_dropbool import (
DropASRError,
DropASRErrorBeginningEnd,
DropHighCER,
DropHighLowCharrate,
DropHighLowDuration,
DropHighLowWordrate,
DropHighWER,
DropIfNoneOfRegexMatch,
DropIfRegexMatch,
DropIfSubstringInInsertion,
DropLowWordMatchRate,
DropNonAlphabet,
DropOnAttribute,
)
from sdp.processors.modify_manifest.make_letters_uppercase_after_period import (
MakeLettersUppercaseAfterPeriod,
)
from sdp.processors.nemo.asr_inference import ASRInference
from sdp.processors.nemo.pc_inference import PCInference
| NeMo-speech-data-processor-main | sdp/processors/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import json
import multiprocessing
import os
from abc import ABC, abstractmethod
from dataclasses import dataclass
from typing import Any, Dict, List, Optional
from tqdm import tqdm
from tqdm.contrib.concurrent import process_map
from sdp.logging import logger
@dataclass
class DataEntry:
"""A wrapper for data entry + any additional metrics."""
data: Optional[Dict] # can be None to drop the entry
metrics: Any = None
class BaseProcessor(ABC):
"""Abstract class for SDP processors.
All processor classes inherit from the ``BaseProcessor`` class.
This is a simple abstract class which has 2 empty methods: :meth:`process`
and :meth:`test`.
These serve to remind us that SDP essentially just runs ``.test()`` on all
processors (to implement :ref:`run-time tests <sdp-runtime-tests>`),
and then ``.process()`` on all processors.
Args:
output_manifest_file (str): path of where the output manifest file will
be located.
input_manifest_file (str): path of where the input manifest file is
located. This arg is optional - some processors may not take in
an input manifest because they need to create an initial manifest
from scratch (ie from some transcript file that is in a format
different to the NeMo manifest format).
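    A minimal subclass (purely illustrative - the class name is made up) only has to
    implement :meth:`process`, reading ``input_manifest_file`` and writing
    ``output_manifest_file``::
        import json
        class UppercaseText(BaseProcessor):
            def process(self):
                with open(self.input_manifest_file, "rt", encoding="utf8") as fin:
                    entries = [json.loads(line) for line in fin]
                with open(self.output_manifest_file, "wt", encoding="utf8") as fout:
                    for entry in entries:
                        entry["text"] = entry["text"].upper()
                        fout.write(json.dumps(entry, ensure_ascii=False) + "\n")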
"""
def __init__(self, output_manifest_file: str, input_manifest_file: Optional[str] = None):
self.output_manifest_file = output_manifest_file
self.input_manifest_file = input_manifest_file
@abstractmethod
def process(self):
"""Should be overriden by the child classes to implement some data processing."""
pass
def test(self):
"""This method can be used to perform "runtime" tests.
        This can be any kind of self-consistency test, but it is usually
        in the form of checking that provided input test data entries match
        provided output test data entries.
        There are no tests by default.
"""
class BaseParallelProcessor(BaseProcessor):
"""Processor class which allows operations on each utterance to be parallelized.
Parallelization is done using ``tqdm.contrib.concurrent.process_map`` inside
the :meth:`process` method. Actual processing should be defined on a
    per-example basis inside the :meth:`process_dataset_entry` method.
See the documentation of all the methods for more details.
Args:
max_workers (int): maximum number of workers that will be spawned
during the parallel processing.
chunksize (int): the size of the chunks that will be sent to worker processes.
test_cases (list[dict]): an optional list of dicts containing test
cases for checking that the processor makes the changes that we
are expecting.
The dicts must have a key ``input``, the value of which is a dictionary
containing data which is our test's input manifest line, and a key
``output``, the value of which is a dictionary containing data which is
the expected output manifest line.
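            For example, a processor config might specify (values are illustrative)::
                test_cases:
                  - {input: {text: "Hello World"}, output: {text: "hello world"}}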
"""
def __init__(
self,
max_workers: int = -1,
chunksize: int = 100,
test_cases: Optional[List[Dict]] = None,
**kwargs
):
super().__init__(**kwargs)
if max_workers == -1:
max_workers = multiprocessing.cpu_count()
self.max_workers = max_workers
self.chunksize = chunksize
self.number_of_entries = 0
self.total_duration = 0
self.test_cases = test_cases
# need to convert to list to avoid errors in iteration over None
if self.test_cases is None:
self.test_cases = []
def process(self):
"""Parallelized implementation of the data processing.
The execution flow of this method is the following.
1. :meth:`prepare` is called. It's empty by default but can be used to
e.g. download the initial data files or compute some aggregates
required for subsequent processing.
2. A list of data entries is created by calling :meth:`read_manifest`.
Default implementation reads an input manifest file and returns a
list of dictionaries for each line (we assume a standard NeMo format
of one json per line).
3. :meth:`process_dataset_entry` is called **in parallel** on each element
of the list created in the previous step. Note that you cannot create
any new counters or modify the attributes of this class in any way
           inside that function as this will lead to undefined behavior.
           Each call to :meth:`process_dataset_entry` returns a list of
           ``DataEntry`` objects that are then aggregated together. ``DataEntry``
           simply defines the ``data`` and ``metrics`` fields.
4. We loop through all returned data entries and do the following
a) All ``metrics`` keys are collected in a separate list and passed
over to the :meth:`finalize` method for any desired metric
aggregation and reporting.
b) If ``data`` is set to None, the objects are ignored (metrics are
still collected).
c) All non-ignored objects are dumped to the output manifest file
with a call to ``json.dump``, one object per-line.
Here is a diagram outlining the execution flow of this method:
.. can only be viewed in the online documentation
.. raw:: html
<div align="center">
<img src="https://mermaid.ink/img/pako:eNplUl1r6zAM_SvCFy4pbL3vvaVwu-59sL0tl6LESmqIP7DkjWzsv89O0rVjzosiHR8dHetdtV6T2qg-YjjB0-Fv7SAfTs2cqdWjUGAwDrYiuz0yPWDEYaDhIfqWmH1chzmqVts_GQOW5OR1rWaqcv4916pcZxq6jKaAkRb0tok7IBtkXO5BM4KmDtMgUIotOmgIEpMG8VOK1v0atH91g0cNEV9BoyBgEm9RTJvljbX6D7e3O9hfVOyvVURCfbToTEcs11pKocwbksC5PnWFyhB00VvIE7wYnxiWwY3rgbNNqwlnOpATRQLD4B2dhdxdhNx9t2PiOJYRmORITuJYlb85XEydFGDDErGVL4tn6gNcuA-Zm_GFwCf5McJvwL6P1KNQoYim5SlfTY7-At9BEmHQ0YdAenVucH_hv7_W3hmHg3mj40JWXYudX8lwGHD86rb4d7YtN6hd-Qo1Oa1ulKVo0ei8k-8lXatsps0ubnK47EVZrY8MLQ_-OLpWbSQmulEpZNvoYDDvrlWbDgemj0-10vX9" height=100% />
</div>
"""
self.prepare()
dataset_entries = self.read_manifest()
# this will unroll all inner lists
data = itertools.chain(
*process_map(
self.process_dataset_entry,
dataset_entries,
max_workers=self.max_workers,
chunksize=self.chunksize,
)
)
metrics = []
os.makedirs(os.path.dirname(self.output_manifest_file), exist_ok=True)
with open(self.output_manifest_file, "wt", encoding="utf8") as fout:
for data_entry in tqdm(data):
metrics.append(data_entry.metrics)
if data_entry.data is None:
continue
json.dump(data_entry.data, fout, ensure_ascii=False)
self.number_of_entries += 1
self.total_duration += data_entry.data.get("duration", 0)
fout.write("\n")
self.finalize(metrics)
def prepare(self):
"""Can be used in derived classes to prepare the processing in any way.
E.g., download data or compute some aggregates. Will be called before
starting processing the data.
"""
def read_manifest(self):
"""Reading the input manifest file.
.. note::
            This function should be overridden in the "initial" manifest-creation
            classes to read the data from its original source.
"""
if self.input_manifest_file is None:
raise NotImplementedError("Override this method if the processor creates initial manifest")
# TODO: should we not assume that manifest can fully fit in memory?
with open(self.input_manifest_file, "rt", encoding="utf8") as fin:
dataset_entries = [json.loads(line) for line in fin.readlines()]
return dataset_entries
@abstractmethod
def process_dataset_entry(self, data_entry) -> List[DataEntry]:
"""Needs to be implemented in the derived classes.
Each returned value should be a ``DataEntry`` object that will hold
a dictionary (or anything else that can be json-serialized) with
the actual data + any additional metrics required for statistics
reporting. Those metrics can be used in :meth:`finalize` to
prepare for final reporting.
``DataEntry`` is a simple dataclass defined in the following way::
@dataclass
class DataEntry:
# can be None to drop the entry
data: Optional[Dict]
# anything - you'd need to aggregate all
# values in the finalize method manually
metrics: Any = None
.. note::
This method should always return a list of objects to allow a
one-to-many mapping. E.g., if you want to cut an utterance into
multiple smaller parts, you can return a list of all the produced
utterances and they will be handled correctly.
The many-to-one mapping is not currently supported by design of
this method (but can still be done if you don't inherit from
this class and process the data sequentially).
Args:
data_entry: most often, ``data_entry`` will be a dictionary
containing items which represent the JSON manifest entry.
Sometimes, such as in :class:`sdp.processors.CreateInitialManifestMLS`,
it will be a string containing a line for that utterance
from the original raw MLS transcript. In general it is an element
of the list returned from the :meth:`read_manifest` method.
"""
# TODO: it would be more straightforward to use a generator here, but
# seems that it's not supported with multiprocessing. Is there a
# way to make it work?
def finalize(self, metrics: List):
"""Can be used to output statistics about the processed data.
        By default, outputs the new number of entries and the total duration in hours.
Args:
metrics (list): a list containing all ``metrics`` keys from the
data entries returned from the :meth:`process_dataset_entry`
method.
"""
logger.info("Total number of entries after processing: %d", self.number_of_entries)
if self.total_duration != 0:
logger.info("Total audio duration (hours) after processing: %.2f", self.total_duration / 3600)
def test(self):
"""Applies processing to "test_cases" and raises an error in case of mismatch."""
for test_case in self.test_cases:
generated_outputs = self.process_dataset_entry(test_case["input"].copy())
expected_outputs = (
[test_case["output"]] if not isinstance(test_case["output"], list) else test_case["output"]
)
for generated_output, expected_output in zip(generated_outputs, expected_outputs):
generated_output = generated_output.data
if generated_output != expected_output:
raise RuntimeError(
"Runtime test failed.\n"
f"Test input: {test_case['input']}\n"
f"Generated output: {generated_output}\n"
f"Expected output: {expected_output}"
)
| NeMo-speech-data-processor-main | sdp/processors/base_processor.py |
NeMo-speech-data-processor-main | sdp/processors/nemo/__init__.py |
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import subprocess
from pathlib import Path
from sdp.processors.base_processor import BaseProcessor
# Note that we do not re-use base parallel implementation, since the ASR
# inference is already run in batches.
# TODO: actually, it might still be beneficial to have another level of
# parallelization, but that needs to be tested.
class ASRInference(BaseProcessor):
"""This processor performs ASR inference on each utterance of the input manifest.
ASR predictions will be saved in the ``pred_text`` key.
Args:
pretrained_model (str): the name of the pretrained NeMo ASR model
which will be used to do inference.
batch_size (int): the batch size to use for ASR inference. Defaults to 32.
Returns:
The same data as in the input manifest with an additional field
``pred_text`` containing ASR model's predictions.
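    A usage sketch (the model name and file paths below are placeholders,
    not recommendations)::
        processor = ASRInference(
            pretrained_model="stt_en_conformer_ctc_large",
            batch_size=16,
            input_manifest_file="manifest.json",
            output_manifest_file="manifest_with_preds.json",
        )
        processor.process()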
"""
def __init__(
self,
pretrained_model: str,
batch_size: int = 32,
**kwargs,
):
super().__init__(**kwargs)
self.script_path = Path(__file__).parents[1] / "nemo" / "transcribe_speech.py"
self.pretrained_model = pretrained_model
self.batch_size = batch_size
def process(self):
"""This will add "pred_text" key into the output manifest."""
os.makedirs(os.path.dirname(self.output_manifest_file), exist_ok=True)
subprocess.run(
f"python {self.script_path} "
f"pretrained_name={self.pretrained_model} "
f"dataset_manifest={self.input_manifest_file} "
f"output_filename={self.output_manifest_file} "
f"batch_size={self.batch_size} ",
shell=True,
check=True,
)
| NeMo-speech-data-processor-main | sdp/processors/nemo/asr_inference.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from pathlib import Path
from typing import Dict, List, Optional, Union
from sdp.processors.base_processor import BaseProcessor
def load_manifest(manifest: Path) -> List[Dict[str, Union[str, float]]]:
result = []
with manifest.open() as f:
for i, line in enumerate(f):
data = json.loads(line)
result.append(data)
return result
class PCInference(BaseProcessor):
"""Adds predictions of a text-based punctuation and capitalization (P&C) model.
Operates on the text in the ``input_text_field``, and saves predictions in
the ``output_text_field``.
Args:
input_text_field (str): the text field that will be the input to the P&C model.
output_text_field (str): the text field where the output of the PC model
will be saved.
        batch_size (int): the batch size used by the P&C model.
device (str): the device used by the P&C model. Can be skipped to auto-select.
pretrained_name (str): the pretrained_name of the P&C model.
model_path (str): the model path to the P&C model.
.. note::
        Either ``pretrained_name`` or ``model_path`` has to be specified.
Returns:
The same data as in the input manifest with an additional field
<output_text_field> containing P&C model's predictions.
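    A usage sketch, e.g. for restoring P&C in ASR predictions (the model name
    and file paths below are placeholders)::
        processor = PCInference(
            input_text_field="pred_text",
            output_text_field="pred_text_pc",
            batch_size=64,
            pretrained_name="punctuation_en_bert",
            input_manifest_file="manifest_with_preds.json",
            output_manifest_file="manifest_with_pc_preds.json",
        )
        processor.process()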
"""
def __init__(
self,
input_text_field: str,
output_text_field: str,
batch_size: int,
device: Optional[str] = None,
pretrained_name: Optional[str] = None,
model_path: Optional[str] = None,
**kwargs,
):
super().__init__(**kwargs)
self.pretrained_name = pretrained_name
self.model_path = model_path
self.input_text_field = input_text_field
self.output_text_field = output_text_field
self.device = device
self.batch_size = batch_size
# verify self.pretrained_name/model_path
if self.pretrained_name is None and self.model_path is None:
raise ValueError("pretrained_name and model_path cannot both be None")
if self.pretrained_name is not None and self.model_path is not None:
raise ValueError("pretrained_name and model_path cannot both be specified")
def process(self):
        import torch  # imported here (not at the top) so that users install nemo first, which brings in torch, rather than torch on its own
from nemo.collections.nlp.models import PunctuationCapitalizationModel
if self.pretrained_name:
model = PunctuationCapitalizationModel.from_pretrained(self.pretrained_name)
else:
model = PunctuationCapitalizationModel.restore_from(self.model_path)
if self.device is None:
if torch.cuda.is_available():
model = model.cuda()
else:
model = model.cpu()
else:
model = model.to(self.device)
manifest = load_manifest(Path(self.input_manifest_file))
texts = []
for item in manifest:
texts.append(item[self.input_text_field])
processed_texts = model.add_punctuation_capitalization(
texts,
batch_size=self.batch_size,
)
Path(self.output_manifest_file).parent.mkdir(exist_ok=True, parents=True)
with Path(self.output_manifest_file).open('w') as f:
for item, t in zip(manifest, processed_texts):
item[self.output_text_field] = t
f.write(json.dumps(item, ensure_ascii=False) + '\n')
| NeMo-speech-data-processor-main | sdp/processors/nemo/pc_inference.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import glob
import json
import os
from dataclasses import dataclass, is_dataclass
from pathlib import Path
from typing import Optional
import pytorch_lightning as pl
import torch
from nemo.collections.asr.metrics.rnnt_wer import RNNTDecodingConfig
from nemo.collections.asr.metrics.wer import CTCDecodingConfig
from nemo.collections.asr.models import ASRModel
from nemo.collections.asr.models.ctc_models import EncDecCTCModel
from nemo.collections.asr.parts.utils.transcribe_utils import transcribe_partial_audio
from nemo.collections.common.tokenizers.aggregate_tokenizer import AggregateTokenizer
from nemo.core.config import hydra_runner
from nemo.utils import logging, model_utils
from omegaconf import OmegaConf
"""
Transcribe audio files on a single CPU/GPU. Useful for transcription of moderate amounts of audio data.
# Arguments
model_path: path to .nemo ASR checkpoint
pretrained_name: name of pretrained ASR model (from NGC registry)
audio_dir: path to directory with audio files
dataset_manifest: path to dataset JSON manifest file (in NeMo format)
compute_langs: Bool to request language ID information (if the model supports it)
output_filename: Output filename where the transcriptions will be written
batch_size: batch size during inference
cuda: Optional int to enable or disable execution of model on certain CUDA device.
amp: Bool to decide if Automatic Mixed Precision should be used during inference
audio_type: Str filetype of the audio. Supported = wav, flac, mp3
overwrite_transcripts: Bool which when set allows repeated transcriptions to overwrite previous results.
rnnt_decoding: Decoding sub-config for RNNT. Refer to documentation for specific values.
# Usage
ASR model can be specified by either "model_path" or "pretrained_name".
Data for transcription can be defined with either "audio_dir" or "dataset_manifest".
append_pred - optional. Allows you to add more than one prediction to an existing .json
pred_name_postfix - optional. The name you want to be written for the current model
Results are returned in a JSON manifest file.
python transcribe_speech.py \
model_path=null \
pretrained_name=null \
audio_dir="" \
dataset_manifest="" \
output_filename="" \
batch_size=32 \
compute_langs=False \
cuda=0 \
amp=True \
append_pred=False \
pred_name_postfix=""
"""
@dataclass
class TranscriptionConfig:
# Required configs
model_path: Optional[str] = None # Path to a .nemo file
pretrained_name: Optional[str] = None # Name of a pretrained model
audio_dir: Optional[str] = None # Path to a directory which contains audio files
dataset_manifest: Optional[str] = None # Path to dataset's JSON manifest
# General configs
output_filename: Optional[str] = None
batch_size: int = 32
num_workers: int = 0
    append_pred: bool = False  # Sets mode of work: if True, adds a new prediction field to the existing manifest entries.
    pred_name_postfix: Optional[str] = None  # Postfix to use (instead of the model name) in the prediction field name and default output filename.
# Set to True to output language ID information
compute_langs: bool = False
# Set `cuda` to int to define CUDA device. If 'None', will look for CUDA
# device anyway, and do inference on CPU only if CUDA device is not found.
# If `cuda` is a negative number, inference will be on CPU only.
cuda: Optional[int] = None
amp: bool = False
audio_type: str = "wav"
# Recompute model transcription, even if the output folder exists with scores.
overwrite_transcripts: bool = True
# Decoding strategy for CTC models
ctc_decoding: CTCDecodingConfig = CTCDecodingConfig()
# Decoding strategy for RNNT models
rnnt_decoding: RNNTDecodingConfig = RNNTDecodingConfig(fused_batch_size=-1)
@hydra_runner(config_name="TranscriptionConfig", schema=TranscriptionConfig)
def main(cfg: TranscriptionConfig) -> TranscriptionConfig:
logging.info(f'Hydra config: {OmegaConf.to_yaml(cfg)}')
if is_dataclass(cfg):
cfg = OmegaConf.structured(cfg)
if cfg.model_path is None and cfg.pretrained_name is None:
raise ValueError("Both cfg.model_path and cfg.pretrained_name cannot be None!")
if cfg.audio_dir is None and cfg.dataset_manifest is None:
raise ValueError("Both cfg.audio_dir and cfg.dataset_manifest cannot be None!")
# setup GPU
if cfg.cuda is None:
if torch.cuda.is_available():
device = [0] # use 0th CUDA device
accelerator = 'gpu'
else:
device = 1
accelerator = 'cpu'
else:
device = [cfg.cuda]
accelerator = 'gpu'
map_location = torch.device('cuda:{}'.format(device[0]) if accelerator == 'gpu' else 'cpu')
# setup model
if cfg.model_path is not None:
# restore model from .nemo file path
model_cfg = ASRModel.restore_from(restore_path=cfg.model_path, return_config=True)
classpath = model_cfg.target # original class path
imported_class = model_utils.import_class_by_path(classpath) # type: ASRModel
logging.info(f"Restoring model : {imported_class.__name__}")
asr_model = imported_class.restore_from(
restore_path=cfg.model_path, map_location=map_location
) # type: ASRModel
model_name = os.path.splitext(os.path.basename(cfg.model_path))[0]
else:
# restore model by name
asr_model = ASRModel.from_pretrained(
model_name=cfg.pretrained_name, map_location=map_location
) # type: ASRModel
model_name = cfg.pretrained_name
trainer = pl.Trainer(devices=device, accelerator=accelerator)
asr_model.set_trainer(trainer)
asr_model = asr_model.eval()
partial_audio = False
# collect additional transcription information
return_hypotheses = True
    # we will adjust this flag if the model does not support it
compute_langs = cfg.compute_langs
# Setup decoding strategy
if hasattr(asr_model, 'change_decoding_strategy'):
# Check if ctc or rnnt model
if hasattr(asr_model, 'joint'): # RNNT model
cfg.rnnt_decoding.fused_batch_size = -1
cfg.rnnt_decoding.compute_langs = cfg.compute_langs
asr_model.change_decoding_strategy(cfg.rnnt_decoding)
else:
asr_model.change_decoding_strategy(cfg.ctc_decoding)
if cfg.audio_dir is not None and not cfg.append_pred:
filepaths = list(glob.glob(os.path.join(cfg.audio_dir, f"**/*.{cfg.audio_type}"), recursive=True))
else:
# get filenames from manifest
filepaths = []
if os.stat(cfg.dataset_manifest).st_size == 0:
logging.error(f"The input dataset_manifest {cfg.dataset_manifest} is empty. Exiting!")
return None
manifest_dir = Path(cfg.dataset_manifest).parent
with open(cfg.dataset_manifest, 'r') as f:
has_two_fields = []
for line in f:
item = json.loads(line)
if "offset" in item and "duration" in item:
has_two_fields.append(True)
else:
has_two_fields.append(False)
audio_file = Path(item['audio_filepath'])
if not audio_file.is_file() and not audio_file.is_absolute():
audio_file = manifest_dir / audio_file
filepaths.append(str(audio_file.absolute()))
partial_audio = all(has_two_fields)
logging.info(f"\nTranscribing {len(filepaths)} files...\n")
# setup AMP (optional)
if cfg.amp and torch.cuda.is_available() and hasattr(torch.cuda, 'amp') and hasattr(torch.cuda.amp, 'autocast'):
logging.info("AMP enabled!\n")
autocast = torch.cuda.amp.autocast
else:
@contextlib.contextmanager
def autocast():
yield
# Compute output filename
if cfg.output_filename is None:
# create default output filename
if cfg.audio_dir is not None:
cfg.output_filename = os.path.dirname(os.path.join(cfg.audio_dir, '.')) + '.json'
elif cfg.pred_name_postfix is not None:
cfg.output_filename = cfg.dataset_manifest.replace('.json', f'_{cfg.pred_name_postfix}.json')
else:
cfg.output_filename = cfg.dataset_manifest.replace('.json', f'_{model_name}.json')
# if transcripts should not be overwritten, and already exists, skip re-transcription step and return
if not cfg.overwrite_transcripts and os.path.exists(cfg.output_filename):
logging.info(
f"Previous transcripts found at {cfg.output_filename}, and flag `overwrite_transcripts`"
f"is {cfg.overwrite_transcripts}. Returning without re-transcribing text."
)
return cfg
# transcribe audio
with autocast():
with torch.no_grad():
if partial_audio:
if isinstance(asr_model, EncDecCTCModel):
transcriptions = transcribe_partial_audio(
asr_model=asr_model,
path2manifest=cfg.dataset_manifest,
batch_size=cfg.batch_size,
num_workers=cfg.num_workers,
return_hypotheses=return_hypotheses,
)
else:
logging.warning(
"RNNT models do not support transcribe partial audio for now. Transcribing full audio."
)
transcriptions = asr_model.transcribe(
paths2audio_files=filepaths,
batch_size=cfg.batch_size,
num_workers=cfg.num_workers,
return_hypotheses=return_hypotheses,
)
else:
transcriptions = asr_model.transcribe(
paths2audio_files=filepaths,
batch_size=cfg.batch_size,
num_workers=cfg.num_workers,
return_hypotheses=return_hypotheses,
)
logging.info(f"Finished transcribing {len(filepaths)} files !")
logging.info(f"Writing transcriptions into file: {cfg.output_filename}")
# if transcriptions form a tuple (from RNNT), extract just "best" hypothesis
if type(transcriptions) == tuple and len(transcriptions) == 2:
transcriptions = transcriptions[0]
# write audio transcriptions
if cfg.append_pred:
logging.info(f'Transcripts will be written in "{cfg.output_filename}" file')
if cfg.pred_name_postfix is not None:
pred_by_model_name = cfg.pred_name_postfix
else:
pred_by_model_name = model_name
pred_text_attr_name = 'pred_text_' + pred_by_model_name
else:
pred_text_attr_name = 'pred_text'
with open(cfg.output_filename, 'w', encoding='utf-8') as f:
if cfg.audio_dir is not None:
for idx, transcription in enumerate(transcriptions):
item = {'audio_filepath': filepaths[idx], pred_text_attr_name: transcription.text}
if compute_langs:
item['pred_lang'] = transcription.langs
item['pred_lang_chars'] = transcription.langs_chars
f.write(json.dumps(item, ensure_ascii=False) + "\n")
else:
with open(cfg.dataset_manifest, 'r') as fr:
for idx, line in enumerate(fr):
item = json.loads(line)
item[pred_text_attr_name] = transcriptions[idx].text
if compute_langs:
item['pred_lang'] = transcriptions[idx].langs
item['pred_lang_chars'] = transcriptions[idx].langs_chars
f.write(json.dumps(item, ensure_ascii=False) + "\n")
logging.info("Finished writing predictions !")
return cfg
if __name__ == '__main__':
main() # noqa pylint: disable=no-value-for-parameter
| NeMo-speech-data-processor-main | sdp/processors/nemo/transcribe_speech.py |
NeMo-speech-data-processor-main | sdp/processors/datasets/__init__.py |
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from sdp.processors.base_processor import BaseParallelProcessor, DataEntry
class TrainDevTestSplitCORAAL(BaseParallelProcessor):
"""Custom train-dev-test split for CORAAL dataset.
Split is done speaker-wise, so the same speakers don't appear in different
splits.
Args:
data_split (str): train, dev or test.
Returns:
All the same fields as in the input manifest, but only a subset of
the data is retained.
"""
def __init__(
self,
data_split: str,
**kwargs,
):
super().__init__(**kwargs)
if data_split not in ["train", "dev", "test"]:
raise ValueError("data_split has to be either train, dev or test")
self.data_split = data_split
self.split_map = {}
self.split_map["train"] = set(
[
'ATL_se0_ag1_m',
'DCA_se1_ag1_f',
'DCA_se1_ag2_f',
'DCA_se1_ag2_m',
'DCA_se1_ag3_f',
'DCA_se1_ag3_m',
'DCA_se1_ag4_m',
'DCA_se2_ag1_f',
'DCA_se2_ag1_m',
'DCA_se2_ag2_m',
'DCB_se1_ag1_m',
'DCB_se1_ag2_f',
'DCB_se1_ag2_m',
'DCB_se1_ag3_f',
'DCB_se1_ag3_m',
'DCB_se1_ag4_f',
'DCB_se1_ag4_m',
'DCB_se2_ag1_f',
'DCB_se2_ag1_m',
'DCB_se2_ag2_f',
'LES_se0_ag2_f',
'LES_se0_ag2_m',
'PRV_se0_ag1_f',
'PRV_se0_ag2_f',
'ROC_se0_ag1_m',
'ROC_se0_ag2_f',
'VLD_se0_ag2_f',
'VLD_se0_ag2_m',
]
)
self.split_map["dev"] = set(
[
'ATL_se0_ag1_f',
'DCA_se1_ag1_m',
'DCB_se1_ag1_f',
'LES_se0_ag3_f',
'PRV_se0_ag1_m',
'ROC_se0_ag1_f',
'VLD_se0_ag3_f',
]
)
self.split_map["test"] = set(
[
'ATL_se0_ag2_f',
'ATL_se0_ag2_m',
'DCA_se2_ag3_m',
'DCA_se2_ag4_f',
'DCA_se2_ag4_m',
'DCA_se3_ag1_f',
'DCA_se3_ag1_m',
'DCA_se3_ag2_f',
'DCA_se3_ag2_m',
'DCA_se3_ag3_f',
'DCA_se3_ag3_m',
'DCA_se3_ag4_m',
'DCB_se2_ag2_m',
'DCB_se2_ag3_f',
'DCB_se2_ag3_m',
'DCB_se2_ag4_f',
'DCB_se2_ag4_m',
'DCB_se3_ag1_f',
'DCB_se3_ag1_m',
'DCB_se3_ag2_f',
'DCB_se3_ag3_f',
'DCB_se3_ag3_m',
'DCB_se3_ag4_f',
'DCB_se3_ag4_m',
'LES_se0_ag3_m',
'LES_se0_ag4_f',
'LES_se0_ag4_m',
'PRV_se0_ag2_m',
'PRV_se0_ag3_f',
'PRV_se0_ag3_m',
'ROC_se0_ag2_m',
'ROC_se0_ag3_f',
'ROC_se0_ag3_m',
'VLD_se0_ag3_m',
'VLD_se0_ag4_f',
'VLD_se0_ag4_m',
]
)
def process_dataset_entry(self, data_entry):
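        # "original_file" is expected to look like e.g. 'DCA_se1_ag1_f_01_1'
        # (illustrative value); stripping the last 5 characters ('_01_1') leaves
        # the speaker-group id that the split map above is keyed on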
if data_entry["original_file"][:-5] in self.split_map[self.data_split]:
return [DataEntry(data=data_entry)]
return []
| NeMo-speech-data-processor-main | sdp/processors/datasets/coraal/data_splits.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .create_initial_manifest import CreateInitialManifestCORAAL
from .data_splits import TrainDevTestSplitCORAAL
| NeMo-speech-data-processor-main | sdp/processors/datasets/coraal/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import glob
import os
import urllib.request
from pathlib import Path
import pandas as pd
from sox import Transformer
from sdp.processors.base_processor import BaseParallelProcessor, DataEntry
from sdp.utils.common import download_file, extract_archive
def get_coraal_url_list():
"""Returns url list for CORAAL dataset.
There are a few mistakes in the official url list that are fixed here.
Can be overridden by tests to select a subset of urls.
"""
dataset_url = "http://lingtools.uoregon.edu/coraal/coraal_download_list.txt"
urls = []
for file_url in urllib.request.urlopen(dataset_url):
file_url = file_url.decode('utf-8').strip()
# fixing known errors in the urls
if file_url == 'http://lingtools.uoregon.edu/coraal/les/2021.07/LES_metadata_2018.10.06.txt':
file_url = 'http://lingtools.uoregon.edu/coraal/les/2021.07/LES_metadata_2021.07.txt'
if file_url == 'http://lingtools.uoregon.edu/coraal/vld/2021.07/VLD_metadata_2018.10.06.txt':
file_url = 'http://lingtools.uoregon.edu/coraal/vld/2021.07/VLD_metadata_2021.07.txt'
urls.append(file_url)
return urls
class CreateInitialManifestCORAAL(BaseParallelProcessor):
"""Processor to create initial manifest for the Corpus of Regional African American Language (CORAAL) dataset.
Dataset link: https://oraal.uoregon.edu/coraal/
Will download all files, extract tars and split wav files based on the
provided durations in the transcripts.
Args:
raw_data_dir (str): where to put raw downloaded data.
resampled_audio_dir (str): where to put re-sampled and trimmed wav files.
target_samplerate (int): sample rate to resample to. Defaults to 16000.
target_nchannels (int): target number of channels. Defaults to 1.
drop_pauses (bool): if True, will drop all transcriptions that contain
only silence (indicated by ``(pause X)`` in the transcript).
Defaults to True.
group_duration_threshold (float): can be used to group consecutive
            utterances from the same speaker into segments of roughly this duration (in seconds). Set to 0
to disable this grouping (but note that many utterances are
transcribed with only a few seconds, so grouping is generally
advised). Defaults to 20.
Returns:
This processor generates an initial manifest file with the following fields::
{
"audio_filepath": <path to the audio file>,
"duration": <duration of the audio in seconds>,
"text": <transcription>,
"original_file": <name of the original file in the dataset this audio came from>,
"speaker": <speaker id>,
"is_interviewee": <whether this is an interviewee (accented speech)>,
"gender": <speaker gender>,
"age": <speaker age>,
"education": <speaker education>,
"occupation": <speaker occupation>,
}
"""
def __init__(
self,
raw_data_dir: str,
resampled_audio_dir: str,
target_samplerate: int = 16000,
target_nchannels: int = 1,
drop_pauses: bool = True,
group_duration_threshold: float = 20.0,
**kwargs,
):
super().__init__(**kwargs)
self.raw_data_dir = Path(raw_data_dir)
self.resampled_audio_dir = resampled_audio_dir
self.target_samplerate = target_samplerate
self.target_nchannels = target_nchannels
self.drop_pauses = drop_pauses
self.group_duration_threshold = group_duration_threshold
def prepare(self):
os.makedirs(self.raw_data_dir, exist_ok=True)
os.makedirs(self.resampled_audio_dir, exist_ok=True)
# downloading all files
for file_url in get_coraal_url_list():
download_file(file_url, str(self.raw_data_dir))
os.makedirs(self.raw_data_dir / "audio", exist_ok=True)
os.makedirs(self.raw_data_dir / "transcripts", exist_ok=True)
# extracting all files
for data_file in glob.glob(f'{self.raw_data_dir}/*_audio_*.tar.gz'):
# need to set force_extract=True, since there is no folder inside, just a list of files
# and we extract data from multiple tars. Ideally, should change the way we check
# for extracted data (currently there is an assumption that all data in archive is in a single folder)
extract_archive(data_file, self.raw_data_dir / "audio", force_extract=True)
for data_file in glob.glob(f'{self.raw_data_dir}/*_textfiles_*.tar.gz'):
extract_archive(data_file, self.raw_data_dir / "transcripts", force_extract=True)
def read_manifest(self):
dfs = []
for data_file in glob.glob(f'{self.raw_data_dir}/transcripts/*.txt'):
df = pd.read_csv(data_file, delimiter='\t')
            df['Basefile'] = os.path.basename(data_file)[:-4]  # dropping the .txt extension
if self.drop_pauses:
df = df[~df['Content'].str.contains(r'\(pause \d+(?:\.\d+)?\)')]
# grouping consecutive segments from the same speaker
if self.group_duration_threshold > 0:
df['Duration'] = df['EnTime'] - df['StTime']
# puts each sequence of same speaker utts in a "bin"
speaker_bins = (~df['Spkr'].eq(df['Spkr'].shift())).cumsum()
# within each bin, computes cumulative duration and then int-divides by the threshold
df['ThresholdMult'] = df.groupby(speaker_bins)['Duration'].transform(
lambda x: pd.Series.cumsum(x) // self.group_duration_threshold
)
# finally, we take all positions where the int-division changes,
# which indicates that cumsum exceded the threshold. And combine those
# with speaker-change positions to get the final groups for utterance merging
final_bins = (
(~df['Spkr'].eq(df['Spkr'].shift())) | (~df['ThresholdMult'].eq(df['ThresholdMult'].shift()))
).cumsum()
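                # illustrative example (made-up numbers): for Spkr = [A, A, A, B],
                # Duration = [12, 10, 5, 8] and a 20 s threshold, speaker_bins is
                # [1, 1, 1, 2], ThresholdMult is [0, 1, 1, 0], so final_bins is
                # [1, 2, 2, 3] and the second and third utterances get merged below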
df = df.groupby(final_bins).agg(
{
'StTime': 'min',
'EnTime': 'max',
'Content': ' '.join,
# will be the same in the group
'Spkr': lambda x: x.iloc[0],
'Basefile': lambda x: x.iloc[0],
}
)
# assigning label for interviewee vs interviewer (can be used to select a subset later)
df['is_interviewee'] = df.apply(lambda x: x['Spkr'] in x['Basefile'], axis=1)
# matching with metadata (age, gender, etc.)
metadata_dfs = []
for data_file in glob.glob(f'{self.raw_data_dir}/*_metadata_*.txt'):
metadata_dfs.append(pd.read_csv(data_file, delimiter='\t'))
metadata_df = pd.concat(metadata_dfs)
# only selecting a subset of columns - can be changed if more are needed
# dropping duplicates since there are multiple rows per speaker because of
# bit-rate, tar name and other file-specific information
metadata_df = metadata_df[['CORAAL.Spkr', 'Gender', 'Age', 'Education', 'Occupation']].drop_duplicates()
df = df.merge(metadata_df, left_on='Spkr', right_on='CORAAL.Spkr', how='left')
df = df.drop('CORAAL.Spkr', axis=1)
dfs.append(df)
df = pd.concat(dfs)
# would be better to keep it as df, but .values is way faster than .iterrows
return df.values
def process_dataset_entry(self, data_entry):
(
start_time,
end_time,
content,
speaker,
basefile,
is_interviewee,
gender,
age,
education,
occupation,
) = data_entry
src_file = str(self.raw_data_dir / 'audio' / (basefile + '.wav'))
output_wav_path = os.path.join(
self.resampled_audio_dir,
f"{basefile}_{int(start_time * 1000)}_{int(end_time * 1000)}.wav",
)
if not os.path.exists(output_wav_path):
tfm = Transformer()
tfm.trim(start_time, end_time)
tfm.rate(samplerate=self.target_samplerate)
tfm.channels(n_channels=self.target_nchannels)
tfm.build(input_filepath=src_file, output_filepath=output_wav_path)
data = {
"audio_filepath": output_wav_path,
"duration": end_time - start_time,
"text": content.strip(),
"original_file": basefile,
"speaker": speaker,
"is_interviewee": is_interviewee,
"gender": gender,
"age": age,
"education": education,
"occupation": occupation,
}
return [DataEntry(data=data)]
| NeMo-speech-data-processor-main | sdp/processors/datasets/coraal/create_initial_manifest.py |
NeMo-speech-data-processor-main | sdp/processors/datasets/slr83/__init__.py |
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from pathlib import Path
from typing import Dict, List, Optional, Tuple
import numpy as np
import sox
from tqdm import tqdm
from sdp.logging import logger
from sdp.processors.base_processor import (
BaseParallelProcessor,
BaseProcessor,
DataEntry,
)
from sdp.utils.common import download_file, extract_archive
DATASET_URL = "https://www.openslr.org/resources/83/{dialect}.zip"
AVAILABLE_DIALECTS = [
'irish_english_male',
'midlands_english_female',
'midlands_english_male',
'northern_english_female',
'northern_english_male',
'scottish_english_female',
'scottish_english_male',
'southern_english_female',
'southern_english_male',
'welsh_english_female',
'welsh_english_male',
]
EXPECTED_SPLIT_STATS = {
('irish_english_male', 'test'): (102, 604.757),
('irish_english_male', 'train'): (293, 1656.917),
('irish_english_male', 'dev'): (53, 302.763),
('midlands_english_female', 'test'): (90, 608.341),
('midlands_english_female', 'train'): (94, 636.843),
('midlands_english_female', 'dev'): (45, 306.261),
('midlands_english_male', 'test'): (106, 604.672),
('midlands_english_male', 'train'): (270, 1568.683),
('midlands_english_male', 'dev'): (52, 301.227),
('northern_english_female', 'test'): (267, 1803.435),
('northern_english_female', 'train'): (330, 2146.816),
('northern_english_female', 'dev'): (145, 906.496),
('northern_english_male', 'test'): (587, 3607.467),
('northern_english_male', 'train'): (1126, 7003.136),
('northern_english_male', 'dev'): (298, 1807.957),
('scottish_english_female', 'test'): (284, 1801.301),
('scottish_english_female', 'train'): (426, 2681.344),
('scottish_english_female', 'dev'): (142, 906.24),
('scottish_english_male', 'test'): (612, 3603.883),
('scottish_english_male', 'train'): (663, 3994.027),
('scottish_english_male', 'dev'): (306, 1800.96),
('southern_english_female', 'test'): (572, 3600.128),
('southern_english_female', 'train'): (3124, 19213.312),
('southern_english_female', 'dev'): (293, 1804.8),
('southern_english_male', 'test'): (582, 3600.555),
('southern_english_male', 'train'): (3295, 20210.773),
('southern_english_male', 'dev'): (296, 1807.445),
('welsh_english_female', 'test'): (239, 1805.739),
('welsh_english_female', 'train'): (774, 5621.675),
('welsh_english_female', 'dev'): (125, 905.387),
('welsh_english_male', 'test'): (557, 3605.931),
('welsh_english_male', 'train'): (726, 4660.651),
('welsh_english_male', 'dev'): (286, 1805.909),
}
class CreateInitialManifestSLR83(BaseParallelProcessor):
"""Processor to create initial manifest for the SLR83 dataset.
This is a dataset introduced in `Open-source Multi-speaker Corpora of the
English Accents in the British Isles <https://aclanthology.org/2020.lrec-1.804/>`_.
Args:
raw_data_dir (str): where to put raw downloaded data.
dialect (str): should be one of the
* ``irish_english_male``
* ``midlands_english_female``
* ``midlands_english_male``
* ``northern_english_female``
* ``northern_english_male``
* ``scottish_english_female``
* ``scottish_english_male``
* ``southern_english_female``
* ``southern_english_male``
* ``welsh_english_female``
* ``welsh_english_male``
Returns:
This processor generates an initial manifest file with the following fields::
{
"audio_filepath": <path to the audio file>,
"duration": <duration of the audio in seconds>,
"text": <transcription>,
}
"""
def __init__(
self,
raw_data_dir: str,
dialect: str,
**kwargs,
):
super().__init__(**kwargs)
self.raw_data_dir = Path(raw_data_dir)
self.dialect = dialect
if dialect not in AVAILABLE_DIALECTS:
raise ValueError(f"dialect has to be one of {AVAILABLE_DIALECTS}")
def prepare(self):
"""Downloading and extracting data (unless already done)."""
os.makedirs(self.raw_data_dir, exist_ok=True)
url = DATASET_URL.format(dialect=self.dialect)
if not (self.raw_data_dir / f"{self.dialect}.zip").exists():
download_file(url, str(self.raw_data_dir))
extract_archive(str(self.raw_data_dir / os.path.basename(url)), str(self.raw_data_dir))
self.transcription_file = str(self.raw_data_dir / "line_index.csv")
def read_manifest(self):
if self.transcription_file is None:
raise RuntimeError("self.process has to be called before processing the data.")
with open(self.transcription_file, "rt", encoding="utf8") as fin:
dataset_entries = fin.readlines()
return dataset_entries
def process_dataset_entry(self, data_entry: str):
split_entry = data_entry.split(", ")
if len(split_entry) != 3:
raise RuntimeError(f"Input data is badly formatted! Bad line: {data_entry}")
_, utt_id, transcript_text = split_entry
audio_path = str(self.raw_data_dir / (utt_id + ".wav"))
data = {
"audio_filepath": audio_path,
"duration": float(sox.file_info.duration(audio_path)),
"text": transcript_text.strip(),
}
return [DataEntry(data=data)]
class CustomDataSplitSLR83(BaseProcessor):
"""Splits SLR83 data into train, dev or test subset.
The original paper does not provide train/dev/test splits, so we include a
custom processing that can be used as a standardized split to compare
results. For more details on this data split see `Damage Control During
Domain Adaptation for Transducer Based Automatic Speech Recognition
<https://arxiv.org/abs/2210.03255>`_.
.. note::
All data dropping has to be done before the split. We will check the
total number of files to be what is expected in the reference split.
But if you add any custom pre-processing that changes duration or
number of files, your splits will likely be different.
Args:
dialect (str): same as in the :class:`sdp.processors.CreateInitialManifestSLR83`.
data_split (str): "train", "dev" or "test".
Returns:
All the same fields as in the input manifest, but only a subset of
the data is retained.
"""
def __init__(self, dialect, data_split, **kwargs):
super().__init__(**kwargs)
self.dialect = dialect
self.data_split = data_split
def process(self):
with open(self.input_manifest_file, "rt", encoding="utf8") as fin:
manifest_data = [json.loads(line) for line in fin.readlines()]
# sorting and fixing random seed for reproducibility
manifest_data = sorted(manifest_data, key=lambda x: x['audio_filepath'])
sample_idxs = list(range(len(manifest_data)))
rng = np.random.RandomState(0)
rng.shuffle(sample_idxs)
duration = sum([x['duration'] for x in manifest_data])
validation_duration, test_duration = 1800, 3600 # 30 minutes, 1 hour
if duration <= 3600: # 1 hour
validation_duration, test_duration = 300, 600 # 5 minutes, 10 minutes
elif duration > 3600 and duration <= 9000: # 2.5 hours
validation_duration, test_duration = 900, 1800 # 15 minutes, 30 minutes
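        # dev and test subsets are sampled first below; whatever remains in
        # sample_idxs afterwards becomes the train split, so the subsets are disjoint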
split_data = {}
split_data['dev'] = self._accumulate_samples(manifest_data, sample_idxs, validation_duration)
split_data['test'] = self._accumulate_samples(manifest_data, sample_idxs, test_duration)
split_data['train'] = (
[manifest_data[x] for x in sample_idxs],
sum([manifest_data[x]['duration'] for x in sample_idxs]),
)
for split in ['train', 'dev', 'test']:
actual_stats = (len(split_data[split][0]), round(split_data[split][1], 3))
if EXPECTED_SPLIT_STATS[(self.dialect, split)] != actual_stats:
raise RuntimeError(
f"Generated split stats (num files, duration) = {actual_stats}. "
f"But expected to see {EXPECTED_SPLIT_STATS[(self.dialect, split)]}. "
f"Did you add some custom pre-processing that changes number of files or duration?"
)
number_of_entries = 0
total_duration = 0
os.makedirs(os.path.dirname(self.output_manifest_file), exist_ok=True)
with open(self.output_manifest_file, "wt", encoding="utf8") as fout:
for data_entry in tqdm(split_data[self.data_split][0]):
json.dump(data_entry, fout, ensure_ascii=False)
number_of_entries += 1
total_duration += data_entry["duration"]
fout.write("\n")
logger.info("Total number of entries after processing: %d", number_of_entries)
logger.info("Total audio duration (hours) after processing: %.2f", total_duration / 3600)
def _accumulate_samples(
self, manifest_data: List[dict], sample_idxs: List[int], duration_threshold: int
) -> Tuple[List[dict], float]:
"""Create a subset of the manifest data having duration less than duration_threshold.
Args:
manifest_data: data for the manifest file
sample_idxs: list of available indices to pick a sample from the manifest data
duration_threshold: maximum duration of the samples to be included in the subset
Returns:
tuple: The accumulated subset of the manifest data and total accumulated duration
"""
accumulated_data = []
accumulated_duration = 0
while accumulated_duration <= duration_threshold:
sample_idx = sample_idxs.pop(0)
accumulated_data.append(manifest_data[sample_idx])
accumulated_duration += manifest_data[sample_idx]['duration']
return accumulated_data, accumulated_duration
| NeMo-speech-data-processor-main | sdp/processors/datasets/slr83/create_initial_manifest.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import string
from typing import Dict
from sdp.logging import logger
from sdp.processors.base_processor import BaseParallelProcessor, DataEntry
def is_same(orig_word, norm_word):
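    # illustrative (made-up) examples: is_same("Hello,", "hello") -> (True, 1),
    # is_same("ciao", "2017") -> (False, None)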
# word is the same, except last symbol, which could indicate punctuation
if orig_word[-1] in string.punctuation and orig_word[:-1].lower() == norm_word.lower():
return True, 1
# word is the same, except last symbol, which could indicate punctuation
# (but by mistake it's been put in norm text)
if norm_word[-1] in string.punctuation and norm_word[:-1].lower() == orig_word.lower():
return True, 0
# word is the same, but casing could be different
if orig_word.lower() == norm_word.lower():
return True, 1
return False, None
def restore_pc(orig_words, norm_words):
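    # illustrative example with made-up text: for
    #   orig_words = "Nel 2017, piove."  and  norm_words = "nel duemiladiciassette piove"
    # this function is expected to return "Nel duemiladiciassette, piove."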
# separate out any "¿" so they have a space either side
orig_words = orig_words.replace("¿", " ¿ ")
orig_words = " ".join(orig_words.split())
norm_words = norm_words.replace("¿", " ¿ ")
norm_words = " ".join(norm_words.split())
orig_words_list = orig_words.split()
norm_words_list = norm_words.split()
# copy so not to corrupt
# merging any commas and dots between numbers right away to simplify logic below
orig_text = list([re.sub(r'(\d)[\.,](\d)', r"\1\2", word) for word in orig_words_list])
norm_text = list(norm_words_list)
# to simplify logic below, so that we can assume last word always matches
orig_text.append("end_text")
norm_text.append("end_text")
idx_orig = 0
idx_norm = 0
merged_text = []
while idx_orig < len(orig_text) and idx_norm < len(norm_text):
same, is_orig = is_same(orig_text[idx_orig], norm_text[idx_norm])
if same:
merged_text.append(orig_text[idx_orig] if is_orig else norm_text[idx_norm])
idx_orig += 1
idx_norm += 1
continue
# add all "¿" 'words' in orig_text (which didnt have match in norm_text) to merged_text
if orig_text[idx_orig] == "¿":
merged_text.append("¿")
idx_orig += 1
continue
# checking if first letter is a number, but the whole word is not - that happens
# on typos like 37a which should really be 37 a. So fixing those
# another case is for number + punctuation, like 2017, - handling separately
# another case is for numbers separated by comma, like this "1,5". Those are spelled out
# separately in normalized form, so just removing the comma here
add_punct = ""
if orig_text[idx_orig][0].isdigit() and not orig_text[idx_orig].isdigit():
            number, word = re.split(r'(\d+)', orig_text[idx_orig])[1:]
orig_text[idx_orig] = number
if word in string.punctuation:
add_punct = word
else:
orig_text.insert(idx_orig + 1, word)
# another annoying case is if typo ends with number like here "dell'11"
# same logic, but need to go back to the first check, so doing "continue" below
if orig_text[idx_orig][-1].isdigit() and not orig_text[idx_orig].isdigit():
            word, number = re.split(r'(\d+)', orig_text[idx_orig])[:-1]
orig_text[idx_orig] = word
orig_text.insert(idx_orig + 1, number)
continue
# word is different, but original is a number - take from normalized in this case until
# get same word again (as number might be represented with multiple words)
# also handling case for number + punctuation
while orig_text[idx_orig].isdigit():
idx_orig += 1
while idx_norm < len(norm_text) and not is_same(orig_text[idx_orig], norm_text[idx_norm])[0]:
merged_text.append(norm_text[idx_norm])
idx_norm += 1
# if there is any trailing punctuation from last digit, let's add it
merged_text[-1] = merged_text[-1] + add_punct
if idx_norm != len(norm_text):
print(idx_orig, idx_norm, len(orig_text), len(norm_text), orig_text, norm_text, merged_text)
raise RuntimeError("Something went wrong during merging")
# merging all "¿ " to the next word and removing end_text token
norm_text = " ".join(merged_text[:-1]).replace("¿ ", "¿")
return norm_text
class NormalizeFromNonPCTextVoxpopuli(BaseParallelProcessor):
"""Tries to restore punctuation and capitalization from the un-normalized text version.
VoxPopuli contains two versions of the transcription - original (non-normalized,
but with punctuation and capitalization) and normalized (without punctuation or capitalization),
but with digits and other forms normalized. This processor can be used
to map the normalized and non-normalized versions and produce a normalized
version with restored punctuation and capitalization.
.. note::
The current map logic is highly heuristical and might not work for all
languages. The processor will return ``n/a`` for any text it was not able
to restore, so make sure you check how much data was removed and
consider updating the heuristics to retain more data.
Args:
restored_text_field (str): the field where the recovered text (or ``n/a``)
will be stored. Defaults to "text".
raw_text_key (str): which field contains the original text without normalization.
Defaults to "raw_text".
norm_text_key (str): which field contains the normalized text.
Defaults to "provided_norm_text".
Returns:
All the same data as in the input manifest with an additional key::
            <restored_text_field>: <restored text, or n/a if the mapping failed>
"""
def __init__(
self,
restored_text_field: str = "text",
raw_text_key: str = "raw_text",
norm_text_key: str = "provided_norm_text",
**kwargs,
):
super().__init__(**kwargs)
self.restored_text_field = restored_text_field
self.raw_text_key = raw_text_key
self.norm_text_key = norm_text_key
def process_dataset_entry(self, data_entry: Dict):
try:
restored_norm_text = restore_pc(data_entry[self.raw_text_key], data_entry[self.norm_text_key])
        except Exception:
logger.warning(
f"Failed to restore normalization.\nRaw text: %s\nNormalized text: %s",
data_entry[self.raw_text_key],
data_entry[self.norm_text_key],
)
restored_norm_text = "n/a"
data_entry[self.restored_text_field] = restored_norm_text
return [DataEntry(data=data_entry)]
| NeMo-speech-data-processor-main | sdp/processors/datasets/voxpopuli/normalize_from_non_pc_text.py |
NeMo-speech-data-processor-main | sdp/processors/datasets/voxpopuli/__init__.py |
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import subprocess
from pathlib import Path
import sox
from sox import Transformer
from sdp.logging import logger
from sdp.processors.base_processor import BaseParallelProcessor, DataEntry
VOXPOPULI_URL = "https://github.com/facebookresearch/voxpopuli"
class CreateInitialManifestVoxpopuli(BaseParallelProcessor):
"""Processor to create initial manifest for the VoxPopuli dataset.
Dataset link: https://github.com/facebookresearch/voxpopuli/
Downloads and unzips raw VoxPopuli data for the specified language,
and creates an initial manifest using the transcripts provided in the
raw data.
.. note::
This processor will install a couple of Python packages, including
PyTorch, so it might be a good idea to run it in an isolated Python
environment.
Args:
raw_data_dir (str): the directory where the downloaded data will be/is saved.
language_id (str): the language of the data you wish to be downloaded.
E.g., "en", "es", "it", etc.
data_split (str): "train", "dev" or "test".
resampled_audio_dir (str): the directory where the resampled wav
files will be stored.
target_samplerate (int): sample rate (Hz) to use for resampling.
Defaults to 16000.
target_nchannels (int): number of channels to create during resampling process.
Defaults to 1.
Returns:
This processor generates an initial manifest file with the following fields::
{
"audio_filepath": <path to the audio file>,
"duration": <duration of the audio in seconds>,
"text": <transcription (with provided normalization)>,
"raw_text": <original transcription (without normalization)>,
"speaker_id": <speaker id>,
"gender": <speaker gender>,
"age": <speaker age>,
"is_gold_transcript": <whether the transcript has been verified>,
"accent": <speaker accent, if known>,
}
"""
def __init__(
self,
raw_data_dir: str,
language_id: str,
data_split: str,
resampled_audio_dir: str,
target_samplerate: int = 16000,
target_nchannels: int = 1,
**kwargs,
):
super().__init__(**kwargs)
self.raw_data_dir = Path(raw_data_dir)
self.language_id = language_id
self.data_split = data_split
self.resampled_audio_dir = resampled_audio_dir
self.target_samplerate = target_samplerate
self.target_nchannels = target_nchannels
def prepare(self):
"""Downloading data (unless already done)"""
os.makedirs(self.raw_data_dir, exist_ok=True)
if not (self.raw_data_dir / "transcribed_data").exists():
# TODO: some kind of isolated environment?
if not os.path.exists(self.raw_data_dir / 'voxpopuli'):
logger.info("Downloading voxpopuli and installing requirements")
subprocess.run(f"git clone {VOXPOPULI_URL} {self.raw_data_dir / 'voxpopuli'}", check=True, shell=True)
subprocess.run(
f"pip install -r {self.raw_data_dir / 'voxpopuli' / 'requirements.txt'}", check=True, shell=True
)
if not os.path.exists(self.raw_data_dir / 'raw_audios'):
logger.info("Downloading raw audios")
subprocess.run(
f"cd {self.raw_data_dir / 'voxpopuli'} && "
f"python -m voxpopuli.download_audios --root {self.raw_data_dir} --subset asr",
check=True,
shell=True,
)
if not os.path.exists(self.raw_data_dir / 'transcribed_data' / self.language_id):
logger.info("Segmenting and transcribing the data")
subprocess.run(
f"cd {self.raw_data_dir / 'voxpopuli'} && "
f"python -m voxpopuli.get_asr_data --root {self.raw_data_dir} --lang {self.language_id}",
check=True,
shell=True,
)
def read_manifest(self):
with open(
self.raw_data_dir / "transcribed_data" / self.language_id / f"asr_{self.data_split}.tsv",
"rt",
encoding="utf8",
) as fin:
dataset_entries = fin.readlines()[1:] # skip header line
return dataset_entries
def process_dataset_entry(self, data_entry: str):
if len(data_entry.split("\t")) != 8:
raise RuntimeError(f"have more/less than 7 tabs in line {data_entry}")
utt_id, raw_text, norm_text, spk_id, _, gender, is_gold_transcript, accent = data_entry.split("\t")
year = utt_id[:4]
src_audio_path = os.path.join(self.raw_data_dir, "transcribed_data", self.language_id, year, utt_id + ".ogg")
tgt_wav_path = os.path.join(self.resampled_audio_dir, utt_id + ".wav")
if not os.path.exists(os.path.dirname(tgt_wav_path)):
os.makedirs(os.path.dirname(tgt_wav_path), exist_ok=True)
if not os.path.exists(tgt_wav_path):
tfm = Transformer()
tfm.rate(samplerate=self.target_samplerate)
tfm.channels(n_channels=self.target_nchannels)
tfm.build(input_filepath=src_audio_path, output_filepath=tgt_wav_path)
data = {
"audio_filepath": tgt_wav_path,
"duration": float(sox.file_info.duration(tgt_wav_path)),
"text": norm_text,
"raw_text": raw_text,
"speaker_id": spk_id,
"gender": gender,
"is_gold_transcript": is_gold_transcript,
"accent": accent,
}
return [DataEntry(data=data)]
| NeMo-speech-data-processor-main | sdp/processors/datasets/voxpopuli/create_initial_manifest.py |
NeMo-speech-data-processor-main | sdp/processors/datasets/mcv/__init__.py |
|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# To convert mp3 files to wav using sox, you must have installed sox with mp3 support
# For example sudo apt-get install libsox-fmt-mp3
import csv
import glob
import os
from pathlib import Path
from typing import Tuple
import sox
from sox import Transformer
from tqdm.contrib.concurrent import process_map
from sdp.logging import logger
from sdp.processors.base_processor import BaseParallelProcessor, DataEntry
from sdp.utils.common import extract_archive
class CreateInitialManifestMCV(BaseParallelProcessor):
"""Processor to create initial manifest for the Mozilla Common Voice (MCV) dataset.
Dataset link: https://commonvoice.mozilla.org/
Extracts raw MCV data for the specified language and creates an initial manifest
using the transcripts provided in the raw data.
Args:
raw_data_dir (str): the path to the directory containing the raw data archive file.
Needs to be manually downloaded from https://commonvoice.mozilla.org/.
extract_archive_dir (str): directory where the extracted data will be saved.
resampled_audio_dir (str): directory where the resampled audio will be saved.
data_split (str): "train", "dev" or "test".
language_id (str): the ID of the language of the data. E.g., "en", "es", "it", etc.
already_extracted (bool): if True, we will not try to extract the raw data.
Defaults to False.
target_samplerate (int): sample rate (Hz) to use for resampling.
Defaults to 16000.
target_nchannels (int): number of channels to create during resampling process.
Defaults to 1.
Returns:
This processor generates an initial manifest file with the following fields::
{
"audio_filepath": <path to the audio file>,
"duration": <duration of the audio in seconds>,
"text": <transcription (with capitalization and punctuation)>,
}
"""
def __init__(
self,
raw_data_dir: str,
extract_archive_dir: str,
resampled_audio_dir: str,
data_split: str,
language_id: str,
already_extracted: bool = False,
target_samplerate: int = 16000,
target_nchannels: int = 1,
**kwargs,
):
super().__init__(**kwargs)
self.raw_data_dir = Path(raw_data_dir)
self.extract_archive_dir = extract_archive_dir
self.resampled_audio_dir = resampled_audio_dir
self.data_split = data_split
self.language_id = language_id
self.already_extracted = already_extracted
self.target_samplerate = target_samplerate
self.target_nchannels = target_nchannels
def prepare(self):
"""Extracting data (unless already done)."""
os.makedirs(self.raw_data_dir, exist_ok=True)
if not self.already_extracted:
tar_gz_files = glob.glob(str(self.raw_data_dir) + f"/*{self.language_id}.tar.gz")
if not tar_gz_files:
raise RuntimeError(
f"Did not find any file matching {self.raw_data_dir}/*.tar.gz. "
"For MCV dataset we cannot automatically download the data, so "
"make sure to get the data from https://commonvoice.mozilla.org/ "
"and put it in the 'raw_data_dir' folder."
)
elif len(tar_gz_files) > 1:
raise RuntimeError(
f"Expecting exactly one *{self.language_id}.tar.gz file in directory {self.raw_data_dir}"
)
data_folder = extract_archive(tar_gz_files[0], self.extract_archive_dir)
self.transcription_file = Path(data_folder)
else:
self.transcription_file = Path(self.extract_archive_dir) / self.language_id
self.audio_path_prefix = str(self.transcription_file / "clips")
self.transcription_file = str(self.transcription_file / (self.data_split + ".tsv"))
os.makedirs(self.resampled_audio_dir, exist_ok=True)
def read_manifest(self):
if self.transcription_file is None:
raise RuntimeError("self.process has to be called before processing the data.")
with open(self.transcription_file, "rt", encoding="utf8") as csvfile:
reader = csv.DictReader(csvfile, delimiter="\t")
# note: csv.DictReader already consumes the header row to build its field names, so no extra skip is needed
dataset_entries = [(row["path"], row["sentence"]) for row in reader]
return dataset_entries
def process_dataset_entry(self, data_entry: Tuple[str, str]):
file_path, text = data_entry
file_name = os.path.splitext(os.path.basename(file_path))[0]
transcript_text = text.strip()
audio_path = os.path.join(self.audio_path_prefix, file_path)
output_wav_path = os.path.join(self.resampled_audio_dir, file_name + ".wav")
if not os.path.exists(output_wav_path):
tfm = Transformer()
tfm.rate(samplerate=self.target_samplerate)
tfm.channels(n_channels=self.target_nchannels)
tfm.build(input_filepath=audio_path, output_filepath=output_wav_path)
data = {
"audio_filepath": output_wav_path,
"duration": float(sox.file_info.duration(output_wav_path)),
"text": transcript_text,
}
return [DataEntry(data=data)]
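# Illustrative sketch (all values invented): for a TSV row with path "common_voice_en_123.mp3"
# and sentence "Hello world.", process_dataset_entry resamples <clips>/common_voice_en_123.mp3
# into <resampled_audio_dir>/common_voice_en_123.wav (16 kHz mono by default) and returns a
# manifest entry roughly like
# {"audio_filepath": ".../common_voice_en_123.wav", "duration": 2.5, "text": "Hello world."}.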
| NeMo-speech-data-processor-main | sdp/processors/datasets/mcv/create_initial_manifest.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import json
import os
import re
import string
import sys
from glob import glob
from pathlib import Path
from typing import Optional
import regex
from joblib import Parallel, delayed
from tqdm import tqdm
from sdp.logging import logger
from sdp.processors.base_processor import BaseProcessor
from sdp.utils.common import download_file, extract_archive
sys.setrecursionlimit(1000000)
NA = "n/a"
MLS_TEXT_URL = "https://dl.fbaipublicfiles.com/mls/lv_text.tar.gz"
def abbreviations(text):
text = (
text.replace("Cap'n", "Captain")
.replace("cap'n", "captain")
.replace("o'shot", "o shot")
.replace("o' shot", "o shot")
.replace("on'y", "only")
.replace("on' y", "only")
.replace(" 'a ", " a ")
.replace(" 'em ", " em ")
.replace("gen'leman", "gentleman")
)
return text
def process(text):
text = (
text.replace("www.gutenberg.org", "www dot gutenberg dot org")
.replace(".txt", "dot txt")
.replace(".zip", "dot zip")
)
text = (
text.replace("’", "'")
.replace("_", " ")
.replace("\n", " ")
.replace("\t", " ")
.replace("…", "...")
.replace("»", '"')
.replace("«", '"')
.replace("\\", "")
.replace("”", '"')
.replace("„", '"')
.replace("´", "'")
.replace("-- --", "--")
.replace("--", " -- ")
.replace(". . .", "...")
.replace("’", "'")
.replace("“", '"')
.replace("“", '"')
.replace("‘", "'")
.replace("_", " ")
.replace("*", " ")
.replace("—", "-")
.replace("- -", "--")
.replace("•", " ")
.replace("^", " ")
.replace(">", " ")
.replace("■", " ")
.replace("/", " ")
.replace("––––", "...")
.replace("W⸺", "W")
.replace("`", "'")
.replace("<", " ")
.replace("{", " ")
.replace("Good-night", "Good night")
.replace("good-night", "good night")
.replace("good-bye", "goodbye")
.replace("Good-bye", "Goodbye")
.replace(" !", "!")
.replace(" ?", "?")
.replace(" ,", ",")
.replace(" .", ".")
.replace(" ;", ";")
.replace(" :", ":")
.replace("!!", "!")
.replace("--", "-")
.replace("“", '"')
.replace(", , ", ", ")
.replace("=", " ")
.replace("l,000", "1,000")
.replace("–", "-")
)
# remove dash in between the words
text = re.sub(r"([A-Za-z0-9]+)(-)([A-Za-z0-9]+)", r"\g<1> \g<3>", text)
text = re.sub(r"([A-Za-z0-9]+)(\.)([A-Za-z]+)", r"\g<1>\g<2> \g<3>", text)
text = re.sub(r"([A-Za-z]+)(\.)([A-Za-z0-9]+)", r"\g<1>\g<2> \g<3>", text)
# # remove text inside square brackets
# text = re.sub(r"(\[.*?\])", " ", text)
def __fix_space(text):
# remove commas between digits
text = re.sub(r"([0-9]+)(,)(\d\d\d)", r"\g<1>\g<3>", text)
text = re.sub(r"([A-Za-z]+)(,)([A-Za-z0-9]+)", r"\g<1>\g<2> \g<3>", text)
return text
for _ in range(3):
text = __fix_space(text)
text = re.sub(r" +", " ", text)
# make sure the text starts with an alpha
start_idx = 0
while not text[start_idx].isalpha():
start_idx += 1
end_text = "END OF THIS PROJECT GUTENBERG"
end_idx = len(text)
if end_text in text:
end_idx = text.find(end_text)
end_text = "End of the Project Gutenberg"
if end_text in text:
end_idx = text.find(end_text)
return text[start_idx:end_idx]
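# Rough usage sketch (hand-traced, so treat the exact output as approximate):
#   process("Good-bye, _world_…")  ->  "Goodbye, world..."
# i.e. underscores become spaces, the ellipsis character is expanded, "Good-bye" is rewritten
# to "Goodbye" and repeated spaces are collapsed.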
def read_text(text_f):
with open(text_f, "r") as f:
text = f.read()
return text
def remove_punctuation(text: str, remove_spaces=True, do_lower=True, exclude=None, remove_accents=False):
all_punct_marks = string.punctuation + "¿¡⸘"
if exclude is not None:
for p in exclude:
all_punct_marks = all_punct_marks.replace(p, "")
# a weird bug where commas get deleted when a dash is present in the list of punct marks
all_punct_marks = all_punct_marks.replace("-", "")
text = re.sub("[" + all_punct_marks + "]", " ", text)
if exclude and "-" not in exclude:
text = text.replace("-", " ")
text = re.sub(r" +", " ", text)
if remove_spaces:
text = text.replace(" ", "").replace("\u00A0", "").strip()
if do_lower:
text = text.lower()
if remove_accents:
text = text.replace("á", "a")
text = text.replace("é", "e")
text = text.replace("í", "i")
text = text.replace("ó", "o")
text = text.replace("ú", "u")
text = text.replace("à", "a")
text = text.replace("è", "e")
text = text.replace("ù", "u")
text = text.replace("â", "a")
text = text.replace("ê", "e")
text = text.replace("î", "i")
text = text.replace("ô", "o")
text = text.replace("û", "u")
return text.strip()
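# Usage sketch:
#   remove_punctuation("Hello, World!")                       -> "helloworld"
#   remove_punctuation("Hello, World!", remove_spaces=False)  -> "hello world"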
def recover_lines(manifest, processed_text, output_dir, restored_text_field):
manifest_recovered = f"{output_dir}/{os.path.basename(manifest)}"
if os.path.exists(manifest_recovered):
return
lines = []
with open(manifest, "r") as f:
for line in f:
line = json.loads(line)
lines.append(line["text"])
logger.debug(f"processing {manifest}")
logger.debug(f"processing - {len(lines)} lines")
last_found_start_idx = 0
recovered_lines = {}
for idx, cur_line in enumerate(lines):
stop_search_for_line = False
cur_word_idx = 0
cur_line = abbreviations(cur_line)
cur_line = cur_line.split()
end_match_found = False
while not stop_search_for_line:
cur_word = cur_line[cur_word_idx]
pattern = cur_word
max_start_match_len = min(4, len(cur_line))
for i in range(1, max_start_match_len):
pattern += f"[^A-Za-z]+{cur_line[i]}"
pattern = re.compile(pattern)
for i, m in enumerate(pattern.finditer(processed_text[last_found_start_idx:].lower())):
if end_match_found:
break
match_idx = m.start() + last_found_start_idx
processed_text_list = processed_text[match_idx:].split()
raw_text_pointer = (
len(cur_line) - 3
)  # start a few words early in case dash-separated words were split into multiple words in cur_line
stop_end_search = False
right_offset = 20
while not end_match_found and raw_text_pointer <= len(processed_text_list) and not stop_end_search:
if cur_line[-1].replace("'", "") == remove_punctuation(
processed_text_list[raw_text_pointer - 1],
remove_spaces=True,
do_lower=True,
remove_accents=False,
):
# processed text could contain apostrophes that are parts of quotes, let's remove them from the processed text as well
if "'" not in cur_line[-1] and "'" in processed_text_list[raw_text_pointer - 1]:
processed_text_list[raw_text_pointer - 1] = processed_text_list[
raw_text_pointer - 1
].replace("'", "")
recovered_line = " ".join(processed_text_list[:raw_text_pointer])
if not is_valid(" ".join(cur_line), recovered_line):
raw_text_pointer += 1
else:
recovered_lines[idx] = recovered_line
end_match_found = True
raw_text_pointer += 1
stop_search_for_line = True
last_found_start_idx = raw_text_pointer
else:
raw_text_pointer += 1
if raw_text_pointer > (len(cur_line) + right_offset):
stop_end_search = True
if not end_match_found:
stop_search_for_line = True
logger.debug(
f"recovered {len(recovered_lines)} lines out of {len(lines)} -- {round(len(recovered_lines)/len(lines)*100, 2)}% -- {os.path.basename(manifest)}"
)
with open(manifest_recovered, "w") as f_out, open(manifest, "r") as f_in:
for idx, line in enumerate(f_in):
line = json.loads(line)
if idx in recovered_lines:
line[restored_text_field] = recovered_lines[idx]
else:
line[restored_text_field] = NA
f_out.write(json.dumps(line, ensure_ascii=False) + "\n")
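# Informal illustration of the idea (hypothetical data): if a manifest line contains the
# normalized text "hello world how are you" and the processed book text contains
# "... He said: Hello, world! How are you? Then ...", the regex anchored on the first few
# words ("hello[^A-Za-z]+world[^A-Za-z]+how...") locates the candidate span, the pointer is
# then extended until the last word matches, and is_valid() confirms that, ignoring case and
# punctuation, the recovered span "Hello, world! How are you?" equals the manifest line, so
# it is stored under restored_text_field.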
def split_text_into_sentences(text: str):
"""
Split text into sentences.
Args:
text: text
Returns list of sentences
"""
# TODO: should this be filled up and exposed as a parameter?
lower_case_unicode = ""
upper_case_unicode = ""
# end of quoted speech - to be able to split sentences by full stop
text = re.sub(r"([\.\?\!])([\"\'])", r"\g<2>\g<1> ", text)
# remove extra space
text = re.sub(r" +", " ", text)
# remove space in the middle of the lower case abbreviation to avoid splitting into separate sentences
matches = re.findall(rf"[a-z{lower_case_unicode}]\.\s[a-z{lower_case_unicode}]\.", text)
for match in matches:
text = text.replace(match, match.replace(". ", "."))
# Read and split transcript by utterance (roughly, sentences)
split_pattern = (
rf"(?<!\w\.\w.)(?<![A-Z{upper_case_unicode}][a-z{lower_case_unicode}]+\.)"
rf"(?<![A-Z{upper_case_unicode}]\.)(?<=\.|\?|\!|\.”|\?”\!”)\s(?![0-9]+[a-z]*\.)"
)
sentences = regex.split(split_pattern, text)
return sentences
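# Usage sketch:
#   split_text_into_sentences("Dr. Smith arrived. He sat down.")
#   -> ["Dr. Smith arrived.", "He sat down."]
# (the lookbehinds keep abbreviations such as "Dr." from triggering a split)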
def normalize_text(text_f: str, normalizer: Optional['Normalizer'] = None):
"""
Pre-process and normalized text_f file.
Args:
text_f: path to .txt file to normalize
normalizer:
"""
raw_text = read_text(text_f)
processed_text = abbreviations(process(raw_text))
if normalizer is not None:
processed_text_list = normalizer.split_text_into_sentences(processed_text)
else:
processed_text_list = split_text_into_sentences(processed_text)
processed_text_list_merged = []
last_segment = ""
max_len = 7500
for i, text in enumerate(processed_text_list):
if len(last_segment) < max_len:
last_segment += " " + text
else:
processed_text_list_merged.append(last_segment.strip())
last_segment = ""
if i == len(processed_text_list) - 1 and len(last_segment) > 0:
processed_text_list_merged.append(last_segment.strip())
for i, text in enumerate(tqdm(processed_text_list_merged)):
if normalizer is not None:
processed_text_list_merged[i] = normalizer.normalize(
text=text, punct_post_process=True, punct_pre_process=True
)
else:
processed_text_list_merged[i] = re.sub(r"\d", r"", processed_text_list_merged[i])
processed_text = " ".join(processed_text_list_merged)
return processed_text
import diff_match_patch as dmp_module
dmp = dmp_module.diff_match_patch()
dmp.Diff_Timeout = 0
def is_valid(line, recovered_line):
"""Checks that the restore line matches the original line in everything but casing and punctuation marks"""
line = abbreviations(line)
line_no_punc = remove_punctuation(line, remove_spaces=True, do_lower=True, remove_accents=True)
recovered_line_no_punc = remove_punctuation(recovered_line, remove_spaces=True, do_lower=True, remove_accents=True)
is_same = line_no_punc == recovered_line_no_punc
return is_same
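# Usage sketch:
#   is_valid("hello world", "Hello, world!")  -> True   (differs only in case/punctuation)
#   is_valid("hello world", "Hello, word!")   -> False  (an actual word differs)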
def process_book(book_manifest, texts_dir, submanifests_dir, output_dir, restored_text_field, normalizer):
book_id = os.path.basename(book_manifest).split(".")[0]
text_f = f"{texts_dir}/{book_id}.txt"
manifests = glob(f"{submanifests_dir}/{book_id}_*.json")
logger.info(f"{book_id} -- {len(manifests)} manifests")
# only continue (i.e. do not make early 'return') if there are {book_id}_{spk_id}.json files in submanifests_dir
# that are not in output dir - else return early
for book_id_spk_id in [os.path.basename(x).strip(".json") for x in manifests]:
if not os.path.exists(os.path.join(output_dir, f"{book_id_spk_id}.json")):
logger.info(f"Did not find {book_id_spk_id} in {output_dir} => will process this book")
break
else:
return
try:
processed_text = normalize_text(text_f, normalizer)
# re-run abbreviations since new are being added
processed_text = abbreviations(processed_text)
[
recover_lines(
manifest=manifest,
processed_text=processed_text,
output_dir=output_dir,
restored_text_field=restored_text_field,
)
for manifest in manifests
]
except Exception:
logger.info(f"{text_f} failed")
return
class RestorePCForMLS(BaseProcessor):
"""Recovers original text from the MLS Librivox texts.
This processor can be used to restore punctuation and capitalization for the
MLS data. Uses the original data in https://dl.fbaipublicfiles.com/mls/lv_text.tar.gz.
Saves recovered text in ``restored_text_field`` field.
If text was not recovered, ``restored_text_field`` will be equal to ``n/a``.
Args:
language_long (str): the full name of the language, used for
choosing the folder of the contents of
"https://dl.fbaipublicfiles.com/mls/lv_text.tar.gz".
E.g., "english", "spanish", "italian", etc.
language_short (str or None): the short name of the language, used for
specifying the normalizer we want to use. E.g., "en", "es", "it", etc.
If set to None, we will not try to normalize the provided Librivox text.
lv_text_dir (str): the directory where the contents of
https://dl.fbaipublicfiles.com/mls/lv_text.tar.gz will be saved.
submanifests_dir (str): the directory where submanifests (one for each
combo of speaker + book) will be stored.
restored_submanifests_dir (str): the directory where restored
submanifests (one for each combo of speaker + book) will be stored.
restored_text_field (str): the field where the recovered text will be stored.
n_jobs (int): number of jobs to use for parallel processing. Defaults to -1.
show_conversion_breakdown (bool): whether to show how much of each
submanifest was restored. Defaults to True.
Returns:
All the same data as in the input manifest with an additional key::
<restored_text_field>: <restored text or n/a if match was not found>``
"""
def __init__(
self,
language_long: str,
language_short: Optional[str],
lv_text_dir: str,
submanifests_dir: str,
restored_submanifests_dir: str,
restored_text_field: str,
n_jobs: int = -1,
show_conversion_breakdown: bool = True,
**kwargs,
):
super().__init__(**kwargs)
self.language_long = language_long
self.language_short = language_short
self.lv_text_dir = Path(lv_text_dir)
self.submanifests_dir = Path(submanifests_dir)
self.restored_submanifests_dir = Path(restored_submanifests_dir)
self.restored_text_field = restored_text_field
self.n_jobs = n_jobs
self.show_conversion_breakdown = show_conversion_breakdown
def process(self):
"""Main processing happens here.
* Download & extract lv_text.
* Create submanifests.
* Restore P&C to submanifests.
* Group back submanifests into a single manifest
"""
from nemo_text_processing.text_normalization.normalize import Normalizer
os.makedirs(self.lv_text_dir, exist_ok=True)
# Download & extract lv_text.
download_file(MLS_TEXT_URL, str(self.lv_text_dir))
lv_text_data_folder = extract_archive(
str(self.lv_text_dir / os.path.basename(MLS_TEXT_URL)), str(self.lv_text_dir)
)
# Create submanifests
os.makedirs(self.submanifests_dir, exist_ok=True)
data = {}
with open(self.input_manifest_file, "r") as f:
for line in tqdm(f):
item = json.loads(line)
name = Path(item["audio_filepath"]).stem
reader_id, lv_book_id, sample_id = name.split("_")
key = f"{lv_book_id}_{reader_id}"
if key not in data:
data[key] = {}
data[key][sample_id] = line
for key, v in data.items():
with open(f"{self.submanifests_dir}/{key}.json", "w") as f_out:
for sample_id in sorted(v.keys()):
line = v[sample_id]
f_out.write(line)
# Restore P&C to submanifests.
os.makedirs(str(self.restored_submanifests_dir), exist_ok=True)
if self.language_short:
try:
normalizer = Normalizer(
input_case="cased",
lang=self.language_short,
cache_dir="CACHE_DIR",
overwrite_cache=False,
post_process=True,
)
except NotImplementedError: # some languages don't support text normalization
logger.info(
f"Could not find NeMo Normalizer for language {self.language_short}, so"
" will not normalize the Librivox text before attempting to restore punctuation"
" and capitalization."
)
normalizer = None
else:
logger.info(
f"`language_short` was not specified, so will not normalize the Librivox"
" text before attempting to restore punctuation and capitalization."
)
normalizer = None
# TODO: rename to maybe books_ids_in_datasplit
books_ids_in_submanifests = set([x.split("_")[0] for x in data.keys()])
Parallel(n_jobs=self.n_jobs)(
delayed(process_book)(
book_id,
str(Path(lv_text_data_folder) / self.language_long),
str(self.submanifests_dir),
str(self.restored_submanifests_dir),
self.restored_text_field,
normalizer,
)
for book_id in tqdm(books_ids_in_submanifests)
)
# get stats --- keep track of book/spk ids in our datasplit
book_id_spk_ids_in_datasplit = set() # set of tuples (book_id, spk_id), ...
original_manifest_duration = 0
with open(self.input_manifest_file, "r") as f:
for line in f:
line = json.loads(line)
book_id, spk_id = Path(line["audio_filepath"]).stem.split("_")[:2]
book_id_spk_ids_in_datasplit.add((book_id, spk_id))
original_manifest_duration += line["duration"]
logger.info(
f"duration ORIGINAL total (for current datasplit): {round(original_manifest_duration / 60 / 60, 2)} hrs"
)
# make dicts to record durations of manifests
filename_to_sub_manifest_durs = collections.defaultdict(float)
filename_to_restored_sub_manifest_durs = collections.defaultdict(float)
# duration in submanifests
for book_id, spk_id in book_id_spk_ids_in_datasplit:
manifest = os.path.join(self.submanifests_dir, f"{spk_id}_{book_id}.json")
with open(manifest, "r") as f:
for line in f:
line = json.loads(line)
filename_to_sub_manifest_durs[f"{spk_id}_{book_id}.json"] += line["duration"]
# duration in restored_submanifests
for book_id, spk_id in book_id_spk_ids_in_datasplit:
manifest = os.path.join(self.restored_submanifests_dir, f"{spk_id}_{book_id}.json")
if os.path.exists(manifest):
with open(manifest, "r") as f:
for line in f:
line = json.loads(line)
if line[self.restored_text_field] != NA:
filename_to_restored_sub_manifest_durs[f"{spk_id}_{book_id}.json"] += line["duration"]
else:
filename_to_restored_sub_manifest_durs[f"{spk_id}_{book_id}.json"] = 0
if self.show_conversion_breakdown:
for filename in filename_to_sub_manifest_durs.keys():
orig_dur = filename_to_sub_manifest_durs[filename]
restored_dur = filename_to_restored_sub_manifest_durs[filename]
pc_restored = 100 * restored_dur / orig_dur
logger.info(
f"{filename}: {orig_dur/60:.2f} mins -> {restored_dur/60:.2f} mins\t({pc_restored:.2f}% restored)"
)
sub_manifest_duration = sum(list(filename_to_sub_manifest_durs.values()))
restored_manifest_duration = sum(list(filename_to_restored_sub_manifest_durs.values()))
logger.info("duration in submanifests (for current datasplit): %.2f hrs", sub_manifest_duration / 60 / 60)
logger.info(
"duration restored (for current datasplit): %.2f hrs (%.2f%%), lost: %.2f hrs",
restored_manifest_duration / 60 / 60,
restored_manifest_duration / sub_manifest_duration * 100,
(sub_manifest_duration - restored_manifest_duration) / 60 / 60,
)
logger.info(
"Combining restored manifest for current datasplit into single manifest at %s", self.output_manifest_file
)
# duration in restored_submanifests
with open(self.output_manifest_file, 'w') as fout:
for book_id, spk_id in book_id_spk_ids_in_datasplit:
manifest = os.path.join(self.restored_submanifests_dir, f"{spk_id}_{book_id}.json")
if os.path.exists(manifest):
with open(manifest, "r") as fin:
for line in fin:
fout.write(line)
| NeMo-speech-data-processor-main | sdp/processors/datasets/mls/restore_pc.py |
| NeMo-speech-data-processor-main | sdp/processors/datasets/mls/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from pathlib import Path
from typing import Optional
import librosa
from sox import Transformer
from sdp.logging import logger
from sdp.processors.base_processor import BaseParallelProcessor, DataEntry
from sdp.utils.common import download_file, extract_archive
MLS_URL_NO_OPUS = "https://dl.fbaipublicfiles.com/mls/mls_{language}.tar.gz"
MLS_URL_OPUS = "https://dl.fbaipublicfiles.com/mls/mls_{language}_opus.tar.gz"
class CreateInitialManifestMLS(BaseParallelProcessor):
"""Processor to create initial manifest for the Multilingual LibriSpeech (MLS) dataset.
Dataset link: https://www.openslr.org/94/
Downloads and unzips raw MLS data for the specified language,
and creates an initial manifest using the transcripts provided in the raw data.
Args:
raw_data_dir (str): the directory where the downloaded data will be/is saved.
This is also where the extracted and processed data will be.
language (str): the language of the data you wish to be downloaded.
This will be used to format the URL from which we attempt to download the data.
E.g., "english", "italian", "spanish", etc.
data_split (str): "train", "dev" or "test".
resampled_audio_dir (str or None): if specified, the directory where the resampled
wav files will be stored. If not specified, the audio will not be resampled and
the parameters ``target_samplerate`` and ``target_nchannels`` will be ignored.
target_samplerate (int): sample rate (Hz) to use for resampling. This parameter will
be ignored if ``resampled_audio_dir`` is ``None``.
Defaults to 16000.
target_nchannels (int): number of channels to create during resampling process. This
parameter will be ignored if ``resampled_audio_dir`` is ``None``.
Defaults to 1.
use_opus_archive (bool): if ``True``, will use the version of the archive file which
contains audio files saved in the OPUS format, instead of FLAC. The OPUS files take up
less memory than the FLAC files, at the cost of the OPUS files being lower quality than
the FLAC files.
If ``True``, the parameter ``resampled_audio_dir`` must be ``None``, as resampling OPUS
audio files is currently not supported.
Defaults to False.
Returns:
This processor generates an initial manifest file with the following fields::
{
"audio_filepath": <path to the audio file>,
"duration": <duration of the audio in seconds>,
"text": <transcription>,
}
"""
def __init__(
self,
raw_data_dir: str,
language: str,
data_split: str,
resampled_audio_dir: Optional[str],
target_samplerate: int = 16000,
target_nchannels: int = 1,
use_opus_archive: bool = False,
**kwargs,
):
super().__init__(**kwargs)
self.raw_data_dir = Path(raw_data_dir)
self.language = language
self.data_split = data_split
self.resampled_audio_dir = Path(resampled_audio_dir) if resampled_audio_dir else None
self.target_samplerate = target_samplerate
self.target_nchannels = target_nchannels
self.use_opus_archive = use_opus_archive
# validate params
if self.use_opus_archive and self.resampled_audio_dir:
raise ValueError(
f"`use_opus_archive` is True and `resampled_audio_dir` is not None, but we currently do not"
" support resampling OPUS-format audio, please either set `use_opus_archive` to False or"
" resampled_audio_dir to None."
)
if not resampled_audio_dir:
logger.info(
"`resampled_audio_dir` is None => will not attempt to resample audio. Please note if you have"
" specified `target_samplerate` or `target_nchannels`, they will be ignored."
)
# will be initialized in self.prepare method
self.audio_path_prefix = None
self.transcription_file = None
def prepare(self):
"""Downloading and extracting data (unless already done)."""
os.makedirs(self.raw_data_dir, exist_ok=True)
if self.use_opus_archive:
url = MLS_URL_OPUS.format(language=self.language)
if not (self.raw_data_dir / f"mls_{self.language}_opus.tar.gz").exists():
download_file(url, str(self.raw_data_dir))
else:
url = MLS_URL_NO_OPUS.format(language=self.language)
if not (self.raw_data_dir / f"mls_{self.language}.tar.gz").exists():
download_file(url, str(self.raw_data_dir))
data_folder = extract_archive(str(self.raw_data_dir / os.path.basename(url)), str(self.raw_data_dir))
self.audio_path_prefix = str(Path(data_folder) / self.data_split / "audio")
self.transcription_file = str(Path(data_folder) / self.data_split / "transcripts.txt")
def read_manifest(self):
"""Reading the initial data line-by-line."""
if self.transcription_file is None:
raise RuntimeError("self.process has to be called before processing the data.")
with open(self.transcription_file, "rt", encoding="utf8") as fin:
dataset_entries = fin.readlines()
return dataset_entries
def process_dataset_entry(self, data_entry: str):
"""Processing the data entries.
Optionally resamples the audio into wav format (when ``resampled_audio_dir`` is set)
and outputs filepath, duration and transcription text.
"""
if len(data_entry.split("\t")) != 2:
raise RuntimeError(f"have more than one tab in line {data_entry}")
utt_id, text = data_entry.split("\t")
transcript_text = text.strip()
# specify src_audio_path
if self.use_opus_archive:
src_audio_path = os.path.join(self.audio_path_prefix, *utt_id.split("_")[:2], utt_id + ".opus")
else:
src_audio_path = os.path.join(self.audio_path_prefix, *utt_id.split("_")[:2], utt_id + ".flac")
# specify tgt_audio_path
if self.resampled_audio_dir:
tgt_audio_path = os.path.join(self.resampled_audio_dir, *utt_id.split("_")[:2], utt_id + ".wav")
if not os.path.exists(os.path.dirname(tgt_audio_path)):
os.makedirs(os.path.dirname(tgt_audio_path), exist_ok=True)
if not os.path.exists(tgt_audio_path):
tfm = Transformer()
tfm.rate(samplerate=self.target_samplerate)
tfm.channels(n_channels=self.target_nchannels)
tfm.build(input_filepath=src_audio_path, output_filepath=tgt_audio_path)
else:
tgt_audio_path = src_audio_path
data = {
"audio_filepath": tgt_audio_path,
"duration": float(librosa.get_duration(path=tgt_audio_path)),
"text": transcript_text,
}
return [DataEntry(data=data)]
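# Illustrative sketch (utterance id invented): for a transcripts.txt line
# "1234_5678_000012<TAB>some transcript", the source audio is looked up at
# <audio_path_prefix>/1234/5678/1234_5678_000012.flac (or .opus), optionally resampled to
# <resampled_audio_dir>/1234/5678/1234_5678_000012.wav, and the resulting manifest entry is
# roughly {"audio_filepath": "...", "duration": <seconds>, "text": "some transcript"}.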
| NeMo-speech-data-processor-main | sdp/processors/datasets/mls/create_initial_manifest.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-speech-data-processor-main | sdp/processors/modify_manifest/__init__.py |
import json
import os
from typing import Dict, List
from tqdm import tqdm
from sdp.processors.base_processor import (
BaseParallelProcessor,
BaseProcessor,
DataEntry,
)
class CombineSources(BaseParallelProcessor):
"""Can be used to create a single field from two alternative sources.
E.g.::
_target_: sdp.processors.CombineSources
sources:
- field: text_pc
origin_label: original
- field: text_pc_pred
origin_label: synthetic
- field: text
origin_label: no_pc
target: text
will populate the ``text`` field with data from ``text_pc`` field if it's
present and not equal to ``n/a`` (can be customized). If ``text_pc`` is
not available, it will populate ``text`` from ``text_pc_pred`` field,
following the same rules. If both are not available, it will fall back to
the ``text`` field itself. In all cases it will specify which source was
used in the ``text_origin`` field by using the label from the
``origin_label`` field. If none of the sources is available,
it will populate both the target and the origin fields with ``n/a``.
Args:
sources (list[dict]): list of the sources to use in order of preference.
Each element in the list should be in the following format::
{
field: <which field to take the data from>
origin_label: <what to write in the "<target>_origin" field>
}
target (str): target field that we are populating.
na_indicator (str): if any source field has text equal to the
``na_indicator`` it will be considered as not available. If none
of the sources are present, this will also be used as the value
for the target and origin fields. Defaults to ``n/a``.
Returns:
The same data as in the input manifest enhanced with the following fields::
<target>: <populated with data from either <source1> or <source2> \
or with <na_indicator> if none are available>
<target>_origin: <label that marks where the data came from>
"""
def __init__(
self,
sources: List[Dict[str, str]],
target: str,
na_indicator: str = "n/a",
**kwargs,
):
super().__init__(**kwargs)
self.sources = sources
self.target = target
self.na_indicator = na_indicator
def process_dataset_entry(self, data_entry: Dict):
for source_dict in self.sources:
if data_entry.get(source_dict['field'], self.na_indicator) != self.na_indicator:
data_entry[self.target] = data_entry[source_dict['field']]
data_entry[f"{self.target}_origin"] = source_dict['origin_label']
break # breaking out on the first present label
else: # going here if no break was triggered
data_entry[self.target] = self.na_indicator
data_entry[f"{self.target}_origin"] = self.na_indicator
return [DataEntry(data=data_entry)]
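# Illustrative sketch using the config from the docstring above (field values invented):
# an input entry {"text_pc": "n/a", "text_pc_pred": "Hello there.", "text": "hello there"}
# comes out as {"text": "Hello there.", "text_origin": "synthetic", ...}, because the first
# source ("text_pc") equals the n/a indicator and the second source is used instead.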
class AddConstantFields(BaseParallelProcessor):
"""This processor adds constant fields to all manifest entries.
E.g., can be useful to add fixed ``label: <language>`` field for downstream
language identification model training.
Args:
fields: dictionary with any additional information to add. E.g.::
fields = {
"label": "en",
"metadata": "mcv-11.0-2022-09-21",
}
Returns:
The same data as in the input manifest with added fields
as specified in the ``fields`` input dictionary.
"""
def __init__(
self,
fields: Dict,
**kwargs,
):
super().__init__(**kwargs)
self.fields = fields
def process_dataset_entry(self, data_entry: Dict):
data_entry.update(self.fields)
return [DataEntry(data=data_entry)]
class DuplicateFields(BaseParallelProcessor):
"""This processor duplicates fields in all manifest entries.
It is useful for when you want to do downstream processing of a variant
of the entry. E.g. make a copy of "text" called "text_no_pc", and
remove punctuation from "text_no_pc" in downstream processors.
Args:
duplicate_fields (dict): dictionary where keys are the original
fields to be copied and their values are the new names of
the duplicate fields.
Returns:
The same data as in the input manifest with duplicated fields
as specified in the ``duplicate_fields`` input dictionary.
"""
def __init__(
self,
duplicate_fields: Dict,
**kwargs,
):
super().__init__(**kwargs)
self.duplicate_fields = duplicate_fields
def process_dataset_entry(self, data_entry: Dict):
for field_src, field_tgt in self.duplicate_fields.items():
if field_src not in data_entry:
raise ValueError(f"Expected field {field_src} in data_entry {data_entry} but there isn't one.")
data_entry[field_tgt] = data_entry[field_src]
return [DataEntry(data=data_entry)]
class RenameFields(BaseParallelProcessor):
"""This processor renames fields in all manifest entries.
Args:
rename_fields: dictionary where keys are the fields to be
renamed and their values are the new names of the fields.
Returns:
The same data as in the input manifest with renamed fields
as specified in the ``rename_fields`` input dictionary.
"""
def __init__(
self,
rename_fields: Dict,
**kwargs,
):
super().__init__(**kwargs)
self.rename_fields = rename_fields
def process_dataset_entry(self, data_entry: Dict):
for field_src, field_tgt in self.rename_fields.items():
if field_src not in data_entry:
raise ValueError(f"Expected field {field_src} in data_entry {data_entry} but there isn't one.")
data_entry[field_tgt] = data_entry[field_src]
del data_entry[field_src]
return [DataEntry(data=data_entry)]
class SplitOnFixedDuration(BaseParallelProcessor):
"""This processor splits audio into a fixed length segments.
It does not actually create different audio files, but simply adds
corresponding ``offset`` and ``duration`` fields. These fields can
be automatically processed by NeMo to split audio on the fly during
training.
Args:
segment_duration (float): fixed desired duration of each segment.
drop_last (bool): whether to drop the last segment if total duration is
not divisible by desired segment duration. If False, the last
segment will be of a different length which is ``< segment_duration``.
Defaults to True.
drop_text (bool): whether to drop text from entries as it is most likely
inaccurate after the split on duration. Defaults to True.
Returns:
The same data as in the input manifest but all audio that's longer
than the ``segment_duration`` will be duplicated multiple times with
additional ``offset`` and ``duration`` fields. If ``drop_text=True``
will also drop ``text`` field from all entries.
"""
def __init__(
self,
segment_duration: float,
drop_last: bool = True,
drop_text: bool = True,
**kwargs,
):
super().__init__(**kwargs)
self.segment_duration = segment_duration
self.drop_last = drop_last
self.drop_text = drop_text
def process_dataset_entry(self, data_entry: Dict):
total_duration = data_entry["duration"]
total_segments = int(total_duration // self.segment_duration)
output = [None] * total_segments
for segment_idx in range(total_segments):
modified_entry = data_entry.copy() # shallow copy should be good enough
modified_entry["duration"] = self.segment_duration
modified_entry["offset"] = segment_idx * self.segment_duration
if self.drop_text:
modified_entry.pop("text", None)
output[segment_idx] = DataEntry(data=modified_entry)
remainder = total_duration - self.segment_duration * total_segments
if not self.drop_last and remainder > 0:
modified_entry = data_entry.copy()
modified_entry["duration"] = remainder
modified_entry["offset"] = self.segment_duration * total_segments
if self.drop_text:
modified_entry.pop("text", None)
output.append(DataEntry(data=modified_entry))
return output
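# Worked example (hypothetical entry): with segment_duration=3.0, an entry of duration 10.0s
# becomes three entries with offsets 0.0/3.0/6.0 and duration 3.0 each; with drop_last=False a
# fourth entry with offset 9.0 and duration 1.0 is appended as well. With drop_text=True (the
# default) the "text" field is removed from all resulting entries.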
class ChangeToRelativePath(BaseParallelProcessor):
"""This processor changes the audio filepaths to be relative.
Args:
base_dir: typically a folder where manifest file is going to be
stored. All paths will be relative to that folder.
Returns:
The same data as in the input manifest with ``audio_filepath`` key
changed to contain relative path to the ``base_dir``.
"""
def __init__(
self,
base_dir: str,
**kwargs,
):
super().__init__(**kwargs)
self.base_dir = base_dir
def process_dataset_entry(self, data_entry: Dict):
data_entry["audio_filepath"] = os.path.relpath(data_entry["audio_filepath"], self.base_dir)
return [DataEntry(data=data_entry)]
class SortManifest(BaseProcessor):
"""Processor which will sort the manifest by some specified attribute.
Args:
attribute_sort_by (str): the attribute by which the manifest will be sorted.
descending (bool): if set to False, attribute will be in ascending order.
If True, attribute will be in descending order. Defaults to True.
Returns:
The same entries as in the input manifest, but sorted based
on the provided parameters.
"""
def __init__(
self,
attribute_sort_by: str,
descending: bool = True,
**kwargs,
):
super().__init__(**kwargs)
self.attribute_sort_by = attribute_sort_by
self.descending = descending
def process(self):
with open(self.input_manifest_file, "rt", encoding="utf8") as fin:
dataset_entries = [json.loads(line) for line in fin.readlines()]
dataset_entries = sorted(dataset_entries, key=lambda x: x[self.attribute_sort_by], reverse=self.descending)
with open(self.output_manifest_file, "wt", encoding="utf8") as fout:
for line in dataset_entries:
fout.write(json.dumps(line, ensure_ascii=False) + "\n")
class KeepOnlySpecifiedFields(BaseProcessor):
"""Saves a copy of a manifest but only with a subset of the fields.
Typically will be the final processor to save only relevant fields
in the desired location.
Args:
fields_to_keep (list[str]): list of the fields in the input manifest
that we want to retain. The output file will only contain these
fields.
Returns:
The same data as in input manifest, but re-saved in the new location
with only ``fields_to_keep`` fields retained.
"""
def __init__(self, fields_to_keep: List[str], **kwargs):
super().__init__(**kwargs)
self.fields_to_keep = fields_to_keep
def process(self):
with open(self.input_manifest_file, "rt", encoding="utf8") as fin, open(
self.output_manifest_file, "wt", encoding="utf8"
) as fout:
for line in tqdm(fin):
line = json.loads(line)
new_line = {field: line[field] for field in self.fields_to_keep}
fout.write(json.dumps(new_line, ensure_ascii=False) + "\n")
| NeMo-speech-data-processor-main | sdp/processors/modify_manifest/common.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
from typing import List
from sdp.logging import logger
from sdp.processors.base_processor import BaseParallelProcessor, DataEntry
# TODO: should be done with general sub-regex processor
class MakeLettersUppercaseAfterPeriod(BaseParallelProcessor):
"""Can be used to replace characters with upper-case version after punctuation.
Args:
punctuation (str): string with all punctuation characters to consider.
Defaults to ".!?".
text_key (str): a string indicating which key of the data entries
should be used to find the utterance transcript. Defaults to "text".
Returns:
The same data as in the input manifest with ``<text_key>`` field changed.
"""
def __init__(
self, punctuation=".!?", text_key: str = "text", **kwargs,
):
super().__init__(**kwargs)
self.punctuation = punctuation
self.text_key = text_key
def process_dataset_entry(self, data_entry) -> List:
replace_word_counter = collections.defaultdict(int)
# keeping in a list, since strings are immutable
new_text = []
idx = 0
while idx < len(data_entry[self.text_key]):
character = data_entry[self.text_key][idx]
# checking that next is space and then we upper whatever is after that
# note that Python's upper correctly does not change anything that's not a letter
if (
character in self.punctuation
and idx + 2 < len(data_entry[self.text_key])
and data_entry[self.text_key][idx + 1] == " "
):
new_text.extend([character, " ", data_entry[self.text_key][idx + 2].upper()])
replace_word_counter[data_entry[self.text_key][idx : idx + 3]] += 1
idx += 2
else:
new_text.append(character)
idx += 1
data_entry[self.text_key] = "".join(new_text)
return [DataEntry(data=data_entry, metrics=replace_word_counter)]
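# Usage sketch (hypothetical transcript): with the default punctuation ".!?",
# "hello. world? yes" becomes "hello. World? Yes"; the very first character is left unchanged
# because uppercasing is only triggered after a punctuation mark followed by a space.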
def finalize(self, metrics):
total_counter = collections.defaultdict(int)
for counter in metrics:
for word, count in counter.items():
total_counter[word] += count
logger.info("Some of the substrings that were uppercased")
total_counter_sorted = dict(sorted(total_counter.items(), key=lambda x: x[1], reverse=True))
for word, count in total_counter_sorted.items():
if count > 1:
logger.info(f"{word} {count}")
super().finalize(metrics)
| NeMo-speech-data-processor-main | sdp/processors/modify_manifest/make_letters_uppercase_after_period.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import re
from typing import List
from sdp.logging import logger
from sdp.processors.base_processor import BaseParallelProcessor, DataEntry
from sdp.utils.edit_spaces import add_start_end_spaces, remove_extra_spaces
from sdp.utils.get_diff import get_diff, get_diff_with_subs_grouped
from sdp.utils.metrics_computation import (
get_cer,
get_charrate,
get_wer,
get_wmr,
get_wordrate,
)
class DropHighLowCharrate(BaseParallelProcessor):
"""Drops utterances if their character rate is too low or too high.
Character rate = ``(num of characters in self.text_key) / (duration of audio)``.
A too-low or too-high character rate often implies that the ground
truth transcription might be inaccurate.
Args:
high_charrate_threshold (float): upper character rate threshold.
If the character rate of an utterance is higher than this number,
the utterance will be dropped.
low_charrate_threshold (float): lower character rate threshold.
If the character rate of an utterance is lower than this number,
the utterance will be dropped.
text_key (str): a string indicating which key of the data entries
should be used to find the utterance transcript. Defaults to "text".
Returns:
The same data as in the input manifest with some entries dropped.
"""
def __init__(
self, high_charrate_threshold: float, low_charrate_threshold: float, text_key: str = "text", **kwargs,
):
super().__init__(**kwargs)
self.high_charrate_threshold = high_charrate_threshold
self.low_charrate_threshold = low_charrate_threshold
self.text_key = text_key
def process_dataset_entry(self, data_entry) -> List:
"""Drops utterances based on the provided thresholds."""
charrate = get_charrate(data_entry[self.text_key], data_entry["duration"])
if charrate > self.high_charrate_threshold:
return [DataEntry(data=None, metrics=(0, 1))]
elif charrate < self.low_charrate_threshold:
return [DataEntry(data=None, metrics=(1, 0))]
return [DataEntry(data=data_entry, metrics=(0, 0))]
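# Worked example (assuming get_charrate is simply len(text) / duration, as the class
# docstring describes): a hypothetical entry with text "hello world" (11 characters) and
# duration 2.0s has a character rate of 5.5, so it is kept for thresholds such as
# low_charrate_threshold=1 and high_charrate_threshold=21.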
def finalize(self, metrics):
"""Will report how many utterances were dropped for each threshold."""
high_drop_counter = 0
low_drop_counter = 0
for dropped_low, dropped_high in metrics:
low_drop_counter += dropped_low
high_drop_counter += dropped_high
logger.info(
"Num of utterances that were dropped due to char rate > %f: %d",
self.high_charrate_threshold,
high_drop_counter,
)
logger.info(
"Num of utterances that were dropped due to char rate < %f: %d",
self.low_charrate_threshold,
low_drop_counter,
)
super().finalize(metrics)
class DropHighLowWordrate(BaseParallelProcessor):
"""Drops utterances if their word rate is too low or too high.
Word rate = ``(num of words in self.text_key) / (duration of audio)``.
A too-low or too-high word rate often implies that the ground
truth transcription might be inaccurate.
Args:
high_wordrate_threshold (float): upper word rate threshold.
If the word rate of an utterance is higher than this number,
the utterance will be dropped.
low_wordrate_threshold (float): lower word rate threshold.
If the word rate of an utterance is lower than this number,
the utterance will be dropped.
text_key (str): a string indicating which key of the data entries
should be used to find the utterance transcript. Defaults to "text".
Returns:
The same data as in the input manifest with some entries dropped.
"""
def __init__(
self, high_wordrate_threshold: float, low_wordrate_threshold: float, text_key: str = "text", **kwargs,
):
super().__init__(**kwargs)
self.high_wordrate_threshold = high_wordrate_threshold
self.low_wordrate_threshold = low_wordrate_threshold
self.text_key = text_key
def process_dataset_entry(self, data_entry) -> List:
wordrate = get_wordrate(data_entry[self.text_key], data_entry["duration"])
if wordrate > self.high_wordrate_threshold:
return [DataEntry(data=None, metrics=(0, 1))]
elif wordrate < self.low_wordrate_threshold:
return [DataEntry(data=None, metrics=(1, 0))]
return [DataEntry(data=data_entry, metrics=(0, 0))]
def finalize(self, metrics):
high_drop_counter = 0
low_drop_counter = 0
for dropped_low, dropped_high in metrics:
low_drop_counter += dropped_low
high_drop_counter += dropped_high
logger.info(
"Num of utterances that were dropped due to word rate > %f: %d",
self.high_wordrate_threshold,
high_drop_counter,
)
logger.info(
"Num of utterances that were dropped due to word rate < %f: %d",
self.low_wordrate_threshold,
low_drop_counter,
)
super().finalize(metrics)
class DropHighLowDuration(BaseParallelProcessor):
"""Drops utterances if their duration is too low or too high.
Args:
high_duration_threshold (float): upper duration threshold (in seconds).
If the duration of an utterance's audio is higher than this number,
the utterance will be dropped.
low_duration_threshold (float): lower duration threshold (in seconds).
If the duration of an utterance's audio is lower than this number,
the utterance will be dropped.
text_key (str): a string indicating which key of the data entries
should be used to find the utterance transcript. Defaults to "text".
Returns:
The same data as in the input manifest with some entries dropped.
"""
def __init__(
self, high_duration_threshold: float, low_duration_threshold: float, text_key: str = "text", **kwargs,
):
super().__init__(**kwargs)
self.high_duration_threshold = high_duration_threshold
self.low_duration_threshold = low_duration_threshold
self.high_drop_counter = 0
self.low_drop_counter = 0
self.text_key = text_key
def process_dataset_entry(self, data_entry) -> List:
duration = data_entry["duration"]
if duration > self.high_duration_threshold:
return [DataEntry(data=None, metrics=(0, 1))]
elif duration < self.low_duration_threshold:
return [DataEntry(data=None, metrics=(1, 0))]
return [DataEntry(data=data_entry, metrics=(0, 0))]
def finalize(self, metrics):
high_drop_counter = 0
low_drop_counter = 0
for dropped_low, dropped_high in metrics:
low_drop_counter += dropped_low
high_drop_counter += dropped_high
logger.info(
"Num of utterances that were dropped due to duration > %f: %d",
self.high_duration_threshold,
high_drop_counter,
)
logger.info(
"Num of utterances that were dropped due to duration < %f: %d",
self.low_duration_threshold,
low_drop_counter,
)
super().finalize(metrics)
class DropIfNoneOfRegexMatch(BaseParallelProcessor):
"""Drops utterances if ``data[self.text_key]`` does not match any of ``regex_patterns``.
Before applying regex checks, we will add a space
character to the beginning and end of the ``data[self.text_key]`` value of each data entry.
After the regex checks, assuming the utterance isn't dropped,
the extra spaces are removed. This includes the spaces in the beginning
and end of the text, as well as any double spaces ``" "``.
Args:
regex_patterns (list[str]): If ``data_entry[self.text_key]`` does not
match any of the regex patterns in the list, that utterance
will be dropped.
text_key (str): a string indicating which key of the data entries
should be used to find the utterance transcript. Defaults to "text".
Returns:
The same data as in the input manifest with some entries dropped.
"""
def __init__(
self, regex_patterns: List[str], text_key: str = "text", **kwargs,
):
super().__init__(**kwargs)
self.regex_patterns = regex_patterns
self.text_key = text_key
def process_dataset_entry(self, data_entry) -> List:
data_entry[self.text_key] = add_start_end_spaces(data_entry[self.text_key])
for regex_pattern in self.regex_patterns:
if re.search(regex_pattern, data_entry[self.text_key]):
break
else: # will only reach this if none of the regex match
return [DataEntry(data=None, metrics=1)]
# will reach this part of code if at least one of the regexes matches
data_entry[self.text_key] = remove_extra_spaces(data_entry[self.text_key])
return [DataEntry(data=data_entry, metrics=0)]
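# Usage sketch (hypothetical values): with regex_patterns=[" who ", " what "], an entry whose
# text is "tell me who it is" is kept (the padded text " tell me who it is " matches " who "),
# while an entry with text "tell me everything" is dropped.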
def finalize(self, metrics):
total_counter = 0
for value in metrics:
if value:
total_counter += value
logger.info("Num of utterances that were dropped due to not containing any of the specified regex patterns")
logger.info(f"{total_counter}")
super().finalize(metrics)
class DropNonAlphabet(BaseParallelProcessor):
"""Drops utterances if they contain characters that are not in the ``alphabet``.
Args:
alphabet (str): a string containing all of the characters in our alphabet.
If an utterance contains at least one character that is not in the
``alphabet``, then that utterance will be dropped.
text_key (str): a string indicating which key of the data entries
should be used to find the utterance transcript. Defaults to "text".
.. note::
Don't forget to include spaces in your alphabet, unless you
want to make sure none of the utterances contain spaces.
Returns:
The same data as in the input manifest with some entries dropped.
"""
def __init__(
self, alphabet: str, text_key: str = "text", **kwargs,
):
super().__init__(**kwargs)
self.alphabet = alphabet
self.text_key = text_key
def process_dataset_entry(self, data_entry) -> List:
drop_this_utt = False
non_alphabet_counter = collections.defaultdict(int)
for char in data_entry[self.text_key]:
if char not in self.alphabet:
drop_this_utt = True
non_alphabet_counter[char] += 1
if drop_this_utt:
return [DataEntry(data=None, metrics=non_alphabet_counter)]
return [DataEntry(data=data_entry, metrics=non_alphabet_counter)]
def finalize(self, metrics):
total_counter = collections.defaultdict(int)
for counter in metrics:
for char, value in counter.items():
total_counter[char] += value
logger.info("Num of non-alphabet characters")
for char, count in total_counter.items():
logger.info(f"{char}: {count}")
super().finalize(metrics)
class DropASRErrorBeginningEnd(BaseParallelProcessor):
"""Drops utterances if there is a sufficiently long ASR mismatch
at the beginning or end of the utterance.
Args:
beginning_error_char_threshold (int): if there is an insertion or deletion at
the beginning of the utterance that has more characters than this number,
then the utterance will be dropped.
If there is a substitution at the beginning of the utterance, then the
utterance will be dropped if
``abs(len(deletion) - len(insertion)) > beginning_error_char_threshold``.
end_error_char_threshold (int): if there is an insertion or deletion at
the end of the utterance that has more characters than this number,
then the utterance will be dropped.
If there is a substitution at the end of the utterance, then the
utterance will be dropped if
``abs(len(deletion) - len(insertion)) > end_error_char_threshold``.
text_key (str): a string indicating which key of the data entries
should be used to find the utterance transcript. Defaults to "text".
pred_text_key (str): a string indicating which key of the data entries
should be used to access the ASR predictions. Defaults to "pred_text".
Returns:
The same data as in the input manifest with some entries dropped.
"""
def __init__(
self,
beginning_error_char_threshold: int,
end_error_char_threshold: int,
text_key: str = "text",
pred_text_key: str = "pred_text",
**kwargs,
):
super().__init__(**kwargs)
self.beginning_error_char_threshold = beginning_error_char_threshold
self.end_error_char_threshold = end_error_char_threshold
self.text_key = text_key
self.pred_text_key = pred_text_key
def process_dataset_entry(self, data_entry) -> List:
orig_words, pred_words = data_entry[self.text_key], data_entry[self.pred_text_key]
diff = get_diff_with_subs_grouped(orig_words, pred_words)
if len(diff) > 0: # i.e. if there are differences between text and pred_text
first_diff_entry = diff[0]
if first_diff_entry[0] == 1 or first_diff_entry[0] == -1: # i.e. diff is purely an insertion or deletion
if len(first_diff_entry[1]) > self.beginning_error_char_threshold:
return [DataEntry(data=None, metrics=(1, 0))]
elif first_diff_entry[0] != 0: # i.e. diff should be a tuple representing substitution
len_deletion = len(first_diff_entry[0][1])
len_insertion = len(first_diff_entry[1][1])
if abs(len_deletion - len_insertion) > self.beginning_error_char_threshold:
return [DataEntry(data=None, metrics=(1, 0))]
last_diff_entry = diff[-1]
if last_diff_entry[0] == 1 or last_diff_entry[0] == -1: # i.e. diff is purely an insertion or deletion
if len(last_diff_entry[1]) > self.end_error_char_threshold:
return [DataEntry(data=None, metrics=(0, 1))]
elif last_diff_entry[0] != 0: # i.e. diff should be a tuple representing substitution
len_deletion = len(last_diff_entry[0][1])
len_insertion = len(last_diff_entry[1][1])
if abs(len_deletion - len_insertion) > self.end_error_char_threshold:
return [DataEntry(data=None, metrics=(0, 1))]
return [DataEntry(data=data_entry, metrics=(0, 0))]
def finalize(self, metrics):
beginning_drop_counter = 0
end_drop_counter = 0
for dropped_beginning, dropped_end in metrics:
beginning_drop_counter += dropped_beginning
end_drop_counter += dropped_end
logger.info(
"Num of utterances that were dropped due to asr insertions/deletions at the beginning: %d",
beginning_drop_counter,
)
logger.info(
"Num of utterances that were dropped due to asr insertions/deletions at the end: %d", end_drop_counter,
)
super().finalize(metrics)
# TODO: needs unification with above class in some way
class DropASRError(BaseParallelProcessor):
"""Drops utterances if there is a sufficiently long ASR mismatch anywhere in the utterance.
Args:
consecutive_words_threshold (int): will drop if there is a mismatch of
at least this many words in a row.
text_key (str): a string indicating which key of the data entries
should be used to find the utterance transcript. Defaults to "text".
pred_text_key (str): a string indicating which key of the data entries
should be used to access the ASR predictions. Defaults to "pred_text".
Returns:
The same data as in the input manifest with some entries dropped.
"""
def __init__(
self, consecutive_words_threshold: int, text_key: str = "text", pred_text_key: str = "pred_text", **kwargs,
):
super().__init__(**kwargs)
self.consecutive_words_threshold = consecutive_words_threshold
self.text_key = text_key
self.pred_text_key = pred_text_key
def process_dataset_entry(self, data_entry) -> List:
orig_words, pred_words = data_entry[self.text_key], data_entry[self.pred_text_key]
diffs = get_diff(orig_words, pred_words)
for diff_entry in diffs:
if diff_entry[0] == 0:
continue
if len(diff_entry[1].split()) >= self.consecutive_words_threshold:
return []
return [DataEntry(data=data_entry)]
class DropHighCER(BaseParallelProcessor):
"""Drops utterances if there is a sufficiently high character-error-rate (CER).
CER is measured between ``data[self.text_key]`` and ``data[self.pred_text_key]``.
.. note::
We only drop the utterance if ``CER > threshold`` (i.e. strictly greater
than) so that if we set the threshold to 0, we will not remove
utterances with ``CER == 0``.
Args:
cer_threshold (float): CER threshold above which the utterance will be dropped.
text_key (str): a string indicating which key of the data entries
should be used to find the utterance transcript. Defaults to "text".
pred_text_key (str): a string indicating which key of the data entries
should be used to access the ASR predictions. Defaults to "pred_text".
Returns:
The same data as in the input manifest with some entries dropped.
"""
def __init__(
self, cer_threshold: float, text_key: str = "text", pred_text_key: str = "pred_text", **kwargs,
):
super().__init__(**kwargs)
self.cer_threshold = cer_threshold
self.text_key = text_key
self.pred_text_key = pred_text_key
def process_dataset_entry(self, data_entry) -> List:
cer = get_cer(data_entry[self.text_key], data_entry[self.pred_text_key])
if cer > self.cer_threshold:
return [DataEntry(data=None, metrics=1)]
else:
return [DataEntry(data=data_entry, metrics=0)]
def finalize(self, metrics):
drop_counter = 0
for dropped in metrics:
drop_counter += dropped
logger.info(
"Num of utterances that were dropped due to CER > %d: %d", self.cer_threshold, drop_counter,
)
super().finalize(metrics)
class DropHighWER(BaseParallelProcessor):
"""Drops utterances if there is a sufficiently high word-error-rate (WER).
WER is measured between ``data[self.text_key]`` and ``data[self.pred_text_key]``.
.. note::
We only drop the utterance if ``WER > threshold`` (i.e. strictly greater
than) so that if we set the threshold to 0, we will not remove
utterances with ``WER == 0``.
Args:
wer_threshold (float): WER threshold above which the utterance will be dropped.
text_key (str): a string indicating which key of the data entries
should be used to find the utterance transcript. Defaults to "text".
pred_text_key (str): a string indicating which key of the data entries
should be used to access the ASR predictions. Defaults to "pred_text".
Returns:
The same data as in the input manifest with some entries dropped.
"""
def __init__(
self, wer_threshold: float, text_key: str = "text", pred_text_key: str = "pred_text", **kwargs,
):
super().__init__(**kwargs)
self.wer_threshold = wer_threshold
self.text_key = text_key
self.pred_text_key = pred_text_key
def process_dataset_entry(self, data_entry) -> List:
wer = get_wer(data_entry[self.text_key], data_entry[self.pred_text_key])
if wer > self.wer_threshold:
return [DataEntry(data=None, metrics=1)]
else:
return [DataEntry(data=data_entry, metrics=0)]
def finalize(self, metrics):
drop_counter = 0
for dropped in metrics:
drop_counter += dropped
logger.info(
"Num of utterances that were dropped due to WER > %d: %d", self.wer_threshold, drop_counter,
)
super().finalize(metrics)
class DropLowWordMatchRate(BaseParallelProcessor):
"""Drops utterances if there is a sufficiently low word-match-rate (WMR).
WMR is measured between ``data[self.text_key]`` and ``data[self.pred_text_key]``.
.. note::
We only drop the utterance if ``WMR < threshold`` (i.e. strictly lower
than) so that if we set the threshold to 100, we will not remove
utterances with ``WMR == 100``.
Args:
wmr_threshold (float): WMR threshold below which the utterance will be dropped.
text_key (str): a string indicating which key of the data entries
should be used to find the utterance transcript. Defaults to "text".
pred_text_key (str): a string indicating which key of the data entries
should be used to access the ASR predictions. Defaults to "pred_text".
Returns:
The same data as in the input manifest with some entries dropped.
"""
def __init__(
self, wmr_threshold: float, text_key: str = "text", pred_text_key: str = "pred_text", **kwargs,
):
super().__init__(**kwargs)
self.wmr_threshold = wmr_threshold
self.text_key = text_key
self.pred_text_key = pred_text_key
def process_dataset_entry(self, data_entry) -> List:
orig_words, pred_words = data_entry[self.text_key], data_entry[self.pred_text_key]
wmr = get_wmr(orig_words, pred_words)
if wmr < self.wmr_threshold:
return [DataEntry(data=None, metrics=1)]
else:
return [DataEntry(data=data_entry, metrics=0)]
def finalize(self, metrics):
drop_counter = 0
for dropped in metrics:
drop_counter += dropped
logger.info(
"Num of utterances that were dropped due to WMR < %d: %d", self.wmr_threshold, drop_counter,
)
super().finalize(metrics)
class DropIfRegexMatch(BaseParallelProcessor):
"""Drops utterances if text matches a regex pattern.
Before applying regex checks, we will add a space
character to the beginning and end of the ``text_key`` field
of each data entry. After the regex checks, assuming the utterance isn't dropped,
the extra spaces are removed. This includes the spaces in the beginning
and end of the text, as well as any double spaces ``" "``.
Args:
regex_patterns (list[str]): a list of strings. The list will be
traversed in order. If ``data_entry.data[self.text_key]`` matches
the regex, the entry will be dropped.
text_key (str): a string indicating which key of the data entries
should be used to find the utterance transcript. Defaults to "text".
Returns:
The same data as in the input manifest with some entries dropped.
"""
def __init__(
self, regex_patterns: List[str], text_key: str = "text", **kwargs,
):
super().__init__(**kwargs)
self.regex_patterns = regex_patterns
self.text_key = text_key
def process_dataset_entry(self, data_entry) -> List:
drop_counter = collections.defaultdict(int)
data_entry[self.text_key] = add_start_end_spaces(data_entry[self.text_key])
for regex_pattern in self.regex_patterns:
if re.search(regex_pattern, data_entry[self.text_key]):
for match in re.finditer(regex_pattern, data_entry[self.text_key]):
drop_counter[regex_pattern] += 1
return [DataEntry(data=None, metrics=drop_counter)]
data_entry[self.text_key] = remove_extra_spaces(data_entry[self.text_key])
return [DataEntry(data=data_entry, metrics=drop_counter)]
def finalize(self, metrics):
total_counter = collections.defaultdict(int)
for counter in metrics:
for attribute, value in counter.items():
total_counter[attribute] += value
logger.info("Regex matches that were dropped in attribute")
for attribute, matches in total_counter.items():
logger.info(f"{attribute}, {matches}")
super().finalize(metrics)
class DropOnAttribute(BaseParallelProcessor):
"""Drops utterances if attribute is set to True/False.
Args:
key (str): which key to use for dropping utterances.
drop_if_false (bool): whether to drop if value is False. Defaults
to dropping if True.
Returns:
The same data as in the input manifest with some entries dropped.
"""
def __init__(
self, key: str, drop_if_false: bool = False, **kwargs,
):
super().__init__(**kwargs)
self.key = key
self.drop_if_false = drop_if_false
def process_dataset_entry(self, data_entry) -> List:
if data_entry[self.key] is not self.drop_if_false:
return [DataEntry(data=None, metrics=1)]
return [DataEntry(data=data_entry, metrics=0)]
def finalize(self, metrics):
total_counter = 0
for counter in metrics:
total_counter += counter
logger.info("Dropped %d utterances", total_counter)
super().finalize(metrics)
class DropIfSubstringInInsertion(BaseParallelProcessor):
"""Drops utterances if a substring matches an ASR insertion.
Insertions are checked between ``data[self.text_key]`` and
``data[self.pred_text_key]``.
.. note::
We check for exact matches, so you need to be mindful of spaces, e.g.
you may wish to do ``substrings_in_insertion = ["nemo "]`` instead
of ``substrings_in_insertion = ["nemo"]``.
Args:
substrings_in_insertion (list[str]): a list of strings which might be
inserted in predicted ASR text. If the insertion matches a
string exactly, the utterance will be dropped.
text_key (str): a string indicating which key of the data entries
should be used to find the utterance transcript. Defaults to "text".
pred_text_key (str): a string indicating which key of the data entries
should be used to access the ASR predictions. Defaults to "pred_text".
Returns:
The same data as in the input manifest with some entries dropped.
"""
def __init__(
self, substrings_in_insertion: List[str], text_key: str = "text", pred_text_key: str = "pred_text", **kwargs,
):
super().__init__(**kwargs)
self.substrings_in_insertion = substrings_in_insertion
self.text_key = text_key
self.pred_text_key = pred_text_key
def process_dataset_entry(self, data_entry) -> List:
for substring_in_insertion in self.substrings_in_insertion:
if substring_in_insertion in data_entry[self.pred_text_key]:
orig_words, pred_words = data_entry[self.text_key], data_entry[self.pred_text_key]
diff = get_diff_with_subs_grouped(orig_words, pred_words)
for diff_entry in diff:
if diff_entry[0] == 1: # insertion in original string
if substring_in_insertion in diff_entry[1]:
return [DataEntry(data=None, metrics=diff_entry[1])]
return [DataEntry(data=data_entry, metrics="")]
def finalize(self, metrics):
total_counter = collections.defaultdict(int)
for diff_entry in metrics:
if diff_entry:
total_counter[diff_entry] += 1
logger.info("Some of the insertions that cause the utterance to be dropped:")
total_counter_sorted = dict(sorted(total_counter.items(), key=lambda x: x[1], reverse=True))
for insertion, count in total_counter_sorted.items():
logger.info(f"{insertion}, {count}")
super().finalize(metrics)
| NeMo-speech-data-processor-main | sdp/processors/modify_manifest/data_to_dropbool.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import re
from typing import Dict, List
from sdp.logging import logger
from sdp.processors.base_processor import BaseParallelProcessor, DataEntry
from sdp.utils.edit_spaces import add_start_end_spaces, remove_extra_spaces
from sdp.utils.get_diff import get_diff_with_subs_grouped
class InsIfASRInsertion(BaseParallelProcessor):
"""Processor that adds substrings to transcription if they are present in ASR predictions.
Will insert substrings into ``data[self.text_key]`` if it is
present at that location in ``data[self.pred_text_key]``.
It is useful if words are systematically missing from ground truth
transcriptions.
Args:
insert_words (list[str]): list of strings that will be inserted
into ``data[self.text_key]`` if there is an insertion (containing
only that string) in ``data[self.pred_text_key]``.
text_key (str): a string indicating which key of the data entries
should be used to find the utterance transcript. Defaults to "text".
pred_text_key (str): a string indicating which key of the data entries
should be used to access the ASR predictions. Defaults to "pred_text".
.. note::
Because this processor looks for an exact match in the insertion,
we recommend including variations with different spaces in
``insert_words``, e.g. ``[' nemo', 'nemo ', ' nemo ']``.
Returns:
The same data as in the input manifest with ``<text_key>`` field changed.
"""
def __init__(
self, insert_words: List[str], text_key: str = "text", pred_text_key: str = "pred_text", **kwargs,
):
super().__init__(**kwargs)
self.insert_words = insert_words
self.text_key = text_key
self.pred_text_key = pred_text_key
def process_dataset_entry(self, data_entry) -> List:
insert_word_counter = collections.defaultdict(int)
for insert_word in self.insert_words:
if not insert_word in data_entry[self.pred_text_key]:
break
orig_words, pred_words = data_entry[self.text_key], data_entry[self.pred_text_key]
diff = get_diff_with_subs_grouped(orig_words, pred_words)
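# Grouped substitutions come back from get_diff_with_subs_grouped as a pair of (op, text) tuples,
# so they fall through the integer-op branches below and are handled by the
# isinstance(diff_entry, tuple) case.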
if len(diff) > 0: # ie if there are differences between text and pred_text
new_sent = ""
for diff_entry in diff:
if diff_entry[0] == 0: # no change
new_sent += diff_entry[1]
elif diff_entry[0] == -1: # deletion in original string
new_sent += diff_entry[1]
elif diff_entry[0] == 1: # insertion in original string
if diff_entry[1] == insert_word:
new_sent += insert_word
insert_word_counter[insert_word] += 1
elif isinstance(diff_entry, tuple): # i.e. diff is a substitution
new_sent += diff_entry[0][1]
else:
raise ValueError(f"unexpected item in diff_entry: {diff_entry}")
new_sent = " ".join(new_sent.split()) # remove any extra spaces
data_entry[self.text_key] = new_sent
return [DataEntry(data=data_entry, metrics=insert_word_counter)]
def finalize(self, metrics):
total_counter = collections.defaultdict(int)
for counter in metrics:
for word, count in counter.items():
total_counter[word] += count
logger.info("Num of words that were inserted")
for word, count in total_counter.items():
logger.info(f"{word} {count}")
super().finalize(metrics)
class SubIfASRSubstitution(BaseParallelProcessor):
"""Processor that substitutes substrings to transcription if they are present in ASR predictions.
Will convert a substring in ``data[self.text_key]`` to a
substring in ``data[self.pred_text_key]`` if both are located in the
same place (ie are part of a 'substitution' operation) and if the substrings
correspond to key-value pairs in ``sub_words``.
This is useful if words are systematically incorrect in ground truth
transcriptions.
Before starting to look for substitution, this processor adds spaces at the beginning and end of
``data[self.text_key]`` and ``data[self.pred_text_key]``, to ensure that an argument like
``sub_words = {"nmo ": "nemo "}`` would cause a substitution to be made even if the original
``data[self.text_key]`` ends with ``"nmo"`` and ``data[self.pred_text_key]`` ends with ``"nemo"``.
Args:
sub_words (dict): dictionary where a key is a string that might be in
``data[self.text_key]`` and the value is the string that might
be in ``data[self.pred_text_key]``. If both are located in the same
place (i.e. are part of a 'substitution' operation)
then the key string will be converted to the value string
in ``data[self.text_key]``.
text_key (str): a string indicating which key of the data entries
should be used to find the utterance transcript. Defaults to "text".
pred_text_key (str): a string indicating which key of the data entries
should be used to access the ASR predictions. Defaults to "pred_text".
.. note::
This processor looks for exact string matches of substitutions,
so you may need to be careful with spaces in ``sub_words``. E.g.
it is recommended to do ``sub_words = {"nmo ": "nemo "}``
instead of ``sub_words = {"nmo" : "nemo"}``.
Returns:
The same data as in the input manifest with ``<text_key>`` field changed.
"""
def __init__(
self, sub_words: Dict, text_key: str = "text", pred_text_key: str = "pred_text", **kwargs,
):
super().__init__(**kwargs)
self.sub_words = sub_words
self.text_key = text_key
self.pred_text_key = pred_text_key
def process_dataset_entry(self, data_entry) -> List:
sub_word_counter = collections.defaultdict(int)
data_entry[self.text_key] = add_start_end_spaces(data_entry[self.text_key])
data_entry[self.pred_text_key] = add_start_end_spaces(data_entry[self.pred_text_key])
for original_word, new_word in self.sub_words.items():
if not original_word in data_entry[self.text_key]:
break
orig_words, pred_words = data_entry[self.text_key], data_entry[self.pred_text_key]
diff = get_diff_with_subs_grouped(orig_words, pred_words)
if len(diff) > 0: # ie if there are differences between text and pred_text
new_sent = ""
for diff_entry in diff:
if diff_entry[0] == 0: # no change
new_sent += diff_entry[1]
elif diff_entry[0] == -1: # deletion in original string
new_sent += diff_entry[1]
elif diff_entry[0] == 1: # insertion in original string
# don't make changes
pass
elif isinstance(diff_entry, tuple): # substitution
if diff_entry[0][1] == original_word and diff_entry[1][1] == new_word:
# ie. substitution is one we want to use to change the original text
new_sent += new_word
sub_word_counter[original_word] += 1
else:
# ie. substitution is one we want to ignore
new_sent += diff_entry[0][1]
else:
raise ValueError(f"unexpected item in diff_entry: {diff_entry}")
new_sent = add_start_end_spaces(new_sent)
data_entry[self.text_key] = new_sent
data_entry[self.text_key] = remove_extra_spaces(data_entry[self.text_key])
data_entry[self.pred_text_key] = remove_extra_spaces(data_entry[self.pred_text_key])
return [DataEntry(data=data_entry, metrics=sub_word_counter)]
def finalize(self, metrics):
total_counter = collections.defaultdict(int)
for counter in metrics:
for word, count in counter.items():
total_counter[word] += count
logger.info("Num of words that were substituted")
for word, count in total_counter.items():
logger.info(f"{word} {count}")
super().finalize(metrics)
# TODO: replace with generic regex
class SubMakeLowercase(BaseParallelProcessor):
"""Processor to convert text to lowercase.
Args:
text_key (str): a string indicating which key of the data entries
should be used to find the utterance transcript. Defaults to "text".
Returns:
The same data as in the input manifest with ``<text_key>`` field changed.
"""
def __init__(
self, text_key: str = "text", **kwargs,
):
super().__init__(**kwargs)
self.text_key = text_key
def process_dataset_entry(self, data_entry) -> List:
data_entry[self.text_key] = data_entry[self.text_key].lower()
return [DataEntry(data=data_entry)]
def finalize(self, metrics):
logger.info("Made all letters lowercase")
super().finalize(metrics)
class SubRegex(BaseParallelProcessor):
"""Converts a regex match to a string, as defined by key-value pairs in ``regex_to_sub``.
Before applying regex changes, we will add a space
character to the beginning and end of the ``text_key`` field
of each data entry. After the regex changes,
the extra spaces are removed. This includes the spaces in the beginning
and end of the text, as well as any double spaces ``" "``.
Args:
regex_params_list (list[dict]): list of dicts.
Each dict must contain a ``pattern`` and a ``repl`` key,
and optionally a ``count`` key (by default, ``count`` will be 0).
This processor will go through the list in order, and apply a ``re.sub`` operation on
the input text in ``data_entry[self.text_key]``, feeding in the specified ``pattern``, ``repl``
and ``count`` parameters to ``re.sub``.
text_key (str): a string indicating which key of the data entries
should be used to find the utterance transcript. Defaults to "text".
Returns:
The same data as in the input manifest with ``<text_key>`` field changed.
"""
def __init__(
self, regex_params_list: List[Dict], text_key: str = "text", **kwargs,
):
super().__init__(**kwargs)
self.regex_params_list = regex_params_list
self.text_key = text_key
# verify all dicts in regex_params_list have "pattern" and "repl" keys
for regex_params_dict in self.regex_params_list:
if not "pattern" in regex_params_dict.keys():
raise ValueError(
f"Need to have key 'pattern' in all entries of `regex_params_list`: {self.regex_params_list}"
)
if not "repl" in regex_params_dict.keys():
raise ValueError(
f"Need to have key 'repl' in all entries of `regex_params_list`: {self.regex_params_list}"
)
def process_dataset_entry(self, data_entry) -> List:
"""Replaces each found regex match with a given string."""
replace_word_counter = collections.defaultdict(int)
text_in = data_entry[self.text_key]
text_in = add_start_end_spaces(text_in)
for regex_params in self.regex_params_list:
text_out = re.sub(
pattern=regex_params["pattern"],
repl=regex_params["repl"],
string=text_in,
# note: this count param is the maximum number of pattern occurrences to be replaced.
count=regex_params.get("count", 0),
)
if text_in != text_out:
replace_word_counter[regex_params["pattern"]] += 1
text_in = text_out
text_out = remove_extra_spaces(text_out)
data_entry[self.text_key] = text_out
return [DataEntry(data=data_entry, metrics=replace_word_counter)]
def finalize(self, metrics):
"""Reports how many substitutions were made for each pattern."""
total_counter = collections.defaultdict(int)
for counter in metrics:
for word, count in counter.items():
total_counter[word] += count
logger.info("Number of utterances which applied substitutions for the following patterns:")
total_counter_sorted = dict(sorted(total_counter.items(), key=lambda x: x[1], reverse=True))
for word, count in total_counter_sorted.items():
logger.info(f"{word} {count}")
super().finalize(metrics)
| NeMo-speech-data-processor-main | sdp/processors/modify_manifest/data_to_data.py |
import torch
from azure.core.exceptions import ResourceNotFoundError
from azure.ai.ml.entities import AmlCompute
from monai.transforms import MapTransform
class ConvertToMultiChannelBasedOnBratsClassesd(MapTransform):
"""
Convert labels to multi channels based on brats 2021 classes:
label 1 necrotic tumor core (NCR)
label 2 peritumoral edematous/invaded tissue
label 3 is not used in the new dataset version
label 4 GD-enhancing tumor
The possible classes are:
TC (Tumor core): merge labels 1 and 4
WT (Whole tumor): merge labels 1,2 and 4
ET (Enhancing tumor): label 4
"""
def __call__(self, data):
d = dict(data)
for key in self.keys:
result = []
# merge label 1 and label 4 to construct TC
result.append(torch.logical_or(d[key] == 1, d[key] == 4))
# merge labels 1, 2 and 4 to construct WT
result.append(
torch.logical_or(
torch.logical_or(d[key] == 1, d[key] == 2), d[key] == 4
)
)
# label 4 is ET
result.append(d[key] == 4)
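# Stack the three binary masks (TC, WT, ET) into a single 3-channel float tensor.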
d[key] = torch.stack(result, axis=0).float()
return d
def create_compute_cluster(ml_client, cname, csize):
try:
ml_client.compute.get(name=cname)
print("Found existing compute target {name}.")
except ResourceNotFoundError:
print("Creating a new compute target...")
cluster_low_pri = AmlCompute(
name=cname,
size=csize,
min_instances=0,
max_instances=3,
idle_time_before_scale_down=120,
tier="low_priority",
)
ml_client.begin_create_or_update(cluster_low_pri).result() | NVIDIA_AI_Enterprise_AzureML-main | samples/python/3D-image-segmentation/monai/notebooks/helper.py |
import os
import logging
import json
import numpy
import torch
from monai.networks.nets import SegResNet
from monai.data import Dataset
from monai.transforms import Activations, AsDiscrete, Compose, LoadImaged, ConcatItemsd, NormalizeIntensityd, Orientationd, Spacingd, Spacing, EnsureTyped, EnsureChannelFirstd
from monai.inferers import sliding_window_inference
import base64
device = 'cuda' if torch.cuda.is_available() else 'cpu'
VAL_AMP = True
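# VAL_AMP toggles mixed-precision (torch.cuda.amp.autocast) for the sliding-window inference below.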
def init():
"""
This function is called when the container is initialized/started, typically after create/update of the deployment.
You can write the logic here to perform init operations like caching the model in memory
"""
global device
device = 'cuda' if torch.cuda.is_available() else 'cpu'
global model
model = SegResNet(
blocks_down=[1, 2, 2, 4],
blocks_up=[1, 1, 1],
init_filters=16,
in_channels=4,
out_channels=3,
dropout_prob=0.2,
).to(device)
#print(os.getenv("AZUREML_MODEL_DIR"))
# AZUREML_MODEL_DIR is an environment variable created during deployment.
# It is the path to the model folder (./azureml-models/$MODEL_NAME/$VERSION)
# Please provide your model's folder name if there is one
# model_path = os.path.join(
# # os.getenv("AZUREML_MODEL_DIR"), "model/extra_files/best_metric_model.pth"
# )
model_path = os.path.join(
os.getenv("AZUREML_MODEL_DIR"), "model/extra_files/", "best_metric_model.pth"
)
# model_path = os.path.join(
# os.getenv("AZUREML_MODEL_DIR"), "", "best_metric_model.pth"
# )
# model_path = os.getenv("AZUREML_MODEL_DIR");
#logging.info('Model path: {model_path}')
state_dict = torch.load(model_path, map_location=torch.device(device))
model.load_state_dict(state_dict)
logging.info("Init complete")
def inference(input):
def _compute(input):
return sliding_window_inference(
inputs=input,
roi_size=(240, 240, 160),
sw_batch_size=1,
predictor=model,
overlap=0.5,
)
if VAL_AMP:
with torch.cuda.amp.autocast():
return _compute(input)
else:
return _compute(input)
def run(raw_data):
"""
This function is called for every invocation of the endpoint to perform the actual scoring/prediction.
In this example we decode the base64-encoded MRI volumes from the JSON input, run sliding-window
inference with the SegResNet model, and return the predicted segmentation mask as a nested list.
"""
logging.info("model 1: request received")
data = json.loads(raw_data)["data"][0]
flair_image = base64.b64decode(data['flair'])
t1_image = base64.b64decode(data['t1'])
t1ce_image = base64.b64decode(data['t1ce'])
t2_image = base64.b64decode(data['t2'])
# Write binary data to file with appropriate file extension
with open('flair.nii.gz', 'wb') as f:
f.write(flair_image)
with open('t1.nii.gz', 'wb') as f:
f.write(t1_image)
with open('t1ce.nii.gz', 'wb') as f:
f.write(t1ce_image)
with open('t2.nii.gz', 'wb') as f:
f.write(t2_image)
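# The four MRI modalities arrive base64-encoded in the request JSON; they are written to local
# NIfTI files so that MONAI's LoadImaged transform below can read them.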
val_transform = Compose(
[
LoadImaged(keys=["flair", "t1", "t1ce", "t2"]),
EnsureChannelFirstd(keys=["flair", "t1", "t1ce", "t2"]),
ConcatItemsd(keys=["flair", "t1", "t1ce", "t2"], name="image", dim=0),
EnsureTyped(keys=["image"]),
Orientationd(keys=["image"], axcodes="RAS"),
Spacingd(
keys=["image"],
pixdim=(1.0, 1.0, 1.0),
mode=("bilinear"),
),
NormalizeIntensityd(keys="image", nonzero=True, channel_wise=True),
])
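# The transform pipeline loads the four modalities and concatenates them into a single 4-channel
# "image" tensor, matching the in_channels=4 expected by the SegResNet defined in init().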
post_trans = Compose(
[Activations(sigmoid=True), AsDiscrete(threshold=0.5)]
)
data_list = [{'flair': 'flair.nii.gz', 't1':'t1.nii.gz', 't1ce': 't1ce.nii.gz', 't2': 't2.nii.gz'}]
val_ds = Dataset(data=data_list, transform=val_transform)
model.eval()
with torch.no_grad():
val_input = val_ds[0]['image'].unsqueeze(0).to(device)
val_output = inference(val_input)
val_output = post_trans(val_output[0])
result = val_output[:, :, :, :].detach().cpu().numpy()
logging.info("Request processed")
return result.tolist() | NVIDIA_AI_Enterprise_AzureML-main | samples/python/3D-image-segmentation/monai/notebooks/score.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import argparse
from copy import deepcopy
from functools import partial
import logging
import os
import sys
import json
from wsgiref import headers
from attrdict import AttrDict
import numpy as np
from PIL import Image
from tqdm import tqdm
import tritonclient.grpc as grpcclient
import tritonclient.grpc.model_config_pb2 as mc
import tritonclient.http as httpclient
from tritonclient.utils import InferenceServerException
from tritonclient.utils import triton_to_np_dtype
from tao_triton.python.types import Frame, UserData
from tao_triton.python.postprocessing.detectnet_processor import DetectNetPostprocessor
from tao_triton.python.postprocessing.classification_postprocessor import ClassificationPostprocessor
from tao_triton.python.postprocessing.lprnet_postprocessor import LPRPostprocessor
from tao_triton.python.postprocessing.yolov3_postprocessor import YOLOv3Postprocessor
from tao_triton.python.postprocessing.peoplesegnet_postprocessor import PeoplesegnetPostprocessor
from tao_triton.python.postprocessing.retinanet_postprocessor import RetinanetPostprocessor
from tao_triton.python.postprocessing.multitask_classification_postprocessor import MultitaskClassificationPostprocessor
from tao_triton.python.postprocessing.pose_classification_postprocessor import PoseClassificationPostprocessor
from tao_triton.python.utils.kitti import write_kitti_annotation
from tao_triton.python.utils.pose_cls_dataset_convert import pose_cls_dataset_convert
from tao_triton.python.model.detectnet_model import DetectnetModel
from tao_triton.python.model.classification_model import ClassificationModel
from tao_triton.python.model.lprnet_model import LPRModel
from tao_triton.python.model.yolov3_model import YOLOv3Model
from tao_triton.python.model.peoplesegnet_model import PeoplesegnetModel
from tao_triton.python.model.retinanet_model import RetinanetModel
from tao_triton.python.model.multitask_classification_model import MultitaskClassificationModel
from tao_triton.python.model.pose_classification_model import PoseClassificationModel
import gevent.ssl
logger = logging.getLogger(__name__)
TRITON_MODEL_DICT = {
"classification": ClassificationModel,
"detectnet_v2": DetectnetModel,
"lprnet": LPRModel,
"yolov3": YOLOv3Model,
"peoplesegnet": PeoplesegnetModel,
"retinanet": RetinanetModel,
"multitask_classification":MultitaskClassificationModel,
"pose_classification":PoseClassificationModel
}
POSTPROCESSOR_DICT = {
"classification": ClassificationPostprocessor,
"detectnet_v2": DetectNetPostprocessor,
"lprnet": LPRPostprocessor,
"yolov3": YOLOv3Postprocessor,
"peoplesegnet": PeoplesegnetPostprocessor,
"retinanet": RetinanetPostprocessor,
"multitask_classification": MultitaskClassificationPostprocessor,
"pose_classification": PoseClassificationPostprocessor
}
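# These two dictionaries map the --mode argument to the Triton model wrapper and to the matching
# post-processor implementation that is applied to the inference responses.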
def completion_callback(user_data, result, error):
"""Callback function used for async_stream_infer()."""
user_data._completed_requests.put((result, error))
def convert_http_metadata_config(_metadata, _config):
"""Convert to the http metadata to class Dict."""
_model_metadata = AttrDict(_metadata)
_model_config = AttrDict(_config)
return _model_metadata, _model_config
def requestGenerator(batched_image_data, input_name, output_name, dtype, protocol,
num_classes=0):
"""Generator for triton inference requests.
Args:
batched_image_data (np.ndarray): Numpy array of a batch of images.
input_name (str): Name of the input array
output_name (list(str)): Name of the model outputs
dtype: Tensor data type for Triton
protocol (str): The protocol used to communicated between the Triton
server and TAO Toolkit client.
num_classes (int): The number of classes in the network.
Yields:
inputs: list of Triton InferInput objects populated with the batched data.
outputs: list of Triton InferRequestedOutput objects for the requested output tensors.
"""
if protocol == "grpc":
client = grpcclient
else:
client = httpclient
# Set the input data
inputs = [client.InferInput(input_name, batched_image_data.shape, dtype)]
inputs[0].set_data_from_numpy(batched_image_data)
outputs = [
client.InferRequestedOutput(
out_name, class_count=num_classes
) for out_name in output_name
]
yield inputs, outputs
def parse_command_line(args=None):
"""Parsing command line arguments."""
parser = argparse.ArgumentParser()
parser.add_argument('-v',
'--verbose',
action="store_true",
required=False,
default=False,
help='Enable verbose output')
parser.add_argument('-a',
'--async',
dest="async_set",
action="store_true",
required=False,
default=False,
help='Use asynchronous inference API')
parser.add_argument('--streaming',
action="store_true",
required=False,
default=False,
help='Use streaming inference API. ' +
'The flag is only available with gRPC protocol.')
parser.add_argument('-m',
'--model-name',
type=str,
required=True,
help='Name of model')
parser.add_argument('-x',
'--model-version',
type=str,
required=False,
default="",
help='Version of model. Default is to use latest version.')
parser.add_argument('-b',
'--batch-size',
type=int,
required=False,
default=1,
help='Batch size. Default is 1.')
parser.add_argument('--mode',
type=str,
choices=['Classification', "DetectNet_v2", "LPRNet", "YOLOv3", "Peoplesegnet", "Retinanet", "Multitask_classification", "Pose_classification"],
required=False,
default='NONE',
help='Network type used to select the matching pre/post-processing. Default is NONE.')
parser.add_argument('-u',
'--url',
type=str,
required=False,
default='localhost:8000',
help='Inference server URL. Default is localhost:8000.')
parser.add_argument('-t',
'--token',
type=str,
required=False,
help='token if using AzureML endpoint. ')
parser.add_argument('-i',
'--protocol',
type=str,
required=False,
default='HTTP',
help='Protocol (HTTP/gRPC) used to communicate with ' +
'the inference service. Default is HTTP.')
parser.add_argument('image_filename',
type=str,
nargs='?',
default=None,
help='Input image / Input folder / Input JSON / Input pose sequences.')
parser.add_argument('--class_list',
type=str,
default="person,bag,face",
help="Comma separated class names",
required=False)
parser.add_argument('--output_path',
type=str,
default=os.path.join(os.getcwd(), "outputs"),
help="Path to where the inferenced outputs are stored.",
required=True)
parser.add_argument("--postprocessing_config",
type=str,
default="",
help="Path to the DetectNet_v2 clustering config.")
parser.add_argument("--dataset_convert_config",
type=str,
default="",
help="Path to the Pose Classification dataset conversion config.")
return parser.parse_args()
def main():
"""Running the inferencer client."""
FLAGS = parse_command_line(sys.argv[1:])
if FLAGS.mode.lower() == "detectnet_v2":
assert os.path.isfile(FLAGS.postprocessing_config), (
"Clustering config must be defined for DetectNet_v2."
)
log_level = "INFO"
if FLAGS.verbose:
log_level = "DEBUG"
# Configure logging to get Maglev log messages.
logging.basicConfig(format='%(asctime)s [%(levelname)s] '
'%(name)s: %(message)s',
level=log_level)
if FLAGS.streaming and FLAGS.protocol.lower() != "grpc":
raise Exception("Streaming is only allowed with gRPC protocol")
try:
if FLAGS.protocol.lower() == "grpc":
# Create gRPC client for communicating with the server
triton_client = grpcclient.InferenceServerClient(
url=FLAGS.url, verbose=FLAGS.verbose)
else:
# Specify large enough concurrency to handle the
# the number of requests.
concurrency = 20 if FLAGS.async_set else 1
#AML needs header info
api_key = FLAGS.token
print ("api key:" + api_key)
headers = {'Content-Type':'application/json', 'Authorization':('Bearer '+ api_key), 'azureml-model-deployment': 'triton-tao-deployment-aml-1' }
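# The Bearer token authenticates against the AzureML endpoint, and the 'azureml-model-deployment'
# header pins requests to a specific deployment; the deployment name above is sample-specific and
# should be changed to match your own deployment.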
triton_client = httpclient.InferenceServerClient(
url=FLAGS.url,verbose=FLAGS.verbose,ssl=True,ssl_context_factory=gevent.ssl._create_default_https_context, concurrency=concurrency)
except Exception as e:
print("client creation failed: " + str(e))
sys.exit(1)
# Make sure the model matches our requirements, and get some
# properties of the model that we need for preprocessing
try:
model_metadata = triton_client.get_model_metadata(
model_name=FLAGS.model_name, model_version=FLAGS.model_version, headers=headers)
print(model_metadata)
except InferenceServerException as e:
print("failed to retrieve the metadata: " + str(e))
sys.exit(1)
try:
model_config = triton_client.get_model_config(
model_name=FLAGS.model_name, model_version=FLAGS.model_version, headers=headers)
except InferenceServerException as e:
print("failed to retrieve the config: " + str(e))
sys.exit(1)
if FLAGS.protocol.lower() == "grpc":
model_config = model_config.config
else:
model_metadata, model_config = convert_http_metadata_config(
model_metadata, model_config)
triton_model = TRITON_MODEL_DICT[FLAGS.mode.lower()].from_metadata(model_metadata, model_config)
max_batch_size = triton_model.max_batch_size
pose_sequences = None
frames = []
if FLAGS.mode.lower() == "pose_classification":
# The input is a JSON file of pose metadata.
if os.path.splitext(FLAGS.image_filename)[-1] == ".json":
if not os.path.isfile(FLAGS.dataset_convert_config):
raise FileNotFoundError("Dataset conversion config must be defined for Pose Classification.")
pose_sequences, action_data = pose_cls_dataset_convert(FLAGS.image_filename,
FLAGS.dataset_convert_config)
# The input is a NumPy array of pose sequences.
elif os.path.splitext(FLAGS.image_filename)[-1] == ".npy":
pose_sequences = np.load(file=FLAGS.image_filename)
else:
raise NotImplementedError("The input for Pose Classification has to be a JSON file or a NumPy array.")
else:
target_shape = (triton_model.c, triton_model.h, triton_model.w)
npdtype = triton_to_np_dtype(triton_model.triton_dtype)
# The input is a folder of images.
if os.path.isdir(FLAGS.image_filename):
frames = [
Frame(os.path.join(FLAGS.image_filename, f),
triton_model.data_format,
npdtype,
target_shape)
for f in os.listdir(FLAGS.image_filename)
if os.path.isfile(os.path.join(FLAGS.image_filename, f)) and
os.path.splitext(f)[-1] in [".jpg", ".jpeg", ".png"]
]
# The input is an image.
else:
frames = [
Frame(os.path.join(FLAGS.image_filename),
triton_model.data_format,
npdtype,
target_shape)
]
# Send requests of FLAGS.batch_size images. If the number of
# images isn't an exact multiple of FLAGS.batch_size then just
# start over with the first images until the batch is filled.
requests = []
responses = []
result_filenames = []
request_ids = []
image_idx = 0
last_request = False
user_data = UserData()
class_list = FLAGS.class_list.split(",")
args_postprocessor = []
if FLAGS.mode.lower() == "pose_classification":
args_postprocessor = [
FLAGS.batch_size, pose_sequences, FLAGS.output_path
]
else:
args_postprocessor = [
FLAGS.batch_size, frames, FLAGS.output_path, triton_model.data_format
]
if FLAGS.mode.lower() == "detectnet_v2":
args_postprocessor.extend([class_list, FLAGS.postprocessing_config, target_shape])
postprocessor = POSTPROCESSOR_DICT[FLAGS.mode.lower()](*args_postprocessor)
# Holds the handles to the ongoing HTTP async requests.
async_requests = []
sent_count = 0
if FLAGS.streaming:
triton_client.start_stream(partial(completion_callback, user_data))
logger.info("Sending inference request for batches of data")
total = len(frames)
if FLAGS.mode.lower() == "pose_classification":
pbar_total = pose_sequences.shape[0]
else:
pbar_total = len(frames)
with tqdm(total=pbar_total) as pbar:
while not last_request:
batched_image_data = None
if FLAGS.mode.lower() == "pose_classification":
repeated_data = None
for idx in range(FLAGS.batch_size):
pose_sequence = pose_sequences[[image_idx], :, :, :, :]
if repeated_data is None:
repeated_data = pose_sequence
else:
repeated_data = np.concatenate((repeated_data, pose_sequence), axis=0)
image_idx = (image_idx + 1) % pose_sequences.shape[0]
if image_idx == 0:
last_request = True
if max_batch_size > 0:
batched_image_data = repeated_data
else:
batched_image_data = repeated_data[[0], :, :, :, :]
else:
repeated_image_data = []
for idx in range(FLAGS.batch_size):
frame = frames[image_idx]
if FLAGS.mode.lower() == "yolov3" or FLAGS.mode.lower() == "retinanet":
img = frame._load_img()
repeated_image_data.append(img)
elif FLAGS.mode.lower() == "multitask_classification":
img = frame._load_img_multitask_classification()
repeated_image_data.append(img)
elif FLAGS.mode.lower() == "peoplesegnet":
img = frame._load_img_maskrcnn()
repeated_image_data.append(img)
else:
img = frame.load_image()
repeated_image_data.append(
triton_model.preprocess(
frame.as_numpy(img)
)
)
image_idx = (image_idx + 1) % len(frames)
if image_idx == 0:
last_request = True
if max_batch_size > 0:
batched_image_data = np.stack(repeated_image_data, axis=0)
else:
batched_image_data = repeated_image_data[0]
# Send request
try:
req_gen_args = [batched_image_data, triton_model.input_names,
triton_model.output_names, triton_model.triton_dtype,
FLAGS.protocol.lower()]
req_gen_kwargs = {}
if FLAGS.mode.lower() in ["classification", "pose_classification"]:
req_gen_kwargs["num_classes"] = model_config.output[0].dims[0]
req_generator = requestGenerator(*req_gen_args, **req_gen_kwargs)
for inputs, outputs in req_generator:
sent_count += 1
if FLAGS.streaming:
triton_client.async_stream_infer(
FLAGS.model_name,
inputs,
request_id=str(sent_count),
model_version=FLAGS.model_version,
headers=headers,
outputs=outputs)
elif FLAGS.async_set:
if FLAGS.protocol.lower() == "grpc":
triton_client.async_infer(
FLAGS.model_name,
inputs,
partial(completion_callback, user_data),
request_id=str(sent_count),
model_version=FLAGS.model_version,
headers=headers,
outputs=outputs)
else:
async_requests.append(
triton_client.async_infer(
FLAGS.model_name,
inputs,
request_id=str(sent_count),
model_version=FLAGS.model_version,
headers=headers,
outputs=outputs))
else:
responses.append(
triton_client.infer(FLAGS.model_name,
inputs,
headers=headers,
request_id=str(sent_count),
model_version=FLAGS.model_version,
outputs=outputs))
except InferenceServerException as e:
print("inference failed: " + str(e))
if FLAGS.streaming:
triton_client.stop_stream()
sys.exit(1)
pbar.update(FLAGS.batch_size)
if FLAGS.streaming:
triton_client.stop_stream()
if FLAGS.protocol.lower() == "grpc":
if FLAGS.streaming or FLAGS.async_set:
processed_count = 0
while processed_count < sent_count:
(results, error) = user_data._completed_requests.get()
processed_count += 1
if error is not None:
print("inference failed: " + str(error))
sys.exit(1)
responses.append(results)
else:
if FLAGS.async_set:
# Collect results from the ongoing async requests
# for HTTP Async requests.
for async_request in async_requests:
responses.append(async_request.get_result())
logger.info("Gathering responses from the server and post processing the inferenced outputs.")
processed_request = 0
with tqdm(total=len(frames)) as pbar:
while processed_request < sent_count:
response = responses[processed_request]
if FLAGS.protocol.lower() == "grpc":
this_id = response.get_response().id
else:
this_id = response.get_response()["id"]
if os.path.splitext(FLAGS.image_filename)[-1] == ".json":
postprocessor.apply(
response, this_id, render=True, action_data=action_data
)
else:
postprocessor.apply(
response, this_id, render=True
)
processed_request += 1
pbar.update(FLAGS.batch_size)
if os.path.splitext(FLAGS.image_filename)[-1] == ".json":
output_file = os.path.join(FLAGS.output_path, "results.json")
for b in range(len(action_data)):
for f in range(len(action_data[b]["batches"])):
for p in range(len(action_data[b]["batches"][f]["objects"])):
action_data[b]["batches"][f]["objects"][p].pop("segment_id", None)
with open(output_file, 'w') as f:
json.dump(action_data, f, sort_keys=True, indent=2, ensure_ascii=False)
logger.info("PASS")
if __name__ == '__main__':
main()
| NVIDIA_AI_Enterprise_AzureML-main | samples/cli/tao/object_detection/detectnet_v2/scripts/auxiliary_files/tao_client_aml.py |
import argparse
import os
from pathlib import Path
import cudf
import pandas as pd
import xgboost
import matplotlib.pyplot as plt
from cuml.model_selection import train_test_split
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
from sklearn.utils._testing import ignore_warnings
import optuna
import mlflow
mlflow.sklearn.autolog()
parser = argparse.ArgumentParser("train")
parser.add_argument("--training_data", type=str, help="Path to training data")
parser.add_argument("--model_output", type=str, help="Path to output model")
parser.add_argument("--test_split_ratio", type=float, help="ratio of train test split")
args = parser.parse_args()
print("Hello training world...")
df_list = []
with open(os.path.join(args.training_data), "r") as handle:
input_df = cudf.read_csv((Path(args.training_data)))
df_list.append(input_df)
print(df_list)
train_data = df_list[0]
#Split the data into input(x) and output(y)
target_column = 'tip_amount'
y = train_data[target_column]
X = train_data.drop(target_column, axis=1)
X_train, X_test, y_train, y_test = train_test_split(X,y,test_size=args.test_split_ratio, random_state=42)
train_DataAndLabelsGPU = xgboost.DMatrix(X_train,label=y_train)
test_DataAndLabelsGPU = xgboost.DMatrix(X_test, label=y_test)
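# DMatrix wraps the cuDF train/test splits in XGBoost's native data structure so that training
# with tree_method='gpu_hist' below runs on the GPU.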
#Train once
gpuMaxDepth = 1
gpuNTrees = 10
paramsGPU = {
'max_depth': gpuMaxDepth,
'objective': 'reg:squarederror', # Change the objective to regression
'tree_method': 'gpu_hist',
'random_state': 0,
};
xgBoostModelGPU = xgboost.train(dtrain=train_DataAndLabelsGPU, params=paramsGPU, num_boost_round=gpuNTrees);
nGPUs = 1
gpuMaxDepth = 3
gpuNTrees = 10
for gpuMaxDepth in [1, 3, 4, 5, 10]:
for gpuNTrees in [1, 10, 25, 50, 100]:
paramsGPU = {
'max_depth': gpuMaxDepth,
'objective': 'reg:squarederror', # Change the objective to regression
'tree_method': 'gpu_hist',
'random_state': 0,
};
xgBoostModelGPU = xgboost.train(dtrain=train_DataAndLabelsGPU, params=paramsGPU, num_boost_round=gpuNTrees);
# evaluate
yPredTrainGPU = xgBoostModelGPU.predict(train_DataAndLabelsGPU)
yPredTestGPU = xgBoostModelGPU.predict(test_DataAndLabelsGPU)
# Assuming you have yPredTestGPU and y_test (Ground truth labels)
mse = mean_squared_error(y_test.to_numpy(), yPredTestGPU)
r2 = r2_score(y_test.to_numpy(), yPredTestGPU)
# Assuming you have yPredTestGPU and y_test (Ground truth labels)
mse_train = mean_squared_error(y_train.to_numpy(), yPredTrainGPU)
r2_train = r2_score(y_train.to_numpy(), yPredTrainGPU)
print(f"{gpuMaxDepth} -- {gpuNTrees} -- Mean Squared Error: {mse} -- {mse_train} -- R-squared: {r2} -- {r2_train}")
# The below section is for optuna HPO
def objective(trial):
gpuMaxDepth = trial.suggest_int('gpuMaxDepth',1,20)
gpuNTrees = trial.suggest_int('gpuNTrees',1,300)
paramsGPU = {
'max_depth': gpuMaxDepth,
'objective': 'reg:squarederror', # Change the objective to regression
'tree_method': 'gpu_hist',
'random_state': 0,
};
xgBoostModelGPU = xgboost.train(dtrain=train_DataAndLabelsGPU, params=paramsGPU, num_boost_round=gpuNTrees);
#evaluate
yPredTrainGPU = xgBoostModelGPU.predict(train_DataAndLabelsGPU)
yPredTestGPU = xgBoostModelGPU.predict(test_DataAndLabelsGPU)
# Assuming you have yPredTestGPU and y_test (Ground truth labels)
mse = mean_squared_error(y_test.to_numpy(), yPredTestGPU)
return mse
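# Each Optuna trial samples a (max depth, number of trees) pair, trains an XGBoost model on the
# GPU and returns the test-set MSE; the study below minimizes that value over 10 trials.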
study = optuna.create_study(direction='minimize')
study.optimize(objective, n_trials=10)
print("Number of finished trials:", len(study.trials))
print("Best trial:", study.best_trial.params)
best_params = study.best_trial.params
# Map the Optuna search-space names back to real XGBoost parameters so the best max_depth
# (and GPU training settings) are actually applied in the final fit.
best_paramsGPU = {'max_depth': best_params['gpuMaxDepth'], 'objective': 'reg:squarederror', 'tree_method': 'gpu_hist', 'random_state': 0}
xgBoostModelGPU = xgboost.train(dtrain=train_DataAndLabelsGPU, params=best_paramsGPU, num_boost_round=best_params['gpuNTrees'])
#evaluate
yPredTrainGPU = xgBoostModelGPU.predict(train_DataAndLabelsGPU)
yPredTestGPU = xgBoostModelGPU.predict(test_DataAndLabelsGPU)
#Assuming you have yPredTestGPU and y_test (Ground truth labels)
mse = mean_squared_error(y_test.to_numpy(), yPredTestGPU)
r2 = r2_score(y_test.to_numpy(), yPredTestGPU)
print(f"Mean Squared Error: {mse} -- R-squared: {r2}")
mlflow.sklearn.save_model(xgBoostModelGPU, args.model_output)
# plot the figure
plt.figure(figsize=(20,10))
n_samples_to_diff = 200
actual = y_train[0:n_samples_to_diff].to_numpy()
predicted = yPredTrainGPU[0:n_samples_to_diff]
plt.subplot(3,1,(1,2))
plt.plot(actual, color=(0,.8,0), linewidth=2)
plt.plot(predicted, linestyle='-', color=(1,.6,1), linewidth=2)
plt.legend(['actual', 'predicted'])
plt.ylabel('tip amount $')
plt.grid('on',alpha=.25)
plt.subplot(3,1,3)
plt.plot(actual-predicted,color=(0,0,0))
plt.legend(['diff in actual v/s predicted'])
plt.ylabel('predicted error $')
plt.xlabel('trip number')
plt.grid('on',alpha=0.25)
plt.savefig("output-1.jpg")
samples = plt.gcf()
try:
mlflow.log_figure(samples, 'sample-images.png')
except Exception as e:
print(f'Exception during mlflow image logging: {e}') | NVIDIA_AI_Enterprise_AzureML-main | src/components/RAPIDS/HPO_with_XGBoost/train_component/src/train.py |
import argparse
import os
from pathlib import Path
import cudf
import matplotlib.pylab as plt
import pandas as pd
import numpy as np
import json
parser = argparse.ArgumentParser("prep")
parser.add_argument("--raw_data", type=str, help="Path/URL to the Parquet file")
parser.add_argument("--prep_data",type=str, help="Path to the cleaned and filtered data")
args = parser.parse_args()
columns_to_read = ['passenger_count', 'trip_distance','RatecodeID','PULocationID','DOLocationID','payment_type','fare_amount','extra','mta_tax','tip_amount','tolls_amount','improvement_surcharge','congestion_surcharge']
# Read the Parquet files into the CuDF dataframe
cudf_dataframe = cudf.read_parquet(args.raw_data,columns=columns_to_read)
print(cudf_dataframe.dtypes)
#Remove rows with any missing values
#cudf_dataframe_no_total = cudf_dataframe.drop(['tpep_pickup_datetime', 'tpep_dropoff_datetime', 'store_and_fwd_flag', 'total_amount'], axis=1)
cudf_dataframe_no_total_no_missing_values = cudf_dataframe.dropna()
print(cudf_dataframe_no_total_no_missing_values.shape)
print("Dataframe columns")
print(cudf_dataframe.columns)
print(cudf_dataframe_no_total_no_missing_values)
# Save the data
clean_data = cudf_dataframe_no_total_no_missing_values.to_csv(Path(args.prep_data))
| NVIDIA_AI_Enterprise_AzureML-main | src/components/RAPIDS/HPO_with_XGBoost/prep_data_component/src/prep.py |
import os
import pandas as pd
import cudf
parquet_files = ['yellow_tripdata_2023-01.parquet',
'yellow_tripdata_2023-02.parquet',
]
dfs = [cudf.read_parquet(file) for file in parquet_files]
dfs = cudf.concat(dfs)
dfs.to_parquet('merged_data.parquet') | NVIDIA_AI_Enterprise_AzureML-main | src/components/RAPIDS/HPO_with_XGBoost/prep_data_component/src/merge_data.py |
import argparse
from pathlib import Path
from typing_extensions import Concatenate
from uuid import uuid4
from datetime import datetime
import os
import pandas as pd
import cudf
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
import pickle
parser = argparse.ArgumentParser("prep")
parser.add_argument("--raw_data", type=str, help="Path to raw data")
parser.add_argument("--prep_data", type=str, help="Path of prepped data")
args = parser.parse_args()
print("hello training world...")
lines = [f"Raw data path: {args.raw_data}", f"Data output path: {args.prep_data}"]
for line in lines:
print(line)
print("mounted_path files: ")
arr = os.listdir(args.raw_data)
print(arr)
df_list = []
for filename in arr:
print("reading file: %s ..." % filename)
with open(os.path.join(args.raw_data, filename), "r") as handle:
# print (handle.read())
# ('input_df_%s' % filename) = pd.read_csv((Path(args.training_data) / filename))
#input_df = pd.read_csv((Path(args.raw_data) / filename))
input_df = cudf.read_csv((Path(args.raw_data) / filename))
df_list.append(input_df)
# Prep the green and yellow taxi data
green_data = df_list[0]
yellow_data = df_list[1]
# Define useful columns needed for the Azure Machine Learning NYC Taxi tutorial
useful_columns = str(
[
"cost",
"distance",
"dropoff_datetime",
"dropoff_latitude",
"dropoff_longitude",
"passengers",
"pickup_datetime",
"pickup_latitude",
"pickup_longitude",
"store_forward",
"vendor",
]
).replace(",", ";")
print(useful_columns)
# Rename columns as per Azure Machine Learning NYC Taxi tutorial
green_columns = str(
{
"vendorID": "vendor",
"lpepPickupDatetime": "pickup_datetime",
"lpepDropoffDatetime": "dropoff_datetime",
"storeAndFwdFlag": "store_forward",
"pickupLongitude": "pickup_longitude",
"pickupLatitude": "pickup_latitude",
"dropoffLongitude": "dropoff_longitude",
"dropoffLatitude": "dropoff_latitude",
"passengerCount": "passengers",
"fareAmount": "cost",
"tripDistance": "distance",
}
).replace(",", ";")
yellow_columns = str(
{
"vendorID": "vendor",
"tpepPickupDateTime": "pickup_datetime",
"tpepDropoffDateTime": "dropoff_datetime",
"storeAndFwdFlag": "store_forward",
"startLon": "pickup_longitude",
"startLat": "pickup_latitude",
"endLon": "dropoff_longitude",
"endLat": "dropoff_latitude",
"passengerCount": "passengers",
"fareAmount": "cost",
"tripDistance": "distance",
}
).replace(",", ";")
print("green_columns: " + green_columns)
print("yellow_columns: " + yellow_columns)
# These functions ensure that null data is removed from the dataset,
# which will help increase machine learning model accuracy.
def get_dict(dict_str):
pairs = dict_str.strip("{}").split(";")
new_dict = {}
for pair in pairs:
print(pair)
key, value = pair.strip().split(":")
new_dict[key.strip().strip("'")] = value.strip().strip("'")
return new_dict
def cleanseData(data, columns, useful_columns):
useful_columns = [
s.strip().strip("'") for s in useful_columns.strip("[]").split(";")
]
new_columns = get_dict(columns)
new_df = (data.dropna(how="all").rename(columns=new_columns))[useful_columns]
new_df.reset_index(inplace=True, drop=True)
return new_df
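# cleanseData drops rows that are entirely null, renames the raw columns to the tutorial schema,
# and keeps only the columns listed in useful_columns.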
green_data_clean = cleanseData(green_data, green_columns, useful_columns)
yellow_data_clean = cleanseData(yellow_data, yellow_columns, useful_columns)
# Append yellow data to green data
combined_df = green_data_clean.append(yellow_data_clean, ignore_index=True)
combined_df.reset_index(inplace=True, drop=True)
output_green = green_data_clean.to_csv((Path(args.prep_data) / "green_prep_data.csv"))
output_yellow = yellow_data_clean.to_csv(
(Path(args.prep_data) / "yellow_prep_data.csv")
)
merged_data = combined_df.to_csv((Path(args.prep_data) / "merged_data.csv"))
| NVIDIA_AI_Enterprise_AzureML-main | src/components/RAPIDS/NYC_taxi_with_regression/Prep_component/prep_src/prep.py |
import argparse
from pathlib import Path
from uuid import uuid4
from datetime import datetime
import os
import pandas as pd
import cudf
import numpy as np
parser = argparse.ArgumentParser("transform")
parser.add_argument("--clean_data", type=str, help="Path to prepped data")
parser.add_argument("--transformed_data", type=str, help="Path of output data")
args = parser.parse_args()
lines = [
f"Clean data path: {args.clean_data}",
f"Transformed data output path: {args.transformed_data}",
]
for line in lines:
print(line)
print("mounted_path files: ")
arr = os.listdir(args.clean_data)
print(arr)
df_list = []
for filename in arr:
print("reading file: %s ..." % filename)
with open(os.path.join(args.clean_data, filename), "r") as handle:
# print (handle.read())
# ('input_df_%s' % filename) = pd.read_csv((Path(args.training_data) / filename))
input_df = pd.read_csv((Path(args.clean_data) / filename))
#input_df = cudf.read_csv((Path(args.clean_data) / filename))
df_list.append(input_df)
# Transform the data
combined_df = df_list[1]
# These functions filter out coordinates for locations that are outside the city border.
# Filter out coordinates for locations that are outside the city border.
# Chain the column filter commands within the filter() function
# and define the minimum and maximum bounds for each field
combined_df = combined_df.astype(
{
"pickup_longitude": "float64",
"pickup_latitude": "float64",
"dropoff_longitude": "float64",
"dropoff_latitude": "float64",
}
)
latlong_filtered_df = combined_df[
(combined_df.pickup_longitude <= -73.72)
& (combined_df.pickup_longitude >= -74.09)
& (combined_df.pickup_latitude <= 40.88)
& (combined_df.pickup_latitude >= 40.53)
& (combined_df.dropoff_longitude <= -73.72)
& (combined_df.dropoff_longitude >= -74.72)
& (combined_df.dropoff_latitude <= 40.88)
& (combined_df.dropoff_latitude >= 40.53)
]
latlong_filtered_df.reset_index(inplace=True, drop=True)
# These functions replace undefined values and rename to use meaningful names.
replaced_stfor_vals_df = latlong_filtered_df.replace(
{"store_forward": "0"}, {"store_forward": "N"}
).fillna({"store_forward": "N"})
replaced_distance_vals_df = replaced_stfor_vals_df.replace(
{"distance": ".00"}, {"distance": 0}
).fillna({"distance": 0})
normalized_df = replaced_distance_vals_df.astype({"distance": "float64"})
# These functions transform the renamed data to be used finally for training.
# Split the pickup and dropoff date further into the day of the week, day of the month, and month values.
# To get the day of the week value, use the derive_column_by_example() function.
# The function takes an array parameter of example objects that define the input data,
# and the preferred output. The function automatically determines your preferred transformation.
# For the pickup and dropoff time columns, split the time into the hour, minute, and second by using
# the split_column_by_example() function with no example parameter. After you generate the new features,
# use the drop_columns() function to delete the original fields as the newly generated features are preferred.
# Rename the rest of the fields to use meaningful descriptions.
temp = pd.DatetimeIndex(normalized_df["pickup_datetime"], dtype="datetime64[ns]")
#temp = cudf.DatetimeIndex(normalized_df["pickup_datetime"], dtype="datetime64[ns]")
normalized_df["pickup_date"] = temp.date
normalized_df["pickup_weekday"] = temp.dayofweek
normalized_df["pickup_month"] = temp.month
normalized_df["pickup_monthday"] = temp.day
normalized_df["pickup_time"] = temp.time
normalized_df["pickup_hour"] = temp.hour
normalized_df["pickup_minute"] = temp.minute
normalized_df["pickup_second"] = temp.second
temp = pd.DatetimeIndex(normalized_df["dropoff_datetime"], dtype="datetime64[ns]")
#temp = cudf.DatetimeIndex(normalized_df["dropoff_datetime"], dtype="datetime64[ns]")
normalized_df["dropoff_date"] = temp.date
normalized_df["dropoff_weekday"] = temp.dayofweek
normalized_df["dropoff_month"] = temp.month
normalized_df["dropoff_monthday"] = temp.day
normalized_df["dropoff_time"] = temp.time
normalized_df["dropoff_hour"] = temp.hour
normalized_df["dropoff_minute"] = temp.minute
normalized_df["dropoff_second"] = temp.second
del normalized_df["pickup_datetime"]
del normalized_df["dropoff_datetime"]
normalized_df.reset_index(inplace=True, drop=True)
print(normalized_df.head())
print(normalized_df.dtypes)
# Drop the pickup_date, dropoff_date, pickup_time, dropoff_time columns because they're
# no longer needed (granular time features like hour,
# minute and second are more useful for model training).
del normalized_df["pickup_date"]
del normalized_df["dropoff_date"]
del normalized_df["pickup_time"]
del normalized_df["dropoff_time"]
# Change the store_forward column to binary values
normalized_df["store_forward"] = np.where((normalized_df.store_forward == "N"), 0, 1)
# Before you package the dataset, run two final filters on the dataset.
# To eliminate incorrectly captured data points,
# filter the dataset on records where both the cost and distance variable values are greater than zero.
# This step will significantly improve machine learning model accuracy,
# because data points with a zero cost or distance represent major outliers that throw off prediction accuracy.
final_df = normalized_df[(normalized_df.distance > 0) & (normalized_df.cost > 0)]
final_df.reset_index(inplace=True, drop=True)
print(final_df.head())
# Output data
transformed_data = final_df.to_csv(
(Path(args.transformed_data) / "transformed_data.csv")
)
| NVIDIA_AI_Enterprise_AzureML-main | src/components/RAPIDS/NYC_taxi_with_regression/Transform_component/transform_src/transform.py |
import argparse
from pathlib import Path
from uuid import uuid4
from datetime import datetime
import os
import pandas as pd
import cudf
#from sklearn.linear_model import LinearRegression
from cuml.linear_model import LinearRegression
#from sklearn.model_selection import train_test_split
from cuml.model_selection import train_test_split
import mlflow
mlflow.sklearn.autolog()
parser = argparse.ArgumentParser("train")
parser.add_argument("--training_data", type=str, help="Path to training data")
parser.add_argument("--test_data", type=str, help="Path to test data")
parser.add_argument("--model_output", type=str, help="Path of output model")
parser.add_argument("--test_split_ratio", type=float, help="ratio of train test split")
args = parser.parse_args()
print("hello training world...")
lines = [
f"Training data path: {args.training_data}",
f"Test data path: {args.test_data}",
f"Model output path: {args.model_output}",
f"Test split ratio:{args.test_split_ratio}",
]
for line in lines:
print(line)
print("mounted_path files: ")
arr = os.listdir(args.training_data)
print(arr)
df_list = []
for filename in arr:
print("reading file: %s ..." % filename)
with open(os.path.join(args.training_data, filename), "r") as handle:
# print (handle.read())
#input_df = pd.read_csv((Path(args.training_data) / filename))
input_df = cudf.read_csv((Path(args.training_data) / filename))
df_list.append(input_df)
train_data = df_list[0]
print(train_data.columns)
# Split the data into input(X) and output(y)
y = train_data["cost"]
# X = train_data.drop(['cost'], axis=1)
X = train_data[
[
"distance",
"dropoff_latitude",
"dropoff_longitude",
"passengers",
"pickup_latitude",
"pickup_longitude",
"store_forward",
"vendor",
"pickup_weekday",
"pickup_month",
"pickup_monthday",
"pickup_hour",
"pickup_minute",
"pickup_second",
"dropoff_weekday",
"dropoff_month",
"dropoff_monthday",
"dropoff_hour",
"dropoff_minute",
"dropoff_second",
]
]
# Split the data into train and test sets
trainX, testX, trainy, testy = train_test_split(
X, y, test_size=args.test_split_ratio, random_state=42
)
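# Note: cuml.model_selection.train_test_split mirrors the scikit-learn signature
# (test_size, random_state) but operates on GPU data, so trainX/testX/trainy/testy
# remain cuDF objects here.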
print(trainX.shape)
print(trainX.columns)
# Train a Linear Regression Model with the train set
model = LinearRegression().fit(trainX, trainy)
print(model.score(trainX, trainy))
mlflow.sklearn.save_model(model, args.model_output)
# test_data = pd.DataFrame(testX, columns = )
testX["cost"] = testy
print(testX.shape)
test_data = testX.to_csv(Path(args.test_data) / "test_data.csv")
| NVIDIA_AI_Enterprise_AzureML-main | src/components/RAPIDS/NYC_taxi_with_regression/Train_component/train_src/train.py |
import argparse
import pandas as pd
import cudf
import os
from pathlib import Path
from sklearn.linear_model import LinearRegression
#from sklearn.metrics import mean_squared_error, r2_score
from cuml.metrics import mean_squared_error, r2_score
import mlflow
mlflow.sklearn.autolog()
parser = argparse.ArgumentParser("score")
parser.add_argument(
"--predictions", type=str, help="Path of predictions and actual data"
)
parser.add_argument("--model", type=str, help="Path to model")
parser.add_argument("--score_report", type=str, help="Path to score report")
args = parser.parse_args()
print("hello scoring world...")
lines = [
f"Model path: {args.model}",
f"Predictions path: {args.predictions}",
f"Scoring output path: {args.score_report}",
]
for line in lines:
print(line)
# Load the test data with predicted values
print("mounted_path files: ")
arr = os.listdir(args.predictions)
print(arr)
df_list = []
for filename in arr:
print("reading file: %s ..." % filename)
with open(os.path.join(args.predictions, filename), "r") as handle:
# print (handle.read())
#input_df = pd.read_csv((Path(args.predictions) / filename))
input_df = cudf.read_csv((Path(args.predictions) / filename))
df_list.append(input_df)
test_data = df_list[0]
# Load the model from input port
model = mlflow.sklearn.load_model(args.model)
# Print the results of scoring the predictions against actual values in the test data
# The coefficients
print("Coefficients: \n", model.coef_)
actuals = test_data["actual_cost"]
predictions = test_data["predicted_cost"]
# The mean squared error
print("Mean squared error: %.2f" % mean_squared_error(actuals, predictions))
# The coefficient of determination: 1 is perfect prediction
print("Coefficient of determination: %.2f" % r2_score(actuals, predictions))
print("Model: ", model)
# Print score report to a text file
(Path(args.score_report) / "score.txt").write_text(
"Scored with the following model:\n{}".format(model)
)
with open((Path(args.score_report) / "score.txt"), "a") as f:
f.write("\n Coefficients: \n %s \n" % str(model.coef_))
f.write("Mean squared error: %.2f \n" % mean_squared_error(actuals, predictions))
f.write("Coefficient of determination: %.2f \n" % r2_score(actuals, predictions))
| NVIDIA_AI_Enterprise_AzureML-main | src/components/RAPIDS/NYC_taxi_with_regression/Score_component/score_src/score.py |
import argparse
import pandas as pd
import cudf
import os
from pathlib import Path
#from sklearn.linear_model import LinearRegression
from cuml.linear_model import LinearRegression
import mlflow
mlflow.sklearn.autolog()
parser = argparse.ArgumentParser("predict")
parser.add_argument("--model_input", type=str, help="Path of input model")
parser.add_argument("--test_data", type=str, help="Path to test data")
parser.add_argument("--predictions", type=str, help="Path of predictions")
args = parser.parse_args()
print("hello scoring world...")
lines = [
f"Model path: {args.model_input}",
f"Test data path: {args.test_data}",
f"Predictions path: {args.predictions}",
]
for line in lines:
print(line)
# Load and split the test data
print("mounted_path files: ")
arr = os.listdir(args.test_data)
print(arr)
#test_data = pd.read_csv(Path(args.test_data) / "test_data.csv")
test_data = cudf.read_csv(Path(args.test_data) / "test_data.csv")
testy = test_data["cost"]
# testX = test_data.drop(['cost'], axis=1)
testX = test_data[
[
"distance",
"dropoff_latitude",
"dropoff_longitude",
"passengers",
"pickup_latitude",
"pickup_longitude",
"store_forward",
"vendor",
"pickup_weekday",
"pickup_month",
"pickup_monthday",
"pickup_hour",
"pickup_minute",
"pickup_second",
"dropoff_weekday",
"dropoff_month",
"dropoff_monthday",
"dropoff_hour",
"dropoff_minute",
"dropoff_second",
]
]
print(testX.shape)
print(testX.columns)
# Load the model from input port
model = mlflow.sklearn.load_model(args.model_input)
# Make predictions on testX data and record them in a column named predicted_cost
predictions = model.predict(testX)
testX["predicted_cost"] = predictions
print(testX.shape)
# Compare predictions to actuals (testy)
#output_data = pd.DataFrame(testX)
output_data = cudf.DataFrame(testX)
output_data["actual_cost"] = testy
# Save the output data with feature columns, predicted cost, and actual cost in csv file
output_data = output_data.to_csv((Path(args.predictions) / "predictions.csv"))
| NVIDIA_AI_Enterprise_AzureML-main | src/components/RAPIDS/NYC_taxi_with_regression/Predict_component/predict_src/predict.py |
import argparse
import json
import logging
import os
import time
import random
import matplotlib.pyplot as plt
import mlflow
import torch
# from torch.utils.tensorboard import SummaryWriter
# MONAI imports
from monai.config import print_config
from monai.data import DataLoader, Dataset, CacheDataset, decollate_batch
from monai.inferers import sliding_window_inference
from monai.losses import DiceLoss
from monai.metrics import DiceMetric
from monai.networks.nets import SegResNet
from monai.transforms import (
Activations, AsDiscrete, Compose, ConcatItemsd, EnsureChannelFirstd, EnsureTyped, LoadImaged, MapTransform, NormalizeIntensityd,
Orientationd, RandFlipd, RandScaleIntensityd, RandShiftIntensityd, RandSpatialCropd, Spacingd,
)
from monai.utils import set_determinism
# Avoid flooding of debug messages in logs
logging.basicConfig(level=logging.WARNING)
logging.getLogger("requests").setLevel(logging.WARNING)
logging.getLogger("azureml").setLevel(logging.WARNING)
logging.getLogger("azure").setLevel(logging.WARNING)
logging.getLogger("azure.core").setLevel(logging.WARNING)
logging.getLogger("azure.mlflow").setLevel(logging.WARNING)
# MONAI config
print_config()
start_run = time.time()
# SET CENTRAL VARIABLES
parser = argparse.ArgumentParser()
parser.add_argument("--input_data", type=str, help="path to input data")
parser.add_argument("--epochs", type=int, default=50, help="no of epochs")
parser.add_argument("--initial_lr", type=float, default=0.0001, help="Initial learning rate")
parser.add_argument("--best_model_name", type=str, default='best-model', help="Name of best model to register in AzureML")
parser.add_argument("--train_batch_size", type=int, default=1, help="Train loader batch size")
parser.add_argument("--val_batch_size", type=int, default=1, help="Validation loader batch size")
parser.add_argument("--model", type=str, default='model', help="Result name of best model to register in AzureML")
args = parser.parse_args()
max_epochs = args.epochs
initial_lr = args.initial_lr
best_model_name = args.best_model_name
train_batch_size = args.train_batch_size
val_batch_size = args.val_batch_size
input_data_dir = args.input_data
print(f'Input folder with images: {input_data_dir}')
# Select logging targets for metrics. tb for tensorboard and/or mlflow
# log_targets = ['tb', 'mlflow']
log_targets = ['mlflow']
# AzureML job asset folder. Will be used to store model checkpoints
azureml_output_folder = './outputs'
datalist_json_path = os.path.join(input_data_dir, 'dataset.json')
VAL_AMP = True # MONAI validation mixed precision
# Distributed training:
rank = int(os.environ["RANK"])
local_rank = int(os.environ["LOCAL_RANK"])
device = torch.device("cuda", local_rank) if torch.cuda.is_available() else 'cpu'
torch.cuda.set_device(device)
torch.distributed.init_process_group(backend="nccl", init_method="env://")
world_size = torch.distributed.get_world_size()
# Init logging for AzureML
# if 'tb' in log_targets:
# tb_writer = SummaryWriter("./tb_logs")
# init mlflow also if not in log_targets because it is needed to register the model
mlflow.autolog(silent=True)
params = {
"Epochs": max_epochs,
"Initial lr" : initial_lr,
"Train batch size" : train_batch_size,
"Validation batch size" : val_batch_size,
"Register best model as" : best_model_name,
"Val_auto_mixed_prec" : VAL_AMP
}
# rank == 0 to let only one GPU worker perform the operation
if rank == 0 and 'mlflow' in log_targets:
try:
mlflow.log_params(params)
except Exception as e:
print(f'Exception during mlflow parameter logging: {e}')
# Set deterministic training for reproducibility
set_determinism(seed=0)
# Custom transform to convert the multi-classes labels into multi-labels segmentation task in One-Hot format.
class ConvertToMultiChannelBasedOnBratsClassesd(MapTransform):
"""
Convert labels to multi channels based on brats 2021 classes:
label 1 necrotic tumor core (NCR)
label 2 peritumoral edematous/invaded tissue
label 3 is not used in the new dataset version
label 4 GD-enhancing tumor
The possible classes are:
TC (Tumor core): merge labels 1 and 4
WT (Whole tumor): merge labels 1,2 and 4
ET (Enhancing tumor): label 4
"""
def __call__(self, data):
d = dict(data)
for key in self.keys:
result = []
# merge label 1 and label 4 to construct TC
result.append(torch.logical_or(d[key] == 1, d[key] == 4))
# merge labels 1, 2 and 4 to construct WT
result.append(
torch.logical_or(
torch.logical_or(d[key] == 1, d[key] == 2), d[key] == 4
)
)
# label 4 is ET
result.append(d[key] == 4)
d[key] = torch.stack(result, axis=0).float()
return d
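# For example, a voxel labelled 4 (GD-enhancing tumor) maps to [1, 1, 1] across the
# TC/WT/ET channels, while a voxel labelled 2 (edema) maps to [0, 1, 0] (WT only).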
# Generate training and validation input lists for dataloaders
image_folders = [f for f in os.listdir(input_data_dir) if os.path.isdir(os.path.join(input_data_dir, f))]
print(f'{len(image_folders)} images in image_folders.')
print(image_folders[:5])
train_frac = 0.8
train_size = int(train_frac * len(image_folders))
train_folders = image_folders[:train_size]
val_folders = image_folders[train_size:]
def create_datalist(folders):
elements = []
for folder in folders:
folder_path = os.path.join(input_data_dir, folder)
flair_file = next((f for f in os.listdir(folder_path) if f.endswith('flair.nii.gz')), None)
t1_file = next((f for f in os.listdir(folder_path) if f.endswith('t1.nii.gz')), None)
t1ce_file = next((f for f in os.listdir(folder_path) if f.endswith('t1ce.nii.gz')), None)
t2_file = next((f for f in os.listdir(folder_path) if f.endswith('t2.nii.gz')), None)
label_file = next((f for f in os.listdir(folder_path) if f.endswith('seg.nii.gz')), None)
element = {
'flair' : os.path.join(folder_path, flair_file),
't1' : os.path.join(folder_path, t1_file),
't1ce' : os.path.join(folder_path, t1ce_file),
't2' : os.path.join(folder_path, t2_file),
'label' : os.path.join(folder_path, label_file),
}
elements.append(element)
return elements
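# Expected per-case folder layout (BraTS-style; names are illustrative), matched by the
# filename-suffix checks above:
#   <input_data_dir>/BraTS2021_00000/
#       BraTS2021_00000_flair.nii.gz, ..._t1.nii.gz, ..._t1ce.nii.gz, ..._t2.nii.gz, ..._seg.nii.gz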
train_list = create_datalist(train_folders)
valid_list = create_datalist(val_folders)
print(f'{len(train_list)} training images and {len(valid_list)} validation images found.')
# Setup transforms for training and validation
train_transform = Compose(
[
# load 4 Nifti images and stack them together
LoadImaged(keys=["flair", "t1", "t1ce", "t2", "label"]),
EnsureChannelFirstd(keys=["flair", "t1", "t1ce", "t2"]),
ConcatItemsd(keys=["flair", "t1", "t1ce", "t2"], name="image", dim=0),
EnsureTyped(keys=["image", "label"]),
ConvertToMultiChannelBasedOnBratsClassesd(keys="label"),
Orientationd(keys=["image", "label"], axcodes="RAS"),
Spacingd(
keys=["image", "label"],
pixdim=(1.0, 1.0, 1.0),
mode=("bilinear", "nearest"),
),
RandSpatialCropd(keys=["image", "label"], roi_size=[224, 224, 144], random_size=False),
RandFlipd(keys=["image", "label"], prob=0.5, spatial_axis=0),
RandFlipd(keys=["image", "label"], prob=0.5, spatial_axis=1),
RandFlipd(keys=["image", "label"], prob=0.5, spatial_axis=2),
NormalizeIntensityd(keys="image", nonzero=True, channel_wise=True),
RandScaleIntensityd(keys="image", factors=0.1, prob=1.0),
RandShiftIntensityd(keys="image", offsets=0.1, prob=1.0),
]
)
val_transform = Compose(
[
LoadImaged(keys=["flair", "t1", "t1ce", "t2", "label"]),
EnsureChannelFirstd(keys=["flair", "t1", "t1ce", "t2"]),
ConcatItemsd(keys=["flair", "t1", "t1ce", "t2"], name="image", dim=0),
EnsureTyped(keys=["image", "label"]),
ConvertToMultiChannelBasedOnBratsClassesd(keys="label"),
Orientationd(keys=["image", "label"], axcodes="RAS"),
Spacingd(
keys=["image", "label"],
pixdim=(1.0, 1.0, 1.0),
mode=("bilinear", "nearest"),
),
NormalizeIntensityd(keys="image", nonzero=True, channel_wise=True),
]
)
# Load data
# adjust caching rates in case of out of memory issue
train_ds = Dataset(data=train_list, transform=train_transform)
train_sampler = torch.utils.data.distributed.DistributedSampler(train_ds)
train_loader = DataLoader(train_ds, batch_size= train_batch_size, shuffle=False, num_workers=12, sampler=train_sampler)
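# The DistributedSampler shards the training set across ranks and handles per-epoch
# shuffling (via set_epoch in the training loop), which is why shuffle=False is passed
# to the training DataLoader.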
val_ds = Dataset(data=valid_list, transform=val_transform)
val_loader = DataLoader(val_ds, batch_size=val_batch_size, shuffle=False, num_workers=12)
params = {
"train_samples": len(train_ds),
"val_samples" : len(val_ds)
}
if rank == 0 and 'mlflow' in log_targets:
try:
mlflow.log_params(params)
except Exception as e:
print(f'Exception during mlflow parameter logging: {e}')
# log sample images
val_data_example = val_ds[2]
print(f"image shape: {val_data_example['image'].shape}")
fig, axs = plt.subplots(2, 4, figsize=(20, 10))
for i in range(4):
axs[0, i].set_title(f"image channel {i}")
axs[0, i].imshow(val_data_example["image"][i, :, :, 60].detach().cpu(), cmap="gray")
# also visualize the 3 channels label corresponding to this image
print(f"label shape: {val_data_example['label'].shape}")
for i in range(3):
axs[1, i].set_title(f"label channel {i}")
axs[1, i].imshow(val_data_example["label"][i, :, :, 60].detach().cpu())
# add an empty subplot to align the last label image with the others
axs[1, 3].axis('off')
# plt.show()
samples = plt.gcf()
try:
mlflow.log_figure(samples, 'sample-images.png')
except Exception as e:
print(f'Exception during mlflow image logging: {e}')
# Create model, loss and optimizer
val_interval = 1
# standard PyTorch program style: create SegResNet, DiceLoss and Adam optimizer
# device = 'cuda' if torch.cuda.is_available() else 'cpu'
model = torch.nn.parallel.DistributedDataParallel(module= SegResNet(
blocks_down=[1, 2, 2, 4],
blocks_up=[1, 1, 1],
init_filters=16,
in_channels=4,
out_channels=3,
dropout_prob=0.2,
).to(device), device_ids=[local_rank])
loss_function = DiceLoss(smooth_nr=0, smooth_dr=1e-5, squared_pred=True, to_onehot_y=False, sigmoid=True)
optimizer = torch.optim.Adam(model.parameters(), initial_lr, weight_decay=1e-5)
lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=max_epochs)
dice_metric = DiceMetric(include_background=True, reduction="mean")
dice_metric_batch = DiceMetric(include_background=True, reduction="mean_batch")
post_trans = Compose(
[Activations(sigmoid=True), AsDiscrete(threshold=0.5)]
)
# define inference method
def inference(input):
def _compute(input):
return sliding_window_inference(
inputs=input,
roi_size=(240, 240, 160),
sw_batch_size=1,
predictor=model,
overlap=0.5,
)
if VAL_AMP:
with torch.cuda.amp.autocast():
return _compute(input)
else:
return _compute(input)
# use amp to accelerate training
scaler = torch.cuda.amp.GradScaler()
# enable cuDNN benchmark
torch.backends.cudnn.benchmark = True
start_training = time.time()
preprocessing_mins = (start_training - start_run) / 60
if 'mlflow' in log_targets:
try:
mlflow.log_metric("preprocessing_mins", preprocessing_mins, 0)
except Exception as e:
print(f'Exception occurred writing train metrics to mlflow: {e}')
# RUN TRAINING
best_metric = -1
best_metric_epoch = -1
best_metrics_epochs_and_time = [[], [], []]
epoch_loss_values = []
metric_values = []
metric_values_tc = []
metric_values_wt = []
metric_values_et = []
total_start = time.time()
for epoch in range(max_epochs):
epoch_start = time.time()
train_sampler.set_epoch(epoch) # parallel training
print("-" * 10)
print(f"epoch {epoch + 1}/{max_epochs}")
model.train()
epoch_loss = 0
step = 0
for batch_data in train_loader:
step_start = time.time()
step += 1
inputs, labels = (
batch_data["image"].to(device),
batch_data["label"].to(device),
)
optimizer.zero_grad()
with torch.cuda.amp.autocast():
outputs = model(inputs)
loss = loss_function(outputs, labels)
scaler.scale(loss).backward()
scaler.step(optimizer)
scaler.update()
epoch_loss += loss.item()
lr_scheduler.step()
epoch_loss /= step
epoch_loss_values.append(epoch_loss)
print(f"Epoch {epoch + 1} average loss: {epoch_loss:.4f}")
# Log epoch train metrics
# if 'tb' in log_targets:
# try:
# tb_writer.add_scalar("train_loss", epoch_loss, epoch+1)
# except Exception as e:
# print(f'Exception occured writing train metrics to tensorboard: {e}')
if 'mlflow' in log_targets:
try:
mlflow.log_metric("train_loss", epoch_loss, epoch+1)
except Exception as e:
print(f'Exception occurred writing train metrics to mlflow: {e}')
# VALIDATION
if (epoch + 1) % val_interval == 0:
val_epoch_loss = 0
val_step = 0
model.eval()
with torch.no_grad():
for val_data in val_loader:
val_step += 1
val_inputs, val_labels = (
val_data["image"].to(device),
val_data["label"].to(device),
)
val_outputs = inference(val_inputs)
val_loss = loss_function(val_outputs, val_labels)
val_outputs = [post_trans(i) for i in decollate_batch(val_outputs)]
dice_metric(y_pred=val_outputs, y=val_labels)
dice_metric_batch(y_pred=val_outputs, y=val_labels)
val_epoch_loss += val_loss.item()
val_epoch_loss /= val_step
metric = dice_metric.aggregate().item()
#metric_values.append(metric)
metric_batch = dice_metric_batch.aggregate()
metric_tc = metric_batch[0].item()
#metric_values_tc.append(metric_tc)
metric_wt = metric_batch[1].item()
#metric_values_wt.append(metric_wt)
metric_et = metric_batch[2].item()
#metric_values_et.append(metric_et)
dice_metric.reset()
dice_metric_batch.reset()
# Let only rank 0 save a checkpoint when the validation mean Dice improves
if rank == 0:
if metric > best_metric:
best_metric = metric
best_metric_epoch = epoch + 1
model_checkpoint = os.path.join(azureml_output_folder, "best_metric_model.pth")
torch.save(model.module.state_dict(), model_checkpoint)
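# Saving model.module.state_dict() strips the DistributedDataParallel wrapper, so the
# checkpoint can later be loaded into a plain SegResNet (as done after training below).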
print("saved new best metric model")
print(
f"current epoch: {epoch + 1} current mean dice: {metric:.4f}"
f" tc: {metric_tc:.4f} wt: {metric_wt:.4f} et: {metric_et:.4f}"
f"\nbest mean dice: {best_metric:.4f}"
f" at epoch: {best_metric_epoch}"
)
epoch_duration_s = time.time() - epoch_start
# Log epoch val metrics:
epoch_val_metrics = {
'val_loss' : val_epoch_loss,
'val_mean_dice' : metric,
'val_dice_tc' : metric_tc,
'val_dice_wt' : metric_wt,
'val_dice_et' : metric_et,
'epoch_duration_s' : epoch_duration_s
}
# if 'tb' in log_targets:
# try:
# for name, value in epoch_val_metrics.items():
# tb_writer.add_scalar(tag= name, scalar_value= value, global_step= epoch+1)
# except:
# print(f'Exception occured writing validation metrics to tensorboard: {e}')
if 'mlflow' in log_targets:
try:
mlflow.log_metrics(metrics= epoch_val_metrics, step= epoch+1)
except Exception as e:
print(f'Exception occurred writing validation metrics to MLflow: {e}')
print(f"time consuming of epoch {epoch + 1} is: {epoch_duration_s:.4f}")
total_time_mins = (time.time() - total_start) / 60
if rank == 0:
print(f"Training completed, best_metric: {best_metric:.4f} at epoch: {best_metric_epoch},"
f"total time: {total_time_mins:.2f} mins ({total_time_mins/max_epochs:.2f} per epoch).")
# Load the best model into memory
model = SegResNet(
blocks_down=[1, 2, 2, 4],
blocks_up=[1, 1, 1],
init_filters=16,
in_channels=4,
out_channels=3,
dropout_prob=0.2,
).to(device)
model.load_state_dict(torch.load(model_checkpoint))
print("Registering the model via MLFlow")
args.model = best_model_name
try:
mlflow.pytorch.log_model(
pytorch_model=model,
registered_model_name= best_model_name,
artifact_path= 'model',
extra_files=[model_checkpoint])
except Exception as e:
print(e)
# End mlflow run and tb writer
if mlflow.active_run():
mlflow.end_run()
# if tb_writer is not None:
# tb_writer.close() | NVIDIA_AI_Enterprise_AzureML-main | src/components/monai/3D_image_segmentation/train_segmentation/src/run.py |
import argparse
import logging
import sys
import os
import shutil
import tarfile
def run(args) -> str:
# extract
with tarfile.open(args.blob_file_location, "r") as tar:
tar.extractall(path=args.image_data_folder)
def get_arg_parser(parser=None):
"""Parse the command line arguments for merge using argparse.
Args:
inputs:
blob_file_location:
type: uri_folder
description: the input blob .tar file location
mode: "ro_mount"
overwrite:
type: boolean
description: overwrite local data
default: true
optional: true
outputs:
image_data_folder:
type: uri_folder
description: the output folder where the uncompressed data will be written
mode: "rw_mount"
mode: "upload"
"""
# add arguments that are specific to the component
if parser is None:
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
"--blob_file_location",
type=str,
required=True,
help="Azure blob name",
)
parser.add_argument(
"--overwrite",
type=bool,
required=False,
help="Overwrite files",
)
parser.add_argument(
"--image_data_folder",
type=str,
required=True,
help="Location of results",
)
return parser
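# Example invocation (paths are hypothetical; in the pipeline they are supplied by the
# component's inputs/outputs):
#   python run.py --blob_file_location /mnt/data/brats.tar --image_data_folder ./extracted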
def main(cli_args=None):
"""Component main function.
It parses arguments and executes run() with the right arguments.
Args:
cli_args (List[str], optional): list of args to feed script, useful for debugging. Defaults to None.
"""
# build an arg parser
parser = get_arg_parser()
# run the parser on cli args
args = parser.parse_args(cli_args)
logging.info(f"Running script with arguments: {args}")
# args.image_data_folder = run(args)
run(args)
if __name__ == "__main__":
# Set logging to sys.out
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
log_format = logging.Formatter("[%(asctime)s] [%(levelname)s] - %(message)s")
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.DEBUG)
handler.setFormatter(log_format)
logger.addHandler(handler)
main() | NVIDIA_AI_Enterprise_AzureML-main | src/components/monai/3D_image_segmentation/upload_from_blob/src/run.py |
import argparse
import os
import time
#os.system('pip install azure-storage-blob')
parser = argparse.ArgumentParser()
parser.add_argument("--input_data", type=str)
parser.add_argument("--conn_str", type=str)
parser.add_argument("--container_name", type=str)
parser.add_argument("--model", type=str)
parser.add_argument("--output_data", type=str)
args = parser.parse_args()
print("sample_input_data path: %s" % args.input_data)
print("sample_conn_str path: %s" % args.conn_str)
print("sample_container_name path: %s" % args.container_name)
print("sample_model path: %s" % args.model)
print("sample_output_data path: %s" % args.output_data)
os.chdir(args.input_data)
os.system('cp -r * /tmp')
os.chdir('/tmp')
from azure.storage.blob import BlobClient
#
# Copy Model from Model
#
os.chdir(args.model)
for filename in os.listdir(args.model):
print(filename)
os.system('cp -r * /tmp/models')
os.chdir('/tmp')
from azure.storage.blob import BlobServiceClient
blob_service_client = BlobServiceClient.from_connection_string(args.conn_str)
container_client = blob_service_client.get_container_client(args.container_name)
while(True):
b = 0
for file in container_client.walk_blobs('inputs/', delimiter='/'):
filename = os.path.basename(file.name)
filename_no_ext = os.path.splitext(filename)[0]
ext = os.path.splitext(filename)[1].lower()
if ext == '.end':
b = 1
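# A blob with a '.end' extension under inputs/ acts as a stop sentinel: it makes the
# polling loop exit after the current pass (see the break at the bottom of the loop).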
if ext == '.mp4' or ext == '.mov':
# Read input file
try:
blob = BlobClient.from_connection_string(conn_str=args.conn_str,container_name=args.container_name, blob_name=file.name)
with open('/tmp/'+ filename, "wb") as f:
f.write(blob.download_blob().readall())
except Exception as e:
print(e)
# Run Deepstream
filename_no_ext = filename_no_ext + '_' + time.strftime("%Y%m%d-%H%M%S")
os.chdir('/tmp/apps/tao_others/deepstream-bodypose2d-app')
os.system('LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/opt/nvidia/deepstream/deepstream/lib/cvcore_libs ./deepstream-bodypose2d-app 1 ../../../configs/bodypose2d_tao/sample_bodypose2d_model_config.txt 0 0 file:///tmp/' + filename + ' /tmp/'+ filename_no_ext)
# Copy output to storage blob
blob = BlobClient.from_connection_string(conn_str=args.conn_str,container_name=args.container_name, blob_name= 'outputs/'+ filename_no_ext + '.264')
try:
with open('/tmp/' + filename_no_ext + '.264',"rb") as f:
blob.upload_blob(f)
print('body2dout.264 uploaded to container: ' + args.container_name + ' successfully')
except Exception as e:
print(e)
blob_client = blob_service_client.get_blob_client(container=args.container_name, blob=file.name)
blob_client.delete_blob()
time.sleep(1)
if b == 1:
break
time.sleep(10)
| NVIDIA_AI_Enterprise_AzureML-main | src/components/DeepStream/faciallandmark/src/run.py |
import argparse
import os
import time
parser = argparse.ArgumentParser()
parser.add_argument("--input_data", type=str)
parser.add_argument("--conn_str", type=str)
parser.add_argument("--container_name", type=str)
parser.add_argument("--model", type=str)
parser.add_argument("--model_name", type=str)
parser.add_argument("--output_data", type=str)
args = parser.parse_args()
print("sample_input_data path: %s" % args.input_data)
print("sample_conn_str path: %s" % args.conn_str)
print("sample_container_name path: %s" % args.container_name)
print("sample_model path: %s" % args.model)
print("sample_model_name path: %s" % args.model_name)
print("sample_output_data path: %s" % args.output_data)
os.chdir(args.input_data)
os.system('cp -r * /tmp')
os.chdir('/tmp')
from azure.storage.blob import BlobClient
#
# Copy Model from Data Asset
#
os.chdir(args.model)
os.system('cp -r * /tmp/models')
os.chdir('/tmp')
from azure.storage.blob import BlobServiceClient
blob_service_client = BlobServiceClient.from_connection_string(args.conn_str)
container_client = blob_service_client.get_container_client(args.container_name)
while(True):
b = 0
for file in container_client.walk_blobs('inputs/', delimiter='/'):
filename = os.path.basename(file.name)
filename_no_ext = os.path.splitext(filename)[0]
ext = os.path.splitext(filename)[1].lower()
if ext == '.end':
b = 1
if ext == '.mp4':
# Read input file
try:
blob = BlobClient.from_connection_string(conn_str=args.conn_str,container_name=args.container_name, blob_name=file.name)
with open('/tmp/input.mp4', "wb") as f:
f.write(blob.download_blob().readall())
except Exception as e:
print(e)
# Run Deepstream
filename_no_ext = filename_no_ext + '_' + time.strftime("%Y%m%d-%H%M%S")
os.chdir('/tmp/apps/tao_others/deepstream-mdx-perception-app')
if args.model_name == 'retail_object_detection_recognition' :
os.system('CUDA_MODULE_LOADING=LAZY LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/opt/nvidia/deepstream/deepstream/lib/cvcore_libs ./deepstream-mdx-perception-app -c ../../../configs/app/retail_object_detection_recognition.txt')
elif args.model_name == 'peoplenet_reidentification':
os.system('CUDA_MODULE_LOADING=LAZY LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/opt/nvidia/deepstream/deepstream/lib/cvcore_libs ./deepstream-mdx-perception-app -c ../../../configs/app/peoplenet_reidentification.txt')
# Copy output to storage blob
blob = BlobClient.from_connection_string(conn_str=args.conn_str,container_name=args.container_name, blob_name= 'outputs/'+ filename_no_ext + '.264')
try:
with open('/tmp/apps/tao_others/deepstream-mdx-perception-app/out.mp4',"rb") as f:
blob.upload_blob(f)
print('mdx-perception_out.264 uploaded to container: ' + args.container_name + ' successfully')
except Exception as e:
print(e)
blob_client = blob_service_client.get_blob_client(container=args.container_name, blob=file.name)
blob_client.delete_blob()
time.sleep(1)
if b == 1:
break
time.sleep(10)
| NVIDIA_AI_Enterprise_AzureML-main | src/components/DeepStream/mdx-perception/src/run.py |
import argparse
import os
import time
parser = argparse.ArgumentParser()
parser.add_argument("--input_data", type=str)
parser.add_argument("--conn_str", type=str)
parser.add_argument("--container_name", type=str)
parser.add_argument("--model", type=str)
parser.add_argument("--model_name", type=str)
parser.add_argument("--output_data", type=str)
args = parser.parse_args()
print("sample_input_data path: %s" % args.input_data)
print("sample_conn_str path: %s" % args.conn_str)
print("sample_container_name path: %s" % args.container_name)
print("sample_model path: %s" % args.model)
print("sample_model_name path: %s" % args.model_name)
print("sample_output_data path: %s" % args.output_data)
os.chdir(args.input_data)
os.system('cp -r * /tmp')
os.chdir('/tmp')
from azure.storage.blob import BlobClient
#
# Copy Model from Data Asset
#
os.chdir(args.model)
os.system('cp -r * /tmp/models')
os.chdir('/tmp')
from azure.storage.blob import BlobServiceClient
blob_service_client = BlobServiceClient.from_connection_string(args.conn_str)
container_client = blob_service_client.get_container_client(args.container_name)
while(True):
b = 0
for file in container_client.walk_blobs('inputs/', delimiter='/'):
filename = os.path.basename(file.name)
filename_no_ext = os.path.splitext(filename)[0]
ext = os.path.splitext(filename)[1].lower()
if ext == '.end':
b = 1
if ext == '.mp4' or ext == '.mov':
# Read input file
try:
blob = BlobClient.from_connection_string(conn_str=args.conn_str,container_name=args.container_name, blob_name=file.name)
with open('/tmp/'+ filename, "wb") as f:
f.write(blob.download_blob().readall())
except Exception as e:
print(e)
# Run Deepstream
filename_no_ext = filename_no_ext + '_' + time.strftime("%Y%m%d-%H%M%S")
os.chdir('/tmp/apps/tao_segmentation')
if args.model_name == 'city':
os.system('./ds-tao-segmentation -c ../../configs/citysemsegformer_tao/pgie_citysemsegformer_tao_config.txt -i file:///tmp/' + filename)
elif args.model_name == 'shuffle_people':
os.system('./ds-tao-segmentation -c ../../configs/peopleSemSegNet_tao/shuffle/pgie_peopleSemSegShuffleUnet_tao_config.txt -i file:///tmp/' + filename)
elif args.model_name == 'vanilla_people':
os.system('./ds-tao-segmentation -c ../../configs/peopleSemSegNet_tao/vanilla/pgie_peopleSemSegVanillaUnet_tao_config.txt -i file:///tmp/' + filename)
# Copy output to storage blob
blob = BlobClient.from_connection_string(conn_str=args.conn_str,container_name=args.container_name, blob_name= 'outputs/'+ filename_no_ext + '.264')
try:
with open('/tmp/apps/tao_segmentation/out.mp4',"rb") as f:
blob.upload_blob(f)
print('segmentationout.264 uploaded to container: ' + args.container_name + ' successfully')
except Exception as e:
print(e)
blob_client = blob_service_client.get_blob_client(container=args.container_name, blob=file.name)
blob_client.delete_blob()
time.sleep(1)
if b == 1:
break
time.sleep(10)
| NVIDIA_AI_Enterprise_AzureML-main | src/components/DeepStream/ds-tao-segmentation/src/run.py |
import argparse
import os
import time
#os.system('pip install azure-storage-blob')
parser = argparse.ArgumentParser()
parser.add_argument("--input_data", type=str)
parser.add_argument("--conn_str", type=str)
parser.add_argument("--container_name", type=str)
parser.add_argument("--model", type=str)
parser.add_argument("--output_data", type=str)
args = parser.parse_args()
print("sample_input_data path: %s" % args.input_data)
print("sample_conn_str path: %s" % args.conn_str)
print("sample_container_name path: %s" % args.container_name)
print("sample_model path: %s" % args.model)
print("sample_output_data path: %s" % args.output_data)
os.chdir(args.input_data)
os.system('cp -r * /tmp')
os.chdir('/tmp')
from azure.storage.blob import BlobClient
#
# Copy Model from Model
#
os.chdir(args.model)
for filename in os.listdir(args.model):
print(filename)
os.system('cp -r * /tmp/models')
os.chdir('/tmp')
from azure.storage.blob import BlobServiceClient
blob_service_client = BlobServiceClient.from_connection_string(args.conn_str)
container_client = blob_service_client.get_container_client(args.container_name)
while(True):
b = 0
for file in container_client.walk_blobs('inputs/', delimiter='/'):
filename = os.path.basename(file.name)
filename_no_ext = os.path.splitext(filename)[0]
ext = os.path.splitext(filename)[1].lower()
if ext == '.end':
b = 1
if ext == '.mp4' or ext == '.mov':
# Read input file
try:
blob = BlobClient.from_connection_string(conn_str=args.conn_str,container_name=args.container_name, blob_name=file.name)
with open('/tmp/'+ filename, "wb") as f:
f.write(blob.download_blob().readall())
except Exception as e:
print(e)
# Run Deepstream
filename_no_ext = filename_no_ext + '_' + time.strftime("%Y%m%d-%H%M%S")
os.chdir('/tmp/apps/tao_others/deepstream-bodypose2d-app')
os.system('LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/opt/nvidia/deepstream/deepstream/lib/cvcore_libs ./deepstream-bodypose2d-app 1 ../../../configs/bodypose2d_tao/sample_bodypose2d_model_config.txt 0 0 file:///tmp/' + filename + ' /tmp/'+ filename_no_ext)
# Copy output to storage blob
blob = BlobClient.from_connection_string(conn_str=args.conn_str,container_name=args.container_name, blob_name= 'outputs/'+ filename_no_ext + '.264')
try:
with open('/tmp/' + filename_no_ext + '.264',"rb") as f:
blob.upload_blob(f)
print('body2dout.264 uploaded to container: ' + args.container_name + ' successfully')
except Exception as e:
print(e)
blob_client = blob_service_client.get_blob_client(container=args.container_name, blob=file.name)
blob_client.delete_blob()
time.sleep(1)
if b == 1:
break
time.sleep(10)
| NVIDIA_AI_Enterprise_AzureML-main | src/components/DeepStream/bodypose2d/src/run.py |
import argparse
import os
import time
parser = argparse.ArgumentParser()
parser.add_argument("--input_data", type=str)
parser.add_argument("--conn_str", type=str)
parser.add_argument("--container_name", type=str)
parser.add_argument("--model", type=str)
parser.add_argument("--model_name", type=str)
parser.add_argument("--output_data", type=str)
args = parser.parse_args()
print("sample_input_data path: %s" % args.input_data)
print("sample_conn_str path: %s" % args.conn_str)
print("sample_container_name path: %s" % args.container_name)
print("sample_model path: %s" % args.model)
print("sample_model_name path: %s" % args.model_name)
print("sample_output_data path: %s" % args.output_data)
os.chdir(args.input_data)
os.system('cp -r * /tmp')
os.chdir('/tmp')
from azure.storage.blob import BlobClient
#
# Copy Model from Data Asset
#
os.chdir(args.model)
os.system('cp -r * /tmp/models')
os.chdir('/tmp')
from azure.storage.blob import BlobServiceClient
blob_service_client = BlobServiceClient.from_connection_string(args.conn_str)
container_client = blob_service_client.get_container_client(args.container_name)
while(True):
b = 0
for file in container_client.walk_blobs('inputs/', delimiter='/'):
filename = os.path.basename(file.name)
filename_no_ext = os.path.splitext(filename)[0]
ext = os.path.splitext(filename)[1].lower()
if ext == '.end':
b = 1
if ext == '.mp4' or ext == '.mov':
# Read input file
try:
blob = BlobClient.from_connection_string(conn_str=args.conn_str,container_name=args.container_name, blob_name=file.name)
with open('/tmp/'+ filename, "wb") as f:
f.write(blob.download_blob().readall())
except Exception as e:
print(e)
# Run Deepstream
filename_no_ext = filename_no_ext + '_' + time.strftime("%Y%m%d-%H%M%S")
os.chdir('/tmp/apps/tao_detection')
if args.model_name == 'peoplenet_transformer':
# os.system('LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/opt/nvidia/deepstream/deepstream/lib/cvcore_libs ./ds-tao-detection -c ../../../configs/peoplenet_transformer_tao/pgie_peoplenet_transformer_tao_config.txt 1 file:///tmp/' + filename + ' /tmp/'+ filename_no_ext)
os.system('./ds-tao-detection -c ../../configs/peoplenet_transformer_tao/pgie_peoplenet_transformer_tao_config.txt -i file:///tmp/' + filename)
elif args.model_name == 'retail_object_detection':
# os.system('LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/opt/nvidia/deepstream/deepstream/lib/cvcore_libs ./ds-tao-detection 1 file:///tmp/' + filename + ' /tmp/'+ filename_no_ext)
os.system('./ds-tao-detection -c ../../configs/retail_object_detection_tao/pgie_retail_object_detection_100_tao_config.txt -i file:///tmp/' + filename)
elif args.model_name == 'retail_object_detection_binary':
# os.system('LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/opt/nvidia/deepstream/deepstream/lib/cvcore_libs ./ds-tao-detection 1 file:///tmp/' + filename + ' /tmp/'+ filename_no_ext)
os.system('./ds-tao-detection -c ../../configs/retail_object_detection_tao/pgie_retail_object_detection_binary_tao_config.txt -i file:///tmp/' + filename)
# Copy output to storage blob
blob = BlobClient.from_connection_string(conn_str=args.conn_str,container_name=args.container_name, blob_name= 'outputs/'+ filename_no_ext + '.264')
try:
with open('/tmp/apps/tao_detection/out.mp4',"rb") as f:
blob.upload_blob(f)
print('detection output .264 uploaded to container: ' + args.container_name + ' successfully')
except Exception as e:
print(e)
blob_client = blob_service_client.get_blob_client(container=args.container_name, blob=file.name)
blob_client.delete_blob()
time.sleep(1)
if b == 1:
break
time.sleep(10)
| NVIDIA_AI_Enterprise_AzureML-main | src/components/DeepStream/ds-tao-detection/src/run.py |
import argparse
import os
import time
parser = argparse.ArgumentParser()
parser.add_argument("--input_data", type=str)
parser.add_argument("--conn_str", type=str)
parser.add_argument("--container_name", type=str)
parser.add_argument("--model", type=str)
parser.add_argument("--output_data", type=str)
args = parser.parse_args()
print("sample_input_data path: %s" % args.input_data)
print("sample_conn_str path: %s" % args.conn_str)
print("sample_container_name path: %s" % args.container_name)
print("sample_model path: %s" % args.model)
print("sample_output_data path: %s" % args.output_data)
os.chdir(args.input_data)
os.system('cp -r * /tmp')
os.chdir('/tmp')
from azure.storage.blob import BlobClient
#
# Copy Model from Data Asset
#
os.chdir(args.model)
os.system('cp -r * /tmp/models')
os.chdir('/tmp')
from azure.storage.blob import BlobServiceClient
blob_service_client = BlobServiceClient.from_connection_string(args.conn_str)
container_client = blob_service_client.get_container_client(args.container_name)
while(True):
b = 0
for file in container_client.walk_blobs('inputs/', delimiter='/'):
filename = os.path.basename(file.name)
filename_no_ext = os.path.splitext(filename)[0]
ext = os.path.splitext(filename)[1].lower()
if ext == '.end':
b = 1
if ext == '.mp4' or ext == '.mov':
# Read input file
try:
blob = BlobClient.from_connection_string(conn_str=args.conn_str,container_name=args.container_name, blob_name=file.name)
with open('/tmp/'+ filename, "wb") as f:
f.write(blob.download_blob().readall())
except Exception as e:
print(e)
# Run Deepstream
filename_no_ext = filename_no_ext + '_' + time.strftime("%Y%m%d-%H%M%S")
os.chdir('/tmp/apps/tao_others/deepstream-gesture-app')
os.system('LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/opt/nvidia/deepstream/deepstream/lib/cvcore_libs ./deepstream-gesture-app 1 3 ../../../configs/bodypose2d_tao/sample_bodypose2d_model_config.txt file:///tmp/' + filename + ' /tmp/'+ filename_no_ext)
# Copy output to storage blob
blob = BlobClient.from_connection_string(conn_str=args.conn_str,container_name=args.container_name, blob_name= 'outputs/'+ filename_no_ext + '.264')
try:
with open('/tmp/' + filename_no_ext + '.264',"rb") as f:
blob.upload_blob(f)
print('gestureout.264 uploaded to container: ' + args.container_name + ' successfully')
except Exception as e:
print(e)
blob_client = blob_service_client.get_blob_client(container=args.container_name, blob=file.name)
blob_client.delete_blob()
time.sleep(1)
if b == 1:
break
time.sleep(10)
| NVIDIA_AI_Enterprise_AzureML-main | src/components/DeepStream/gesture/src/run.py |
NVIDIA_AI_Enterprise_AzureML-main | src/components/DeepStream/deepstream-app/src/run.py |
|
import argparse
import os
import time
parser = argparse.ArgumentParser()
parser.add_argument("--input_data", type=str)
parser.add_argument("--conn_str", type=str)
parser.add_argument("--container_name", type=str)
parser.add_argument("--model", type=str)
parser.add_argument("--output_data", type=str)
args = parser.parse_args()
print("sample_input_data path: %s" % args.input_data)
print("sample_conn_str path: %s" % args.conn_str)
print("sample_container_name path: %s" % args.container_name)
print("sample_model path: %s" % args.model)
print("sample_output_data path: %s" % args.output_data)
os.chdir(args.input_data)
os.system('cp -r * /tmp')
os.chdir('/tmp')
from azure.storage.blob import BlobClient
#
# Copy Model from Data Asset
#
os.chdir(args.model)
os.system('cp -r * /tmp/models')
os.chdir('/tmp')
from azure.storage.blob import BlobServiceClient
blob_service_client = BlobServiceClient.from_connection_string(args.conn_str)
container_client = blob_service_client.get_container_client(args.container_name)
while(True):
b = 0
for file in container_client.walk_blobs('inputs/', delimiter='/'):
filename = os.path.basename(file.name)
filename_no_ext = os.path.splitext(filename)[0]
ext = os.path.splitext(filename)[1].lower()
if ext == '.end':
b = 1
if ext == '.mp4' or ext == '.mov':
# Read input file
try:
blob = BlobClient.from_connection_string(conn_str=args.conn_str,container_name=args.container_name, blob_name=file.name)
with open('/tmp/'+ filename, "wb") as f:
f.write(blob.download_blob().readall())
except Exception as e:
print(e)
# Run Deepstream
filename_no_ext = filename_no_ext + '_' + time.strftime("%Y%m%d-%H%M%S")
os.chdir('/tmp/apps/tao_others/deepstream-gaze-app')
os.system('LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/opt/nvidia/deepstream/deepstream/lib/cvcore_libs ./deepstream-gaze-app 1 ../../../configs/facial_tao/sample_faciallandmarks_config.txt file:///tmp/' + filename + ' /tmp/'+ filename_no_ext)
# Copy output to storage blob
blob = BlobClient.from_connection_string(conn_str=args.conn_str,container_name=args.container_name, blob_name= 'outputs/'+ filename_no_ext + '.264')
try:
with open('/tmp/' + filename_no_ext + '.264',"rb") as f:
blob.upload_blob(f)
print('gazeout.264 uploaded to container: ' + args.container_name + ' successfully')
except Exception as e:
print(e)
blob_client = blob_service_client.get_blob_client(container=args.container_name, blob=file.name)
blob_client.delete_blob()
time.sleep(1)
if b == 1:
break
time.sleep(10)
| NVIDIA_AI_Enterprise_AzureML-main | src/components/DeepStream/gaze/src/run.py |
import argparse
import os
import time
parser = argparse.ArgumentParser()
parser.add_argument("--input_data", type=str)
parser.add_argument("--conn_str", type=str)
parser.add_argument("--container_name", type=str)
parser.add_argument("--model", type=str)
parser.add_argument("--output_data", type=str)
args = parser.parse_args()
print("sample_input_data path: %s" % args.input_data)
print("sample_conn_str path: %s" % args.conn_str)
print("sample_container_name path: %s" % args.container_name)
print("sample_model path: %s" % args.model)
print("sample_output_data path: %s" % args.output_data)
os.chdir(args.input_data)
os.system('cp -r * /tmp')
os.chdir('/tmp')
from azure.storage.blob import BlobClient
#
# Copy Model from Data Asset
#
os.chdir(args.model)
os.system('cp -r * /tmp/models')
os.chdir('/tmp')
from azure.storage.blob import BlobServiceClient
blob_service_client = BlobServiceClient.from_connection_string(args.conn_str)
container_client = blob_service_client.get_container_client(args.container_name)
while(True):
b = 0
for file in container_client.walk_blobs('inputs/', delimiter='/'):
filename = os.path.basename(file.name)
filename_no_ext = os.path.splitext(filename)[0]
ext = os.path.splitext(filename)[1].lower()
if ext == '.end':
b = 1
if ext == '.mp4' or ext == '.mov':
# Read input file
try:
blob = BlobClient.from_connection_string(conn_str=args.conn_str,container_name=args.container_name, blob_name=file.name)
with open('/tmp/'+ filename, "wb") as f:
f.write(blob.download_blob().readall())
except Exception as e:
print(e)
# Run Deepstream
filename_no_ext = filename_no_ext + '_' + time.strftime("%Y%m%d-%H%M%S")
os.chdir('/tmp/deepstream-lpr-app')
os.system('./deepstream-lpr-app 1 1 0 infer /tmp/' + filename + ' /tmp/'+ filename_no_ext)
# Copy output to storage blob
blob = BlobClient.from_connection_string(conn_str=args.conn_str,container_name=args.container_name, blob_name= 'outputs/'+ filename_no_ext + '.264')
try:
with open('/tmp/' + filename_no_ext + '.264',"rb") as f:
blob.upload_blob(f)
print('lprout.264 uploaded to container: ' + args.container_name + ' successfully')
except Exception as e:
print(e)
blob_client = blob_service_client.get_blob_client(container=args.container_name, blob=file.name)
blob_client.delete_blob()
time.sleep(1)
if b == 1:
break
time.sleep(10)
| NVIDIA_AI_Enterprise_AzureML-main | src/components/DeepStream/lpr/src/run.py |
import argparse
import os
import time
parser = argparse.ArgumentParser()
parser.add_argument("--input_data", type=str)
parser.add_argument("--conn_str", type=str)
parser.add_argument("--container_name", type=str)
parser.add_argument("--model", type=str)
parser.add_argument("--output_data", type=str)
args = parser.parse_args()
print("sample_input_data path: %s" % args.input_data)
print("sample_conn_str path: %s" % args.conn_str)
print("sample_container_name path: %s" % args.container_name)
print("sample_model path: %s" % args.model)
print("sample_output_data path: %s" % args.output_data)
os.chdir(args.input_data)
os.system('cp -r * /tmp')
os.chdir('/tmp')
from azure.storage.blob import BlobClient
#
# Copy Model from Data Asset
#
os.chdir(args.model)
os.system('cp -r * /tmp/models')
os.chdir('/tmp')
from azure.storage.blob import BlobServiceClient
blob_service_client = BlobServiceClient.from_connection_string(args.conn_str)
container_client = blob_service_client.get_container_client(args.container_name)
while(True):
b = 0
for file in container_client.walk_blobs('inputs/', delimiter='/'):
filename = os.path.basename(file.name)
filename_no_ext = os.path.splitext(filename)[0]
ext = os.path.splitext(filename)[1].lower()
if ext == '.end':
b = 1
if ext == '.mp4' or ext == '.mov':
# Read input file
try:
blob = BlobClient.from_connection_string(conn_str=args.conn_str,container_name=args.container_name, blob_name=file.name)
with open('/tmp/'+ filename, "wb") as f:
f.write(blob.download_blob().readall())
except Exception as e:
print(e)
# Run Deepstream
filename_no_ext = filename_no_ext + '_' + time.strftime("%Y%m%d-%H%M%S")
os.chdir('/tmp/apps/tao_others/deepstream-emotion-app')
os.system('LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/opt/nvidia/deepstream/deepstream/lib/cvcore_libs ./deepstream-emotion-app 1 ../../../configs/facial_tao/sample_faciallandmarks_config.txt file:///tmp/' + filename + ' /tmp/'+ filename_no_ext)
# Copy output to storage blob
blob = BlobClient.from_connection_string(conn_str=args.conn_str,container_name=args.container_name, blob_name= 'outputs/'+ filename_no_ext + '.264')
try:
with open('/tmp/' + filename_no_ext + '.264',"rb") as f:
blob.upload_blob(f)
print('emotionout.264 uploaded to container: ' + args.container_name + ' successfully')
except Exception as e:
print(e)
blob_client = blob_service_client.get_blob_client(container=args.container_name, blob=file.name)
blob_client.delete_blob()
time.sleep(1)
if b == 1:
break
time.sleep(10)
| NVIDIA_AI_Enterprise_AzureML-main | src/components/DeepStream/emotion/src/run.py |
# Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
"""Script to transform Wider face dataset to kitti format for Facenet tutorial."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import cv2
import numpy as np
def letterbox_image(image, target_size):
"""Resize image preserving aspect ratio using padding.
Args:
image (numpy.ndarray): Input image to be resized
target_size (tuple): Target image dimensions in (H,W,C) format.
Return:
new_image (numpy.ndarray): Output Image post resize.
scale (float): Scale factor of resize.
dx (int): Padding along x dimension to maintain aspect ratio.
dy (int): Padding along y dimension to maintain aspect ratio.
"""
iw, ih = image.shape[0:2][::-1]
w, h = target_size[1], target_size[0]
scale = min(float(w)/float(iw), float(h)/float(ih))
nw = int(iw*scale)
nh = int(ih*scale)
image = cv2.resize(image, (nw, nh), interpolation=cv2.INTER_CUBIC)
new_image = np.zeros(target_size, dtype=np.uint8)
dx = (w-nw)//2
dy = (h-nh)//2
new_image[dy:dy+nh, dx:dx+nw, :] = image
return new_image, scale, dx, dy
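# For example, letterboxing a 1024x768 image to a 416x416x3 target gives
# scale = min(416/1024, 416/768) = 0.40625, a resized size of 416x312, and padding
# dx = 0, dy = 52 (equal bars above and below the image).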
def adjust_box_coords(x1, y1, x2, y2, scale, dx, dy, image_height, image_width):
"""Adjust bounding box coordinates based on resize.
Args:
x1 (int): Top left x-coordinate of bounding box before resize.
y1 (int): Top left y-coordinate of bounding box before resize.
x2 (int): Bottom right x-coordinate of bounding box before resize.
y2 (int): Bottom right y-coordinate of bounding box before resize.
scale (int): Scale factor of resize.
dx (int): Padding along x dimension to maintain aspect ratio.
dy (int): Padding along y dimension to maintain aspect ratio.
image_height (int): Height of resized image.
image_width (int): Width of resized image.
Return:
x1 (int): Top left x-coordinate of bounding box after resize.
y1 (int): Top left y-coordinate of bounding box after resize.
x2 (int): Bottom right x-coordinate of bounding box after resize.
y2 (int): Bottom right y-coordinate of bounding box after resize.
"""
x1 = (int(dx + x1*scale))
x1 = min(max(x1, 0), image_width)
y1 = (int(dy + y1*scale))
y1 = min(max(y1, 0), image_height)
x2 = (int(dx + x2*scale))
x2 = min(max(x2, 0), image_width)
y2 = (int(dy + y2*scale))
y2 = min(max(y2, 0), image_height)
return x1, y1, x2, y2
def parse_args(args=None):
"""parse the arguments."""
parser = argparse.ArgumentParser(description='Transform Wider dataset for Facenet tutorial')
parser.add_argument(
"--input_image_dir",
type=str,
required=True,
help="Input directory to Wider dataset images."
)
parser.add_argument(
"--input_label_file",
type=str,
required=True,
help="Input path to Wider dataset labels."
)
parser.add_argument(
"--output_dir",
type=str,
required=True,
help="Ouput directory to TLT dataset."
)
parser.add_argument(
"--image_height",
type=int,
required=True,
help="Height of output image."
)
parser.add_argument(
"--image_width",
type=int,
required=True,
help="Width of output image."
)
parser.add_argument(
"--grayscale",
required=False,
action='store_true',
help='Convert images to grayscale.'
)
return parser.parse_args(args)
def main(args=None):
"""Main function for data preparation."""
args = parse_args(args)
target_img_path = os.path.join(args.output_dir, "images")
target_label_path = os.path.join(args.output_dir, "labels")
target_size = (args.image_height, args.image_width, 3)
if not os.path.exists(target_img_path):
os.makedirs(target_img_path)
else:
print("This script will not run as output image path already exists.")
return
if not os.path.exists(target_label_path):
os.makedirs(target_label_path)
else:
print("This script will not run as output label path already exists.")
return
# read wider ground truth file
fd_gt_file = os.path.join(args.input_label_file)
f = open(fd_gt_file, 'r')
fd_gt = f.readlines()
f.close()
total_cnt = 0
i = 0
image_name = None
while i < len(fd_gt):
line = fd_gt[i].strip()
if "jpg" in line:
# start of new image
total_cnt += 1
image_name = line
image_prefix = image_name.split("/")[-1].split(".")[0]
image_path = os.path.join(args.input_image_dir, line)
if not os.path.exists(image_path):
print("Error reading image, Please check data")
return
# Transform Image
img = cv2.imread(image_path)
new_image, scale, dx, dy = letterbox_image(img, target_size)
if args.grayscale:
new_image = cv2.cvtColor(new_image, cv2.COLOR_BGR2GRAY)
new_image = np.expand_dims(new_image, axis=-1)
new_image = np.repeat(new_image, 3, axis=-1)
i += 1
num_bbox_in_image = int(fd_gt[i].strip())
i += 1
labels = []
for k in range(num_bbox_in_image):
label = fd_gt[i].strip()
label_parts = label.split(" ")
kitti_output = [0]*15
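# KITTI labels use 15 space-separated fields: type, truncated, occluded, alpha,
# bbox (left, top, right, bottom), dimensions (3), location (3), rotation_y.
# Only the type, field 2 and the bbox are populated here; field 2 is filled from
# WIDER annotation field 8, which appears to be the occlusion flag.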
kitti_output[0] = "face"
kitti_output[2] = label_parts[8]
x1 = int(label_parts[0])
y1 = int(label_parts[1])
x2 = int(label_parts[2]) + x1
y2 = int(label_parts[3]) + y1
x1, y1, x2, y2 = adjust_box_coords(
x1, y1, x2, y2, scale, dx, dy, args.image_height, args.image_width)
kitti_output[4:8] = x1, y1, x2, y2
kitti_output = [str(x) for x in kitti_output]
labels.append(" ".join(kitti_output))
i += 1
if len(labels) != num_bbox_in_image:
print("Error parsing label, skipping")
continue
# save image and label
cv2.imwrite(os.path.join(target_img_path, image_prefix+".png"), new_image)
# save label
with open(os.path.join(target_label_path, image_prefix+".txt"), 'w') as f:
for item in labels:
f.write("%s\n" % item)
elif set(line.split(" ")) == {'0'}:
# no faces in image, continuing
i += 1
else:
print("Error parsing labels, Please check data")
return
print("Total {} samples in dataset".format(total_cnt))
if __name__ == "__main__":
main()
| NVIDIA_AI_Enterprise_AzureML-main | src/components/tao/tools/convert_multiple_streams_wider_to_kitti/src/convert_wider_to_kitti.py |
# Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
import argparse
import re
import mlflow
def parse_args(args=None):
"""parse the arguments."""
parser = argparse.ArgumentParser(description='Tool to get parameters and variables from train logfile and upload them as AzureML metrics')
parser.add_argument(
"--logfile",
type=str,
required=True,
help="Full path to train logfile"
)
parser.add_argument(
"--class_list",
type=str,
required=True,
help="Comma separated list of classes."
)
parser.add_argument(
"--num_epochs",
type=int,
required=True,
help="Total Number of Epochs."
)
return parser.parse_args(args)
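# Example invocation (the values are placeholders for a DetectNet_v2 training run):
#   python parse_info.py --logfile /logs/train.log --class_list car,person --num_epochs 120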
def buildRegExs(class_list,num_epochs):
    # raw strings so that the regex escapes (\d, \., \(, \s, \|) are not treated as string escapes
    epochpattern = r'Epoch (?P<current_epoch>\d+)/{}: loss: (?P<loss>\d+\.\d+) learning rate: (?P<learning_rate>\d+\.\d+) '.format(num_epochs)
    meanavgprecpattern = r'Mean average_precision \(in %\): (?P<mean_average_precision>\d+\.\d+)'
    class_list = class_list.replace(',', '|')
    classpattern = r'(?P<class>{})\s+\|\s+(?P<class_precision>\d+\.\d+) '.format(class_list)
return epochpattern, meanavgprecpattern, classpattern
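# Illustrative sketch (assumed log excerpts, not copied from a real run) of lines the patterns
# above are designed to match, e.g. with class_list="car,person" and num_epochs=120:
#   "Epoch 12/120: loss: 0.01234 learning rate: 0.00050 "
#   "Mean average_precision (in %): 71.2345"
#   "car        |   68.1234 "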
def getEpochInfo(epochpattern,line,current_epoch):
matches = re.finditer(epochpattern, line)
for match in matches:
current_epoch = int(match.groupdict()['current_epoch'])
loss = float(match.groupdict()['loss'])
mlflow.log_metric('loss', loss, step=current_epoch)
learning_rate = float(match.groupdict()['learning_rate'])
mlflow.log_metric('learning_rate', learning_rate, step=current_epoch)
return current_epoch
def getClassInfo(classpattern,line,current_epoch):
matches = re.finditer(classpattern, line)
for match in matches:
        class_name = match.groupdict()['class']
        class_precision = float(match.groupdict()['class_precision'])
        mlflow.log_metric('{}_precision'.format(class_name), class_precision, step=current_epoch + 1)
return
def getAveragePrecisionInfo(meanavgprecpattern ,line,current_epoch):
matches = re.finditer(meanavgprecpattern, line)
for match in matches:
mean_average_precision = float(match.groupdict()['mean_average_precision'])
mlflow.log_metric('mean_average_precision', mean_average_precision, step=current_epoch+1)
return
def logMetrics(logfile, class_list, num_epochs):
    epochpattern, meanavgprecpattern, classpattern = buildRegExs(class_list, num_epochs)
    # read the training log and close the file handle right away
    with open(logfile, 'r') as f:
        lines = f.readlines()
    current_epoch = 0
    for line in lines:
        current_epoch = getEpochInfo(epochpattern, line, current_epoch)
        getAveragePrecisionInfo(meanavgprecpattern, line, current_epoch)
        getClassInfo(classpattern, line, current_epoch)
    return
def main(args=None):
args = parse_args(args)
logfile = args.logfile
class_list = args.class_list
num_epochs = args.num_epochs
logMetrics(logfile,class_list,num_epochs)
if __name__ == "__main__":
main() | NVIDIA_AI_Enterprise_AzureML-main | src/components/tao/object_detection/detectnet_v2/tao_detectnet_v2_train/src/parse_info.py |
# Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
import argparse
import os
from PIL import Image
import mlflow
def parse_args(args=None):
"""parse the arguments."""
parser = argparse.ArgumentParser(description='Tool to upload images to be previewed on AzureML')
parser.add_argument(
"--image_dir",
type=str,
required=True,
help="Directory to find images to upload."
)
parser.add_argument(
"--num_images",
type=int,
required=True,
help="Number of images to upload."
)
return parser.parse_args(args)
def getImagesFilenames(dirName, numFiles):
    listOfFile = os.listdir(dirName)
    allFiles = list()
    # Iterate over all the entries until enough have been captured
    sofar = 0
    for entry in listOfFile:
        # select PNG images by their file extension
        extension = os.path.splitext(entry)[1].lower()
        if extension == '.png':
            if sofar < numFiles:
                # Create full path
                fullPath = os.path.join(dirName, entry)
                print(fullPath)
                allFiles.append(fullPath)
                sofar = sofar + 1
            else:
                return allFiles
    return allFiles
def uploadImages(imagesToUpload):
    for image in imagesToUpload:
        img = Image.open(image)
        filename = os.path.basename(image)
        mlflow.log_image(img, filename)
def main(args=None):
args = parse_args(args)
image_dir = args.image_dir
num_images = args.num_images
if not os.path.exists(image_dir):
print("This provided image directory does not exist.")
return
imagesToUpload = getImagesFilenames(image_dir,num_images)
uploadImages(imagesToUpload)
if __name__ == "__main__":
main()
| NVIDIA_AI_Enterprise_AzureML-main | src/components/tao/object_detection/detectnet_v2/tao_detectnet_v2_inference/src/preview_images.py |
import unittest
import os
# run all tests
if __name__ == '__main__':
loader = unittest.TestLoader()
suite = loader.discover(os.path.dirname(os.path.abspath(__file__)))
runner = unittest.TextTestRunner()
runner.run(suite) | MDL-SDK-master | examples/mdl_python/tests/__init__.py |
import pymdlsdk
import os
class SDK():
neuray: pymdlsdk.INeuray = None
transaction: pymdlsdk.ITransaction = None
def _get_examples_search_path(self):
"""Try to get the example search path or returns 'mdl' sub folder of the current directory if it failed."""
# get the environment variable that is used in all MDL SDK examples
example_sp = os.getenv('MDL_SAMPLES_ROOT')
# fall back to a path relative to this script file
if example_sp == None or not os.path.exists(example_sp):
example_sp = os.path.join(os.path.dirname(os.path.realpath(__file__)))
if example_sp == None or not os.path.exists(example_sp):
example_sp = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', '..')
# go down into the mdl folder
example_sp = os.path.join(example_sp, 'mdl')
# fall back to the current folder
if not os.path.exists(example_sp):
example_sp = './mdl'
return os.path.abspath(example_sp)
def load(self, addExampleSearchPath: bool = True, loadImagePlugins: bool = True):
"""Initialize the SDK and get some common interface for basic testing"""
# load neuray
self.neuray = pymdlsdk.load_and_get_ineuray('')
if not self.neuray.is_valid_interface():
raise Exception('Failed to load the MDL SDK.')
# add MDL search paths
with self.neuray.get_api_component(pymdlsdk.IMdl_configuration) as cfg:
# add default search paths
cfg.add_mdl_system_paths()
cfg.add_mdl_user_paths()
# get the example search path that is used for all MDL SDK examples
# falls back to `mdl` in the current working directory
if addExampleSearchPath:
                example_sp: str = self._get_examples_search_path()
cfg.add_mdl_path(example_sp)
# Load plugins
if loadImagePlugins:
if not pymdlsdk.load_plugin(self.neuray, 'nv_openimageio'):
raise Exception('Failed to load the \'nv_openimageio\' plugin.')
if not pymdlsdk.load_plugin(self.neuray, 'dds'):
raise Exception('Failed to load the \'dds\' plugin.')
# start neuray
resultCode = self.neuray.start()
if resultCode != 0:
            raise Exception('Failed to initialize the SDK. Result code: ' + str(resultCode))
# create a DB transaction
with self.neuray.get_api_component(pymdlsdk.IDatabase) as database, \
database.get_global_scope() as scope:
self.transaction = scope.create_transaction()
def unload(self, commitTransaction: bool = True):
"""Release all components created in the 'load' function"""
if commitTransaction:
self.transaction.commit()
self.transaction = None
self.neuray = None
pymdlsdk._print_open_handle_statistic()
# Unload the MDL SDK
if not pymdlsdk.unload():
raise Exception('Failed to unload the SDK.')
| MDL-SDK-master | examples/mdl_python/tests/setup.py |
import unittest
import pymdlsdk
from setup import SDK
class Main(unittest.TestCase):
sdk: SDK = None
@classmethod
def setUpClass(self):
print(f"Running tests in {__file__}")
self.sdk = SDK()
self.sdk.load(addExampleSearchPath=False, loadImagePlugins=False)
@classmethod
def tearDownClass(self):
self.sdk.unload()
self.sdk = None
print(f"\nFinished tests in {__file__}\n")
def test_setupIsDone(self):
self.assertIsNotNone(self.sdk)
self.assertIsNotNone(self.sdk.neuray)
self.assertIsNotNone(self.sdk.transaction)
def test_IString(self):
i: pymdlsdk.IInterface = self.sdk.transaction.create("String", 0, None) # using 'create' at least once
self.assertTrue(i.is_valid_interface(), "Invalid IInterface") # returns an IInterface
a: pymdlsdk.IString = i.get_interface(pymdlsdk.IString) # needs casting
self.assertTrue(a.is_valid_interface(), "Invalid IInterface")
value = a.get_c_str()
self.assertEqual(value, "")
value = "MDL"
a.set_c_str(value)
value2 = a.get_c_str()
self.assertEqual(value, value2)
def test_ISize(self):
a: pymdlsdk.ISize = self.sdk.transaction.create_as(pymdlsdk.ISize, "Size", 0, None)
self.assertTrue(a.is_valid_interface(), "Invalid IInterface")
value = a.get_value()
self.assertEqual(value, 0)
value = 12345
a.set_value(value)
value2 = a.get_value()
self.assertEqual(value, value2)
def test_IFloat32(self):
a: pymdlsdk.IFloat32 = self.sdk.transaction.create_as(pymdlsdk.IFloat32, "Float32")
self.assertTrue(a.is_valid_interface(), "Invalid IInterface")
value = a.get_value()
self.assertEqual(value, 0)
value = -1.2
a.set_value(value)
value2 = a.get_value()
self.assertAlmostEqual(value, value2) # floating point off by 0.0000000476837158
def test_IUint8(self):
a: pymdlsdk.IUint8 = self.sdk.transaction.create_as(pymdlsdk.IUint8, "Uint8")
self.assertTrue(a.is_valid_interface(), "Invalid IInterface")
value = a.get_value()
self.assertEqual(value, 0)
value = 234
a.set_value(value)
value2 = a.get_value()
self.assertEqual(value, value2)
def test_IColor(self):
a: pymdlsdk.IColor = self.sdk.transaction.create_as(pymdlsdk.IColor, "Color")
self.assertTrue(a.is_valid_interface(), "Invalid IInterface")
value = a.get_value()
self.assertEqual(value.r, 0)
self.assertEqual(value.g, 0)
self.assertEqual(value.b, 0)
self.assertEqual(value.a, 0)
value.r = 0.25
value.g = 0.5
value.b = 0.75
value.a = 1.0
a.set_value(value)
value2 = a.get_value()
self.assertEqual(value.r, value2.r)
self.assertEqual(value.g, value2.g)
self.assertEqual(value.b, value2.b)
self.assertEqual(value.a, value2.a)
def test_IColor3(self):
a: pymdlsdk.IColor3 = self.sdk.transaction.create_as(pymdlsdk.IColor3, "Color3")
self.assertTrue(a.is_valid_interface(), "Invalid IInterface")
value = a.get_value()
self.assertEqual(value.r, 0)
self.assertEqual(value.g, 0)
self.assertEqual(value.b, 0)
self.assertEqual(value.a, 1)
value.r = 0.25
value.g = 0.5
value.b = 0.75
a.set_value(value)
value2 = a.get_value()
self.assertEqual(value.r, value2.r)
self.assertEqual(value.g, value2.g)
self.assertEqual(value.b, value2.b)
def test_IFloat32_3(self):
a: pymdlsdk.IFloat32_3 = self.sdk.transaction.create_as(pymdlsdk.IFloat32_3, "Float32<3>")
self.assertTrue(a.is_valid_interface(), "Invalid IInterface")
value = a.get_value()
self.assertEqual(value.x, 0)
self.assertEqual(value.y, 0)
self.assertEqual(value.z, 0)
value.x = 1.2
value.y = 3.4
value.z = -5.6
a.set_value(value)
value2 = a.get_value()
self.assertEqual(value.x, value2.x)
self.assertEqual(value.y, value2.y)
self.assertEqual(value.z, value2.z)
def test_IFloat64_2(self):
a: pymdlsdk.IFloat64_2 = self.sdk.transaction.create_as(pymdlsdk.IFloat64_2, "Float64<2>")
self.assertTrue(a.is_valid_interface(), "Invalid IInterface")
value = a.get_value()
self.assertEqual(value.x, 0)
self.assertEqual(value.y, 0)
value.x = 1.2
value.y = 3.4
a.set_value(value)
value2 = a.get_value()
self.assertEqual(value.x, value2.x)
self.assertEqual(value.y, value2.y)
def test_IBoolean_4(self):
a: pymdlsdk.IBoolean_4 = self.sdk.transaction.create_as(pymdlsdk.IBoolean_4, "Boolean<4>")
self.assertTrue(a.is_valid_interface(), "Invalid IInterface")
value = a.get_value()
self.assertFalse(value.x)
self.assertFalse(value.y)
self.assertFalse(value.z)
self.assertFalse(value.w)
value.x = True
value.y = False
value.z = True
value.w = False
a.set_value(value)
value2 = a.get_value()
self.assertEqual(value.x, value2.x)
self.assertEqual(value.y, value2.y)
self.assertEqual(value.z, value2.z)
self.assertEqual(value.w, value2.w)
def test_ISint32_2_3(self):
a: pymdlsdk.ISint32_2_3 = self.sdk.transaction.create_as(pymdlsdk.ISint32_2_3, "Sint32<2,3>")
self.assertTrue(a.is_valid_interface(), "Invalid IInterface")
value = a.get_value()
self.assertEqual(value.xx, 0)
self.assertEqual(value.xy, 0)
self.assertEqual(value.xz, 0)
self.assertEqual(value.yx, 0)
self.assertEqual(value.yy, 0)
self.assertEqual(value.yz, 0)
value.xx = -11
value.xy = 12
value.xz = -13
value.yx = 14
value.yy = -15
value.yz = 16
a.set_value(value)
value2 = a.get_value()
self.assertEqual(value.xx, value2.xx)
self.assertEqual(value.xy, value2.xy)
self.assertEqual(value.xz, value2.xz)
self.assertEqual(value.yx, value2.yx)
self.assertEqual(value.yy, value2.yy)
self.assertEqual(value.yz, value2.yz)
def test_IFloat32_3_3(self):
a: pymdlsdk.IFloat32_3_3 = self.sdk.transaction.create_as(pymdlsdk.IFloat32_3_3, "Float32<3,3>")
self.assertTrue(a.is_valid_interface(), "Invalid IInterface")
value = a.get_value()
self.assertEqual(value.xx, 0)
self.assertEqual(value.xy, 0)
self.assertEqual(value.xz, 0)
self.assertEqual(value.yx, 0)
self.assertEqual(value.yy, 0)
self.assertEqual(value.yz, 0)
self.assertEqual(value.zx, 0)
self.assertEqual(value.zy, 0)
self.assertEqual(value.zz, 0)
value.xx = -11.0
value.xy = 12.0
value.xz = -13.0
value.yx = 14.0
value.yy = -15.0
value.yz = 16.0
value.zx = -17.0
value.zy = 18.0
value.zz = -19.0
a.set_value(value)
value2 = a.get_value()
self.assertEqual(value.xx, value2.xx)
self.assertEqual(value.xy, value2.xy)
self.assertEqual(value.xz, value2.xz)
self.assertEqual(value.yx, value2.yx)
self.assertEqual(value.yy, value2.yy)
self.assertEqual(value.yz, value2.yz)
self.assertEqual(value.zx, value2.zx)
self.assertEqual(value.zy, value2.zy)
self.assertEqual(value.zz, value2.zz)
def test_IArray(self):
array: pymdlsdk.IArray = self.sdk.transaction.create_as(pymdlsdk.IArray, "Float32[4]")
self.assertTrue(array.is_valid_interface(), "Invalid IInterface")
self.assertEqual(array.get_length(), 4)
element0: pymdlsdk.IFloat32 = array.get_element_as(pymdlsdk.IFloat32, 0)
element1: pymdlsdk.IFloat32 = array.get_element(1).get_interface(pymdlsdk.IFloat32)
element2: pymdlsdk.IFloat32 = array.get_element(2).get_interface(pymdlsdk.IFloat32)
element3: pymdlsdk.IFloat32 = array.get_element(3).get_interface(pymdlsdk.IFloat32)
self.assertTrue(element0.is_valid_interface(), "Invalid IInterface")
self.assertTrue(element1.is_valid_interface(), "Invalid IInterface")
self.assertTrue(element2.is_valid_interface(), "Invalid IInterface")
self.assertTrue(element3.is_valid_interface(), "Invalid IInterface")
self.assertEqual(element0.get_value(), 0)
self.assertEqual(element1.get_value(), 0)
self.assertEqual(element2.get_value(), 0)
self.assertEqual(element3.get_value(), 0)
element0.set_value(-1.0)
element1.set_value(2.0)
element2.set_value(-3.0)
element3.set_value(-4.0)
array.set_element(0, element0)
array.set_element(1, element1)
array.set_element(2, element2)
array.set_element(3, element3)
self.assertEqual(element0.get_value(), array.get_element(0).get_interface(pymdlsdk.IFloat32).get_value())
self.assertEqual(element1.get_value(), array.get_element_as(pymdlsdk.IFloat32, 1).get_value())
self.assertEqual(element2.get_value(), array.get_element_as(pymdlsdk.IFloat32, 2).get_value())
self.assertEqual(element3.get_value(), array.get_element_as(pymdlsdk.IFloat32, 3).get_value())
def test_IDynamic_array(self):
array: pymdlsdk.IDynamic_array = self.sdk.transaction.create_as(pymdlsdk.IDynamic_array, "String[]")
self.assertTrue(array.is_valid_interface(), "Invalid IInterface")
element0: pymdlsdk.IString = self.sdk.transaction.create_as(pymdlsdk.IString, "String")
element1: pymdlsdk.IString = self.sdk.transaction.create_as(pymdlsdk.IString, "String")
element0.set_c_str("foo")
element1.set_c_str("bar")
array.push_back(element0)
array.push_back(element1)
self.assertEqual(array.get_length(), 2)
self.assertEqual(element0.get_c_str(), array.front().get_interface(pymdlsdk.IString).get_c_str())
self.assertEqual(element0.get_c_str(), array.front_as(pymdlsdk.IString).get_c_str())
self.assertEqual(element1.get_c_str(), array.back().get_interface(pymdlsdk.IString).get_c_str())
self.assertEqual(element1.get_c_str(), array.back_as(pymdlsdk.IString).get_c_str())
self.assertEqual(element0.get_c_str(), array.get_element(0).get_interface(pymdlsdk.IString).get_c_str())
self.assertEqual(element1.get_c_str(), array.get_element_as(pymdlsdk.IString, 1).get_c_str())
array.clear()
self.assertEqual(array.get_length(), 0)
def test_IStructure(self):
tile: pymdlsdk.IStructure = self.sdk.transaction.create_as(pymdlsdk.IStructure, "Uvtile")
self.assertTrue(tile.is_valid_interface(), "Invalid IInterface")
u: pymdlsdk.ISint32 = self.sdk.transaction.create_as(pymdlsdk.ISint32, "Sint32")
u.set_value(1)
self.assertEqual(u.get_value(), 1)
v: pymdlsdk.ISint32 = self.sdk.transaction.create_as(pymdlsdk.ISint32, "Sint32")
v.set_value(2)
self.assertEqual(v.get_value(), 2)
tile.set_value("u", u)
tile.set_value("v", v)
self.assertTrue(tile.has_key("u"))
self.assertTrue(tile.has_key("v"))
self.assertFalse(tile.has_key("not_a_member"))
self.assertEqual(u.get_value(), tile.get_value("u").get_interface(pymdlsdk.ISint32).get_value())
self.assertEqual(v.get_value(), tile.get_value_as(pymdlsdk.ISint32, "v").get_value())
# run all tests of this file
if __name__ == '__main__':
unittest.main() | MDL-SDK-master | examples/mdl_python/tests/test_mi_idata.py |
#*****************************************************************************
# Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#*****************************************************************************
import sys
import os
import gc
import traceback
import time
# load the binding module
print("About to load the MDL Python Bindings")
import pymdlsdk
print("Loaded the MDL Python Bindings")
#--------------------------------------------------------------------------------------------------
# Utilities
#--------------------------------------------------------------------------------------------------
def get_examples_search_path():
"""Try to get the example search path or returns 'mdl' sub folder of the current directory if it failed."""
# get the environment variable that is used in all MDL SDK examples
example_sp = os.getenv('MDL_SAMPLES_ROOT')
# fall back to a path relative to this script file
if example_sp == None or not os.path.exists(example_sp):
example_sp = os.path.join(os.path.dirname(os.path.realpath(__file__)))
if example_sp == None or not os.path.exists(example_sp):
example_sp = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', '..')
# go down into the mdl folder
example_sp = os.path.join(example_sp, 'mdl')
# fall back to the current folder
if not os.path.exists(example_sp):
example_sp = './mdl'
return os.path.abspath(example_sp)
#--------------------------------------------------------------------------------------------------
# MDL Python Example
#--------------------------------------------------------------------------------------------------
def load_module(neuray: pymdlsdk.INeuray, transaction: pymdlsdk.ITransaction, mdl_module_name: str) -> str:
"""Load the module given it's db name"""
with neuray.get_api_component(pymdlsdk.IMdl_impexp_api) as imp_exp, \
neuray.get_api_component(pymdlsdk.IMdl_factory) as mdl_factory, \
mdl_factory.create_execution_context() as context:
if imp_exp.load_module(transaction, mdl_module_name, context) < 0:
return "" # loading failed
with mdl_factory.get_db_module_name(mdl_module_name) as istring:
return istring.get_c_str()
#--------------------------------------------------------------------------------------------------
def get_function_by_name(neuray: pymdlsdk.INeuray, transaction: pymdlsdk.ITransaction, module_db_name: str, mdl_function_name: str) -> str:
"""Access an MDL module and find the first function that matches a given name"""
with transaction.access_as(pymdlsdk.IModule, module_db_name) as mdl_module:
if not mdl_module.is_valid_interface():
return ""
# check if the function name is provided with signature (we don't do overload resolution then)
if mdl_function_name[-1] == ')':
function_db_name: str = module_db_name + "::" + mdl_function_name
with transaction.access_as(pymdlsdk.IFunction_definition, function_db_name) as mdl_function:
if not mdl_function.is_valid_interface():
return ""
else:
return function_db_name
# do overload resolution otherwise and return the first function with that name
        # if more than one overload is expected, this function should be extended to narrow down a unique
# function by specifying parameter types and names. here we simply return the first
with mdl_module.get_function_overloads(mdl_function_name, None) as overloads:
if overloads.get_length() == 0:
return ""
if overloads.get_length() != 1:
print(f"WARNING: the selected function `{mdl_function}` is not unique in module `{mdl_module.get_mdl_name()}`")
with overloads.get_element(0) as element:
with element.get_interface(pymdlsdk.IString) as istring_db_name:
return istring_db_name.get_c_str()
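# Illustrative sketch of the two ways a function can be addressed via the helper above
# (the variable names are placeholders for a transaction and a module DB name):
#   get_function_by_name(neuray, trans, module_db, "color_checker(float,color,color)")  # exact signature
#   get_function_by_name(neuray, trans, module_db, "color_checker")  # first matching overload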
#--------------------------------------------------------------------------------------------------
def run_example(neuray):
# since the MDL SDK has limited database with no scopes and only one transaction
# we access the transaction in the beginning and keep it open
with neuray.get_api_component(pymdlsdk.IDatabase) as database, \
database.get_global_scope() as scope, \
scope.create_transaction() as transaction:
# load the two modules we want to use
core_definitions_module_db_name: str = load_module(neuray, transaction, "::nvidia::core_definitions")
tutorials_module_db_name: str = load_module(neuray, transaction, "::nvidia::sdk_examples::tutorials")
if not core_definitions_module_db_name or not tutorials_module_db_name:
print("Failed to load MDL modules.")
return
# get the function definition names
plastic_db_name: str = get_function_by_name(neuray, transaction, core_definitions_module_db_name, "scratched_plastic_v2")
checker_db_name: str = get_function_by_name(neuray, transaction, tutorials_module_db_name, "color_checker(float,color,color)")
# get the actual function definitions
with transaction.access_as(pymdlsdk.IFunction_definition, plastic_db_name) as plastic_definition, \
transaction.access_as(pymdlsdk.IFunction_definition, checker_db_name) as checker_definition, \
neuray.get_api_component(pymdlsdk.IMdl_factory) as mdl_factory, \
mdl_factory.create_expression_factory(transaction) as ef, \
mdl_factory.create_value_factory(transaction) as vf:
# create the color_checker call
checker_call_db_name = "mdlexample::color_checker"
with ef.create_expression_list() as parameters, \
vf.create_float(5.0) as scale_value, \
ef.create_constant(scale_value) as scale_arg, \
vf.create_color(0.25, 0.5, 0.75) as b_value, \
ef.create_constant(b_value) as b_arg:
# we leave the parameter 'a' at its default value (white)
# add the created constant expressions to the new parameter list
parameters.add_expression("scale", scale_arg)
parameters.add_expression("b", b_arg)
# create a function call and pass the parameter list.
# Values that are not specified stay at their defaults.
# Parameter without default need to be specified (visible in the definitions).
with checker_definition.create_function_call(parameters) as checker_call:
transaction.store(checker_call, checker_call_db_name)
# create the plastic call
plastic_call_db_name = "mdlexample::pastic"
with ef.create_expression_list() as parameters, \
ef.create_call(checker_call_db_name) as call_arg: # connect the checker_call
# add the call expression to the new parameter list and create a function call
parameters.add_expression("diffuse_color", call_arg)
with plastic_definition.create_function_call(parameters) as plastic_call:
transaction.store(plastic_call, plastic_call_db_name)
# access the call
with transaction.access_as(pymdlsdk.IFunction_call, plastic_call_db_name) as plastic_call:
if not plastic_call.is_valid_interface():
return
# from here we can trigger compilation and code generation
# alternatively, we can write out the created material graph as an MDL
# Create the module builder.
# Note, the MDL modules have a `mdl::` prefix and the module name has to be unique
module_name = "mdl::new_module_9339b384_b05c_11ec_b909_0242ac120002"
with neuray.get_api_component(pymdlsdk.IMdl_factory) as mdl_factory, \
mdl_factory.create_execution_context() as execution_context, \
mdl_factory.create_expression_factory(transaction) as ef, \
ef.create_annotation_block() as empty_anno_block:
# create a module builder and add the material as a variant
with mdl_factory.create_module_builder(transaction, module_name, pymdlsdk.MDL_VERSION_1_6, pymdlsdk.MDL_VERSION_LATEST, execution_context) as module_builder:
if not module_builder.is_valid_interface():
print("Error: Failed to create a module builder.")
return
if module_builder.add_variant(
"MyGraphMaterial", # material/function name
plastic_call.get_function_definition(), # prototype
plastic_call.get_arguments(), # default arguments, basically our graph
empty_anno_block, # drop annotations
empty_anno_block, # drop annotations
True, # export the material/function
execution_context) != 0:
print("Error: Failed to add variant to module builder")
for i in range(execution_context.get_messages_count()):
print(execution_context.get_message(i).get_string())
return
# export to file
filename = os.path.join(os.getcwd(), "MyGraphModule.mdl")
with neuray.get_api_component(pymdlsdk.IMdl_impexp_api) as imp_exp:
execution_context.set_option("bundle_resources", True)
if imp_exp.export_module(transaction, module_name, filename, execution_context) != 0:
print(f"Error: Could not export material graph to module '{filename}'")
else:
print(f"Success: exported material graph to module '{filename}'")
# close the transaction
transaction.commit()
#--------------------------------------------------------------------------------------------------
# Entry Point
#--------------------------------------------------------------------------------------------------
def main():
# Get the INeuray interface in a suitable smart pointer that works as context manager
with pymdlsdk.load_and_get_ineuray('') as neuray:
if not neuray.is_valid_interface():
raise Exception('Failed to load the MDL SDK.')
# configuration settings go here
# get the component using a context manager
with neuray.get_api_component(pymdlsdk.IMdl_configuration) as cfg:
# get the example search path that is used for all MDL SDK examples
# falls back to `mdl` in the current working directory
cfg.add_mdl_system_paths()
cfg.add_mdl_user_paths()
example_sp = get_examples_search_path()
cfg.add_mdl_path(example_sp)
# Load the 'nv_openimageio' and 'dds' plug-ins
if not pymdlsdk.load_plugin(neuray, 'nv_openimageio'):
raise Exception('Failed to load the \'nv_openimageio\' plugin.')
if not pymdlsdk.load_plugin(neuray, 'dds'):
raise Exception('Failed to load the \'dds\' plugin.')
# after the configuration is done, start neuray.
resultCode = neuray.start()
if resultCode != 0:
            raise Exception('Failed to initialize the SDK. Result code: ' + str(resultCode))
# the actual example that should be illustrated
run_example(neuray)
# Shutting the MDL SDK down. Again, a return code of 0 indicates success.
resultCode = neuray.shutdown()
if resultCode != 0:
            raise Exception('Failed to shutdown the SDK. Result code: ' + str(resultCode))
# Unload the MDL SDK
if not pymdlsdk.unload():
raise Exception('Failed to unload the SDK.')
print('Unloaded the MDL SDK.')
if __name__ == "__main__":
try:
# optional binding debugging:
# some more simple memory debugging output
# pymdlsdk._enable_print_ref_counts(False)
main()
# optional binding debugging:
# some more simple memory debugging output
# unreachable = gc.collect()
# print("\n%d unreachable objects detected during garbage collection.\n" % unreachable)
# pymdlsdk._print_open_handle_statistic()
# sleep to be able to read the output when starting from VS
time.sleep(2.0)
except Exception as e:
print("Unexpected error: ", sys.exc_info()[0])
print("Exception: ", e)
print("Traceback: ", traceback.format_exc())
input("\nPress Enter to continue...")
| MDL-SDK-master | examples/mdl_python/create_expression_graph/example_create_expression_graph.py |
#*****************************************************************************
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#*****************************************************************************
import sys
import os
import gc
import traceback
import time
print("Current working directory: " + os.getcwd())
print("\n")
# load the binding module
print("About to load the MDL Python Bindings")
# Note: if that import fails, make sure you have selected the same Python runtime version
# as the one you used while building the binding.
import pymdlsdk
print("Loaded the MDL Python Bindings")
#--------------------------------------------------------------------------------------------------
# Utilities
#--------------------------------------------------------------------------------------------------
def get_examples_search_path():
"""Try to get the example search path or returns 'mdl' sub folder of the current directory if it failed."""
# get the environment variable that is used in all MDL SDK examples
example_sp = os.getenv('MDL_SAMPLES_ROOT')
# fall back to a path relative to this script file
if example_sp == None or not os.path.exists(example_sp):
example_sp = os.path.join(os.path.dirname(os.path.realpath(__file__)))
if example_sp == None or not os.path.exists(example_sp):
example_sp = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', '..')
# go down into the mdl folder
example_sp = os.path.join(example_sp, 'mdl')
# fall back to the current folder
if not os.path.exists(example_sp):
example_sp = './mdl'
return os.path.abspath(example_sp)
#--------------------------------------------------------------------------------------------------
# MDL Python Example
#--------------------------------------------------------------------------------------------------
def inspect_definition(neuray, transaction, function_db_name):
    # Here, we don't use the `with`-blocks in order to see what happens.
    # Without the `with`s or manual releases, the objects are disposed by the garbage collection,
    # assuming that, for instance, the `function_def` object is disposed when leaving the
    # current function, as Python ref-counting should be able to see that there is no variable
    # pointing to the object anymore.
# access the module by querying it from the DB
function_def = transaction.access_as(pymdlsdk.IFunction_definition, function_db_name)
if not function_def.is_valid_interface():
return
print("\nThe Functions is a material: %s" % ("true" if function_def.is_material() else "false"))
# get some factories that allow to dump values and types as strings to the console
mdl_factory = neuray.get_api_component(pymdlsdk.IMdl_factory)
expression_factory = mdl_factory.create_expression_factory(transaction)
# iterate over parameters and get some info on them
param_count = function_def.get_parameter_count()
types = function_def.get_parameter_types()
defaults = function_def.get_defaults()
function_annotations = function_def.get_annotations() # not used here
parameter_annotations = function_def.get_parameter_annotations()
print("\nThe Functions has %d parameters:" % param_count)
for i in range(param_count):
# get parameter name
param_name = function_def.get_parameter_name(i)
print("* Name: %s" % param_name)
# get the parameter type
param_type = types.get_type(param_name).skip_all_type_aliases()
kind = param_type.get_kind()
print(" Type Kind: %s" % kind)
# get annotations for this parameter
anno_block = parameter_annotations.get_annotation_block(param_name)
# check if the block is valid, if not it means there are no annotations for this parameter
if anno_block.is_valid_interface():
print(" Parameter has %d Annotations" % anno_block.get_size())
# loop over the annotations, same schema as always, get name, use the name access details
for a in range(anno_block.get_size()):
anno = anno_block.get_annotation(a)
print(" + Name: %s" % anno.get_name())
anno_arguments = anno.get_arguments()
# not every annotation has arguments, in that case the object is invalid
if anno_arguments.is_valid_interface():
# loop over the arguments, get name, get details
for p in range(anno_arguments.get_size()):
anno_arg_name = anno_arguments.get_name(p)
print(" - Argument: %s" % anno_arg_name)
anno_arg_expr = anno_arguments.get_expression(anno_arg_name)
# Assuming constants here
if anno_arg_expr.get_kind() == pymdlsdk.IExpression.Kind.EK_CONSTANT:
anno_arg_value = anno_arg_expr.get_interface(pymdlsdk.IExpression_constant).get_value()
anno_arg_value_kind = anno_arg_value.get_kind()
print(" Value Kind: %s" % anno_arg_value_kind)
if anno_arg_value_kind == pymdlsdk.IValue.Kind.VK_FLOAT:
v = anno_arg_value.get_interface(pymdlsdk.IValue_float).get_value()
print(" Value: %f" % v)
# get the default value
# Note, compared to C++ API we can return the specialized value directly so
# manual casting based on the kind is not required, handling based on the kind is of course
param_default = defaults.get_expression(param_name)
if param_default.is_valid_interface():
# the simplest default value is a constant value
expr_kind = param_default.get_kind()
print(" Default Expression Kind: %s" % expr_kind)
if expr_kind == pymdlsdk.IExpression.Kind.EK_CONSTANT:
param_default_value = param_default.get_interface(pymdlsdk.IExpression_constant).get_value()
param_default_value_kind = param_default_value.get_kind()
# Note, calling get_interface() is not required here, just like for the expressions
print(" Default Value Kind: %s" % param_default_value_kind)
dump = False
if param_default_value_kind == pymdlsdk.IValue.Kind.VK_BOOL:
print(" Value: %s" % ("true" if param_default_value.get_interface(pymdlsdk.IValue_bool).get_value() else "false"))
elif param_default_value_kind == pymdlsdk.IValue.Kind.VK_INT:
print(" Value: %d" % param_default_value.get_interface(pymdlsdk.IValue_int).get_value())
# ...
elif param_default_value_kind == pymdlsdk.IValue.Kind.VK_FLOAT:
print(" Value: %f" % param_default_value.get_interface(pymdlsdk.IValue_float).get_value())
elif param_default_value_kind == pymdlsdk.IValue.Kind.VK_DOUBLE:
print(" Value: %f" % param_default_value.get_interface(pymdlsdk.IValue_double).get_value())
# ...
elif param_default_value_kind == pymdlsdk.IValue.Kind.VK_COLOR:
param_default_value_color = param_default_value.get_interface(pymdlsdk.IValue_color)
r = param_default_value_color.get_value(0).get_interface(pymdlsdk.IValue_float).get_value()
g = param_default_value_color.get_value(1).get_interface(pymdlsdk.IValue_float).get_value()
b = param_default_value_color.get_value(2).get_interface(pymdlsdk.IValue_float).get_value()
print(" Value: %f / %f / %f" % (r, g, b))
# ...
else:
dump = True
# if the default value is a function call we get a database name of that call
elif expr_kind == pymdlsdk.IExpression.Kind.EK_CALL:
function_call_db_name = param_default.get_interface(pymdlsdk.IExpression_call).get_call()
print(" Default Attached Call: %s" % function_call_db_name)
if dump:
# Note, this dumping of defaults here is very primitive
# There are more sophisticated ways to deal with parameters, defaults and annotations to come
default_text = expression_factory.dump(param_default, None, 1)
print(" Default (dump): %s" % default_text.get_c_str())
else:
print(" Default: (None)")
#--------------------------------------------------------------------------------------------------
def inspect_module(neuray, transaction, module_db_name):
# access the module by querying it from the DB
# also get some factories that allow to dump values and types as strings to the console
with transaction.access_as(pymdlsdk.IModule, module_db_name) as module, \
neuray.get_api_component(pymdlsdk.IMdl_factory) as mdl_factory, \
mdl_factory.create_type_factory(transaction) as type_factory, \
mdl_factory.create_value_factory(transaction) as value_factory:
if not module.is_valid_interface():
            # little caveat here, there is no break in a 'with'-block (language shortcoming?)
return # so return will exit the `inspect_module` function
print("MDL Module filename: %s" % module.get_filename())
print("\nThe Module imports the following %d modules:" % module.get_import_count())
for i in range(module.get_import_count()):
print("* %s" % module.get_import(i))
# Dump exported types.
with module.get_types() as types:
print("\nThe module contains the following %d types:" % types.get_size())
for i in range(types.get_size()):
with types.get_type(i) as ttype:
with type_factory.dump(ttype, 1) as istring:
print("* %s" % istring.get_c_str())
# Dump exported constants.
with module.get_constants() as constants:
print("\nThe module contains the following %d constants:" % constants.get_size())
for i in range(constants.get_size()):
name = constants.get_name(i)
with constants.get_value(i) as constant:
with value_factory.dump(constant, None, 1) as result:
print("* {} = {}".format(name, result.get_c_str()))
# Dump annotation definitions of the module.
print("\nThe module contains the following %d annotations:" % module.get_annotation_definition_count())
for i in range(module.get_annotation_definition_count()):
with module.get_annotation_definition(i) as anno_def:
print("* %s" % anno_def.get_mdl_simple_name())
print(" MDL Name: %s" % anno_def.get_name())
for p in range(anno_def.get_parameter_count()):
if p == 0:
print(" Parameters:")
print(" - '{}' of type '{}'".format(anno_def.get_parameter_name(p), anno_def.get_mdl_parameter_type_name(p)))
# Dump function definitions of the module.
print("\nThe module contains the following %d function definitions:" % module.get_function_count())
for i in range(module.get_function_count()):
print("* %s" % module.get_function(i))
# Dump material definitions of the module.
print("\nThe module contains the following %d material definitions:" % module.get_material_count())
for i in range(module.get_material_count()):
matdbname = module.get_material(i)
material_def = transaction.access_as(pymdlsdk.IFunction_definition, matdbname)
simple_name = material_def.get_mdl_simple_name()
print("* {}: {}".format(simple_name, matdbname))
#--------------------------------------------------------------------------------------------------
def get_db_module_name(neuray, module_mdl_name):
"""Return the db name of the given module."""
# When the module is loaded we can access it and all its definitions by accessing the DB
# for that we need to get a the database name of the module using the factory
with neuray.get_api_component(pymdlsdk.IMdl_factory) as mdl_factory:
with mdl_factory.get_db_module_name(module_mdl_name) as istring:
module_db_name = istring.get_c_str() # note, even though this name is simple and could
# be constructed by string operations, use the
                                                 # factory to be safe in case of unicode encodings
# and potential upcoming changes in the future
# shortcut for the function above
# this chaining is a bit special. In the C++ interface it's not possible without
# leaking memory. Here we create the smart pointer automatically. However, the IString
        # which is created temporarily here is released at the end of this function, right?
# This might be unexpected, especially when we rely on the RAII pattern and that
# objects are disposed at certain points in time (usually before committing a transaction)
module_db_name_2 = mdl_factory.get_db_module_name(module_mdl_name).get_c_str()
# note, we plan to map compatible types to python. E.g. the IString class may disappear
return module_db_name_2
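# Minimal sketch (illustration only) contrasting the two access patterns discussed above:
# the explicit 'with'-block keeps the temporary IString alive for a well-defined scope,
# while the chained call relies on the binding/GC to release the temporary for us.
#
#   with mdl_factory.get_db_module_name("::df") as istring:    # scoped lifetime
#       name = istring.get_c_str()
#   name = mdl_factory.get_db_module_name("::df").get_c_str()  # temporary released later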
#--------------------------------------------------------------------------------------------------
def get_db_definition_name(neuray, function_mdl_name):
"""Return the db name of the given function definition."""
with neuray.get_api_component(pymdlsdk.IMdl_factory) as mdl_factory:
with mdl_factory.get_db_definition_name(function_mdl_name) as istring:
return istring.get_c_str()
#--------------------------------------------------------------------------------------------------
def load_module(neuray, transaction, module_mdl_name):
"""Load the module given its name.
Returns true if the module is loaded to database"""
with neuray.get_api_component(pymdlsdk.IMdl_impexp_api) as imp_exp:
with neuray.get_api_component(pymdlsdk.IMdl_factory) as mdl_factory:
# for illustration, we don't use a `with` block for the `context`, instead we release manually
context = mdl_factory.create_execution_context()
res = imp_exp.load_module(transaction, module_mdl_name, context)
context.release() # same as: context = None
return res >= 0
#--------------------------------------------------------------------------------------------------
def run_example(neuray):
# since the MDL SDK has limited database with no scopes and only one transaction
# we access the transaction in the beginning and keep it open
with neuray.get_api_component(pymdlsdk.IDatabase) as database:
with database.get_global_scope() as scope:
# this transaction is not used in a `with`-block so it lives in the function scope
# see the end of the function for the `release()` function
transaction = scope.create_transaction()
print("Expectation: 'transaction' handle is not freed and still valid. [Actually: is valid = {}]".format(transaction.is_valid_interface()))
# load an MDL module, wrapped in a function
# the module is loaded using it's MDL name
# in OV the module would be loaded on the C++ side (rtx.neuraylib.plugin)
module_mdl_name = "::nvidia::sdk_examples::tutorials"
if load_module(neuray, transaction, module_mdl_name):
# When the module is loaded we can access it and all its definitions by accessing the DB
module_db_name = get_db_module_name(neuray, module_mdl_name)
print("\nMDL Module name: %s" % module_mdl_name)
print("MDL Module DB name: %s" % module_db_name)
# after loading was successful, we can inspect materials, functions and other stuff
inspect_module(neuray, transaction, module_db_name)
# lets look at a definition
# lets go for a functions, soon material definitions will behave exactly like functions
# function_mdl_name = module_mdl_name + "::example_function(color,float)"
function_mdl_name = module_mdl_name + "::example_df(float,float,color,float,color,float,float,float,float,::nvidia::sdk_examples::tutorials::Options,color,float,string,texture_2d)"
function_db_name = get_db_definition_name(neuray, function_mdl_name)
print("\nMDL Function name: %s" % function_mdl_name)
print("MDL Function DB name: %s" % function_db_name)
inspect_definition(neuray, transaction, function_db_name)
print("Inspection Done")
# load an invalid MDL module
if load_module(neuray, transaction, "::invalid"):
# this will not be reached...
inspect_module(neuray, transaction, "::invalid")
# changes made with this transaction are committed (here currently we did not edit anything)
# if that is not done, you will get a warning when the transaction is released
# Note, we run garbage collection here first in order to release MDL objects that have not been
# freed after a `with` or released manually. For instance the `function_def` in `inspect_definition`
# seems to still live, especially when a debugger is attached.
# Question would be if we should run `gc.collect()` internally before committing a transaction
# (or calling pymdlsdk.shutdown) or if that is very bad practice.
gc.collect()
transaction.commit()
# this manually releases the transaction. It has the same effect as the __exit__ function that
# is called at the end of a `with`-block
# if release is not called, python would clean up the object when all references to the object
# go out of scope, the GC finds out the object is not used anymore or when the application exits
# transaction.release()
# this behaves like calling release()
transaction = None
#--------------------------------------------------------------------------------------------------
# Entry Point
#--------------------------------------------------------------------------------------------------
def main():
# Get the INeuray interface in a suitable smart pointer that works as context manager
with pymdlsdk.load_and_get_ineuray('') as neuray:
if not neuray.is_valid_interface():
raise Exception('Failed to load the MDL SDK.')
# configuration settings go here
# get the component using a context manager
with neuray.get_api_component(pymdlsdk.IMdl_configuration) as cfg:
# get the example search path that is used for all MDL SDK examples
# falls back to `mdl` in the current working directory
example_sp = get_examples_search_path()
cfg.add_mdl_path(example_sp)
# Load the 'nv_openimageio' and 'dds' plug-ins
if not pymdlsdk.load_plugin(neuray, 'nv_openimageio'):
raise Exception('Failed to load the \'nv_openimageio\' plugin.')
if not pymdlsdk.load_plugin(neuray, 'dds'):
raise Exception('Failed to load the \'dds\' plugin.')
# after the configuration is done, start neuray.
resultCode = neuray.start()
if resultCode != 0:
            raise Exception('Failed to initialize the SDK. Result code: ' + str(resultCode))
# the actual example that should be illustrated
run_example(neuray)
# Shutting the MDL SDK down. Again, a return code of 0 indicates success.
resultCode = neuray.shutdown()
if resultCode != 0:
            raise Exception('Failed to shutdown the SDK. Result code: ' + str(resultCode))
# Unload the MDL SDK
if not pymdlsdk.unload():
raise Exception('Failed to unload the SDK.')
print('Unloaded the MDL SDK.')
if __name__ == "__main__":
try:
# optional binding debugging:
# some more simple memory debugging output
# pymdlsdk._enable_print_ref_counts(False)
main()
# optional binding debugging:
# some more simple memory debugging output
# unreachable = gc.collect()
# print("\n%d unreachable objects detected during garbage collection.\n" % unreachable)
# pymdlsdk._print_open_handle_statistic()
# sleep to be able to read the output when starting from VS
time.sleep(2.0)
except Exception as e:
print("Unexpected error: ", sys.exc_info()[0])
print("Exception: ", e)
print("Traceback: ", traceback.format_exc())
input("\nPress Enter to continue...")
| MDL-SDK-master | examples/mdl_python/modules/example_modules.py |
#*****************************************************************************
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#*****************************************************************************
import sys
import os
import gc
import traceback
import time
print("Current working directory: " + os.getcwd())
print("\n")
# load the binding module
print("About to load the MDL Python Bindings")
# Note: if that import fails, make sure you have selected the same Python runtime version
# as the one you used while building the binding.
import pymdlsdk
print("Loaded the MDL Python Bindings")
#--------------------------------------------------------------------------------------------------
# Entry Point
#--------------------------------------------------------------------------------------------------
def main():
# Get the INeuray interface in a suitable smart pointer.
neuray = pymdlsdk.load_and_get_ineuray('')
if not neuray.is_valid_interface():
raise Exception('Failed to load the MDL SDK.')
print('Loaded the MDL SDK.')
# configuration settings go here, none in this example
# after the configuration is done, start neuray.
resultCode = neuray.start()
if resultCode != 0:
        raise Exception('Failed to initialize the SDK. Result code: ' + str(resultCode))
print('Started the MDL SDK. Status: ' + str(neuray.get_status()))
# scene graph manipulations and rendering calls go here, none in this example.
# ...
# Shutting the MDL SDK down. Again, a return code of 0 indicates success.
resultCode = neuray.shutdown()
if resultCode != 0:
        raise Exception('Failed to shutdown the SDK. Result code: ' + str(resultCode))
print('Shutdown the MDL SDK.')
# make sure the object is disposed.
neuray = None
# alternatively, neuray as well as other pymdlsdk objects can be created using a context
# manager by wrapping them in with-statements, e.g.:
#
    #     with pymdlsdk.load_and_get_ineuray('') as neuray:
# # do something with neuray
# before shutting down, we might have objects not collected yet by the garbage collection
# this can lead to crashes as shutdown can potentially make C++ objects invalid without
# Python noticing it. Using context managers or setting objects to 'None' should make
# this unnecessary.
# gc.collect()
# Unload the MDL SDK
if not pymdlsdk.unload():
raise Exception('Failed to unload the SDK.')
print('Unloaded the MDL SDK.')
if __name__ == "__main__":
try:
# optional binding debugging:
# some more simple memory debugging output
# pymdlsdk._enable_print_ref_counts(False)
main()
# optional binding debugging:
# some more simple memory debugging output
# unreachable = gc.collect()
# print("\n%d unreachable objects detected during garbage collection.\n" % unreachable)
# pymdlsdk._print_open_handle_statistic()
# sleep to be able to read the output when starting from VS
time.sleep(2.0)
except Exception as e:
print("Unexpected error: ", sys.exc_info()[0])
print("Exception: ", e)
print("Traceback: ", traceback.format_exc())
input("\nPress Enter to continue...")
| MDL-SDK-master | examples/mdl_python/start_shutdown/example_start_shutdown.py |
#*****************************************************************************
# Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#*****************************************************************************
import sys
import os
import gc
import traceback
import time
print("Current working directory: " + os.getcwd())
print("\n")
# load the binding module
print("About to load the MDL Python Bindings")
# Note: if that import fails, make sure you have selected the same Python runtime version
# as the one you used while building the binding.
import pymdlsdk
import pymdl
print("Loaded the MDL Python Bindings")
#--------------------------------------------------------------------------------------------------
# Utilities
#--------------------------------------------------------------------------------------------------
def get_examples_search_path():
"""Try to get the example search path or returns 'mdl' sub folder of the current directory if it failed."""
# get the environment variable that is used in all MDL SDK examples
example_sp = os.getenv('MDL_SAMPLES_ROOT')
# fall back to a path relative to this script file
if example_sp == None or not os.path.exists(example_sp):
example_sp = os.path.join(os.path.dirname(os.path.realpath(__file__)))
if example_sp == None or not os.path.exists(example_sp):
example_sp = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', '..')
# go down into the mdl folder
example_sp = os.path.join(example_sp, 'mdl')
# fall back to the current folder
if not os.path.exists(example_sp):
example_sp = './mdl'
return os.path.abspath(example_sp)
#--------------------------------------------------------------------------------------------------
# MDL Python Example
#--------------------------------------------------------------------------------------------------
def get_db_module_name(neuray, module_mdl_name):
"""Return the db name of the given module."""
# When the module is loaded we can access it and all its definitions by accessing the DB
# for that we need to get the database name of the module using the factory
with neuray.get_api_component(pymdlsdk.IMdl_factory) as mdl_factory:
with mdl_factory.get_db_module_name(module_mdl_name) as istring:
module_db_name = istring.get_c_str() # note, even though this name is simple and could
# be constructed by string operations, use the
# factory to be safe in case of unicode encodings
# and potential future changes
# shortcut for the function above
# this chaining is a bit special. In the C++ interface it's not possible without
# leaking memory. Here we create the smart pointer automatically. However, the IString
# which is created temporary here is released at the end the `load_module` function, right?
# This might be unexpected, especially when we rely on the RAII pattern and that
# objects are disposed at certain points in time (usually before committing a transaction)
module_db_name_2 = mdl_factory.get_db_module_name(module_mdl_name).get_c_str()
# note, we plan to map compatible types to python. E.g. the IString class may disappear
return module_db_name_2
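# Hedged illustration (module name taken from run_example below): for the MDL module
# name "::nvidia::sdk_examples::tutorials" the factory typically returns the DB name
# "mdl::nvidia::sdk_examples::tutorials", i.e. the MDL name prefixed with "mdl".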
#--------------------------------------------------------------------------------------------------
def get_db_definition_name(neuray, function_mdl_name):
"""Return the db name of the given function definition."""
with neuray.get_api_component(pymdlsdk.IMdl_factory) as mdl_factory:
with mdl_factory.get_db_definition_name(function_mdl_name) as istring:
return istring.get_c_str()
#--------------------------------------------------------------------------------------------------
def load_module(neuray, transaction, module_mdl_name):
"""Load the module given its name.
Returns True if the module was loaded into the database."""
with neuray.get_api_component(pymdlsdk.IMdl_impexp_api) as imp_exp:
with neuray.get_api_component(pymdlsdk.IMdl_factory) as mdl_factory:
# for illustration, we don't use a `with` block for the `context`, instead we release manually
context = mdl_factory.create_execution_context()
res = imp_exp.load_module(transaction, module_mdl_name, context)
context.release() # same as: context = None
return res >= 0
#--------------------------------------------------------------------------------------------------
def run_example(neuray):
# since the MDL SDK has a limited database with no scopes and only one transaction
# we access the transaction in the beginning and keep it open
with neuray.get_api_component(pymdlsdk.IDatabase) as database, \
database.get_global_scope() as scope, \
scope.create_transaction() as transaction:
# load an MDL module, wrapped in a function
# the module is loaded using its MDL name
# in OV the module would be loaded on the C++ side (rtx.neuraylib.plugin)
module_mdl_name = "::nvidia::sdk_examples::tutorials"
if load_module(neuray, transaction, module_mdl_name):
# When the module is loaded we can access it and all its definitions by accessing the DB
module_db_name = get_db_module_name(neuray, module_mdl_name)
print("\nMDL Module name: %s" % module_mdl_name)
print("MDL Module DB name: %s" % module_db_name)
# when the module is loaded we can use the high-level python binding to inspect the module
module: pymdl.Module = pymdl.Module._fetchFromDb(transaction, module_db_name)
# inspect the module
if module:
print(f" filename: {module.filename}")
print(f" dbName: {module.dbName}")
print(f" mdlName: {module.mdlName}")
print(f" mdlSimpleName: {module.mdlSimpleName}")
print(f" types:")
for type in module.types: # exported types are structs or enums
print(f" - type name: {type.name}")
print(f" kind: {type.kind}")
if type.kind == pymdlsdk.IType.Kind.TK_ENUM:
print(f" enum values: {type.enumValues}")
print(f" functions:")
for funcName in module.functions:
print(f" * simple name: {funcName}")
overloads = module.functions[funcName]
for func in overloads:
print(f" - name with signature (overload): {funcName}{func.parameterTypeNames}")
matOrFunc = "Material" if func.isMaterial else "Function"
print(f" material or function: {matOrFunc}")
print(f" isExported: {func.isExported}")
print(f" semantics: {func.semantics}")
print(f" returns: ")
print(f" kind: {func.returnValue.type.kind}")
print(f" type name: {func.returnValue.type.name}")
print(f" uniform: {func.returnValue.type.uniform}")
print(f" varying: {func.returnValue.type.varying}")
print(f" parameters:")
for param in func.parameters.items():
argument = param[1]
print(f" [{param[0]}] parameter name tuple: {func.parameterTypeNames}")
print(f" kind: {argument.type.kind}")
print(f" type name: {argument.type.name}")
print(f" uniform: {argument.type.uniform}")
print(f" varying: {argument.type.varying}")
continue ### ONLY TO REDUCE THE AMOUNT OF OUTPUT, feel free to remove the 'continue'
# Annotations
if func.annotations:
print(f" Annotations:")
anno: pymdl.Annotation
for anno in func.annotations:
print(f" - Simple Name: {anno.simpleName}")
print(f" Qualified Name: {anno.name}")
arg: pymdl.Argument
for arg_name, arg in anno.arguments.items():
print(f" ({arg.type.kind}) {arg_name}: {arg.value}")
# close transaction before destroying it
transaction.commit()
#--------------------------------------------------------------------------------------------------
# Entry Point
#--------------------------------------------------------------------------------------------------
def main():
# Get the INeuray interface in a suitable smart pointer that works as context manager
with pymdlsdk.load_and_get_ineuray('') as neuray:
if not neuray.is_valid_interface():
raise Exception('Failed to load the MDL SDK.')
# configuration settings go here
# get the component using a context manager
with neuray.get_api_component(pymdlsdk.IMdl_configuration) as cfg:
# add default search paths
cfg.add_mdl_system_paths()
cfg.add_mdl_user_paths()
# get the example search path that is used for all MDL SDK examples
# falls back to `mdl` in the current working directory
example_sp = get_examples_search_path()
cfg.add_mdl_path(example_sp)
# Load the 'nv_openimageio' and 'dds' plug-ins
if not pymdlsdk.load_plugin(neuray, 'nv_openimageio'):
raise Exception('Failed to load the \'nv_openimageio\' plugin.')
if not pymdlsdk.load_plugin(neuray, 'dds'):
raise Exception('Failed to load the \'dds\' plugin.')
# after the configuration is done, start neuray.
resultCode = neuray.start()
if resultCode != 0:
raise Exception('Failed to initialize the SDK. Result code: ' + str(resultCode))
# the actual example that should be illustrated
run_example(neuray)
# Shutting the MDL SDK down. Again, a return code of 0 indicates success.
resultCode = neuray.shutdown()
if resultCode != 0:
raise Exception('Failed to shutdown the SDK. Result code: ' + str(resultCode))
# Unload the MDL SDK
if not pymdlsdk.unload():
raise Exception('Failed to unload the SDK.')
print('Unloaded the MDL SDK.')
if __name__ == "__main__":
try:
# optional binding debugging:
# some more simple memory debugging output
# pymdlsdk._enable_print_ref_counts(True)
main()
# optional binding debugging:
# some more simple memory debugging output
unreachable = gc.collect()
print("\n%d unreachable objects detected during garbage collection.\n" % unreachable)
# pymdlsdk._print_open_handle_statistic()
# sleep to be able to read the output when starting from VS
time.sleep(2.0)
except Exception as e:
print("Unexpected error: ", sys.exc_info()[0])
print("Exception: ", e)
print("Traceback: ", traceback.format_exc())
input("\nPress Enter to continue...")
| MDL-SDK-master | examples/mdl_python/pymdl_inspection/example_pymdl_inspection.py |
#!/usr/bin/env python3
#*****************************************************************************
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#*****************************************************************************
'''Remove font-related CSS directives such that the directives in
doxygen_custom_stylesheet_1_9_4.css can take effect.'''
import os
import re
import sys
for filename in 'html/doxygen.css html/tabs.css html/navtree.css'.split():
if os.path.exists(filename):
buffer = open(filename, 'r', encoding="utf-8").readlines()
text = ''.join(buffer)
text = re.compile('font-family:.*?;', re.S|re.M).sub('', text)
text = re.compile('font-size:.*?;', re.S|re.M).sub('', text)
text = re.compile('font:.*?;', re.S|re.M).sub('', text)
open(filename, 'w', encoding="utf-8").write(text)
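# Hedged illustration (the CSS rule is an example, not taken from the generated files):
#   "body { font-family: Arial; font-size: 13px; color: #000; }"
# is rewritten to
#   "body {   color: #000; }"
# so only font-related directives are stripped and the custom stylesheet can supply them.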
| MDL-SDK-master | doc/build/doxygen_postprocess_css.py |
#!/usr/bin/env python3
#*****************************************************************************
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#*****************************************************************************
'''Modify some settings in Doxyfile according to dot availability and directory structure.'''
import re
import sys
assert len(sys.argv) >= 1+5
input_file = sys.argv[1]
output_file = sys.argv[2]
have_dot_arg = sys.argv[3]
dot_path_arg = sys.argv[4]
image_path_arg = sys.argv[5]
# A non-empty path causes a warning, even if dot support itself is disabled.
if have_dot_arg == "NO":
dot_path_arg = ""
buffer = open(input_file, 'r', encoding="utf-8").readlines()
text = ''.join(buffer)
text = re.compile('(HAVE_DOT\\s*=).*$', re.M).sub('\\1 %s' % have_dot_arg, text)
text = re.compile('(DOT_PATH\\s*=).*$', re.M).sub('\\1 %s' % dot_path_arg, text)
text = re.compile('(IMAGE_PATH\\s*=).*$', re.M).sub('\\1 %s' % image_path_arg, text)
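# Hedged illustration (the Doxyfile line is an example): in a line such as
#   "HAVE_DOT = YES"
# everything after the '=' is replaced, yielding "HAVE_DOT = NO" when have_dot_arg
# is "NO"; DOT_PATH and IMAGE_PATH are rewritten in the same way.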
open(output_file, 'w', encoding="utf-8").write(text)
| MDL-SDK-master | doc/build/doxygen_configure_doxyfile.py |
#!/bin/env python
#*****************************************************************************
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#*****************************************************************************
# This script generates the get_semantics_name() function to get enum names for enum values.
import os
import sys
class EnumParser:
def __init__(self, header_path, enum_name, imported_enum=None, import_prefix=None):
self.header_path = header_path
self.enum_name = enum_name
self.enum_values = []
self.enum_value_map = {}
if imported_enum:
for name, val in imported_enum.enum_values:
self.enum_value_map[import_prefix + name] = val
self.parse()
# Normalize an enum entry into the form "<name>['='<value>('+'<value>)*]"
def normalize_entry(self, line, f):
comment_idx = line.find("//")
if comment_idx != -1:
line = line[:comment_idx].strip()
# ignore directives so far
if line.startswith("#"):
line = ""
end_of_enum = False
while True:
# end of entry?
if line.endswith(","):
line = line[:-1].strip()
break
# end of enum?
rbrace_idx = line.find("}")
if rbrace_idx != -1:
line = line[:rbrace_idx].strip()
end_of_enum = True
break
# add next line
nextline = next(f, "") #.next()
# ignore directives so far
if nextline.startswith("#"):
nextline = ""
comment_idx = nextline.find("//")
if comment_idx != -1:
nextline = nextline[:comment_idx].strip()
line += " " + nextline.strip()
# remove any spaces
line = line.replace(" ","")
return line, end_of_enum
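# Hedged illustration of normalize_entry() (the enum entries below are made up):
#   "DS_EXAMPLE_FIRST = 0x0100,  // comment"  ->  ("DS_EXAMPLE_FIRST=0x0100", False)
#   "DS_EXAMPLE_NEXT,"                        ->  ("DS_EXAMPLE_NEXT", False)
#   "DS_EXAMPLE_LAST };"                      ->  ("DS_EXAMPLE_LAST", True)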
# Parse the header and extract the name/value mapping of the enum name given in the constructor.
def parse(self):
in_enum = False
cur_val = 0
with open(self.header_path, "rt") as f:
for line in f:
line = line.strip()
if not in_enum and line.startswith("enum " + self.enum_name):
in_enum = True
elif in_enum:
entry, end_of_enum = self.normalize_entry(line, f)
if entry:
if "=" in entry:
# "<name>'='<value>('+'<value>)*" case
name, val = entry.split("=")
cur_val = 0
for added_val_name in val.split("+"):
try:
cur_val += int(added_val_name, 0)
except ValueError:
# not a number, so it should be a known enum
cur_val += self.enum_value_map[added_val_name]
else:
# "<name>" case
name = entry
val = cur_val
cur_val += 1
self.enum_value_map[name] = val
self.enum_values.append((name, val))
if end_of_enum:
break
#
# Main function
#
def main():
if len(sys.argv) < 3 or len(sys.argv) > 4 or (len(sys.argv) == 4 and sys.argv[3] != "--include-nvidia-dfs"):
print("Usage: %s public_header_dir output_file [--include-nvidia-dfs]" % sys.argv[0])
sys.exit(1)
public_header_dir = sys.argv[1]
output_file = sys.argv[2]
include_nvidia_dfs = len(sys.argv) == 4
expressions_path = os.path.join(public_header_dir, "mi", "mdl", "mdl_expressions.h")
definitions_path = os.path.join(public_header_dir, "mi", "mdl", "mdl_definitions.h")
if not os.path.exists(expressions_path) or not os.path.exists(definitions_path):
print('Invalid public_header_dir parameter, "%s" or "%s" not found!' %
(expressions_path, definitions_path))
sys.exit(1)
# IDefinition::Semantics depends on IExpression::Operator, so parse "Operator" first
# and use it when parsing "Semantics".
# Note: this will use the first "Operator" enum it finds in mdl_expressions.h.
operator_enums = EnumParser(expressions_path, "Operator")
semantics_enums = EnumParser(definitions_path, "Semantics", operator_enums, "IExpression::")
with open(output_file, "wt") as f:
f.write(
"//*****************************************************************************\n"
"// Copyright 2023 NVIDIA Corporation. All rights reserved.\n"
"//*****************************************************************************\n"
"// Generated by gen_mdltlc_enum_names.py\n"
"\n"
"#include <mi/mdl/mdl_definitions.h> // mi::mdl::IDefinition::Semantics\n"
"\n"
"char const *get_semantics_name(mi::mdl::IDefinition::Semantics semantics)\n"
"{\n"
" switch (semantics) {\n")
for name, val in semantics_enums.enum_values:
# ignore enum entries ending with "_FIRST" or "_LAST" in entry list
# to avoid duplicate switch cases
if not name.endswith("_FIRST") and not name.endswith("_LAST") and (include_nvidia_dfs or (not ("NVIDIA_DF" in name))):
f.write(' case mi::mdl::IDefinition::%s: return "%s";\n' % (name, name))
f.write(' default: return "<INVALID SEMANTICS>";\n');
# let mdltlc generate invalid code for unknown semantics
f.write(
" }\n"
' return "<INVALID SEMANTICS>";\n'
"}\n")
if __name__ == '__main__':
main()
| MDL-SDK-master | src/prod/bin/mdltlc/gen_mdltl_enum_names.py |
#*****************************************************************************
# Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#*****************************************************************************
import pymel.core as pm
import mtoa.utils as utils
import mtoa.ui.ae.utils as aeUtils
from mtoa.ui.ae.shaderTemplate import ShaderAETemplate
class AEaiMDLTemplate(ShaderAETemplate):
def setup(self):
# Add the shader swatch to the AE
self.addSwatch()
self.beginScrollLayout()
# Add a list that allows to replace the shader for other one
self.addCustom('message', 'AEshaderTypeNew', 'AEshaderTypeReplace')
self.beginLayout("Material Selection", collapse=False)
self.addControl("mdl_module_name", label="MDL Module Name")
self.addControl("mdl_function_name", label="MDL Function Name")
self.addControl("qualified_name", label="Qualified Name (deprecated)")
self.endLayout()
# include/call base class/node attributes
pm.mel.AEdependNodeTemplate(self.nodeName)
# Add Section for the extra controls not displayed before
self.addExtraControls()
self.endScrollLayout()
| MDL-SDK-master | src/prod/lib/mdl_arnold/aiMDLTemplate.py |
#*****************************************************************************
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#*****************************************************************************
"""MDL Python Bindings - a high-level wrapper for a more efficient and user-friendly usage.
Note, this is an experimental library to simply a few of the use cases of the MDL SDK.
For a complete access to the MDL SDK API, please refer to the low-level binding, `pymdlsdk.py`.
"""
# low level MDL Python Binding
if __package__ or "." in __name__:
from . import pymdlsdk
else:
import pymdlsdk
try:
# used to represent Vectors, Matrices, and Colors
import numpy
except ImportError as e:
print("`numpy` is not installed but required by pymdl.")
raise e
# Type annotation improves type safety and supports code completing in IDEs like VS Code
from typing import List, Set, Dict, Tuple, FrozenSet, Optional
#--------------------------------------------------------------------------------------------------
# Value Conversions
#--------------------------------------------------------------------------------------------------
def DowncastIType(itype : pymdlsdk.IType) -> pymdlsdk.IType: # or derived type
r"""
Cast the input type into the proper derived interface depending on the kind of type.
Here is the mapping from the type kind to the returned interface:
- TK_ALIAS --> IType_alias
- TK_BOOL --> IType_bool
- TK_INT --> IType_int
- TK_ENUM --> IType_enumeration
- TK_FLOAT --> IType_float
- TK_DOUBLE --> IType_double
- TK_STRING --> IType_string
- TK_VECTOR --> IType_vector
- TK_MATRIX --> IType_matrix
- TK_COLOR --> IType_color
- TK_ARRAY --> IType_array
- TK_STRUCT --> IType_structure
- TK_TEXTURE --> IType_texture
- TK_LIGHT_PROFILE --> IType_light_profile
- TK_BSDF_MEASUREMENT --> IType_bsdf_measurement
- TK_BSDF --> IType_bsdf
- TK_HAIR_BSDF --> IType_hair_bsdf
- TK_EDF --> IType_edf
- TK_VDF --> IType_vdf
"""
if not itype.is_valid_interface():
return itype
k = itype.get_kind()
if k == pymdlsdk.IType.Kind.TK_ALIAS: t = itype.get_interface(pymdlsdk.IType_alias)
elif k == pymdlsdk.IType.Kind.TK_BOOL: t = itype.get_interface(pymdlsdk.IType_bool)
elif k == pymdlsdk.IType.Kind.TK_INT: t = itype.get_interface(pymdlsdk.IType_int)
elif k == pymdlsdk.IType.Kind.TK_ENUM: t = itype.get_interface(pymdlsdk.IType_enumeration)
elif k == pymdlsdk.IType.Kind.TK_FLOAT: t = itype.get_interface(pymdlsdk.IType_float)
elif k == pymdlsdk.IType.Kind.TK_DOUBLE: t = itype.get_interface(pymdlsdk.IType_double)
elif k == pymdlsdk.IType.Kind.TK_STRING: t = itype.get_interface(pymdlsdk.IType_string)
elif k == pymdlsdk.IType.Kind.TK_VECTOR: t = itype.get_interface(pymdlsdk.IType_vector)
elif k == pymdlsdk.IType.Kind.TK_MATRIX: t = itype.get_interface(pymdlsdk.IType_matrix)
elif k == pymdlsdk.IType.Kind.TK_COLOR: t = itype.get_interface(pymdlsdk.IType_color)
elif k == pymdlsdk.IType.Kind.TK_ARRAY: t = itype.get_interface(pymdlsdk.IType_array)
elif k == pymdlsdk.IType.Kind.TK_STRUCT: t = itype.get_interface(pymdlsdk.IType_structure)
elif k == pymdlsdk.IType.Kind.TK_TEXTURE: t = itype.get_interface(pymdlsdk.IType_texture)
elif k == pymdlsdk.IType.Kind.TK_LIGHT_PROFILE: t = itype.get_interface(pymdlsdk.IType_light_profile)
elif k == pymdlsdk.IType.Kind.TK_BSDF_MEASUREMENT: t = itype.get_interface(pymdlsdk.IType_bsdf_measurement)
elif k == pymdlsdk.IType.Kind.TK_BSDF: t = itype.get_interface(pymdlsdk.IType_bsdf)
elif k == pymdlsdk.IType.Kind.TK_HAIR_BSDF: t = itype.get_interface(pymdlsdk.IType_hair_bsdf)
elif k == pymdlsdk.IType.Kind.TK_EDF: t = itype.get_interface(pymdlsdk.IType_edf)
elif k == pymdlsdk.IType.Kind.TK_VDF: t = itype.get_interface(pymdlsdk.IType_vdf)
itype = None
return t
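# Hedged usage sketch (assumes `itype` is a valid pymdlsdk.IType obtained elsewhere):
#
#   concrete = DowncastIType(itype)
#   if concrete.get_kind() == pymdlsdk.IType.Kind.TK_VECTOR:
#       size = concrete.get_size()  # IType_vector-specific methods are now available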
def DowncastIExpression(iexpression : pymdlsdk.IExpression) -> pymdlsdk.IExpression: # or derived type
r"""
Cast the input expression into the proper derived interface depending on the kind of expression.
Here is the mapping from the expression kind to the returned interface:
- EK_CONSTANT --> IExpression_constant
- EK_CALL --> IExpression_call
- EK_PARAMETER --> IExpression_parameter
- EK_DIRECT_CALL --> IExpression_direct_call
- EK_TEMPORARY --> IExpression_temporary
"""
if not iexpression.is_valid_interface():
return iexpression
k = iexpression.get_kind()
if k == pymdlsdk.IExpression.Kind.EK_CONSTANT: t = iexpression.get_interface(pymdlsdk.IExpression_constant)
elif k == pymdlsdk.IExpression.Kind.EK_CALL: t = iexpression.get_interface(pymdlsdk.IExpression_call)
elif k == pymdlsdk.IExpression.Kind.EK_PARAMETER: t = iexpression.get_interface(pymdlsdk.IExpression_parameter)
elif k == pymdlsdk.IExpression.Kind.EK_DIRECT_CALL: t = iexpression.get_interface(pymdlsdk.IExpression_direct_call)
elif k == pymdlsdk.IExpression.Kind.EK_TEMPORARY: t = iexpression.get_interface(pymdlsdk.IExpression_temporary)
iexpression = None
return t
def DowncastIValue(ivalue : pymdlsdk.IValue) -> pymdlsdk.IValue: # or derived type
r"""
Cast the input value into the proper derived interface depending on the kind of value.
Here is the mapping from the ivalue kind to the returned interface:
- VK_BOOL --> IValue_bool
- VK_INT --> IValue_int
- VK_ENUM --> IValue_enumeration
- VK_FLOAT --> IValue_float
- VK_DOUBLE --> IValue_double
- VK_STRING --> IValue_string
- VK_VECTOR --> IValue_vector
- VK_MATRIX --> IValue_matrix
- VK_COLOR --> IValue_color
- VK_ARRAY --> IValue_array
- VK_STRUCT --> IValue_structure
- VK_INVALID_DF --> IValue_invalid_df
- VK_TEXTURE --> IValue_texture
- VK_LIGHT_PROFILE --> IValue_light_profile
- VK_BSDF_MEASUREMENT --> IValue_bsdf_measurement
"""
if not ivalue.is_valid_interface():
return ivalue
k = ivalue.get_kind()
if k == pymdlsdk.IValue.Kind.VK_BOOL: t = ivalue.get_interface(pymdlsdk.IValue_bool)
elif k == pymdlsdk.IValue.Kind.VK_INT: t = ivalue.get_interface(pymdlsdk.IValue_int)
elif k == pymdlsdk.IValue.Kind.VK_ENUM: t = ivalue.get_interface(pymdlsdk.IValue_enumeration)
elif k == pymdlsdk.IValue.Kind.VK_FLOAT: t = ivalue.get_interface(pymdlsdk.IValue_float)
elif k == pymdlsdk.IValue.Kind.VK_DOUBLE: t = ivalue.get_interface(pymdlsdk.IValue_double)
elif k == pymdlsdk.IValue.Kind.VK_STRING: t = ivalue.get_interface(pymdlsdk.IValue_string)
elif k == pymdlsdk.IValue.Kind.VK_VECTOR: t = ivalue.get_interface(pymdlsdk.IValue_vector)
elif k == pymdlsdk.IValue.Kind.VK_MATRIX: t = ivalue.get_interface(pymdlsdk.IValue_matrix)
elif k == pymdlsdk.IValue.Kind.VK_COLOR: t = ivalue.get_interface(pymdlsdk.IValue_color)
elif k == pymdlsdk.IValue.Kind.VK_ARRAY: t = ivalue.get_interface(pymdlsdk.IValue_array)
elif k == pymdlsdk.IValue.Kind.VK_STRUCT: t = ivalue.get_interface(pymdlsdk.IValue_structure)
elif k == pymdlsdk.IValue.Kind.VK_INVALID_DF: t = ivalue.get_interface(pymdlsdk.IValue_invalid_df)
elif k == pymdlsdk.IValue.Kind.VK_TEXTURE: t = ivalue.get_interface(pymdlsdk.IValue_texture)
elif k == pymdlsdk.IValue.Kind.VK_LIGHT_PROFILE: t = ivalue.get_interface(pymdlsdk.IValue_light_profile)
elif k == pymdlsdk.IValue.Kind.VK_BSDF_MEASUREMENT: t = ivalue.get_interface(pymdlsdk.IValue_bsdf_measurement)
ivalue = None
return t
def IValueToPyValues(ivalue : pymdlsdk.IValue):
r"""
Converts low level IValues to python friendly data types.
Here is the mapping from the ivalue kind to the returned values:
- VK_BOOL --> ivalue.get_value()
- VK_INT --> ivalue.get_value()
- VK_FLOAT --> ivalue.get_value()
- VK_DOUBLE --> ivalue.get_value()
- VK_STRING --> ivalue.get_value()
- VK_LIGHT_PROFILE --> ivalue.get_value()
- VK_BSDF_MEASUREMENT --> ivalue.get_value()
- VK_ENUM --> (ivalue.get_name(), ivalue.get_value())
- VK_TEXTURE --> (ivalue.get_value(), ivalue.get_gamma())
- VK_COLOR --> numpy.array(color components)
- VK_ARRAY --> [IValueToPyValues(array values)]
- VK_STRUCT --> dict(structure field names, ArgumentConstant(structure fields))
- VK_INVALID_DF --> None
For vectors (kind is VK_VECTOR), each value is converted using IValueToPyValues().
A numpy.array is returned with type depending on the vector element kind.
Here is the mapping between the vector element kind and the array type.
- TK_FLOAT --> numpy.float32
- TK_DOUBLE --> numpy.float64
- TK_INT --> numpy.int32
- TK_BOOL --> bool
For matrices (kind is VK_MATRIX), each value is converted using IValueToPyValues().
A numpy.array is returned with type depending on the vector element kind.
Here is the mapping between the matrix element kind and the array type.
- TK_FLOAT --> numpy.float32
- TK_DOUBLE --> numpy.float64
- TK_INT --> numpy.int32
- TK_BOOL --> bool
"""
ivalue = DowncastIValue(ivalue)
kind = ivalue.get_kind()
if kind == pymdlsdk.IValue.Kind.VK_BOOL or \
kind == pymdlsdk.IValue.Kind.VK_INT or \
kind == pymdlsdk.IValue.Kind.VK_FLOAT or \
kind == pymdlsdk.IValue.Kind.VK_DOUBLE or \
kind == pymdlsdk.IValue.Kind.VK_STRING:
return ivalue.get_value()
elif kind == pymdlsdk.IValue.Kind.VK_ENUM:
name = ivalue.get_name()
code = ivalue.get_value()
return (name, code)
elif kind == pymdlsdk.IValue.Kind.VK_VECTOR:
element_kind = ivalue.get_type().get_element_type().get_kind()
if element_kind == pymdlsdk.IType.Kind.TK_FLOAT:
dtype = numpy.float32
elif element_kind == pymdlsdk.IType.Kind.TK_DOUBLE:
dtype = numpy.float64
elif element_kind == pymdlsdk.IType.Kind.TK_INT:
dtype = numpy.int32
elif element_kind == pymdlsdk.IType.Kind.TK_BOOL:
dtype = bool
components = []
rows = ivalue.get_size()
for i in range(rows):
components.append(IValueToPyValues(ivalue.get_value(i)))
return numpy.array(components, dtype = dtype).reshape(rows, 1)
elif kind == pymdlsdk.IValue.Kind.VK_MATRIX:
col_type = DowncastIType(ivalue.get_type().get_element_type())
rows = col_type.get_size()
cols = ivalue.get_size()
col_kind = col_type.get_kind()
if col_kind == pymdlsdk.IType.Kind.TK_VECTOR:
element_kind = col_type.get_component_type(0).get_kind()
if element_kind == pymdlsdk.IType.Kind.TK_FLOAT:
dtype = numpy.float32
elif element_kind == pymdlsdk.IType.Kind.TK_DOUBLE:
dtype = numpy.float64
elif element_kind == pymdlsdk.IType.Kind.TK_INT:
dtype = numpy.int32
elif element_kind == pymdlsdk.IType.Kind.TK_BOOL:
dtype = bool
components = []
for c in range(cols):
column = ivalue.get_value(c)
for r in range(rows):
components.append(IValueToPyValues(column.get_value(r)))
return numpy.transpose(numpy.array(components, dtype = dtype).reshape(cols, rows))
elif kind == pymdlsdk.IValue.Kind.VK_COLOR:
components = []
for i in range(ivalue.get_size()):
components.append(ivalue.get_value(i).get_interface(pymdlsdk.IValue_float).get_value())
return numpy.array(components)
elif kind == pymdlsdk.IValue.Kind.VK_ARRAY:
array = []
for i in range(ivalue.get_size()):
elem = ivalue.get_value(i)
array.append(IValueToPyValues(elem))
return array
elif kind == pymdlsdk.IValue.Kind.VK_STRUCT:
struct = {}
type = DowncastIType(ivalue.get_type())
for e in range(type.get_size()):
e_name = type.get_field_name(e)
e_value = ivalue.get_field(e_name)
struct[e_name] = ArgumentConstant(e_value)
return struct
elif kind == pymdlsdk.IValue.Kind.VK_INVALID_DF:
return None
elif kind == pymdlsdk.IValue.Kind.VK_TEXTURE:
return ( ivalue.get_value(), ivalue.get_gamma() )
elif kind == pymdlsdk.IValue.Kind.VK_LIGHT_PROFILE or \
kind == pymdlsdk.IValue.Kind.VK_BSDF_MEASUREMENT:
return ivalue.get_value()
return None
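# Hedged illustration of the returned values (the concrete numbers are examples only):
#   - an IValue_color holding (0.5, 0.25, 1.0) becomes numpy.array([0.5, 0.25, 1.0])
#   - an IValue_enumeration becomes a (name, code) tuple such as ("mode_a", 0)
#   - an IValue_texture becomes a (value, gamma) tuple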
#--------------------------------------------------------------------------------------------------
# High Level Wrappers
#--------------------------------------------------------------------------------------------------
class Type(object):
r"""
Wrapper around an MDL type.
Type gives access to:
kind: Kind
The kind of type.
uniform: bool
True if the type has a uniform modifier.
varying: bool
True if the type has a varying modifier.
symbol: str
If type is enum or struct, this is the qualified name of the enum or struct type.
Otherwise this is None.
name: str
In case of enums or structs, this is equal to the symbol name. Scalar, vector, matrix,
array and resource types that don't have a symbol name can use this type name.
enumValues: (str,int)
If type is enum, this list of pairs holds the enum values along with their numeric value.
Otherwise this is empty.
"""
def __init__(self, itype: pymdlsdk.IType) -> None:
super().__init__()
modifierMask = itype.get_all_type_modifiers()
self.uniform = (modifierMask & 2) > 0
self.varying = (modifierMask & 4) > 0
# drops all modifiers
itype = DowncastIType(itype.skip_all_type_aliases())
self.kind = itype.get_kind()
self.symbol = None
self.enumValues = []
if self.kind == pymdlsdk.IType.Kind.TK_ENUM:
self.symbol = itype.get_symbol()
for i in range(itype.get_size()):
name = itype.get_value_name(i)
code = itype.get_value_code(i)
self.enumValues.append((name, code))
elif self.kind == pymdlsdk.IType.Kind.TK_STRUCT:
self.symbol = itype.get_symbol()
self.name = self.__printName(itype)
r"""
Helper function to print a type.
Note, this will be moved into the MDL SDK soon.
"""
def __printName(self, itype: pymdlsdk.IType) -> str:
itype = DowncastIType(itype)
if itype.get_kind() == pymdlsdk.IType.Kind.TK_BOOL:
return 'bool'
if itype.get_kind() == pymdlsdk.IType.Kind.TK_INT:
return 'int'
if itype.get_kind() == pymdlsdk.IType.Kind.TK_FLOAT:
return 'float'
if itype.get_kind() == pymdlsdk.IType.Kind.TK_DOUBLE:
return 'double'
if itype.get_kind() == pymdlsdk.IType.Kind.TK_STRING:
return 'string'
if itype.get_kind() == pymdlsdk.IType.Kind.TK_COLOR:
return 'color'
if itype.get_kind() == pymdlsdk.IType.Kind.TK_LIGHT_PROFILE:
return 'light_profile'
if itype.get_kind() == pymdlsdk.IType.Kind.TK_BSDF_MEASUREMENT:
return 'bsdf_measurement'
if itype.get_kind() == pymdlsdk.IType.Kind.TK_ENUM or \
itype.get_kind() == pymdlsdk.IType.Kind.TK_STRUCT:
return itype.get_symbol()
if itype.get_kind() == pymdlsdk.IType.Kind.TK_VECTOR:
with itype.get_element_type() as elementType:
return self.__printName(elementType) + str(itype.get_size())
if itype.get_kind() == pymdlsdk.IType.Kind.TK_MATRIX:
with itype.get_element_type() as rowType, \
DowncastIType(rowType) as rowTypeVector, \
rowTypeVector.get_element_type() as elementType:
return self.__printName(elementType) + str(itype.get_size()) + 'x' + str(rowType.get_size())
if itype.get_kind() == pymdlsdk.IType.Kind.TK_ARRAY:
with itype.get_element_type() as elementType:
if itype.is_immediate_sized():
return self.__printName(elementType) + '[' + str(itype.get_size()) + ']'
else:
return self.__printName(elementType) + '[N]'
if itype.get_kind() == pymdlsdk.IType.Kind.TK_TEXTURE:
shape = itype.get_shape()
if shape == pymdlsdk.IType_texture.Shape.TS_2D:
return 'texture_2d'
elif shape == pymdlsdk.IType_texture.Shape.TS_3D:
return 'texture_3d'
elif shape == pymdlsdk.IType_texture.Shape.TS_CUBE:
return 'texture_cube'
elif shape == pymdlsdk.IType_texture.Shape.TS_PTEX:
return 'texture_ptex'
elif shape == pymdlsdk.IType_texture.Shape.TS_BSDF_DATA:
return 'texture_bsdf_data'
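# Hedged examples of the resulting Type.name strings (illustrative only):
#   "float3", "float4x4", "color", "texture_2d", "int[5]", or, for enums and structs,
#   the qualified symbol such as "::my_package::my_enum" (a made-up name).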
class Argument(object):
r"""
Wrapper around an MDL parameter.
Argument combines type information, annotations, and optionally the value of an MDL parameter.
Argument is used:
- To describe the parameters of a FunctionCall. The value of the Argument is the value of the corresponding function call parameter.
- To describe the parameters of a FunctionDefinition. In this case, the value of the Argument is the default value of the corresponding definition parameter, if applicable (i.e., a default value exists).
- To describe the parameters of an Annotation along with their values.
- To describe return types of functions and their annotations. In this case, the value is not used.
There are two kinds of Argument: ArgumentConstant and ArgumentCall.
ArgumentConstant holds an actual value.
ArgumentCall refers to other function calls which allows to construct expression graphs.
Argument gives access to:
type: Type
The parameter type.
annotations: tuple(Annotation)
The parameter annotations.
"""
def __init__(self, type: Type, annotations: tuple() = tuple()):
self.type = type
self.value = None
self.annotations = annotations
class ArgumentConstant(Argument):
r"""
Wrapper around an MDL parameter value.
ArgumentConstant gives access to:
value: See IValueToPyValues()
Holds an actual parameter value.
The MDL value is transformed using IValueToPyValues()
"""
def __init__(self, ivalue: pymdlsdk.IValue, annotations: tuple() = tuple()):
super(ArgumentConstant, self).__init__(Type(ivalue.get_type()), annotations)
self.value = IValueToPyValues(ivalue)
class ArgumentCall(Argument):
r"""
Wrapper around an MDL FunctionCall parameter.
ArgumentCall gives access to:
value: str
The DB name of the referenced function call.
"""
def __init__(self, iexpression: pymdlsdk.IExpression_call, annotations = tuple()):
super(ArgumentCall, self).__init__(Type(iexpression.get_type()), annotations)
self.value = iexpression.get_call()
#--------------------------------------------------------------------------------------------------
# High Level Wrappers - Annotations
#--------------------------------------------------------------------------------------------------
class Annotation(object):
"""Wrapper around an MDL annotation."""
def __init__(self, iannotation: pymdlsdk.IAnnotation):
arguments = {}
with iannotation.get_arguments() as expr_list:
for i in range(expr_list.get_size()):
arg_name = expr_list.get_name(i)
with expr_list.get_expression(arg_name) as arg_expr:
if arg_expr.get_kind() == pymdlsdk.IExpression.Kind.EK_CONSTANT:
arguments[arg_name] = ArgumentConstant(arg_expr.get_interface(pymdlsdk.IExpression_constant).get_value())
else:
pass # should not happen
self._name = iannotation.get_name() #TODO make immutable
self._arguments = arguments
paramTypeNames = []
with iannotation.get_definition() as definition:
for p in range(definition.get_parameter_count()):
paramTypeNames.append(definition.get_mdl_parameter_type_name(p))
self._simpleName = definition.get_mdl_simple_name()
self._parameterTypeNames = tuple(paramTypeNames)
self._moduleDbName = definition.get_module()
@property
def name(self):
"""The full name of the annotation consisting of the module name, the simple name and the parameter list."""
return self._name
@property
def moduleDbName(self) -> str:
"""The DB name of the module in which the annotation is defined."""
return self._moduleDbName
@property
def simpleName(self) -> str:
"""The name of the annotation within the module it is defined in, without its parameter list."""
return self._simpleName
@property
def parameterTypeNames(self) -> List[str]:
"""The list of parameter type names of the signature of this annotation."""
return self._parameterTypeNames
@property
def arguments(self) -> Dict[str, Argument]:
"""Dictionary with the annotations arguments and their values."""
return self._arguments
def AnnotationBlock(iannotation_block: pymdlsdk.IAnnotation_block) -> Tuple[Annotation, ...]:
r"""
Creates an immutable list of annotations from an MDL annotation block.
Parameters
----------
iannotation_block : pymdlsdk.IAnnotation_block
Low level MDL annotation block proxy generated for the Python binding.
Returns
-------
tuple(Annotation)
A list of high level wrapped annotation information read from the annotation block.
"""
annos = []
if iannotation_block and iannotation_block.is_valid_interface():
for i in range(iannotation_block.get_size()) :
with iannotation_block.get_annotation(i) as anno:
if anno.is_valid_interface():
a = Annotation(anno)
annos.append(a)
return tuple(annos)
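# Hedged usage sketch (assumes `definition` is a valid pymdlsdk.IFunction_definition):
#
#   with definition.get_annotations() as anno_block:
#       for anno in AnnotationBlock(anno_block):
#           print(anno.simpleName, anno.arguments)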
#--------------------------------------------------------------------------------------------------
# High Level Wrappers - Functions
#--------------------------------------------------------------------------------------------------
class FunctionCall(object):
r"""
Wrapper around an MDL function call.
FunctionCall gives access to:
functionDefinition: str
The DB name of the corresponding function definition.
mdlFunctionDefinition: str
The MDL name of the corresponding function definition.
parameters: Dict[str, Argument]
Dictionary of the function call parameters as Argument.
Keys are the parameter names corresponding to the Arguments.
"""
def __init__(self, func: pymdlsdk.IFunction_call, dbName: str) -> None:
super(FunctionCall, self).__init__()
parameters = {}
param_anno_block = None
with func.get_arguments() as arguments, \
func.get_parameter_types() as param_types:
for i in range(func.get_parameter_count()):
param_name = func.get_parameter_name(i)
with arguments.get_expression(i) as param_default:
if param_default.is_valid_interface():
if param_default.get_kind() == pymdlsdk.IExpression.Kind.EK_CONSTANT:
with param_default.get_interface(pymdlsdk.IExpression_constant) as param_default_constant, \
param_default_constant.get_value() as param_default_value:
parameters[param_name] = ArgumentConstant(param_default_value, AnnotationBlock(param_anno_block))
elif param_default.get_kind() == pymdlsdk.IExpression.Kind.EK_CALL:
parameters[param_name] = ArgumentCall(param_default.get_interface(pymdlsdk.IExpression_call), AnnotationBlock(param_anno_block))
else:
with param_types.get_type(param_name) as param_default_type:
parameters[param_name] = Argument(Type(param_default_type), AnnotationBlock(param_anno_block))
param_anno_block = None
self.parameters: Dict[str, Argument]
self.parameters = parameters #TODO make immutable
self.functionDefinition = func.get_function_definition()
self.mdlFunctionDefinition = func.get_mdl_function_definition()
@staticmethod
def _fetchFromDb(transaction: pymdlsdk.ITransaction, dbName) -> "FunctionCall":
with transaction.access_as(pymdlsdk.IFunction_call, dbName) as f:
if f.is_valid_interface():
return FunctionCall(f, dbName)
return None
class FunctionDefinition(object):
r"""
Wrapper around an MDL function definition.
FunctionDefinition gives access to:
annotations: AnnotationBlock
The annotations of the function definition itself, or None if there are no such annotations.
dbName: str
DB name of the function definitions.
mdlModuleName:str
The MDL name of the module containing this function definition.
mdlName: str
The MDL name of the function definition.
mdlSimpleName: str
The simple MDL name of the function definition.
The simple name is the last component of the MDL name, i.e., without any packages and scope qualifiers, and without the parameter type names.
moduleDbName: str
The DB name of the module containing this function definition.
isExported: bool
Indicates whether the function definition is exported by its module.
isMaterial: bool
True in case the function is a material, i.e., the return type is a material structure.
semantics: IFunction_definition.Semantics
The semantic of a known function. User-defined functions and all materials will return DS_UNKNOWN (0).
Note, do not rely on the numeric values of the enumerators since they may change without further notice.
parameters: Dict[str, Argument]
Dictionary of the function definition parameters as Argument.
Keys are the parameter names corresponding to the Arguments.
parameterTypeNames: tuple(str)
The type name of all the parameters.
returnValue: Argument
The return type as an Argument.
"""
def __init__(self, func: pymdlsdk.IFunction_definition, dbName: str) -> None:
super(FunctionDefinition, self).__init__()
parameters = {}
with func.get_parameter_annotations() as param_annotations, \
func.get_defaults() as param_defaults, \
func.get_parameter_types() as param_types:
paramTypeNames = []
for i in range(func.get_parameter_count()):
param_name = func.get_parameter_name(i)
paramTypeNames.append(func.get_mdl_parameter_type_name(i))
if param_annotations.is_valid_interface():
param_anno_block = param_annotations.get_annotation_block(param_name)
else:
param_anno_block = None
with param_defaults.get_expression(param_name) as param_default:
if param_default.is_valid_interface():
if param_default.get_kind() == pymdlsdk.IExpression.Kind.EK_CONSTANT:
with param_default.get_interface(pymdlsdk.IExpression_constant) as param_default_constant, \
param_default_constant.get_value() as param_default_value:
parameters[param_name] = ArgumentConstant(param_default_value, AnnotationBlock(param_anno_block))
elif param_default.get_kind() == pymdlsdk.IExpression.Kind.EK_CALL:
parameters[param_name] = ArgumentCall(param_default.get_interface(pymdlsdk.IExpression_call), AnnotationBlock(param_anno_block))
else:
with param_types.get_type(param_name) as param_default_type:
parameters[param_name] = Argument(Type(param_default_type), AnnotationBlock(param_anno_block))
param_anno_block = None
self.isMaterial = func.is_material()
self.isExported = func.is_exported()
self.semantics = func.get_semantic()
with func.get_return_type() as ret_type, \
func.get_return_annotations() as ret_annos:
self.returnValue = Argument(Type(ret_type), AnnotationBlock(ret_annos))
self.parameters: Dict[str, Argument]
self.parameters = parameters #TODO make immutable
with func.get_annotations() as func_annos:
self.annotations = AnnotationBlock(func_annos)
self.mdlName = func.get_mdl_name()
self.dbName = dbName
self.mdlSimpleName = func.get_mdl_simple_name()
self.parameterTypeNames = tuple(paramTypeNames)
self.mdlModuleName = func.get_mdl_module_name()
self.moduleDbName = func.get_module()
@staticmethod
def _fetchFromDb(transaction: pymdlsdk.ITransaction, dbName) -> "FunctionDefinition":
with transaction.access_as(pymdlsdk.IFunction_definition, dbName) as f:
if f.is_valid_interface():
return FunctionDefinition(f, dbName)
return None
#--------------------------------------------------------------------------------------------------
# High Level Wrappers - Modules
#--------------------------------------------------------------------------------------------------
class Module(object):
r"""
Wrapper around an MDL module.
Module gives access to:
dbName: str
The DB name of the module.
filename: str
The name of the MDL source file from which the module was created.
functionDbNames: tuple(str)
DB names of all the function definitions and the material definitions from this module.
functions: Dict[str, FunctionDefinition]
Dictionary of FunctionDefinition.
Keys are the FunctionDefinition simple names.
mdlName: str
The MDL name of the module.
mdlSimpleName: str
The simple MDL name of the module.
The simple name is the last component of the MDL name, i.e., without any packages and scope qualifiers.
"""
def __init__(self, transaction: pymdlsdk.ITransaction, module: pymdlsdk.IModule, dbName: str) -> None:
super(Module, self).__init__()
self.filename = module.get_filename()
self.dbName = dbName
self.mdlName = module.get_mdl_name()
self.mdlSimpleName = module.get_mdl_simple_name()
moduleTypes = []
with module.get_types() as moduleTypeList:
for i in range(moduleTypeList.get_size()):
moduleTypes.append(Type(moduleTypeList.get_type(i)))
self.types = moduleTypes #TODO make immutable
functionDbNames = []
for i in range(module.get_function_count()):
functionDbNames.append(module.get_function(i))
for i in range(module.get_material_count()):
functionDbNames.append(module.get_material(i))
self.functionDbNames = tuple(functionDbNames)
functionMap = {}
if transaction and transaction.is_valid_interface():
for dbName in self.functionDbNames:
func = FunctionDefinition._fetchFromDb(transaction, dbName)
if not func.mdlSimpleName in functionMap:
functionMap[func.mdlSimpleName] = []
functionMap[func.mdlSimpleName].append(func)
self.functions: Dict[str, FunctionDefinition]
self.functions = functionMap #TODO make immutable
with module.get_annotations() as module_annos:
self.annotations = AnnotationBlock(module_annos)
@staticmethod
def _fetchFromDb(transaction: pymdlsdk.ITransaction, dbName) -> "Module":
with transaction.access_as(pymdlsdk.IModule, dbName) as m:
if m.is_valid_interface():
return Module(transaction, m, dbName)
return None
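# Hedged end-to-end sketch (assumes an open `transaction`; the module DB name below is
# made up and the module must already be loaded into the database):
#
#   module = Module._fetchFromDb(transaction, "mdl::my_package::my_module")
#   if module:
#       for simpleName, overloads in module.functions.items():
#           for definition in overloads:
#               print(simpleName, definition.parameterTypeNames)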
| MDL-SDK-master | src/prod/bindings/mdl_python/pymdl.py |
#!/usr/bin/env python
#*****************************************************************************
# Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#*****************************************************************************
# Preprocessing script for DiCE and IndeX header files to handle SWIG template
# instantiation. It parses SWIG .i files recursively, looking for definitions of
# interfaces (using "NVINDEX_INTERFACE()") as well as for all .h files that are
# included. It then searches the class declarations of these interfaces in the
# .h files and inserts SWIG "%template" statements, writing the modified .h
# files to a separate output directory.
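# Hedged illustration (class and base names are made up): for a header class declared as
#   class IMy_feature : public mi::base::Interface_declare<0x..., ..., IMy_base>
# that is registered via NVINDEX_INTERFACE(IMy_feature) in a .i file, the script emits
# SWIG statements of the form
#   %template(Interface_declare_IMy_feature) mi::base::Interface_declare<...>;
# plus matching %rename/%nodefaultctor directives into the copied header, directly
# before the class declaration.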
from __future__ import print_function
import errno
import os
import os.path
import re
import sys
# Creates directories recursively, does not fail when they already exist (same
# functionality as exist_ok=True in Python 3.5)
def makedirs_safe(path):
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
class Processor:
start_file = None
base_path_index = None
base_path_dice = None
output_path = None
output_dependency_file = None
headers = set()
nvindex_interfaces = {}
dice_interfaces = {}
log = set()
dependencies = []
# .i files provided by SWIG, these will be ignored
SWIG_FILES = [
"exception.i",
"typemaps.i",
"carrays.i"
]
# Matches the UUID template argument to "Interface_declare" with optional base class as last parameter
RE_UUID = '([0-9a-fxA-FX,\s]+)([^0-9\s][\w_:<>,\s]*)?'
def __init__(self, start_file, base_path_index, base_path_dice, output_path, output_dependency_file):
self.start_file = start_file
self.base_path_index = base_path_index
self.base_path_dice = base_path_dice
self.output_path = output_path
self.output_dependency_file = output_dependency_file
def process_swig_file(self, filename, level = 0):
if not os.path.exists(filename):
if filename in self.SWIG_FILES:
return # skip standard .i files from SWIG
sys.stderr.write("Included file not found: %s\n" % filename)
if filename.endswith(".i"):
sys.stderr.write("If this is a file provided by SWIG then please adapt 'swig_files'.")
sys.exit(1)
self.dependencies.append(filename)
regex_swig_include = re.compile('^\s*%include\s*"([^"]+\.i)"')
regex_header_include = re.compile('^\s*%include\s*"([^"]+\.h)"')
regex_header_include_nvindex = re.compile('^\s*NVINDEX_INCLUDE\(([^\)]+\.h)\)')
regex_nvindex_interface = re.compile('^\s*NVINDEX_INTERFACE[1]?\s*\(([\w_]+)\s*\)')
regex_dice_interface = re.compile('^\s*DICE_INTERFACE(_BASE|_MI)?\s*\(([\w_]+)\s*\)')
regex_dice_implement = re.compile('^\s*DICE_IMPLEMENT\s*\(([\w_]+)\s*,\s*([\w_:]+)\)')
regex_index_implement = re.compile('^\s*NVINDEX_IMPLEMENT\s*\(([\w_]+)\s*,\s*([\w_:]+)\)')
lines = open(filename).readlines()
for line in lines:
match = regex_swig_include.match(line)
if match:
self.process_swig_file(match.group(1), level + 1)
match = regex_header_include.match(line)
if match:
self.headers.add(match.group(1))
match = regex_header_include_nvindex.match(line)
if match:
self.headers.add("nv/index/" + match.group(1))
match = regex_nvindex_interface.match(line)
if match and match.group(1) != "CLASS_TYPE": # skip internal definition
self.nvindex_interfaces[match.group(1)] = 'nv::index'
match = regex_dice_interface.match(line)
if match:
cmd = match.group(1)
if not cmd: prefix = 'mi::neuraylib'
elif cmd == '_BASE': prefix = 'mi::base'
elif cmd == '_MI': prefix = 'mi'
self.dice_interfaces[match.group(2)] = prefix
match = regex_dice_implement.match(line)
if match:
self.dice_interfaces[match.group(1)] = 'mi::neuraylib'
match = regex_index_implement.match(line)
if match:
self.nvindex_interfaces[match.group(1)] = 'nv::index'
def process_header_file(self, filename, base_dir, interfaces):
self.dependencies.append(base_dir + "/" + filename)
RE_CLASS = 'class\s*([\w_]+)'
# Matches the beginning of a template declaration
regex_template = re.compile('^\s*template\s*<')
# Matches the beginning of a class declaration (but not a forward declaration)
regex_class_only = re.compile('^\s*' + RE_CLASS + '\s*[^;]*\s*$')
# Matches a class declaration using "Interface_declare"
regex_class_interface_declare = re.compile('^\s*' + RE_CLASS + '\s*:\s*public\s+' +
'([\w::]*)Interface_declare<\s*' + self.RE_UUID + '\s*>' )
# Matches a class declaration using "Interface_implement"
regex_class_interface_implement = re.compile('^\s*' + RE_CLASS + '\s*:\s*public\s+' +
'([\w::]*)Interface_implement<\s*([\w_:]+)\s*>' )
# Matches the start of a "{"-block
regex_block_begin = re.compile('^([^{]*){')
class_declaration = ""
class_name = ""
class_prefix = ""
template = ""
namespace_begin = "";
namespace_end = "";
namespace_prefix = "";
output = ""
file_modified = False
# skip neuraylib.h or mdl_sdk.h
filename_to_open = base_dir + "/" + filename
if filename_to_open.endswith("/mi/neuraylib.h") or filename_to_open.endswith("/mi/mdl_sdk.h"):
return
# Iterate over the lines in the header files
lines = open(base_dir + "/" + filename).readlines()
for line in lines:
# We are currently not inside a class declaration
if class_name == "":
# Look for class declaration
match = regex_class_only.match(line)
if match and match.group(1) in interfaces:
# Found declaration of a class that should be wrapped
class_declaration = line
class_name = match.group(1)
class_prefix = interfaces[class_name] # e.g. "nv::index" or "mi::base"
# Set up the current namespace
namespace_prefix = class_prefix + "::"
namespace_begin = ""
namespace_end = ""
for i in class_prefix.split("::"):
namespace_begin += "namespace " + i + " {\n";
namespace_end += "}\n";
else:
# Look for a template declaration
match = regex_template.match(line)
if match:
# Template declaration found: Delay output, as a
# relevant class declaration may follow, and the SWIG
# statements need to be added before the template
output += template
template = line
else:
                        # Neither class nor template declaration, just output directly.
# First output any previous template declaration.
if template != "":
output += template
template = ""
output += line;
else:
# We are inside a class declaration for a class that should be wrapped
# Look for first "{" after "class"
match = regex_block_begin.match(line)
if match:
# Collect everything starting at "class"
output_todo = class_declaration + line
class_declaration += match.group(1)
done = False
def fixup_base(base):
if not base:
base = ""
if base.startswith("base::"): #FIXME: remove
base = "mi::" + base
if base.startswith("neuraylib::"): #FIXME: remove
base = "mi::" + base
if base != "" and not (base.startswith("mi::") or base.startswith("nv::")):
base = namespace_prefix + base
return base
def remove_extra_whitespace(s):
return re.sub('\s+', ' ', s).strip()
#
# Handle Interface_declare
#
match = regex_class_interface_declare.match(class_declaration)
if match and not done:
(name, uuid, base) = match.group(1, 3, 4)
base = fixup_base(base)
uuid = remove_extra_whitespace(uuid)
instantiation = ""
if "<" in base:
instantiation += "%template(Interface_declare_" + name + "_base_template) " + base + ";\n"
instantiation += "%nodefaultctor Interface_declare_" + name + "_base_template;\n"
instantiation += "%nodefaultdtor Interface_declare_" + name + "_base_template;\n"
instantiation += "%template(Interface_declare_" + name + ") mi::base::Interface_declare<" + uuid + base + ">;\n"
instantiation += "%nodefaultctor Interface_declare_" + name + ";\n"
instantiation += "%nodefaultdtor Interface_declare_" + name + ";\n"
# #instantiation += "%ignore " + name + ";\n"
instantiation += "%rename(_" + name + ") " + name + ";\n"
instantiation += "%nodefaultctor " + name + ";\n"
instantiation += "%nodefaultdtor " + name + ";\n"
#
# if base:
#
# #instantiation += "%ignore mi::base::Interface_declare<" + uuid + base + ">;\n"
#
# base_wo_prefix = base[len(class_prefix) + 2:]
# prefix_current = ""
# for i in reversed(class_prefix.split("::")):
# prefix_current = i + "::" + prefix_current
# instantiation += "%ignore mi::base::Interface_declare<" + uuid + prefix_current + base_wo_prefix + ">;\n"
#
# instantiation += "%rename(\"%s\", %$isenum) " + class_prefix + "::" + name + "::Kind;\n"
# instantiation += "%template(Interface_declare_" + name + ") mi::base::Interface_declare<" + uuid + base + ">;\n"
self.log.add(instantiation)
output += namespace_end + instantiation + namespace_begin;
file_modified = True
done = True
#
# Handle Interface_implement
#
match = regex_class_interface_implement.match(class_declaration)
if match and not done:
(name, base) = match.group(1, 3)
base = fixup_base(base)
base = remove_extra_whitespace(base)
implementation = "%template(Interface_implement_" + name + ") mi::base::Interface_implement<" + base + ">;\n"
instantiation += "%nodefaultctor Interface_implement_" + name + ";\n"
instantiation += "%nodefaultdtor Interface_implement_" + name + ";\n"
# implementation = "%ignore mi::base::Interface_implement<" + base + ">;\n"
self.log.add(implementation)
output += namespace_end + implementation + namespace_begin;
file_modified = True
done = True
if not done:
# Found "class" but couldn't understand declaration
raise Exception("Could not parse declaration of class '" + class_name +
"' in file '" + filename + "': " + class_declaration)
del interfaces[class_name] # This has been handled
class_declaration = ""
class_name = ""
# Output any previous template declaration.
if template != "":
output += template
template = ""
output += output_todo
else:
# Still inside the class declaration, before "{"
class_declaration += line
# Write file if it was modified
if file_modified:
output_filename = self.output_path + "/" + filename
makedirs_safe(os.path.dirname(output_filename))
open(output_filename, "w").write(output)
def process(self):
self.process_swig_file(self.start_file)
# Iterate over all found headers
for h in self.headers:
if h.startswith("nv/"):
# Handle IndeX headers
interfaces = self.nvindex_interfaces
base = self.base_path_index
elif h.startswith("mi/"):
# Handle DiCE headers
interfaces = self.dice_interfaces
base = self.base_path_dice
else:
# Skip other headers (i.e. headers defined as part of the SWIG wrapper)
continue
self.process_header_file(h, base, interfaces)
        # Debug info about which interfaces have been found
sys.stderr.write("\nprocessed interfaces\n")
self.log = sorted(self.log)
# for interface in self.log:
# sys.stderr.write("\n%s" % interface)
# Check for interfaces that were defined (e.g. with NVINDEX_INTERFACE()) but not found in the header files
if self.nvindex_interfaces:
sys.stderr.write("Warning: Defined IndeX interfaces not found in headers: %s\n" % self.nvindex_interfaces)
if self.dice_interfaces:
sys.stderr.write("Warning: Defined DiCE interfaces not found in headers: %s\n" % self.dice_interfaces)
# Write dependency file
with open(self.output_dependency_file + ".d", "w") as f:
f.write("# dependency file generated by " + sys.argv[0] + "\n")
f.write(os.path.abspath(self.output_dependency_file) + ": \\\n")
for d in self.dependencies:
f.write(" " + os.path.abspath(d) + " \\\n")
f.write("\n\n")
# Placeholder file must be written after dependency file
with open(self.output_dependency_file, "w") as f:
f.write("/* placeholder file generated by " + sys.argv[0] + " */\n")
if __name__ == "__main__":
if len(sys.argv) != 6:
sys.stderr.write("usage: %s <swig-file> <index-header-dir> <dice-header-dir> <output-dir> <output-dependencies>\n" % sys.argv[0])
sys.exit(1)
# pass parameters unpacked from argument list
Processor(*sys.argv[1:]).process()
| MDL-SDK-master | src/prod/bindings/mdl_python/instantiate_templates_inline.py |
#===- gen-msvc-exports.py - Generate C API export file -------*- python -*--===#
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===------------------------------------------------------------------------===#
#
# Generate an export file from a list of given LIB files. This only exports symbols
# that start with LLVM, so it only exports the LLVM C API.
#
# To have CMake run this, set LLVM_BUILD_LLVM_C_DYLIB to on while
# building on Windows.
#
# To run manually, build LLVM with Visual Studio, use a Command prompt
# to navigate to the directory with the .lib files (Debug\lib etc). Then run
# python C:\Path\To\gen-msvc-exports.py --nm ..\bin\llvm-nm.exe LLVM*.lib
#
# If you're generating a 32 bit DLL, use the `--underscore` flag.
# If you want to use a different `llvm-nm` executable, pass the path
# with the `--nm` flag.
#
# You can use the --output flag to set the name of the export file.
#
#===------------------------------------------------------------------------===#
from tempfile import mkstemp
from contextlib import contextmanager
from subprocess import check_call
import argparse
import os
import re
_UNDERSCORE_REGEX = {
False: re.compile(r"^\w+\s+T\s+(LLVM.*)$"),
True: re.compile(r"^\w+\s+T\s+_(LLVM.*)$")
}
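# Illustrative llvm-nm output lines matched by the regexes above (addresses made up):
#   0000000000001000 T LLVMContextCreate     <- matched when --underscore is not given
#   00001000 T _LLVMContextCreate            <- matched when --underscore is given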
@contextmanager
def removing(path):
try:
yield path
finally:
os.unlink(path)
def touch_tempfile(*args, **kwargs):
fd, name = mkstemp(*args, **kwargs)
os.close(fd)
return name
def gen_llvm_c_export(output, underscore, libs, nm):
"""Generate the export file for the LLVM-C DLL.
Run `nm` for each lib in `libs`, and output an export file
to `output`. If `underscore` is true, symbols will
be assumed to be prefixed with an underscore.
"""
with removing(touch_tempfile(prefix='dumpout', suffix='.txt')) as dumpout:
# Get the right regex.
p = _UNDERSCORE_REGEX[underscore]
with open(output, 'w+t') as output_f:
# For each lib get the LLVM* functions it exports.
for lib in libs:
# Call dumpbin.
with open(dumpout, 'w+t') as dumpout_f:
check_call([nm, '-g', lib], stdout=dumpout_f)
# Get the matching lines.
with open(dumpout) as dumpbin:
for line in dumpbin:
m = p.match(line)
if m is not None:
output_f.write(m.group(1) + '\n')
def main():
parser = argparse.ArgumentParser('gen-msvc-exports')
parser.add_argument(
'-i', '--libsfile', help='file with list of libs, new line separated',
action='store', default=None
)
parser.add_argument(
'-o', '--output', help='output filename', default='LLVM-C.exports'
)
parser.add_argument('-u', '--underscore',
help='labels are prefixed with an underscore (use for 32 bit DLLs)',
action='store_true'
)
parser.add_argument(
'--nm', help='path to the llvm-nm executable', default='llvm-nm'
)
parser.add_argument(
'libs', metavar='LIBS', nargs='*', help='list of libraries to generate export from'
)
ns = parser.parse_args()
libs = ns.libs
    # If we were given a libsfile, add its contents to the libs.
if ns.libsfile:
with open(ns.libsfile) as f:
libs.extend(f.read().splitlines())
gen_llvm_c_export(ns.output, ns.underscore, libs, ns.nm)
if __name__ == '__main__':
main()
| MDL-SDK-master | src/mdl/jit/llvm/dist/tools/llvm-shlib/gen-msvc-exports.py |
#!/usr/bin/env python3
#===- symcov-report-server.py - Coverage Reports HTTP Serve --*- python -*--===#
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===------------------------------------------------------------------------===#
'''(EXPERIMENTAL) HTTP server to browse coverage reports from .symcov files.
Coverage reports for big binaries are too huge, generating them statically
makes no sense. Start the server and go to localhost:8001 instead.
Usage:
./tools/sancov/symcov-report-server.py \
--symcov coverage_data.symcov \
--srcpath root_src_dir
Other options:
--port port_number - specifies the port to use (8001)
--host host_name - host name to bind server to (127.0.0.1)
'''
from __future__ import print_function
import argparse
import http.server
import json
import socketserver
import time
import html
import os
import string
import math
import urllib
INDEX_PAGE_TMPL = """
<html>
<head>
<title>Coverage Report</title>
<style>
.lz { color: lightgray; }
</style>
</head>
<body>
<table>
<tr><th>File</th><th>Coverage</th></tr>
<tr><td><em>Files with 0 coverage are not shown.</em></td></tr>
$filenames
</table>
</body>
</html>
"""
CONTENT_PAGE_TMPL = """
<html>
<head>
<title>$path</title>
<style>
.covered { background: lightgreen; }
.not-covered { background: lightcoral; }
.partially-covered { background: navajowhite; }
.lz { color: lightgray; }
</style>
</head>
<body>
<pre>
$content
</pre>
</body>
</html>
"""
class SymcovData:
def __init__(self, symcov_json):
self.covered_points = frozenset(symcov_json['covered-points'])
self.point_symbol_info = symcov_json['point-symbol-info']
self.file_coverage = self.compute_filecoverage()
def filenames(self):
return self.point_symbol_info.keys()
def has_file(self, filename):
return filename in self.point_symbol_info
def compute_linemap(self, filename):
"""Build a line_number->css_class map."""
points = self.point_symbol_info.get(filename, dict())
line_to_points = dict()
for fn, points in points.items():
for point, loc in points.items():
line = int(loc.split(":")[0])
line_to_points.setdefault(line, []).append(point)
result = dict()
for line, points in line_to_points.items():
status = "covered"
covered_points = self.covered_points & set(points)
if not len(covered_points):
status = "not-covered"
elif len(covered_points) != len(points):
status = "partially-covered"
result[line] = status
return result
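    # compute_linemap() returns e.g. {12: "covered", 13: "partially-covered"} (line
    # numbers are hypothetical); the status strings match the CSS classes in the
    # page templates above.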
def compute_filecoverage(self):
"""Build a filename->pct coverage."""
result = dict()
for filename, fns in self.point_symbol_info.items():
file_points = []
for fn, points in fns.items():
file_points.extend(points.keys())
covered_points = self.covered_points & set(file_points)
result[filename] = int(math.ceil(
len(covered_points) * 100 / len(file_points)))
return result
def format_pct(pct):
pct_str = str(max(0, min(100, pct)))
zeroes = '0' * (3 - len(pct_str))
if zeroes:
zeroes = '<span class="lz">{0}</span>'.format(zeroes)
return zeroes + pct_str
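# Worked example: format_pct(7) returns '<span class="lz">00</span>7', so the
# light-gray leading zeroes keep the percentage column visually right-aligned.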
class ServerHandler(http.server.BaseHTTPRequestHandler):
symcov_data = None
src_path = None
def do_GET(self):
norm_path = os.path.normpath(urllib.parse.unquote(self.path[1:]))
if self.path == '/':
self.send_response(200)
self.send_header("Content-type", "text/html; charset=utf-8")
self.end_headers()
filelist = []
for filename in sorted(self.symcov_data.filenames()):
file_coverage = self.symcov_data.file_coverage[filename]
if not file_coverage:
continue
filelist.append(
"<tr><td><a href=\"./{name}\">{name}</a></td>"
"<td>{coverage}%</td></tr>".format(
name=html.escape(filename, quote=True),
coverage=format_pct(file_coverage)))
response = string.Template(INDEX_PAGE_TMPL).safe_substitute(
filenames='\n'.join(filelist))
self.wfile.write(response.encode('UTF-8', 'replace'))
elif self.symcov_data.has_file(norm_path):
filename = norm_path
filepath = os.path.join(self.src_path, filename)
if not os.path.exists(filepath):
self.send_response(404)
self.end_headers()
return
self.send_response(200)
self.send_header("Content-type", "text/html; charset=utf-8")
self.end_headers()
linemap = self.symcov_data.compute_linemap(filename)
with open(filepath, 'r', encoding='utf8') as f:
content = "\n".join(
["<span class='{cls}'>{line} </span>".format(
line=html.escape(line.rstrip()),
cls=linemap.get(line_no, ""))
for line_no, line in enumerate(f, start=1)])
response = string.Template(CONTENT_PAGE_TMPL).safe_substitute(
path=self.path[1:],
content=content)
self.wfile.write(response.encode('UTF-8', 'replace'))
else:
self.send_response(404)
self.end_headers()
def main():
parser = argparse.ArgumentParser(description="symcov report http server.")
parser.add_argument('--host', default='127.0.0.1')
parser.add_argument('--port', default=8001)
parser.add_argument('--symcov', required=True, type=argparse.FileType('r'))
parser.add_argument('--srcpath', required=True)
args = parser.parse_args()
print("Loading coverage...")
symcov_json = json.load(args.symcov)
ServerHandler.symcov_data = SymcovData(symcov_json)
ServerHandler.src_path = args.srcpath
socketserver.TCPServer.allow_reuse_address = True
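    # allow_reuse_address avoids "Address already in use" errors when the server
    # is restarted shortly after a previous run.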
httpd = socketserver.TCPServer((args.host, args.port), ServerHandler)
print("Serving at {host}:{port}".format(host=args.host, port=args.port))
try:
httpd.serve_forever()
except KeyboardInterrupt:
pass
httpd.server_close()
if __name__ == '__main__':
main()
| MDL-SDK-master | src/mdl/jit/llvm/dist/tools/sancov/coverage-report-server.py |
#!/usr/bin/env python
from __future__ import print_function
desc = '''Generate statistics about optimization records from the YAML files
generated with -fsave-optimization-record and -fdiagnostics-show-hotness.
The tools requires PyYAML and Pygments Python packages.'''
import optrecord
import argparse
import operator
import sys
from collections import defaultdict
from multiprocessing import cpu_count, Pool
try:
from guppy import hpy
hp = hpy()
except ImportError:
print("Memory consumption not shown because guppy is not installed")
hp = None
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=desc)
parser.add_argument(
'yaml_dirs_or_files',
nargs='+',
help='List of optimization record files or directories searched '
'for optimization record files.')
parser.add_argument(
'--jobs',
'-j',
default=None,
type=int,
help='Max job count (defaults to %(default)s, the current CPU count)')
parser.add_argument(
'--no-progress-indicator',
'-n',
action='store_true',
default=False,
help='Do not display any indicator of how many YAML files were read.')
args = parser.parse_args()
print_progress = not args.no_progress_indicator
files = optrecord.find_opt_files(*args.yaml_dirs_or_files)
if not files:
parser.error("No *.opt.yaml files found")
sys.exit(1)
all_remarks, file_remarks, _ = optrecord.gather_results(
files, args.jobs, print_progress)
if print_progress:
print('\n')
bypass = defaultdict(int)
byname = defaultdict(int)
for r in optrecord.itervalues(all_remarks):
bypass[r.Pass] += 1
byname[r.Pass + "/" + r.Name] += 1
total = len(all_remarks)
print("{:24s} {:10d}".format("Total number of remarks", total))
if hp:
h = hp.heap()
print("{:24s} {:10d}".format("Memory per remark",
h.size / len(all_remarks)))
print('\n')
print("Top 10 remarks by pass:")
for (passname, count) in sorted(bypass.items(), key=operator.itemgetter(1),
reverse=True)[:10]:
print(" {:30s} {:2.0f}%". format(passname, count * 100. / total))
print("\nTop 10 remarks:")
for (name, count) in sorted(byname.items(), key=operator.itemgetter(1),
reverse=True)[:10]:
print(" {:30s} {:2.0f}%". format(name, count * 100. / total))
| MDL-SDK-master | src/mdl/jit/llvm/dist/tools/opt-viewer/opt-stats.py |
#!/usr/bin/env python
from __future__ import print_function
desc = '''Generate the difference of two YAML files into a new YAML file (works on
pair of directories too). A new attribute 'Added' is set to True or False
depending whether the entry is added or removed from the first input to the
next.
The tools requires PyYAML.'''
import yaml
# Try to use the C parser.
try:
from yaml import CLoader as Loader
except ImportError:
from yaml import Loader
import optrecord
import argparse
from collections import defaultdict
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=desc)
parser.add_argument(
'yaml_dir_or_file_1',
help='An optimization record file or a directory searched for optimization '
'record files that are used as the old version for the comparison')
parser.add_argument(
'yaml_dir_or_file_2',
help='An optimization record file or a directory searched for optimization '
'record files that are used as the new version for the comparison')
parser.add_argument(
'--jobs',
'-j',
default=None,
type=int,
help='Max job count (defaults to %(default)s, the current CPU count)')
parser.add_argument(
'--max-size',
'-m',
default=100000,
type=int,
help='Maximum number of remarks stored in an output file')
parser.add_argument(
'--no-progress-indicator',
'-n',
action='store_true',
default=False,
help='Do not display any indicator of how many YAML files were read.')
parser.add_argument('--output', '-o', default='diff{}.opt.yaml')
args = parser.parse_args()
files1 = optrecord.find_opt_files(args.yaml_dir_or_file_1)
files2 = optrecord.find_opt_files(args.yaml_dir_or_file_2)
print_progress = not args.no_progress_indicator
all_remarks1, _, _ = optrecord.gather_results(files1, args.jobs, print_progress)
all_remarks2, _, _ = optrecord.gather_results(files2, args.jobs, print_progress)
added = set(all_remarks2.values()) - set(all_remarks1.values())
removed = set(all_remarks1.values()) - set(all_remarks2.values())
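    # Note: the set differences above assume optrecord remarks hash and compare by
    # value rather than identity, so remarks present in both runs cancel out.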
for r in added:
r.Added = True
for r in removed:
r.Added = False
result = list(added | removed)
for r in result:
r.recover_yaml_structure()
for i in range(0, len(result), args.max_size):
        with open(args.output.format(i // args.max_size), 'w') as stream:
yaml.dump_all(result[i:i + args.max_size], stream)
| MDL-SDK-master | src/mdl/jit/llvm/dist/tools/opt-viewer/opt-diff.py |