python_code | repo_name | file_path |
---|---|---|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# author: Tomasz Grel ([email protected])
import json
import math
import os
import numpy as np
def save_embedding_table(numpy_table, dst_dir, offset=0, min_keys=100):
if numpy_table.shape[0] < min_keys:
print(
f"Artificially lengthening embedding table from size: {numpy_table.shape} to size {min_keys}"
)
num_missing_rows = min_keys - numpy_table.shape[0]
padding = np.zeros(
shape=[num_missing_rows, numpy_table.shape[1]], dtype=numpy_table.dtype
)
numpy_table = np.vstack([numpy_table, padding])
keys_table = np.arange(
start=offset, stop=offset + numpy_table.shape[0], dtype=np.int64
)
keys_bytes = keys_table.tobytes()
key_file = os.path.join(dst_dir, "key")
with open(key_file, "wb") as f:
f.write(keys_bytes)
table_bytes = numpy_table.tobytes()
table_file = os.path.join(dst_dir, "emb_vector")
with open(table_file, "wb") as f:
f.write(table_bytes)
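# HPS expects each embedding table as a pair of flat binary files: "key" holds the
# int64 lookup keys and "emb_vector" holds the embedding rows in the same order.
# The helper below is a minimal sketch of reading such a pair back for a sanity
# check; it is not part of the deployment flow, and "embedding_dim" and the float32
# dtype are assumptions, since neither is stored next to the binary files.
def _load_embedding_table_sketch(src_dir, embedding_dim, dtype=np.float32):
    # read back the keys and vectors written by save_embedding_table
    keys = np.fromfile(os.path.join(src_dir, "key"), dtype=np.int64)
    vectors = np.fromfile(os.path.join(src_dir, "emb_vector"), dtype=dtype)
    return keys, vectors.reshape(keys.shape[0], embedding_dim)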
_hps_triton_config_template = r"""name: "{model_name}"
backend: "hps"
max_batch_size:{max_batch_size}
input [ {{
name: "KEYS"
data_type: TYPE_INT64
dims: [-1]
}},
{{
name: "NUMKEYS"
data_type: TYPE_INT32
dims: [-1]
}}]
output [ {{
name: "OUTPUT0"
data_type: TYPE_FP32
dims: [-1]
}}]
version_policy: {{
specific:{{versions: {version}}}
}},
instance_group [
{{
count: {engine_count_per_device}
kind : KIND_GPU
gpus : [0]
}}
]
"""
def save_triton_config(
dst_path, model_name, version, max_batch_size, engine_count_per_device
):
config = _hps_triton_config_template.format(
model_name=model_name,
max_batch_size=max_batch_size,
version=version,
engine_count_per_device=engine_count_per_device,
)
print("saving pbtxt HPS config to: ", dst_path)
with open(dst_path, "w") as f:
f.write(config)
print("Wrote HPS Triton config to:", dst_path)
print(f"{model_name} configuration:")
print(config)
def save_json_config(
dst_path,
hps_embedding_dirs,
src_config,
num_gpus,
gpucacheper,
max_batch_size,
model_name,
fused=True,
):
num_cat_features = 1 if fused else len(src_config["categorical_cardinalities"])
if len(hps_embedding_dirs) != num_cat_features:
raise ValueError(
f"Length mismatch between hps_embedding_dirs ({len(hps_embedding_dirs)}) "
f"and num_cat_features ({num_cat_features}), fused={fused}. This should not happen."
)
vecsize_per_table = src_config["embedding_dim"]
max_batch_size_factor = 1
if fused:
vecsize_per_table = [vecsize_per_table[0]]
max_batch_size_factor = len(src_config["categorical_cardinalities"])
hps_embedding_config = {
"supportlonglong": True,
"models": [
{
"model": model_name,
                # these directories should contain the "key" and "emb_vector" files; they need to be copied over from the previous location
"sparse_files": hps_embedding_dirs,
"num_of_worker_buffer_in_pool": 3,
"embedding_table_names": [
f"sparse_embedding{i}" for i in range(num_cat_features)
],
"embedding_vecsize_per_table": vecsize_per_table,
# for now, every table uses the same embedding dim
"maxnum_catfeature_query_per_table_per_sample": [
1 for _ in range(num_cat_features)
],
"default_value_for_each_table": [1.0 for _ in range(num_cat_features)],
"deployed_device_list": list(range(num_gpus)),
"max_batch_size": max_batch_size * max_batch_size_factor,
"cache_refresh_percentage_per_iteration": 0.0,
"hit_rate_threshold": 1.0,
"gpucacheper": gpucacheper,
"gpucache": True,
}
],
}
print("saving json config to: ", dst_path)
with open(dst_path, "w") as f:
json.dump(obj=hps_embedding_config, fp=f, indent=4)
def convert_embedding_tables(src_paths, dst, fused):
if fused:
return convert_embedding_tables_fused(src_paths, dst)
else:
return convert_embedding_tables_unfused(src_paths, dst)
def convert_embedding_tables_unfused(src_paths, dst):
hps_embedding_dirs = []
for src_path in src_paths:
table_index = int(src_path.split("_")[-1].split(".")[0])
dst_dir = os.path.join(dst, str(table_index))
print(f"Converting embedding table: {src_path} to {dst_dir}")
print(f"Loading source from {src_path}")
data = np.load(src_path, mmap_mode="r")
os.makedirs(dst_dir, exist_ok=True)
print(f"Saving embedding table to {dst_dir}")
save_embedding_table(numpy_table=data, dst_dir=dst_dir)
hps_embedding_dirs.append(dst_dir)
return hps_embedding_dirs
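# In the fused layout below, all embedding tables are concatenated into a single
# "key"/"emb_vector" pair: the rows of each source table are appended in order and
# their keys are shifted by a running offset (current_offset), so table i's keys
# start right after the last key of table i-1.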
def convert_embedding_tables_fused(src_paths, dst):
dst_dir = os.path.join(dst, "0")
os.makedirs(dst_dir, exist_ok=True)
current_offset = 0
first_width = None
key_file = os.path.join(dst_dir, "key")
table_file = os.path.join(dst_dir, "emb_vector")
with open(key_file, "wb") as keys_f, open(table_file, "wb") as table_f:
for src_path in src_paths:
print(f"Converting table {src_path}")
data = np.load(src_path, mmap_mode="r")
if first_width is not None and data.shape[1] != first_width:
raise ValueError(
"Attempting to deploy with a fused embedding but not all embeddings have the same dimension."
f"Got embedding dimension: {data.shape[1]}, expected: {first_width}"
)
if first_width is None:
first_width = data.shape[1]
length = data.shape[0]
keys_table = np.arange(
start=current_offset, stop=current_offset + length, dtype=np.int64
)
keys_bytes = keys_table.tobytes()
keys_f.write(keys_bytes)
# write the table in chunks to minimize memory usage
chunk_size = 2**20
num_chunks = math.ceil(length / chunk_size)
for i in range(num_chunks):
begin = i * chunk_size
end = (i + 1) * chunk_size
end = min(end, length)
table_bytes = data[begin:end].tobytes()
table_f.write(table_bytes)
current_offset += length
return [dst_dir]
def deploy_sparse(
src,
dst,
model_name,
max_batch_size,
engine_count_per_device,
gpucacheper,
num_gpus=1,
version="1",
fused=True,
**kwargs
):
print("deploy sparse dst: ", dst)
with open(os.path.join(src, "config.json")) as f:
src_config = json.load(f)
num_cat_features = len(src_config["categorical_cardinalities"])
src_paths = [os.path.join(src, f"feature_{i}.npy") for i in range(num_cat_features)]
hps_embedding_dirs = convert_embedding_tables(
src_paths=src_paths, dst=os.path.join(dst, version), fused=fused
)
save_triton_config(
dst_path=os.path.join(dst, "config.pbtxt"),
model_name=model_name,
version=version,
max_batch_size=max_batch_size,
engine_count_per_device=engine_count_per_device,
)
save_json_config(
dst_path=os.path.join(dst, f"{model_name}.json"),
hps_embedding_dirs=hps_embedding_dirs,
src_config=src_config,
num_gpus=num_gpus,
fused=fused,
gpucacheper=gpucacheper,
max_batch_size=max_batch_size,
model_name=model_name,
)
return len(src_config["categorical_cardinalities"])
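# Example invocation (hypothetical values; the actual caller is the surrounding
# deployment tooling, which parses command-line arguments):
# deploy_sparse(src="/checkpoints/dlrm/embeddings", dst="/models/sparse",
#               model_name="sparse", max_batch_size=65536,
#               engine_count_per_device=1, gpucacheper=0.5)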
| DeepLearningExamples-master | TensorFlow2/Recommendation/DLRM_and_DCNv2/deployment/hps/deploy_sparse.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# author: Tomasz Grel ([email protected])
import os
from collections import namedtuple
Tensor = namedtuple("Tensor", ["name", "dtype", "dims"])
_config_template = r'''
name: "{model_name}"
platform: "ensemble"
max_batch_size: {max_batch_size}
input [
{{
name: "EMB_KEY"
data_type: TYPE_INT64
dims: [-1]
}},
{{
name: "EMB_N_KEY"
data_type: TYPE_INT32
dims: [-1]
}},
{{
name: "numerical_features"
data_type: TYPE_FP32
dims: [-1]
}}
]
output [
{{
name: "DENSE_OUTPUT"
data_type: TYPE_FP32
dims: [-1]
}}
]
ensemble_scheduling {{
step [
{{
model_name: "{sparse_model_name}"
model_version: -1
input_map {{
key: "KEYS"
value: "EMB_KEY"
}},
input_map {{
key: "NUMKEYS"
value: "EMB_N_KEY"
}},
output_map {{
key: "OUTPUT0"
value: "LOOKUP_VECTORS"
}}
}},
{{
model_name: "{dense_model_name}"
model_version: -1
input_map {{
key: "args_1"
value: "LOOKUP_VECTORS"
}},
input_map {{
key: "args_0"
value: "numerical_features"
}},
output_map {{
key: "output_1"
value: "DENSE_OUTPUT"
}}
}}
]
}}
'''
def deploy_ensemble(dst, model_name, sparse_model_name, dense_model_name,
num_cat_features, num_numerical_features, max_batch_size, version):
config_str = _config_template.format(model_name=model_name,
sparse_model_name=sparse_model_name,
dense_model_name=dense_model_name,
max_batch_size=max_batch_size)
with open(os.path.join(dst, "config.pbtxt"), "w") as f:
f.write(config_str)
os.mkdir(os.path.join(dst, str(version)))
print("Ensemble configuration:")
print(config_str)
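# Example invocation (hypothetical values): the ensemble only wires the already
# deployed sparse (HPS) and dense Triton models together, so the names passed here
# must match their model repository directories.
# deploy_ensemble(dst="/models/ensemble", model_name="ensemble",
#                 sparse_model_name="sparse", dense_model_name="dense",
#                 num_cat_features=26, num_numerical_features=13,
#                 max_batch_size=65536, version=1)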
| DeepLearningExamples-master | TensorFlow2/Recommendation/DLRM_and_DCNv2/deployment/hps/deploy_ensemble.py |
"""
Script to benchmark model throughput and latency
"""
import os
import numpy as np
from tqdm import tqdm
from timeit import default_timer as timer
import hydra
from omegaconf import DictConfig
import tensorflow as tf
from tensorflow.keras import mixed_precision
from data_generators import tf_data_generator
from utils.general_utils import join_paths, suppress_warnings
from utils.images_utils import postprocess_mask
from models.model import prepare_model
def benchmark_time(cfg: DictConfig):
"""
Output throughput and latency
"""
# suppress TensorFlow and DALI warnings
suppress_warnings()
if cfg.OPTIMIZATION.AMP:
print("Enabling Automatic Mixed Precision(AMP)")
policy = mixed_precision.Policy('mixed_float16')
mixed_precision.set_global_policy(policy)
if cfg.OPTIMIZATION.XLA:
print("Enabling Accelerated Linear Algebra(XLA)")
tf.config.optimizer.set_jit(True)
# data generator
val_generator = tf_data_generator.DataGenerator(cfg, mode="VAL")
validation_steps = val_generator.__len__()
warmup_steps, bench_steps = 50, 100
if "warmup_steps" in cfg.keys():
warmup_steps = cfg.warmup_steps
if "bench_steps" in cfg.keys():
bench_steps = cfg.bench_steps
validation_steps = min(validation_steps, (warmup_steps + bench_steps))
progress_bar = tqdm(total=validation_steps)
# create model
model = prepare_model(cfg)
# weights model path
checkpoint_path = join_paths(
cfg.WORK_DIR,
cfg.CALLBACKS.MODEL_CHECKPOINT.PATH,
f"{cfg.MODEL.WEIGHTS_FILE_NAME}.hdf5"
)
assert os.path.exists(checkpoint_path), \
f"Model weight's file does not exist at \n{checkpoint_path}"
# load model weights
model.load_weights(checkpoint_path, by_name=True, skip_mismatch=True)
# model.summary()
time_taken = []
# for each batch
for i, (batch_images, batch_mask) in enumerate(val_generator):
start_time = timer()
# make prediction on batch
batch_predictions = model.predict_on_batch(batch_images)
if len(model.outputs) > 1:
batch_predictions = batch_predictions[0]
# do postprocessing on predicted mask
batch_predictions = postprocess_mask(batch_predictions, cfg.OUTPUT.CLASSES)
time_taken.append(timer() - start_time)
progress_bar.update(1)
if i >= validation_steps:
break
progress_bar.close()
mean_time = np.mean(time_taken[warmup_steps:]) # skipping warmup_steps
throughput = (cfg.HYPER_PARAMETERS.BATCH_SIZE / mean_time)
print(f"Latency: {round(mean_time * 1e3, 2)} msec")
print(f"Throughput/FPS: {round(throughput, 2)} samples/sec")
@hydra.main(version_base=None, config_path="configs", config_name="config")
def main(cfg: DictConfig):
"""
Read config file and pass to benchmark_time method
"""
benchmark_time(cfg)
if __name__ == "__main__":
main()
| DeepLearningExamples-master | TensorFlow2/Segmentation/Contrib/UNet3P/benchmark_inference.py |
"""
Prediction script used to visualize model output
"""
import os
import hydra
from omegaconf import DictConfig
from data_generators import tf_data_generator
from utils.general_utils import join_paths, suppress_warnings
from utils.images_utils import display
from utils.images_utils import postprocess_mask, denormalize_mask
from models.model import prepare_model
def predict(cfg: DictConfig):
"""
Predict and visualize given data
"""
# suppress TensorFlow and DALI warnings
suppress_warnings()
# set batch size to one
cfg.HYPER_PARAMETERS.BATCH_SIZE = 1
# data generator
val_generator = tf_data_generator.DataGenerator(cfg, mode="VAL")
# create model
model = prepare_model(cfg)
# weights model path
checkpoint_path = join_paths(
cfg.WORK_DIR,
cfg.CALLBACKS.MODEL_CHECKPOINT.PATH,
f"{cfg.MODEL.WEIGHTS_FILE_NAME}.hdf5"
)
assert os.path.exists(checkpoint_path), \
f"Model weight's file does not exist at \n{checkpoint_path}"
# load model weights
model.load_weights(checkpoint_path, by_name=True, skip_mismatch=True)
# model.summary()
# check mask are available or not
mask_available = True
if cfg.DATASET.VAL.MASK_PATH is None or \
str(cfg.DATASET.VAL.MASK_PATH).lower() == "none":
mask_available = False
showed_images = 0
for batch_data in val_generator: # for each batch
batch_images = batch_data[0]
if mask_available:
batch_mask = batch_data[1]
# make prediction on batch
batch_predictions = model.predict_on_batch(batch_images)
if len(model.outputs) > 1:
batch_predictions = batch_predictions[0]
for index in range(len(batch_images)):
image = batch_images[index] # for each image
if cfg.SHOW_CENTER_CHANNEL_IMAGE:
# for UNet3+ show only center channel as image
image = image[:, :, 1]
# do postprocessing on predicted mask
prediction = batch_predictions[index]
prediction = postprocess_mask(prediction, cfg.OUTPUT.CLASSES)
# denormalize mask for better visualization
prediction = denormalize_mask(prediction, cfg.OUTPUT.CLASSES)
if mask_available:
mask = batch_mask[index]
mask = postprocess_mask(mask, cfg.OUTPUT.CLASSES)
mask = denormalize_mask(mask, cfg.OUTPUT.CLASSES)
# if np.unique(mask).shape[0] == 2:
if mask_available:
display([image, mask, prediction], show_true_mask=True)
else:
display([image, prediction], show_true_mask=False)
showed_images += 1
# stop after displaying below number of images
# if showed_images >= 10: break
@hydra.main(version_base=None, config_path="configs", config_name="config")
def main(cfg: DictConfig):
"""
Read config file and pass to prediction method
"""
predict(cfg)
if __name__ == "__main__":
main()
| DeepLearningExamples-master | TensorFlow2/Segmentation/Contrib/UNet3P/predict.py |
"""
Training script
"""
import numpy as np
from datetime import datetime, timedelta
import hydra
from omegaconf import DictConfig
import tensorflow as tf
from tensorflow.keras import mixed_precision
from tensorflow.keras.callbacks import (
EarlyStopping,
ModelCheckpoint,
TensorBoard,
CSVLogger
)
from data_generators import data_generator
from data_preparation.verify_data import verify_data
from utils.general_utils import create_directory, join_paths, set_gpus, \
suppress_warnings
from models.model import prepare_model
from losses.loss import DiceCoefficient
from losses.unet_loss import unet3p_hybrid_loss
from callbacks.timing_callback import TimingCallback
def create_training_folders(cfg: DictConfig):
"""
Create directories to store Model CheckPoint and TensorBoard logs.
"""
create_directory(
join_paths(
cfg.WORK_DIR,
cfg.CALLBACKS.MODEL_CHECKPOINT.PATH
)
)
create_directory(
join_paths(
cfg.WORK_DIR,
cfg.CALLBACKS.TENSORBOARD.PATH
)
)
def train(cfg: DictConfig):
"""
Training method
"""
# suppress TensorFlow and DALI warnings
suppress_warnings()
print("Verifying data ...")
verify_data(cfg)
    if cfg.MODEL.TYPE == "unet3plus_deepsup_cgm":
        raise ValueError(
            "UNet3+ with Deep Supervision and Classification Guided Module"
            "\nThe model exists but this training script does not support that variant. "
            "Please choose another variant from the config file."
        )
if cfg.USE_MULTI_GPUS.VALUE:
# change number of visible gpus for training
set_gpus(cfg.USE_MULTI_GPUS.GPU_IDS)
# update batch size according to available gpus
data_generator.update_batch_size(cfg)
# create folders to store training checkpoints and logs
create_training_folders(cfg)
if cfg.OPTIMIZATION.AMP:
print("Enabling Automatic Mixed Precision(AMP) training")
policy = mixed_precision.Policy('mixed_float16')
mixed_precision.set_global_policy(policy)
if cfg.OPTIMIZATION.XLA:
print("Enabling Accelerated Linear Algebra(XLA) training")
tf.config.optimizer.set_jit(True)
# create model
strategy = None
if cfg.USE_MULTI_GPUS.VALUE:
# multi gpu training using tensorflow mirrored strategy
strategy = tf.distribute.MirroredStrategy(
cross_device_ops=tf.distribute.HierarchicalCopyAllReduce()
)
print('Number of visible gpu devices: {}'.format(strategy.num_replicas_in_sync))
with strategy.scope():
optimizer = tf.keras.optimizers.Adam(
learning_rate=cfg.HYPER_PARAMETERS.LEARNING_RATE
) # optimizer
if cfg.OPTIMIZATION.AMP:
optimizer = mixed_precision.LossScaleOptimizer(
optimizer,
dynamic=True
)
dice_coef = DiceCoefficient(post_processed=True, classes=cfg.OUTPUT.CLASSES)
dice_coef = tf.keras.metrics.MeanMetricWrapper(name="dice_coef", fn=dice_coef)
model = prepare_model(cfg, training=True)
else:
optimizer = tf.keras.optimizers.Adam(
learning_rate=cfg.HYPER_PARAMETERS.LEARNING_RATE
) # optimizer
if cfg.OPTIMIZATION.AMP:
optimizer = mixed_precision.LossScaleOptimizer(
optimizer,
dynamic=True
)
dice_coef = DiceCoefficient(post_processed=True, classes=cfg.OUTPUT.CLASSES)
dice_coef = tf.keras.metrics.MeanMetricWrapper(name="dice_coef", fn=dice_coef)
model = prepare_model(cfg, training=True)
model.compile(
optimizer=optimizer,
loss=unet3p_hybrid_loss,
metrics=[dice_coef],
)
model.summary()
# data generators
train_generator = data_generator.get_data_generator(cfg, "TRAIN", strategy)
val_generator = data_generator.get_data_generator(cfg, "VAL", strategy)
# verify generator
# for i, (batch_images, batch_mask) in enumerate(val_generator):
# print(len(batch_images))
# if i >= 3: break
# the tensorboard log directory will be a unique subdirectory
# based on the start time for the run
tb_log_dir = join_paths(
cfg.WORK_DIR,
cfg.CALLBACKS.TENSORBOARD.PATH,
"{}".format(datetime.now().strftime("%Y.%m.%d.%H.%M.%S"))
)
print("TensorBoard directory\n" + tb_log_dir)
checkpoint_path = join_paths(
cfg.WORK_DIR,
cfg.CALLBACKS.MODEL_CHECKPOINT.PATH,
f"{cfg.MODEL.WEIGHTS_FILE_NAME}.hdf5"
)
print("Weights path\n" + checkpoint_path)
csv_log_path = join_paths(
cfg.WORK_DIR,
cfg.CALLBACKS.CSV_LOGGER.PATH,
f"training_logs_{cfg.MODEL.TYPE}.csv"
)
print("Logs path\n" + csv_log_path)
# evaluation metric
evaluation_metric = "val_dice_coef"
if len(model.outputs) > 1:
evaluation_metric = f"val_{model.output_names[0]}_dice_coef"
# Timing, TensorBoard, EarlyStopping, ModelCheckpoint, CSVLogger callbacks
timing_callback = TimingCallback()
callbacks = [
TensorBoard(log_dir=tb_log_dir, write_graph=False, profile_batch=0),
EarlyStopping(
patience=cfg.CALLBACKS.EARLY_STOPPING.PATIENCE,
verbose=cfg.VERBOSE
),
ModelCheckpoint(
checkpoint_path,
verbose=cfg.VERBOSE,
save_weights_only=cfg.CALLBACKS.MODEL_CHECKPOINT.SAVE_WEIGHTS_ONLY,
save_best_only=cfg.CALLBACKS.MODEL_CHECKPOINT.SAVE_BEST_ONLY,
monitor=evaluation_metric,
mode="max"
),
CSVLogger(
csv_log_path,
append=cfg.CALLBACKS.CSV_LOGGER.APPEND_LOGS
),
timing_callback
]
training_steps = data_generator.get_iterations(cfg, mode="TRAIN")
validation_steps = data_generator.get_iterations(cfg, mode="VAL")
# start training
model.fit(
x=train_generator,
steps_per_epoch=training_steps,
validation_data=val_generator,
validation_steps=validation_steps,
epochs=cfg.HYPER_PARAMETERS.EPOCHS,
callbacks=callbacks,
workers=cfg.DATALOADER_WORKERS,
)
training_time = timing_callback.train_end_time - timing_callback.train_start_time
training_time = timedelta(seconds=training_time)
print(f"Total training time {training_time}")
mean_time = np.mean(timing_callback.batch_time)
throughput = data_generator.get_batch_size(cfg) / mean_time
print(f"Training latency: {round(mean_time * 1e3, 2)} msec")
print(f"Training throughput/FPS: {round(throughput, 2)} samples/sec")
@hydra.main(version_base=None, config_path="configs", config_name="config")
def main(cfg: DictConfig):
"""
Read config file and pass to train method for training
"""
train(cfg)
if __name__ == "__main__":
main()
| DeepLearningExamples-master | TensorFlow2/Segmentation/Contrib/UNet3P/train.py |
"""
Evaluation script used to calculate accuracy of trained model
"""
import os
import hydra
from omegaconf import DictConfig
import tensorflow as tf
from tensorflow.keras import mixed_precision
from data_generators import data_generator
from utils.general_utils import join_paths, set_gpus, suppress_warnings
from models.model import prepare_model
from losses.loss import DiceCoefficient
from losses.unet_loss import unet3p_hybrid_loss
def evaluate(cfg: DictConfig):
"""
Evaluate or calculate accuracy of given model
"""
# suppress TensorFlow and DALI warnings
suppress_warnings()
if cfg.USE_MULTI_GPUS.VALUE:
# change number of visible gpus for evaluation
set_gpus(cfg.USE_MULTI_GPUS.GPU_IDS)
# update batch size according to available gpus
data_generator.update_batch_size(cfg)
if cfg.OPTIMIZATION.AMP:
print("Enabling Automatic Mixed Precision(AMP) training")
policy = mixed_precision.Policy('mixed_float16')
mixed_precision.set_global_policy(policy)
if cfg.OPTIMIZATION.XLA:
print("Enabling Automatic Mixed Precision(XLA) training")
tf.config.optimizer.set_jit(True)
# create model
strategy = None
if cfg.USE_MULTI_GPUS.VALUE:
# multi gpu training using tensorflow mirrored strategy
strategy = tf.distribute.MirroredStrategy(
cross_device_ops=tf.distribute.HierarchicalCopyAllReduce()
)
print('Number of visible gpu devices: {}'.format(strategy.num_replicas_in_sync))
with strategy.scope():
optimizer = tf.keras.optimizers.Adam(
learning_rate=cfg.HYPER_PARAMETERS.LEARNING_RATE
) # optimizer
if cfg.OPTIMIZATION.AMP:
optimizer = mixed_precision.LossScaleOptimizer(
optimizer,
dynamic=True
)
dice_coef = DiceCoefficient(post_processed=True, classes=cfg.OUTPUT.CLASSES)
dice_coef = tf.keras.metrics.MeanMetricWrapper(name="dice_coef", fn=dice_coef)
model = prepare_model(cfg, training=True)
else:
optimizer = tf.keras.optimizers.Adam(
learning_rate=cfg.HYPER_PARAMETERS.LEARNING_RATE
) # optimizer
if cfg.OPTIMIZATION.AMP:
optimizer = mixed_precision.LossScaleOptimizer(
optimizer,
dynamic=True
)
dice_coef = DiceCoefficient(post_processed=True, classes=cfg.OUTPUT.CLASSES)
dice_coef = tf.keras.metrics.MeanMetricWrapper(name="dice_coef", fn=dice_coef)
model = prepare_model(cfg, training=True)
model.compile(
optimizer=optimizer,
loss=unet3p_hybrid_loss,
metrics=[dice_coef],
)
# weights model path
checkpoint_path = join_paths(
cfg.WORK_DIR,
cfg.CALLBACKS.MODEL_CHECKPOINT.PATH,
f"{cfg.MODEL.WEIGHTS_FILE_NAME}.hdf5"
)
assert os.path.exists(checkpoint_path), \
f"Model weight's file does not exist at \n{checkpoint_path}"
# TODO: verify without augment it produces same results
# load model weights
model.load_weights(checkpoint_path, by_name=True, skip_mismatch=True)
model.summary()
# data generators
val_generator = data_generator.get_data_generator(cfg, "VAL", strategy)
validation_steps = data_generator.get_iterations(cfg, mode="VAL")
# evaluation metric
evaluation_metric = "dice_coef"
if len(model.outputs) > 1:
evaluation_metric = f"{model.output_names[0]}_dice_coef"
result = model.evaluate(
x=val_generator,
steps=validation_steps,
workers=cfg.DATALOADER_WORKERS,
return_dict=True,
)
# return computed loss, validation accuracy, and it's metric name
return result, evaluation_metric
@hydra.main(version_base=None, config_path="configs", config_name="config")
def main(cfg: DictConfig):
"""
Read config file and pass to evaluate method
"""
result, evaluation_metric = evaluate(cfg)
print(result)
print(f"Validation dice coefficient: {result[evaluation_metric]}")
if __name__ == "__main__":
main()
| DeepLearningExamples-master | TensorFlow2/Segmentation/Contrib/UNet3P/evaluate.py |
"""
Implementation of different loss functions
"""
import tensorflow as tf
import tensorflow.keras.backend as K
def iou(y_true, y_pred, smooth=1.e-9):
"""
Calculate intersection over union (IoU) between images.
Input shape should be Batch x Height x Width x #Classes (BxHxWxN).
Using Mean as reduction type for batch values.
"""
intersection = K.sum(K.abs(y_true * y_pred), axis=[1, 2, 3])
union = K.sum(y_true, [1, 2, 3]) + K.sum(y_pred, [1, 2, 3])
union = union - intersection
iou = K.mean((intersection + smooth) / (union + smooth), axis=0)
return iou
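# For reference, the value above is the smoothed Jaccard index averaged over the batch:
# IoU = (|y_true * y_pred| + eps) / (|y_true| + |y_pred| - |y_true * y_pred| + eps)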
def iou_loss(y_true, y_pred):
"""
Jaccard / IoU loss
"""
return 1 - iou(y_true, y_pred)
def focal_loss(y_true, y_pred):
"""
Focal loss
"""
gamma = 2.
alpha = 4.
epsilon = 1.e-9
y_true_c = tf.convert_to_tensor(y_true, tf.float32)
y_pred_c = tf.convert_to_tensor(y_pred, tf.float32)
model_out = tf.add(y_pred_c, epsilon)
ce = tf.multiply(y_true_c, -tf.math.log(model_out))
weight = tf.multiply(y_true_c, tf.pow(
tf.subtract(1., model_out), gamma)
)
fl = tf.multiply(alpha, tf.multiply(weight, ce))
reduced_fl = tf.reduce_max(fl, axis=-1)
return tf.reduce_mean(reduced_fl)
def ssim_loss(y_true, y_pred, smooth=1.e-9):
"""
Structural Similarity Index loss.
Input shape should be Batch x Height x Width x #Classes (BxHxWxN).
Using Mean as reduction type for batch values.
"""
ssim_value = tf.image.ssim(y_true, y_pred, max_val=1)
return K.mean(1 - ssim_value + smooth, axis=0)
class DiceCoefficient(tf.keras.metrics.Metric):
"""
Dice coefficient metric. Can be used to calculate dice on probabilities
or on their respective classes
"""
def __init__(self, post_processed: bool,
classes: int,
name='dice_coef',
**kwargs):
"""
Set post_processed=False if dice coefficient needs to be calculated
        on probabilities. Set post_processed=True if probabilities need to
        be first converted/mapped into their respective classes.
"""
super(DiceCoefficient, self).__init__(name=name, **kwargs)
self.dice_value = self.add_weight(name='dice_value', initializer='zeros',
aggregation=tf.VariableAggregation.MEAN) # SUM
self.post_processed = post_processed
self.classes = classes
if self.classes == 1:
self.axis = [1, 2, 3]
else:
self.axis = [1, 2, ]
def update_state(self, y_true, y_pred, sample_weight=None):
if self.post_processed:
if self.classes == 1:
y_true_ = y_true
y_pred_ = tf.where(y_pred > .5, 1.0, 0.0)
else:
y_true_ = tf.math.argmax(y_true, axis=-1, output_type=tf.int32)
y_pred_ = tf.math.argmax(y_pred, axis=-1, output_type=tf.int32)
y_true_ = tf.cast(y_true_, dtype=tf.float32)
y_pred_ = tf.cast(y_pred_, dtype=tf.float32)
else:
y_true_, y_pred_ = y_true, y_pred
self.dice_value.assign(self.dice_coef(y_true_, y_pred_))
def result(self):
return self.dice_value
def reset_state(self):
self.dice_value.assign(0.0) # reset metric state
def dice_coef(self, y_true, y_pred, smooth=1.e-9):
"""
Calculate dice coefficient.
Input shape could be either Batch x Height x Width x #Classes (BxHxWxN)
or Batch x Height x Width (BxHxW).
Using Mean as reduction type for batch values.
"""
intersection = K.sum(y_true * y_pred, axis=self.axis)
union = K.sum(y_true, axis=self.axis) + K.sum(y_pred, axis=self.axis)
return K.mean((2. * intersection + smooth) / (union + smooth), axis=0)
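# For reference, dice_coef above computes the smoothed Sorensen-Dice score averaged
# over the batch: Dice = (2 * |y_true * y_pred| + eps) / (|y_true| + |y_pred| + eps)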
| DeepLearningExamples-master | TensorFlow2/Segmentation/Contrib/UNet3P/losses/loss.py |
"""
UNet 3+ Loss
"""
from .loss import focal_loss, ssim_loss, iou_loss
def unet3p_hybrid_loss(y_true, y_pred):
"""
Hybrid loss proposed in
UNET 3+ (https://arxiv.org/ftp/arxiv/papers/2004/2004.08790.pdf)
Hybrid loss for segmentation in three-level hierarchy – pixel,
patch and map-level, which is able to capture both large-scale
and fine structures with clear boundaries.
"""
f_loss = focal_loss(y_true, y_pred)
ms_ssim_loss = ssim_loss(y_true, y_pred)
    jaccard_loss = iou_loss(y_true, y_pred)
    return f_loss + ms_ssim_loss + jaccard_loss
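# The hybrid loss above is the unweighted sum of the three terms,
# L = L_focal + L_ssim + L_iou. Note that ssim_loss uses single-scale SSIM
# (tf.image.ssim), not the multi-scale MS-SSIM suggested by the variable name.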
| DeepLearningExamples-master | TensorFlow2/Segmentation/Contrib/UNet3P/losses/unet_loss.py |
import sys
from timeit import default_timer as timer
import tensorflow as tf
class TimingCallback(tf.keras.callbacks.Callback):
"""
Custom callback to note training time, latency and throughput
"""
def __init__(self, ):
super(TimingCallback, self).__init__()
self.train_start_time = None
self.train_end_time = None
self.batch_time = []
self.batch_start_time = None
def on_train_begin(self, logs: dict):
tf.print("Training starting time noted.", output_stream=sys.stdout)
self.train_start_time = timer()
def on_train_end(self, logs: dict):
tf.print("Training ending time noted.", output_stream=sys.stdout)
self.train_end_time = timer()
def on_train_batch_begin(self, batch: int, logs: dict):
self.batch_start_time = timer()
def on_train_batch_end(self, batch: int, logs: dict):
self.batch_time.append(timer() - self.batch_start_time)
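# train.py consumes this callback roughly as follows:
#     mean_time = np.mean(timing_callback.batch_time)
#     throughput = batch_size / mean_time
# so batch_time holds one entry per training batch.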
| DeepLearningExamples-master | TensorFlow2/Segmentation/Contrib/UNet3P/callbacks/timing_callback.py |
"""
Verify that a corresponding mask exists for each image.
Checks both train and val data.
"""
import os
import sys
from omegaconf import DictConfig
from tqdm import tqdm
sys.path.append(os.path.abspath("./"))
from utils.general_utils import join_paths
from utils.images_utils import image_to_mask_name
def check_image_and_mask(cfg, mode):
"""
    Check that every image has a mask and print the names of images whose masks are not found.
"""
images_path = join_paths(
cfg.WORK_DIR,
cfg.DATASET[mode].IMAGES_PATH
)
mask_path = join_paths(
cfg.WORK_DIR,
cfg.DATASET[mode].MASK_PATH
)
all_images = os.listdir(images_path)
both_found = True
for image in tqdm(all_images):
mask_name = image_to_mask_name(image)
if not (
os.path.exists(
join_paths(images_path, image)
) and
os.path.exists(
join_paths(mask_path, mask_name)
)
):
print(f"{mask_name} did not found against {image}")
both_found = False
return both_found
def verify_data(cfg: DictConfig):
"""
    For both train and val data, check that each image has a
    corresponding mask. If any mask is missing, stop the program.
"""
assert check_image_and_mask(cfg, "TRAIN"), \
"Train images and mask should be same in length"
assert check_image_and_mask(cfg, "VAL"), \
"Validation images and mask should be same in length"
| DeepLearningExamples-master | TensorFlow2/Segmentation/Contrib/UNet3P/data_preparation/verify_data.py |
"""
Convert LiTS 2017 (Liver Tumor Segmentation) data into UNet3+ data format
LiTS: https://competitions.codalab.org/competitions/17094
"""
import os
import sys
from glob import glob
from pathlib import Path
from tqdm import tqdm
import numpy as np
import multiprocessing as mp
import cv2
import nibabel as nib
import hydra
from omegaconf import DictConfig
sys.path.append(os.path.abspath("./"))
from utils.general_utils import create_directory, join_paths
from utils.images_utils import resize_image
def read_nii(filepath):
"""
Reads .nii file and returns pixel array
"""
ct_scan = nib.load(filepath).get_fdata()
# TODO: Verify images orientation
# in both train and test set, especially on train scan 130
ct_scan = np.rot90(np.array(ct_scan))
return ct_scan
def crop_center(img, croph, cropw):
"""
Center crop on given height and width
"""
height, width = img.shape[:2]
starth = height // 2 - (croph // 2)
startw = width // 2 - (cropw // 2)
return img[starth:starth + croph, startw:startw + cropw, :]
def linear_scale(img):
"""
    First convert the image to the 0-1 range and then scale to 255
"""
img = (img - img.min(axis=(0, 1))) / (img.max(axis=(0, 1)) - img.min(axis=(0, 1)))
return img * 255
def clip_scan(img, min_value, max_value):
"""
Clip scan to given range
"""
return np.clip(img, min_value, max_value)
def resize_scan(scan, new_height, new_width, scan_type):
"""
Resize CT scan to given size
"""
scan_shape = scan.shape
resized_scan = np.zeros((new_height, new_width, scan_shape[2]), dtype=scan.dtype)
resize_method = cv2.INTER_CUBIC if scan_type == "image" else cv2.INTER_NEAREST
for start in range(0, scan_shape[2], scan_shape[1]):
end = start + scan_shape[1]
if end >= scan_shape[2]: end = scan_shape[2]
resized_scan[:, :, start:end] = resize_image(
scan[:, :, start:end],
new_height, new_width,
resize_method
)
return resized_scan
def save_images(scan, save_path, img_index):
"""
Based on UNet3+ requirement "input image had three channels, including
the slice to be segmented and the upper and lower slices, which was
cropped to 320×320" save each scan as separate image with previous and
next scan concatenated.
"""
scan_shape = scan.shape
for index in range(scan_shape[-1]):
before_index = index - 1 if (index - 1) > 0 else 0
after_index = index + 1 if (index + 1) < scan_shape[-1] else scan_shape[-1] - 1
new_img_path = join_paths(save_path, f"image_{img_index}_{index}.png")
new_image = np.stack(
(
scan[:, :, before_index],
scan[:, :, index],
scan[:, :, after_index]
)
, axis=-1)
new_image = cv2.cvtColor(new_image, cv2.COLOR_RGB2BGR) # RGB to BGR
cv2.imwrite(new_img_path, new_image) # save the images as .png
def save_mask(scan, save_path, mask_index):
"""
Save each scan as separate mask
"""
for index in range(scan.shape[-1]):
new_mask_path = join_paths(save_path, f"mask_{mask_index}_{index}.png")
cv2.imwrite(new_mask_path, scan[:, :, index]) # save grey scale image
def extract_image(cfg, image_path, save_path, scan_type="image", ):
"""
Extract image from given scan path
"""
_, index = str(Path(image_path).stem).split("-")
scan = read_nii(image_path)
scan = resize_scan(
scan,
cfg.DATA_PREPARATION.RESIZED_HEIGHT,
cfg.DATA_PREPARATION.RESIZED_WIDTH,
scan_type
)
if scan_type == "image":
scan = clip_scan(
scan,
cfg.DATA_PREPARATION.SCAN_MIN_VALUE,
cfg.DATA_PREPARATION.SCAN_MAX_VALUE
)
scan = linear_scale(scan)
scan = np.uint8(scan)
save_images(scan, save_path, index)
else:
# 0 for background/non-lesion, 1 for liver, 2 for lesion/tumor
# merging label 2 into label 1, because lesion/tumor is part of liver
scan = np.where(scan != 0, 1, scan)
# scan = np.where(scan==2, 1, scan)
scan = np.uint8(scan)
save_mask(scan, save_path, index)
def extract_images(cfg, images_path, save_path, scan_type="image", ):
"""
    Extract image paths and pass them to the extract_image function
    for further processing using multiprocessing.
"""
# create pool
process_count = np.clip(mp.cpu_count() - 2, 1, 20) # less than 20 workers
pool = mp.Pool(process_count)
for image_path in tqdm(images_path):
pool.apply_async(extract_image,
args=(cfg, image_path, save_path, scan_type),
)
# close pool
pool.close()
pool.join()
@hydra.main(version_base=None, config_path="../configs", config_name="config")
def preprocess_lits_data(cfg: DictConfig):
"""
    Preprocess LiTS 2017 (Liver Tumor Segmentation) data by extracting
    images and masks into the UNet3+ data format
"""
train_images_names = glob(
join_paths(
cfg.WORK_DIR,
cfg.DATA_PREPARATION.SCANS_TRAIN_DATA_PATH,
"volume-*.nii"
)
)
train_mask_names = glob(
join_paths(
cfg.WORK_DIR,
cfg.DATA_PREPARATION.SCANS_TRAIN_DATA_PATH,
"segmentation-*.nii"
)
)
assert len(train_images_names) == len(train_mask_names), \
"Train volumes and segmentations are not same in length"
val_images_names = glob(
join_paths(
cfg.WORK_DIR,
cfg.DATA_PREPARATION.SCANS_VAL_DATA_PATH,
"volume-*.nii"
)
)
val_mask_names = glob(
join_paths(
cfg.WORK_DIR,
cfg.DATA_PREPARATION.SCANS_VAL_DATA_PATH,
"segmentation-*.nii"
)
)
assert len(val_images_names) == len(val_mask_names), \
"Validation volumes and segmentations are not same in length"
train_images_names = sorted(train_images_names)
train_mask_names = sorted(train_mask_names)
val_images_names = sorted(val_images_names)
val_mask_names = sorted(val_mask_names)
train_images_path = join_paths(
cfg.WORK_DIR, cfg.DATASET.TRAIN.IMAGES_PATH
)
train_mask_path = join_paths(
cfg.WORK_DIR, cfg.DATASET.TRAIN.MASK_PATH
)
val_images_path = join_paths(
cfg.WORK_DIR, cfg.DATASET.VAL.IMAGES_PATH
)
val_mask_path = join_paths(
cfg.WORK_DIR, cfg.DATASET.VAL.MASK_PATH
)
create_directory(train_images_path)
create_directory(train_mask_path)
create_directory(val_images_path)
create_directory(val_mask_path)
print("\nExtracting train images")
extract_images(
cfg, train_images_names, train_images_path, scan_type="image"
)
print("\nExtracting train mask")
extract_images(
cfg, train_mask_names, train_mask_path, scan_type="mask"
)
print("\nExtracting val images")
extract_images(
cfg, val_images_names, val_images_path, scan_type="image"
)
print("\nExtracting val mask")
extract_images(
cfg, val_mask_names, val_mask_path, scan_type="mask"
)
if __name__ == '__main__':
preprocess_lits_data()
| DeepLearningExamples-master | TensorFlow2/Segmentation/Contrib/UNet3P/data_preparation/preprocess_data.py |
"""
General Utility functions
"""
import os
import tensorflow as tf
from omegaconf import DictConfig
from .images_utils import image_to_mask_name
def create_directory(path):
"""
    Create directory if it does not already exist.
"""
if not os.path.exists(path):
os.makedirs(path)
def join_paths(*paths):
"""
Concatenate multiple paths.
"""
return os.path.normpath(os.path.sep.join(path.rstrip(r"\/") for path in paths))
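# e.g. join_paths("/data/", "train", "images") -> "/data/train/images" on POSIX;
# trailing separators are stripped before joining and the result is normalized.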
def set_gpus(gpu_ids):
"""
    Change the number of visible GPUs for TensorFlow.
    gpu_ids: either an integer or a list of integers.
    If an integer: -1 means use all available GPUs,
    otherwise use the given number of GPUs.
    If a list of integers: each integer is treated as a GPU id.
"""
all_gpus = tf.config.experimental.list_physical_devices('GPU')
all_gpus_length = len(all_gpus)
if isinstance(gpu_ids, int):
if gpu_ids == -1:
gpu_ids = range(all_gpus_length)
else:
gpu_ids = min(gpu_ids, all_gpus_length)
gpu_ids = range(gpu_ids)
selected_gpus = [all_gpus[gpu_id] for gpu_id in gpu_ids if gpu_id < all_gpus_length]
try:
tf.config.experimental.set_visible_devices(selected_gpus, 'GPU')
except RuntimeError as e:
# Visible devices must be set at program startup
print(e)
def get_gpus_count():
"""
Return length of available gpus.
"""
return len(tf.config.experimental.list_logical_devices('GPU'))
def get_data_paths(cfg: DictConfig, mode: str, mask_available: bool):
"""
Return list of absolute images/mask paths.
There are two options you can either pass directory path or list.
In case of directory, it should contain relative path of images/mask
folder from project root path.
In case of list of images, every element should contain absolute path
for each image and mask.
    For prediction, you can set the mask path to None if masks are not
    available for visualization.
"""
# read images from directory
if isinstance(cfg.DATASET[mode].IMAGES_PATH, str):
# has only images name not full path
images_paths = os.listdir(
join_paths(
cfg.WORK_DIR,
cfg.DATASET[mode].IMAGES_PATH
)
)
if mask_available:
mask_paths = [
image_to_mask_name(image_name) for image_name in images_paths
]
# create full mask paths from folder
mask_paths = [
join_paths(
cfg.WORK_DIR,
cfg.DATASET[mode].MASK_PATH,
mask_name
) for mask_name in mask_paths
]
# create full images paths from folder
images_paths = [
join_paths(
cfg.WORK_DIR,
cfg.DATASET[mode].IMAGES_PATH,
image_name
) for image_name in images_paths
]
else:
# read images and mask from absolute paths given in list
images_paths = list(cfg.DATASET[mode].IMAGES_PATH)
if mask_available:
mask_paths = list(cfg.DATASET[mode].MASK_PATH)
if mask_available:
return images_paths, mask_paths
else:
return images_paths,
def suppress_warnings():
"""
Suppress TensorFlow warnings.
"""
import logging
logging.getLogger('tensorflow').setLevel(logging.ERROR)
logging.getLogger('dali').setLevel(logging.ERROR)
os.environ["KMP_AFFINITY"] = "noverbose"
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import tensorflow as tf
tf.autograph.set_verbosity(3)
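# Example (hypothetical): limit TensorFlow to the first two GPUs. As noted in
# set_gpus, visible devices must be configured before TensorFlow initializes them.
# set_gpus([0, 1])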
| DeepLearningExamples-master | TensorFlow2/Segmentation/Contrib/UNet3P/utils/general_utils.py |
"""
Utility functions for image processing
"""
import numpy as np
import cv2
from omegaconf import DictConfig
import matplotlib.pyplot as plt
def read_image(img_path, color_mode):
"""
Read and return image as np array from given path.
In case of color image, it returns image in BGR mode.
"""
return cv2.imread(img_path, color_mode)
def resize_image(img, height, width, resize_method=cv2.INTER_CUBIC):
"""
Resize image
"""
return cv2.resize(img, dsize=(width, height), interpolation=resize_method)
def prepare_image(path: str, resize: DictConfig, normalize_type: str):
"""
Prepare image for model.
read image --> resize --> normalize --> return as float32
"""
image = read_image(path, cv2.IMREAD_COLOR)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
if resize.VALUE:
# TODO verify image resizing method
image = resize_image(image, resize.HEIGHT, resize.WIDTH, cv2.INTER_AREA)
if normalize_type == "normalize":
image = image / 255.0
image = image.astype(np.float32)
return image
def prepare_mask(path: str, resize: dict, normalize_mask: dict):
"""
Prepare mask for model.
read mask --> resize --> normalize --> return as int32
"""
mask = read_image(path, cv2.IMREAD_GRAYSCALE)
if resize.VALUE:
mask = resize_image(mask, resize.HEIGHT, resize.WIDTH, cv2.INTER_NEAREST)
if normalize_mask.VALUE:
mask = mask / normalize_mask.NORMALIZE_VALUE
mask = mask.astype(np.int32)
return mask
def image_to_mask_name(image_name: str):
"""
    Convert image file name to its corresponding mask file name e.g.
image name --> mask name
image_28_0.png mask_28_0.png
replace image with mask
"""
return image_name.replace('image', 'mask')
def postprocess_mask(mask, classes, output_type=np.int32):
"""
Post process model output.
    Convert probabilities into class indexes based on the maximum value.
"""
if classes == 1:
mask = np.where(mask > .5, 1.0, 0.0)
else:
mask = np.argmax(mask, axis=-1)
return mask.astype(output_type)
def denormalize_mask(mask, classes):
"""
Denormalize mask by multiplying each class with higher
integer (255 / classes) for better visualization.
"""
mask = mask * (255 / classes)
return mask.astype(np.int32)
def display(display_list, show_true_mask=False):
"""
Show list of images. it could be
either [image, true_mask, predicted_mask] or [image, predicted_mask].
Set show_true_mask to True if true mask is available or vice versa
"""
if show_true_mask:
title_list = ('Input Image', 'True Mask', 'Predicted Mask')
plt.figure(figsize=(12, 4))
else:
title_list = ('Input Image', 'Predicted Mask')
plt.figure(figsize=(8, 4))
for i in range(len(display_list)):
plt.subplot(1, len(display_list), i + 1)
if title_list is not None:
plt.title(title_list[i])
if len(np.squeeze(display_list[i]).shape) == 2:
plt.imshow(np.squeeze(display_list[i]), cmap='gray')
plt.axis('on')
else:
plt.imshow(np.squeeze(display_list[i]))
plt.axis('on')
plt.show()
| DeepLearningExamples-master | TensorFlow2/Segmentation/Contrib/UNet3P/utils/images_utils.py |
"""
UNet3+ base model
"""
import tensorflow as tf
import tensorflow.keras as k
from .unet3plus_utils import conv_block
def unet3plus(encoder_layer, output_channels, filters):
""" UNet3+ base model """
""" Encoder """
e1 = encoder_layer[0]
e2 = encoder_layer[1]
e3 = encoder_layer[2]
e4 = encoder_layer[3]
e5 = encoder_layer[4]
""" Decoder """
cat_channels = filters[0]
cat_blocks = len(filters)
upsample_channels = cat_blocks * cat_channels
""" d4 """
e1_d4 = k.layers.MaxPool2D(pool_size=(8, 8))(e1) # 320*320*64 --> 40*40*64
e1_d4 = conv_block(e1_d4, cat_channels, n=1) # 320*320*64 --> 40*40*64
e2_d4 = k.layers.MaxPool2D(pool_size=(4, 4))(e2) # 160*160*128 --> 40*40*128
e2_d4 = conv_block(e2_d4, cat_channels, n=1) # 160*160*128 --> 40*40*64
e3_d4 = k.layers.MaxPool2D(pool_size=(2, 2))(e3) # 80*80*256 --> 40*40*256
e3_d4 = conv_block(e3_d4, cat_channels, n=1) # 80*80*256 --> 40*40*64
e4_d4 = conv_block(e4, cat_channels, n=1) # 40*40*512 --> 40*40*64
e5_d4 = k.layers.UpSampling2D(size=(2, 2), interpolation='bilinear')(e5) # 80*80*256 --> 40*40*256
e5_d4 = conv_block(e5_d4, cat_channels, n=1) # 20*20*1024 --> 20*20*64
d4 = k.layers.concatenate([e1_d4, e2_d4, e3_d4, e4_d4, e5_d4])
d4 = conv_block(d4, upsample_channels, n=1) # 40*40*320 --> 40*40*320
""" d3 """
e1_d3 = k.layers.MaxPool2D(pool_size=(4, 4))(e1) # 320*320*64 --> 80*80*64
e1_d3 = conv_block(e1_d3, cat_channels, n=1) # 80*80*64 --> 80*80*64
e2_d3 = k.layers.MaxPool2D(pool_size=(2, 2))(e2) # 160*160*256 --> 80*80*256
e2_d3 = conv_block(e2_d3, cat_channels, n=1) # 80*80*256 --> 80*80*64
e3_d3 = conv_block(e3, cat_channels, n=1) # 80*80*512 --> 80*80*64
e4_d3 = k.layers.UpSampling2D(size=(2, 2), interpolation='bilinear')(d4) # 40*40*320 --> 80*80*320
e4_d3 = conv_block(e4_d3, cat_channels, n=1) # 80*80*320 --> 80*80*64
e5_d3 = k.layers.UpSampling2D(size=(4, 4), interpolation='bilinear')(e5) # 20*20*320 --> 80*80*320
e5_d3 = conv_block(e5_d3, cat_channels, n=1) # 80*80*320 --> 80*80*64
d3 = k.layers.concatenate([e1_d3, e2_d3, e3_d3, e4_d3, e5_d3])
d3 = conv_block(d3, upsample_channels, n=1) # 80*80*320 --> 80*80*320
""" d2 """
e1_d2 = k.layers.MaxPool2D(pool_size=(2, 2))(e1) # 320*320*64 --> 160*160*64
e1_d2 = conv_block(e1_d2, cat_channels, n=1) # 160*160*64 --> 160*160*64
e2_d2 = conv_block(e2, cat_channels, n=1) # 160*160*256 --> 160*160*64
d3_d2 = k.layers.UpSampling2D(size=(2, 2), interpolation='bilinear')(d3) # 80*80*320 --> 160*160*320
d3_d2 = conv_block(d3_d2, cat_channels, n=1) # 160*160*320 --> 160*160*64
d4_d2 = k.layers.UpSampling2D(size=(4, 4), interpolation='bilinear')(d4) # 40*40*320 --> 160*160*320
d4_d2 = conv_block(d4_d2, cat_channels, n=1) # 160*160*320 --> 160*160*64
e5_d2 = k.layers.UpSampling2D(size=(8, 8), interpolation='bilinear')(e5) # 20*20*320 --> 160*160*320
e5_d2 = conv_block(e5_d2, cat_channels, n=1) # 160*160*320 --> 160*160*64
d2 = k.layers.concatenate([e1_d2, e2_d2, d3_d2, d4_d2, e5_d2])
d2 = conv_block(d2, upsample_channels, n=1) # 160*160*320 --> 160*160*320
""" d1 """
e1_d1 = conv_block(e1, cat_channels, n=1) # 320*320*64 --> 320*320*64
d2_d1 = k.layers.UpSampling2D(size=(2, 2), interpolation='bilinear')(d2) # 160*160*320 --> 320*320*320
d2_d1 = conv_block(d2_d1, cat_channels, n=1) # 160*160*320 --> 160*160*64
d3_d1 = k.layers.UpSampling2D(size=(4, 4), interpolation='bilinear')(d3) # 80*80*320 --> 320*320*320
d3_d1 = conv_block(d3_d1, cat_channels, n=1) # 320*320*320 --> 320*320*64
d4_d1 = k.layers.UpSampling2D(size=(8, 8), interpolation='bilinear')(d4) # 40*40*320 --> 320*320*320
d4_d1 = conv_block(d4_d1, cat_channels, n=1) # 320*320*320 --> 320*320*64
e5_d1 = k.layers.UpSampling2D(size=(16, 16), interpolation='bilinear')(e5) # 20*20*320 --> 320*320*320
e5_d1 = conv_block(e5_d1, cat_channels, n=1) # 320*320*320 --> 320*320*64
d1 = k.layers.concatenate([e1_d1, d2_d1, d3_d1, d4_d1, e5_d1, ])
d1 = conv_block(d1, upsample_channels, n=1) # 320*320*320 --> 320*320*320
# last layer does not have batchnorm and relu
d = conv_block(d1, output_channels, n=1, is_bn=False, is_relu=False)
if output_channels == 1:
output = k.layers.Activation('sigmoid', dtype='float32')(d)
else:
output = k.layers.Activation('softmax', dtype='float32')(d)
return output, 'UNet_3Plus'
| DeepLearningExamples-master | TensorFlow2/Segmentation/Contrib/UNet3P/models/unet3plus.py |
"""
UNet_3Plus with Deep Supervision and Classification Guided Module
"""
import tensorflow as tf
import tensorflow.keras as k
from .unet3plus_utils import conv_block, dot_product
def unet3plus_deepsup_cgm(encoder_layer, output_channels, filters, training=False):
""" UNet_3Plus with Deep Supervision and Classification Guided Module """
""" Encoder """
e1 = encoder_layer[0]
e2 = encoder_layer[1]
e3 = encoder_layer[2]
e4 = encoder_layer[3]
e5 = encoder_layer[4]
""" Classification Guided Module. Part 1"""
cls = k.layers.Dropout(rate=0.5)(e5)
cls = k.layers.Conv2D(2, kernel_size=(1, 1), padding="same", strides=(1, 1))(cls)
cls = k.layers.GlobalMaxPooling2D()(cls)
cls = k.layers.Activation('sigmoid', dtype='float32')(cls)
cls = tf.argmax(cls, axis=-1)
cls = cls[..., tf.newaxis]
cls = tf.cast(cls, dtype=tf.float32, )
""" Decoder """
cat_channels = filters[0]
cat_blocks = len(filters)
upsample_channels = cat_blocks * cat_channels
""" d4 """
e1_d4 = k.layers.MaxPool2D(pool_size=(8, 8))(e1) # 320*320*64 --> 40*40*64
e1_d4 = conv_block(e1_d4, cat_channels, n=1) # 320*320*64 --> 40*40*64
e2_d4 = k.layers.MaxPool2D(pool_size=(4, 4))(e2) # 160*160*128 --> 40*40*128
e2_d4 = conv_block(e2_d4, cat_channels, n=1) # 160*160*128 --> 40*40*64
e3_d4 = k.layers.MaxPool2D(pool_size=(2, 2))(e3) # 80*80*256 --> 40*40*256
e3_d4 = conv_block(e3_d4, cat_channels, n=1) # 80*80*256 --> 40*40*64
e4_d4 = conv_block(e4, cat_channels, n=1) # 40*40*512 --> 40*40*64
e5_d4 = k.layers.UpSampling2D(size=(2, 2), interpolation='bilinear')(e5) # 80*80*256 --> 40*40*256
e5_d4 = conv_block(e5_d4, cat_channels, n=1) # 20*20*1024 --> 20*20*64
d4 = k.layers.concatenate([e1_d4, e2_d4, e3_d4, e4_d4, e5_d4])
d4 = conv_block(d4, upsample_channels, n=1) # 40*40*320 --> 40*40*320
""" d3 """
e1_d3 = k.layers.MaxPool2D(pool_size=(4, 4))(e1) # 320*320*64 --> 80*80*64
e1_d3 = conv_block(e1_d3, cat_channels, n=1) # 80*80*64 --> 80*80*64
e2_d3 = k.layers.MaxPool2D(pool_size=(2, 2))(e2) # 160*160*256 --> 80*80*256
e2_d3 = conv_block(e2_d3, cat_channels, n=1) # 80*80*256 --> 80*80*64
e3_d3 = conv_block(e3, cat_channels, n=1) # 80*80*512 --> 80*80*64
e4_d3 = k.layers.UpSampling2D(size=(2, 2), interpolation='bilinear')(d4) # 40*40*320 --> 80*80*320
e4_d3 = conv_block(e4_d3, cat_channels, n=1) # 80*80*320 --> 80*80*64
e5_d3 = k.layers.UpSampling2D(size=(4, 4), interpolation='bilinear')(e5) # 20*20*320 --> 80*80*320
e5_d3 = conv_block(e5_d3, cat_channels, n=1) # 80*80*320 --> 80*80*64
d3 = k.layers.concatenate([e1_d3, e2_d3, e3_d3, e4_d3, e5_d3])
d3 = conv_block(d3, upsample_channels, n=1) # 80*80*320 --> 80*80*320
""" d2 """
e1_d2 = k.layers.MaxPool2D(pool_size=(2, 2))(e1) # 320*320*64 --> 160*160*64
e1_d2 = conv_block(e1_d2, cat_channels, n=1) # 160*160*64 --> 160*160*64
e2_d2 = conv_block(e2, cat_channels, n=1) # 160*160*256 --> 160*160*64
d3_d2 = k.layers.UpSampling2D(size=(2, 2), interpolation='bilinear')(d3) # 80*80*320 --> 160*160*320
d3_d2 = conv_block(d3_d2, cat_channels, n=1) # 160*160*320 --> 160*160*64
d4_d2 = k.layers.UpSampling2D(size=(4, 4), interpolation='bilinear')(d4) # 40*40*320 --> 160*160*320
d4_d2 = conv_block(d4_d2, cat_channels, n=1) # 160*160*320 --> 160*160*64
e5_d2 = k.layers.UpSampling2D(size=(8, 8), interpolation='bilinear')(e5) # 20*20*320 --> 160*160*320
e5_d2 = conv_block(e5_d2, cat_channels, n=1) # 160*160*320 --> 160*160*64
d2 = k.layers.concatenate([e1_d2, e2_d2, d3_d2, d4_d2, e5_d2])
d2 = conv_block(d2, upsample_channels, n=1) # 160*160*320 --> 160*160*320
""" d1 """
e1_d1 = conv_block(e1, cat_channels, n=1) # 320*320*64 --> 320*320*64
d2_d1 = k.layers.UpSampling2D(size=(2, 2), interpolation='bilinear')(d2) # 160*160*320 --> 320*320*320
d2_d1 = conv_block(d2_d1, cat_channels, n=1) # 160*160*320 --> 160*160*64
d3_d1 = k.layers.UpSampling2D(size=(4, 4), interpolation='bilinear')(d3) # 80*80*320 --> 320*320*320
d3_d1 = conv_block(d3_d1, cat_channels, n=1) # 320*320*320 --> 320*320*64
d4_d1 = k.layers.UpSampling2D(size=(8, 8), interpolation='bilinear')(d4) # 40*40*320 --> 320*320*320
d4_d1 = conv_block(d4_d1, cat_channels, n=1) # 320*320*320 --> 320*320*64
e5_d1 = k.layers.UpSampling2D(size=(16, 16), interpolation='bilinear')(e5) # 20*20*320 --> 320*320*320
e5_d1 = conv_block(e5_d1, cat_channels, n=1) # 320*320*320 --> 320*320*64
d1 = k.layers.concatenate([e1_d1, d2_d1, d3_d1, d4_d1, e5_d1, ])
d1 = conv_block(d1, upsample_channels, n=1) # 320*320*320 --> 320*320*320
""" Deep Supervision Part"""
# last layer does not have batch norm and relu
d1 = conv_block(d1, output_channels, n=1, is_bn=False, is_relu=False)
if training:
d2 = conv_block(d2, output_channels, n=1, is_bn=False, is_relu=False)
d3 = conv_block(d3, output_channels, n=1, is_bn=False, is_relu=False)
d4 = conv_block(d4, output_channels, n=1, is_bn=False, is_relu=False)
e5 = conv_block(e5, output_channels, n=1, is_bn=False, is_relu=False)
# d1 = no need for up sampling
d2 = k.layers.UpSampling2D(size=(2, 2), interpolation='bilinear')(d2)
d3 = k.layers.UpSampling2D(size=(4, 4), interpolation='bilinear')(d3)
d4 = k.layers.UpSampling2D(size=(8, 8), interpolation='bilinear')(d4)
e5 = k.layers.UpSampling2D(size=(16, 16), interpolation='bilinear')(e5)
""" Classification Guided Module. Part 2"""
d1 = dot_product(d1, cls)
d1 = k.layers.Activation('sigmoid', dtype='float32')(d1)
if training:
d2 = dot_product(d2, cls)
d3 = dot_product(d3, cls)
d4 = dot_product(d4, cls)
e5 = dot_product(e5, cls)
d2 = k.layers.Activation('sigmoid', dtype='float32')(d2)
d3 = k.layers.Activation('sigmoid', dtype='float32')(d3)
d4 = k.layers.Activation('sigmoid', dtype='float32')(d4)
e5 = k.layers.Activation('sigmoid', dtype='float32')(e5)
if training:
return [d1, d2, d3, d4, e5, cls], 'UNet3Plus_DeepSup_CGM'
else:
return [d1, ], 'UNet3Plus_DeepSup_CGM'
| DeepLearningExamples-master | TensorFlow2/Segmentation/Contrib/UNet3P/models/unet3plus_deep_supervision_cgm.py |
"""
Unet3+ backbones
"""
import tensorflow as tf
import tensorflow.keras as k
from .unet3plus_utils import conv_block
def vgg16_backbone(input_layer, ):
""" VGG-16 backbone as encoder for UNet3P """
base_model = tf.keras.applications.VGG16(
input_tensor=input_layer,
weights=None,
include_top=False
)
# block 1
e1 = base_model.get_layer("block1_conv2").output # 320, 320, 64
# block 2
e2 = base_model.get_layer("block2_conv2").output # 160, 160, 128
# block 3
e3 = base_model.get_layer("block3_conv3").output # 80, 80, 256
# block 4
e4 = base_model.get_layer("block4_conv3").output # 40, 40, 512
# block 5
e5 = base_model.get_layer("block5_conv3").output # 20, 20, 512
return [e1, e2, e3, e4, e5]
def vgg19_backbone(input_layer, ):
""" VGG-19 backbone as encoder for UNet3P """
base_model = tf.keras.applications.VGG19(
input_tensor=input_layer,
weights=None,
include_top=False
)
# block 1
e1 = base_model.get_layer("block1_conv2").output # 320, 320, 64
# block 2
e2 = base_model.get_layer("block2_conv2").output # 160, 160, 128
# block 3
e3 = base_model.get_layer("block3_conv4").output # 80, 80, 256
# block 4
e4 = base_model.get_layer("block4_conv4").output # 40, 40, 512
# block 5
e5 = base_model.get_layer("block5_conv4").output # 20, 20, 512
return [e1, e2, e3, e4, e5]
def unet3plus_backbone(input_layer, filters):
""" UNet3+ own backbone """
""" Encoder"""
# block 1
e1 = conv_block(input_layer, filters[0]) # 320*320*64
# block 2
e2 = k.layers.MaxPool2D(pool_size=(2, 2))(e1) # 160*160*64
e2 = conv_block(e2, filters[1]) # 160*160*128
# block 3
e3 = k.layers.MaxPool2D(pool_size=(2, 2))(e2) # 80*80*128
e3 = conv_block(e3, filters[2]) # 80*80*256
# block 4
e4 = k.layers.MaxPool2D(pool_size=(2, 2))(e3) # 40*40*256
e4 = conv_block(e4, filters[3]) # 40*40*512
# block 5, bottleneck layer
e5 = k.layers.MaxPool2D(pool_size=(2, 2))(e4) # 20*20*512
e5 = conv_block(e5, filters[4]) # 20*20*1024
return [e1, e2, e3, e4, e5]
| DeepLearningExamples-master | TensorFlow2/Segmentation/Contrib/UNet3P/models/backbones.py |
"""
Returns Unet3+ model
"""
import tensorflow as tf
from omegaconf import DictConfig
from .backbones import vgg16_backbone, vgg19_backbone, unet3plus_backbone
from .unet3plus import unet3plus
from .unet3plus_deep_supervision import unet3plus_deepsup
from .unet3plus_deep_supervision_cgm import unet3plus_deepsup_cgm
def prepare_model(cfg: DictConfig, training=False):
"""
Creates and return model object based on given model type.
"""
input_shape = [cfg.INPUT.HEIGHT, cfg.INPUT.WIDTH, cfg.INPUT.CHANNELS]
input_layer = tf.keras.layers.Input(
shape=input_shape,
name="input_layer"
) # 320*320*3
filters = [64, 128, 256, 512, 1024]
# create backbone
if cfg.MODEL.BACKBONE.TYPE == "unet3plus":
backbone_layers = unet3plus_backbone(
input_layer,
filters
)
elif cfg.MODEL.BACKBONE.TYPE == "vgg16":
backbone_layers = vgg16_backbone(input_layer, )
elif cfg.MODEL.BACKBONE.TYPE == "vgg19":
backbone_layers = vgg19_backbone(input_layer, )
else:
raise ValueError(
"Wrong backbone type passed."
"\nPlease check config file for possible options."
)
print(f"Using {cfg.MODEL.BACKBONE.TYPE} as a backbone.")
if cfg.MODEL.TYPE == "unet3plus":
# training parameter does not matter in this case
outputs, model_name = unet3plus(
backbone_layers,
cfg.OUTPUT.CLASSES,
filters
)
elif cfg.MODEL.TYPE == "unet3plus_deepsup":
outputs, model_name = unet3plus_deepsup(
backbone_layers,
cfg.OUTPUT.CLASSES,
filters,
training
)
elif cfg.MODEL.TYPE == "unet3plus_deepsup_cgm":
if cfg.OUTPUT.CLASSES != 1:
raise ValueError(
"UNet3+ with Deep Supervision and Classification Guided Module"
"\nOnly works when model output classes are equal to 1"
)
outputs, model_name = unet3plus_deepsup_cgm(
backbone_layers,
cfg.OUTPUT.CLASSES,
filters,
training
)
else:
raise ValueError(
"Wrong model type passed."
"\nPlease check config file for possible options."
)
return tf.keras.Model(
inputs=input_layer,
outputs=outputs,
name=model_name
)
if __name__ == "__main__":
"""## Test model Compilation,"""
from omegaconf import OmegaConf
cfg = {
"WORK_DIR": "H:\\Projects\\UNet3P",
"INPUT": {"HEIGHT": 320, "WIDTH": 320, "CHANNELS": 3},
"OUTPUT": {"CLASSES": 1},
# available variants are unet3plus, unet3plus_deepsup, unet3plus_deepsup_cgm
"MODEL": {"TYPE": "unet3plus",
# available variants are unet3plus, vgg16, vgg19
"BACKBONE": {"TYPE": "vgg19", }
}
}
unet_3P = prepare_model(OmegaConf.create(cfg), True)
unet_3P.summary()
# tf.keras.utils.plot_model(unet_3P, show_layer_names=True, show_shapes=True)
# unet_3P.save("unet_3P.hdf5")
| DeepLearningExamples-master | TensorFlow2/Segmentation/Contrib/UNet3P/models/model.py |
"""
UNet3+ with Deep Supervision
"""
import tensorflow as tf
import tensorflow.keras as k
from .unet3plus_utils import conv_block
def unet3plus_deepsup(encoder_layer, output_channels, filters, training=False):
""" UNet_3Plus with Deep Supervision """
""" Encoder """
e1 = encoder_layer[0]
e2 = encoder_layer[1]
e3 = encoder_layer[2]
e4 = encoder_layer[3]
e5 = encoder_layer[4]
""" Decoder """
cat_channels = filters[0]
cat_blocks = len(filters)
upsample_channels = cat_blocks * cat_channels
""" d4 """
e1_d4 = k.layers.MaxPool2D(pool_size=(8, 8))(e1) # 320*320*64 --> 40*40*64
e1_d4 = conv_block(e1_d4, cat_channels, n=1) # 320*320*64 --> 40*40*64
e2_d4 = k.layers.MaxPool2D(pool_size=(4, 4))(e2) # 160*160*128 --> 40*40*128
e2_d4 = conv_block(e2_d4, cat_channels, n=1) # 160*160*128 --> 40*40*64
e3_d4 = k.layers.MaxPool2D(pool_size=(2, 2))(e3) # 80*80*256 --> 40*40*256
e3_d4 = conv_block(e3_d4, cat_channels, n=1) # 80*80*256 --> 40*40*64
e4_d4 = conv_block(e4, cat_channels, n=1) # 40*40*512 --> 40*40*64
    e5_d4 = k.layers.UpSampling2D(size=(2, 2), interpolation='bilinear')(e5)  # 20*20*1024 --> 40*40*1024
    e5_d4 = conv_block(e5_d4, cat_channels, n=1)  # 40*40*1024 --> 40*40*64
d4 = k.layers.concatenate([e1_d4, e2_d4, e3_d4, e4_d4, e5_d4])
d4 = conv_block(d4, upsample_channels, n=1) # 40*40*320 --> 40*40*320
""" d3 """
e1_d3 = k.layers.MaxPool2D(pool_size=(4, 4))(e1) # 320*320*64 --> 80*80*64
e1_d3 = conv_block(e1_d3, cat_channels, n=1) # 80*80*64 --> 80*80*64
    e2_d3 = k.layers.MaxPool2D(pool_size=(2, 2))(e2)  # 160*160*128 --> 80*80*128
e2_d3 = conv_block(e2_d3, cat_channels, n=1) # 80*80*256 --> 80*80*64
    e3_d3 = conv_block(e3, cat_channels, n=1)  # 80*80*256 --> 80*80*64
e4_d3 = k.layers.UpSampling2D(size=(2, 2), interpolation='bilinear')(d4) # 40*40*320 --> 80*80*320
e4_d3 = conv_block(e4_d3, cat_channels, n=1) # 80*80*320 --> 80*80*64
    e5_d3 = k.layers.UpSampling2D(size=(4, 4), interpolation='bilinear')(e5)  # 20*20*1024 --> 80*80*1024
    e5_d3 = conv_block(e5_d3, cat_channels, n=1)  # 80*80*1024 --> 80*80*64
d3 = k.layers.concatenate([e1_d3, e2_d3, e3_d3, e4_d3, e5_d3])
d3 = conv_block(d3, upsample_channels, n=1) # 80*80*320 --> 80*80*320
""" d2 """
e1_d2 = k.layers.MaxPool2D(pool_size=(2, 2))(e1) # 320*320*64 --> 160*160*64
e1_d2 = conv_block(e1_d2, cat_channels, n=1) # 160*160*64 --> 160*160*64
    e2_d2 = conv_block(e2, cat_channels, n=1)  # 160*160*128 --> 160*160*64
d3_d2 = k.layers.UpSampling2D(size=(2, 2), interpolation='bilinear')(d3) # 80*80*320 --> 160*160*320
d3_d2 = conv_block(d3_d2, cat_channels, n=1) # 160*160*320 --> 160*160*64
d4_d2 = k.layers.UpSampling2D(size=(4, 4), interpolation='bilinear')(d4) # 40*40*320 --> 160*160*320
d4_d2 = conv_block(d4_d2, cat_channels, n=1) # 160*160*320 --> 160*160*64
    e5_d2 = k.layers.UpSampling2D(size=(8, 8), interpolation='bilinear')(e5)  # 20*20*1024 --> 160*160*1024
    e5_d2 = conv_block(e5_d2, cat_channels, n=1)  # 160*160*1024 --> 160*160*64
d2 = k.layers.concatenate([e1_d2, e2_d2, d3_d2, d4_d2, e5_d2])
d2 = conv_block(d2, upsample_channels, n=1) # 160*160*320 --> 160*160*320
""" d1 """
e1_d1 = conv_block(e1, cat_channels, n=1) # 320*320*64 --> 320*320*64
d2_d1 = k.layers.UpSampling2D(size=(2, 2), interpolation='bilinear')(d2) # 160*160*320 --> 320*320*320
    d2_d1 = conv_block(d2_d1, cat_channels, n=1)  # 320*320*320 --> 320*320*64
d3_d1 = k.layers.UpSampling2D(size=(4, 4), interpolation='bilinear')(d3) # 80*80*320 --> 320*320*320
d3_d1 = conv_block(d3_d1, cat_channels, n=1) # 320*320*320 --> 320*320*64
d4_d1 = k.layers.UpSampling2D(size=(8, 8), interpolation='bilinear')(d4) # 40*40*320 --> 320*320*320
d4_d1 = conv_block(d4_d1, cat_channels, n=1) # 320*320*320 --> 320*320*64
    e5_d1 = k.layers.UpSampling2D(size=(16, 16), interpolation='bilinear')(e5)  # 20*20*1024 --> 320*320*1024
    e5_d1 = conv_block(e5_d1, cat_channels, n=1)  # 320*320*1024 --> 320*320*64
d1 = k.layers.concatenate([e1_d1, d2_d1, d3_d1, d4_d1, e5_d1, ])
d1 = conv_block(d1, upsample_channels, n=1) # 320*320*320 --> 320*320*320
# last layer does not have batch norm and relu
d1 = conv_block(d1, output_channels, n=1, is_bn=False, is_relu=False)
if output_channels == 1:
d1 = k.layers.Activation('sigmoid', dtype='float32')(d1)
else:
# d1 = k.activations.softmax(d1)
d1 = k.layers.Activation('softmax', dtype='float32')(d1)
""" Deep Supervision Part"""
if training:
d2 = conv_block(d2, output_channels, n=1, is_bn=False, is_relu=False)
d3 = conv_block(d3, output_channels, n=1, is_bn=False, is_relu=False)
d4 = conv_block(d4, output_channels, n=1, is_bn=False, is_relu=False)
e5 = conv_block(e5, output_channels, n=1, is_bn=False, is_relu=False)
# d1 = no need for up sampling
d2 = k.layers.UpSampling2D(size=(2, 2), interpolation='bilinear')(d2)
d3 = k.layers.UpSampling2D(size=(4, 4), interpolation='bilinear')(d3)
d4 = k.layers.UpSampling2D(size=(8, 8), interpolation='bilinear')(d4)
e5 = k.layers.UpSampling2D(size=(16, 16), interpolation='bilinear')(e5)
if output_channels == 1:
d2 = k.layers.Activation('sigmoid', dtype='float32')(d2)
d3 = k.layers.Activation('sigmoid', dtype='float32')(d3)
d4 = k.layers.Activation('sigmoid', dtype='float32')(d4)
e5 = k.layers.Activation('sigmoid', dtype='float32')(e5)
else:
d2 = k.layers.Activation('softmax', dtype='float32')(d2)
d3 = k.layers.Activation('softmax', dtype='float32')(d3)
d4 = k.layers.Activation('softmax', dtype='float32')(d4)
e5 = k.layers.Activation('softmax', dtype='float32')(e5)
if training:
return [d1, d2, d3, d4, e5], 'UNet3Plus_DeepSup'
else:
return [d1, ], 'UNet3Plus_DeepSup'
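# Minimal usage sketch: feed dummy encoder feature maps with the spatial sizes
# documented above (320/160/80/40/20) into the deep-supervision decoder and
# compare the number of heads returned for training vs. inference. The channel
# counts assume the default filters [64, 128, 256, 512, 1024]; illustrative only.
if __name__ == "__main__":
    dummy_encoders = [
        k.layers.Input(shape=(320, 320, 64)),
        k.layers.Input(shape=(160, 160, 128)),
        k.layers.Input(shape=(80, 80, 256)),
        k.layers.Input(shape=(40, 40, 512)),
        k.layers.Input(shape=(20, 20, 1024)),
    ]
    train_heads, name = unet3plus_deepsup(dummy_encoders, 1, [64, 128, 256, 512, 1024], training=True)
    infer_heads, _ = unet3plus_deepsup(dummy_encoders, 1, [64, 128, 256, 512, 1024], training=False)
    print(f"{name}: {len(train_heads)} outputs for training, {len(infer_heads)} for inference")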
| DeepLearningExamples-master | TensorFlow2/Segmentation/Contrib/UNet3P/models/unet3plus_deep_supervision.py |
"""
Utility functions for Unet3+ models
"""
import tensorflow as tf
import tensorflow.keras as k
def conv_block(x, kernels, kernel_size=(3, 3), strides=(1, 1), padding='same',
is_bn=True, is_relu=True, n=2):
""" Custom function for conv2d:
Apply 3*3 convolutions with BN and relu.
"""
for i in range(1, n + 1):
x = k.layers.Conv2D(filters=kernels, kernel_size=kernel_size,
padding=padding, strides=strides,
kernel_regularizer=tf.keras.regularizers.l2(1e-4),
kernel_initializer=k.initializers.he_normal(seed=5))(x)
if is_bn:
x = k.layers.BatchNormalization()(x)
if is_relu:
x = k.activations.relu(x)
return x
def dot_product(seg, cls):
b, h, w, n = k.backend.int_shape(seg)
seg = tf.reshape(seg, [-1, h * w, n])
final = tf.einsum("ijk,ik->ijk", seg, cls)
final = tf.reshape(final, [-1, h, w, n])
return final
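# Minimal worked example: dot_product scales every spatial position of `seg`
# (batch, h, w, n) element-wise by the per-image vector `cls` (batch, n),
# which is how the classification-guided (CGM) variant gates the segmentation
# output. The tensors below are toy values for illustration only.
if __name__ == "__main__":
    seg_example = tf.ones([2, 4, 4, 3])          # two images, 4x4 maps, 3 classes
    cls_example = tf.constant([[1.0, 0.0, 0.5],  # per-image class weights
                               [0.2, 1.0, 0.0]])
    gated = dot_product(seg_example, cls_example)
    print(gated.shape)     # (2, 4, 4, 3)
    print(gated[0, 0, 0])  # [1.0, 0.0, 0.5] -> channels scaled by cls[0]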
| DeepLearningExamples-master | TensorFlow2/Segmentation/Contrib/UNet3P/models/unet3plus_utils.py |
"""
NVIDIA DALI data generator object.
"""
import nvidia.dali.fn as fn
from nvidia.dali import pipeline_def
import nvidia.dali.types as types
import nvidia.dali.plugin.tf as dali_tf
import tensorflow as tf
from omegaconf import DictConfig
from utils.general_utils import get_data_paths, get_gpus_count
def data_generator_pipeline(cfg: DictConfig, mode: str, mask_available: bool):
"""
Returns DALI data pipeline object.
"""
data_paths = get_data_paths(cfg, mode, mask_available) # get data paths
images_paths = data_paths[0]
if mask_available:
mask_paths = data_paths[1]
@pipeline_def(batch_size=cfg.HYPER_PARAMETERS.BATCH_SIZE)
def single_gpu_pipeline(device):
"""
Returns DALI data pipeline object for single GPU training.
"""
device = 'mixed' if 'gpu' in device.lower() else 'cpu'
pngs, _ = fn.readers.file(
files=images_paths,
random_shuffle=cfg.PREPROCESS_DATA.SHUFFLE[mode].VALUE,
seed=cfg.SEED
)
images = fn.decoders.image(pngs, device=device, output_type=types.RGB)
if cfg.PREPROCESS_DATA.RESIZE.VALUE:
# TODO verify image resizing method
images = fn.resize(
images,
size=[
cfg.PREPROCESS_DATA.RESIZE.HEIGHT,
cfg.PREPROCESS_DATA.RESIZE.WIDTH
]
)
if cfg.PREPROCESS_DATA.IMAGE_PREPROCESSING_TYPE == "normalize":
images = fn.normalize(images, mean=0, stddev=255, ) # axes=(2,)
if mask_available:
labels, _ = fn.readers.file(
files=mask_paths,
random_shuffle=cfg.PREPROCESS_DATA.SHUFFLE[mode].VALUE,
seed=cfg.SEED
)
labels = fn.decoders.image(
labels,
device=device,
output_type=types.GRAY
)
if cfg.PREPROCESS_DATA.RESIZE.VALUE:
# TODO verify image resizing method
labels = fn.resize(
labels,
size=[
cfg.PREPROCESS_DATA.RESIZE.HEIGHT,
cfg.PREPROCESS_DATA.RESIZE.WIDTH
]
)
if cfg.PREPROCESS_DATA.NORMALIZE_MASK.VALUE:
labels = fn.normalize(
labels,
mean=0,
stddev=cfg.PREPROCESS_DATA.NORMALIZE_MASK.NORMALIZE_VALUE,
)
if cfg.OUTPUT.CLASSES == 1:
labels = fn.cast(labels, dtype=types.FLOAT)
else:
labels = fn.squeeze(labels, axes=[2])
labels = fn.one_hot(labels, num_classes=cfg.OUTPUT.CLASSES)
if mask_available:
return images, labels
else:
return images,
@pipeline_def(batch_size=cfg.HYPER_PARAMETERS.BATCH_SIZE)
def multi_gpu_pipeline(device, shard_id):
"""
        Returns DALI data pipeline object for multi-GPU training.
"""
device = 'mixed' if 'gpu' in device.lower() else 'cpu'
shard_id = 1 if 'cpu' in device else shard_id
num_shards = get_gpus_count()
# num_shards should be <= #images
num_shards = len(images_paths) if num_shards > len(images_paths) else num_shards
pngs, _ = fn.readers.file(
files=images_paths,
random_shuffle=cfg.PREPROCESS_DATA.SHUFFLE[mode].VALUE,
shard_id=shard_id,
num_shards=num_shards,
seed=cfg.SEED
)
images = fn.decoders.image(pngs, device=device, output_type=types.RGB)
if cfg.PREPROCESS_DATA.RESIZE.VALUE:
# TODO verify image resizing method
images = fn.resize(
images,
size=[
cfg.PREPROCESS_DATA.RESIZE.HEIGHT,
cfg.PREPROCESS_DATA.RESIZE.WIDTH
]
)
if cfg.PREPROCESS_DATA.IMAGE_PREPROCESSING_TYPE == "normalize":
images = fn.normalize(images, mean=0, stddev=255, ) # axes=(2,)
if mask_available:
labels, _ = fn.readers.file(
files=mask_paths,
random_shuffle=cfg.PREPROCESS_DATA.SHUFFLE[mode].VALUE,
shard_id=shard_id,
num_shards=num_shards,
seed=cfg.SEED
)
labels = fn.decoders.image(
labels,
device=device,
output_type=types.GRAY
)
if cfg.PREPROCESS_DATA.RESIZE.VALUE:
# TODO verify image resizing method
labels = fn.resize(
labels,
size=[
cfg.PREPROCESS_DATA.RESIZE.HEIGHT,
cfg.PREPROCESS_DATA.RESIZE.WIDTH
]
)
if cfg.PREPROCESS_DATA.NORMALIZE_MASK.VALUE:
labels = fn.normalize(
labels,
mean=0,
stddev=cfg.PREPROCESS_DATA.NORMALIZE_MASK.NORMALIZE_VALUE,
)
if cfg.OUTPUT.CLASSES == 1:
labels = fn.cast(labels, dtype=types.FLOAT)
else:
labels = fn.squeeze(labels, axes=[2])
labels = fn.one_hot(labels, num_classes=cfg.OUTPUT.CLASSES)
if mask_available:
return images, labels
else:
return images,
if cfg.USE_MULTI_GPUS.VALUE:
return multi_gpu_pipeline
else:
return single_gpu_pipeline
def get_data_shapes(cfg: DictConfig, mask_available: bool):
"""
Returns shapes and dtypes of the outputs.
"""
if mask_available:
shapes = (
(cfg.HYPER_PARAMETERS.BATCH_SIZE,
cfg.INPUT.HEIGHT,
cfg.INPUT.WIDTH,
cfg.INPUT.CHANNELS),
(cfg.HYPER_PARAMETERS.BATCH_SIZE,
cfg.INPUT.HEIGHT,
cfg.INPUT.WIDTH,
cfg.OUTPUT.CLASSES)
)
dtypes = (
tf.float32,
tf.float32)
else:
shapes = (
(cfg.HYPER_PARAMETERS.BATCH_SIZE,
cfg.INPUT.HEIGHT,
cfg.INPUT.WIDTH,
cfg.INPUT.CHANNELS),
)
dtypes = (
tf.float32,
)
return shapes, dtypes
def data_generator(cfg: DictConfig,
mode: str,
strategy: tf.distribute.Strategy = None):
"""
    Generate batches of data for the model by reading images and their
    corresponding masks using NVIDIA DALI.
    Works for both single- and multi-GPU setups. In the multi-GPU case,
    pass the strategy object too.
    There are two options: you can pass either a directory path or a list.
    In case of a directory, it should contain the relative path of the
    images/mask folder from the project root.
    In case of a list of images, every element should contain the absolute
    path of each image and mask.
"""
    # check whether masks are available
mask_available = False if cfg.DATASET[mode].MASK_PATH is None or str(
cfg.DATASET[mode].MASK_PATH).lower() == "none" else True
# create dali data pipeline
data_pipeline = data_generator_pipeline(cfg, mode, mask_available)
shapes, dtypes = get_data_shapes(cfg, mask_available)
if cfg.USE_MULTI_GPUS.VALUE:
def bound_dataset(input_context):
"""
            In case of multi-GPU training, bind the dataset to a device for distributed training.
"""
with tf.device("/gpu:{}".format(input_context.input_pipeline_id)):
device_id = input_context.input_pipeline_id
return dali_tf.DALIDataset(
pipeline=data_pipeline(
device="gpu",
device_id=device_id,
shard_id=device_id,
num_threads=cfg.DATALOADER_WORKERS
),
batch_size=cfg.HYPER_PARAMETERS.BATCH_SIZE,
output_shapes=shapes,
output_dtypes=dtypes,
device_id=device_id,
)
# distribute dataset
input_options = tf.distribute.InputOptions(
experimental_place_dataset_on_device=True,
# for older dali versions use experimental_prefetch_to_device
# for new dali versions use experimental_fetch_to_device
experimental_fetch_to_device=False, # experimental_fetch_to_device
experimental_replication_mode=tf.distribute.InputReplicationMode.PER_REPLICA)
# map dataset to given strategy and return it
return strategy.distribute_datasets_from_function(bound_dataset, input_options)
else:
# single gpu pipeline
pipeline = data_pipeline(
batch_size=cfg.HYPER_PARAMETERS.BATCH_SIZE,
num_threads=cfg.DATALOADER_WORKERS,
device="gpu",
device_id=0
)
# create dataset
with tf.device('/gpu:0'):
data_generator = dali_tf.DALIDataset(
pipeline=pipeline,
batch_size=cfg.HYPER_PARAMETERS.BATCH_SIZE,
output_shapes=shapes,
output_dtypes=dtypes,
device_id=0)
return data_generator
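# Minimal usage sketch: `data_generator` expects a full OmegaConf training
# config; the snippet below only exercises `get_data_shapes` with a small,
# hypothetical config so the expected output signature of the DALI pipeline
# can be inspected. The "TRAIN" mode key in the comments is also hypothetical.
if __name__ == "__main__":
    from omegaconf import OmegaConf
    toy_cfg = OmegaConf.create({
        "HYPER_PARAMETERS": {"BATCH_SIZE": 2},
        "INPUT": {"HEIGHT": 320, "WIDTH": 320, "CHANNELS": 3},
        "OUTPUT": {"CLASSES": 1},
    })
    shapes, dtypes = get_data_shapes(toy_cfg, mask_available=True)
    print(shapes)  # ((2, 320, 320, 3), (2, 320, 320, 1))
    print(dtypes)  # both tf.float32
    # With a complete config the generator itself is created as:
    #   train_data = data_generator(cfg, "TRAIN", strategy)  # multi-GPU
    #   train_data = data_generator(cfg, "TRAIN")             # single GPU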
| DeepLearningExamples-master | TensorFlow2/Segmentation/Contrib/UNet3P/data_generators/dali_data_generator.py |
"""
Data generator
"""
import os
import tensorflow as tf
from omegaconf import DictConfig
from utils.general_utils import join_paths, get_gpus_count
from .tf_data_generator import DataGenerator as tf_data_generator
try:
from .dali_data_generator import data_generator as dali_data_generator
except ModuleNotFoundError:
print("NVIDIA DALI not installed, please install it."
"\nNote: DALI is only available on Linux platform. For Window "
"you can use TensorFlow generator for training.")
def get_data_generator(cfg: DictConfig,
mode: str,
strategy: tf.distribute.Strategy = None):
"""
    Creates and returns a data generator object based on the given type.
"""
if cfg.DATA_GENERATOR_TYPE == "TF_GENERATOR":
print(f"Using TensorFlow generator for {mode} data")
generator = tf_data_generator(cfg, mode)
elif cfg.DATA_GENERATOR_TYPE == "DALI_GENERATOR":
print(f"Using NVIDIA DALI generator for {mode} data")
if cfg.USE_MULTI_GPUS.VALUE:
generator = dali_data_generator(cfg, mode, strategy)
else:
generator = dali_data_generator(cfg, mode)
else:
raise ValueError(
"Wrong generator type passed."
"\nPossible options are TF_GENERATOR and DALI_GENERATOR"
)
return generator
def update_batch_size(cfg: DictConfig):
"""
    Scale up the batch size for multiple GPUs in case of the TensorFlow generator.
"""
if cfg.DATA_GENERATOR_TYPE == "TF_GENERATOR" and cfg.USE_MULTI_GPUS.VALUE:
# change batch size according to available gpus
cfg.HYPER_PARAMETERS.BATCH_SIZE = \
cfg.HYPER_PARAMETERS.BATCH_SIZE * get_gpus_count()
def get_batch_size(cfg: DictConfig):
"""
    Return the batch size.
    In case of the DALI generator, scale up the batch size for multiple GPUs.
"""
if cfg.DATA_GENERATOR_TYPE == "DALI_GENERATOR" and cfg.USE_MULTI_GPUS.VALUE:
# change batch size according to available gpus
return cfg.HYPER_PARAMETERS.BATCH_SIZE * get_gpus_count()
else:
return cfg.HYPER_PARAMETERS.BATCH_SIZE
def get_iterations(cfg: DictConfig, mode: str):
"""
Return steps per epoch
"""
images_length = len(
os.listdir(
join_paths(
cfg.WORK_DIR,
cfg.DATASET[mode].IMAGES_PATH
)
)
)
if cfg.DATA_GENERATOR_TYPE == "TF_GENERATOR":
training_steps = images_length // cfg.HYPER_PARAMETERS.BATCH_SIZE
elif cfg.DATA_GENERATOR_TYPE == "DALI_GENERATOR":
if cfg.USE_MULTI_GPUS.VALUE:
training_steps = images_length // (
cfg.HYPER_PARAMETERS.BATCH_SIZE * get_gpus_count())
else:
training_steps = images_length // cfg.HYPER_PARAMETERS.BATCH_SIZE
else:
raise ValueError("Wrong generator type passed.")
return training_steps
| DeepLearningExamples-master | TensorFlow2/Segmentation/Contrib/UNet3P/data_generators/data_generator.py |
"""
Tensorflow data generator class.
"""
import tensorflow as tf
import numpy as np
from omegaconf import DictConfig
from utils.general_utils import get_data_paths
from utils.images_utils import prepare_image, prepare_mask
class DataGenerator(tf.keras.utils.Sequence):
"""
    Generate batches of data for the model by reading images and their
    corresponding masks using a TensorFlow Sequence generator.
    There are two options: you can pass either a directory path or a list.
    In case of a directory, it should contain the relative path of the
    images/mask folder from the project root.
    In case of a list of images, every element should contain the absolute
    path of each image and mask.
    Because this generator is also used for prediction, during testing you can
    set the mask path to None if masks are not available for visualization.
"""
def __init__(self, cfg: DictConfig, mode: str):
"""
Initialization
"""
self.cfg = cfg
self.mode = mode
self.batch_size = self.cfg.HYPER_PARAMETERS.BATCH_SIZE
# set seed for reproducibility
np.random.seed(cfg.SEED)
        # check whether masks are available
self.mask_available = False if cfg.DATASET[mode].MASK_PATH is None or str(
cfg.DATASET[mode].MASK_PATH).lower() == "none" else True
data_paths = get_data_paths(cfg, mode, self.mask_available)
self.images_paths = data_paths[0]
if self.mask_available:
self.mask_paths = data_paths[1]
# self.images_paths.sort() # no need for sorting
self.on_epoch_end()
def __len__(self):
"""
Denotes the number of batches per epoch
"""
        # TensorFlow issue: on_epoch_end is not reliably called at the end
        # of each epoch, so force the call here
self.on_epoch_end()
return int(
np.floor(
len(self.images_paths) / self.batch_size
)
)
def on_epoch_end(self):
"""
Updates indexes after each epoch
"""
self.indexes = np.arange(len(self.images_paths))
if self.cfg.PREPROCESS_DATA.SHUFFLE[self.mode].VALUE:
np.random.shuffle(self.indexes)
def __getitem__(self, index):
"""
Generate one batch of data
"""
# Generate indexes of the batch
indexes = self.indexes[
index * self.batch_size:(index + 1) * self.batch_size
]
# Generate data
return self.__data_generation(indexes)
def __data_generation(self, indexes):
"""
Generates batch data
"""
# create empty array to store batch data
batch_images = np.zeros(
(
self.cfg.HYPER_PARAMETERS.BATCH_SIZE,
self.cfg.INPUT.HEIGHT,
self.cfg.INPUT.WIDTH,
self.cfg.INPUT.CHANNELS
)
).astype(np.float32)
if self.mask_available:
batch_masks = np.zeros(
(
self.cfg.HYPER_PARAMETERS.BATCH_SIZE,
self.cfg.INPUT.HEIGHT,
self.cfg.INPUT.WIDTH,
self.cfg.OUTPUT.CLASSES
)
).astype(np.float32)
for i, index in enumerate(indexes):
# extract path from list
img_path = self.images_paths[int(index)]
if self.mask_available:
mask_path = self.mask_paths[int(index)]
# prepare image for model by resizing and preprocessing it
image = prepare_image(
img_path,
self.cfg.PREPROCESS_DATA.RESIZE,
self.cfg.PREPROCESS_DATA.IMAGE_PREPROCESSING_TYPE,
)
if self.mask_available:
                # prepare mask for model by resizing and preprocessing it
mask = prepare_mask(
mask_path,
self.cfg.PREPROCESS_DATA.RESIZE,
self.cfg.PREPROCESS_DATA.NORMALIZE_MASK,
)
# numpy to tensorflow conversion
if self.mask_available:
image, mask = tf.numpy_function(
self.tf_func,
[image, mask],
[tf.float32, tf.int32]
)
else:
image = tf.numpy_function(
self.tf_func,
[image, ],
[tf.float32, ]
)
            # set shape attributes which were lost during the TF conversion
image.set_shape(
[
self.cfg.INPUT.HEIGHT,
self.cfg.INPUT.WIDTH,
self.cfg.INPUT.CHANNELS
]
)
batch_images[i] = image
if self.mask_available:
# height x width --> height x width x output classes
if self.cfg.OUTPUT.CLASSES == 1:
mask = tf.expand_dims(mask, axis=-1)
else:
# convert mask into one hot vectors
mask = tf.one_hot(
mask,
self.cfg.OUTPUT.CLASSES,
dtype=tf.int32
)
mask.set_shape(
[
self.cfg.INPUT.HEIGHT,
self.cfg.INPUT.WIDTH,
self.cfg.OUTPUT.CLASSES
]
)
batch_masks[i] = mask
if self.mask_available:
return batch_images, batch_masks
else:
return batch_images,
@staticmethod
def tf_func(*args):
return args
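# Minimal worked example of the batching arithmetic used above: with 10 images
# and a batch size of 3, __len__ yields 3 full batches and __getitem__(i) takes
# the slice indexes[i*batch_size:(i+1)*batch_size]; the leftover image is dropped.
if __name__ == "__main__":
    indexes = np.arange(10)
    batch_size = 3
    num_batches = int(np.floor(len(indexes) / batch_size))
    print("batches per epoch:", num_batches)  # 3
    for batch_id in range(num_batches):
        print(batch_id, indexes[batch_id * batch_size:(batch_id + 1) * batch_size])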
| DeepLearningExamples-master | TensorFlow2/Segmentation/Contrib/UNet3P/data_generators/tf_data_generator.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Training script for Mask-RCNN."""
import logging
import os
from argparse import Namespace
from mrcnn_tf2.runtime.run import run_training, run_inference, run_evaluation
from mrcnn_tf2.utils.dllogger import LoggingBackend
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
os.environ["TF_CPP_VMODULE"] = 'non_max_suppression_op=0,generate_box_proposals_op=0,executor=0'
import dllogger
from mrcnn_tf2.arguments import PARSER
from mrcnn_tf2.config import CONFIG
from mrcnn_tf2.dataset import Dataset
def main():
# setup params
arguments = PARSER.parse_args()
params = Namespace(**{**vars(CONFIG), **vars(arguments)})
# setup logging
# noinspection PyArgumentList
logging.basicConfig(
level=logging.DEBUG if params.verbose else logging.INFO,
format='{asctime} {levelname:.1} {name:15} {message}',
style='{'
)
# remove custom tf handler that logs to stderr
logging.getLogger('tensorflow').setLevel(logging.WARNING)
logging.getLogger('tensorflow').handlers.clear()
# setup dllogger
dllogger.init(backends=[
dllogger.JSONStreamBackend(verbosity=dllogger.Verbosity.VERBOSE, filename=params.log_file, append=True),
LoggingBackend(verbosity=dllogger.Verbosity.VERBOSE)
])
dllogger.log(step='PARAMETER', data=vars(params))
# setup dataset
dataset = Dataset(params)
if params.mode == 'train':
run_training(dataset, params)
if params.mode == 'eval':
run_evaluation(dataset, params)
if params.mode == 'infer':
run_inference(dataset, params)
if __name__ == '__main__':
main()
| DeepLearningExamples-master | TensorFlow2/Segmentation/MaskRCNN/main.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Parameters used to build Mask-RCNN model."""
from argparse import Namespace
CONFIG = Namespace(**dict(
# input pre-processing parameters
image_size=(832, 1344),
augment_input_data=True,
gt_mask_size=112,
# dataset specific parameters
num_classes=91,
skip_crowd_during_training=True,
use_category=True,
# Region Proposal Network
rpn_positive_overlap=0.7,
rpn_negative_overlap=0.3,
rpn_batch_size_per_im=256,
rpn_fg_fraction=0.5,
rpn_min_size=0.,
# Proposal layer.
batch_size_per_im=512,
fg_fraction=0.25,
fg_thresh=0.5,
bg_thresh_hi=0.5,
bg_thresh_lo=0.,
# Faster-RCNN heads.
fast_rcnn_mlp_head_dim=1024,
bbox_reg_weights=(10., 10., 5., 5.),
# Mask-RCNN heads.
include_mask=True, # whether or not to include mask branch. # ===== Not existing in MLPerf ===== #
mrcnn_resolution=28,
# training
train_rpn_pre_nms_topn=2000,
train_rpn_post_nms_topn=1000,
train_rpn_nms_threshold=0.7,
# evaluation
test_detections_per_image=100,
test_nms=0.5,
test_rpn_pre_nms_topn=1000,
test_rpn_post_nms_topn=1000,
test_rpn_nms_thresh=0.7,
# model architecture
min_level=2,
max_level=6,
num_scales=1,
aspect_ratios=[(1.0, 1.0), (1.4, 0.7), (0.7, 1.4)],
anchor_scale=8.0,
# localization loss
rpn_box_loss_weight=1.0,
fast_rcnn_box_loss_weight=1.0,
mrcnn_weight_loss_mask=1.0,
# other
checkpoint_name_format='nvidia_mrcnn_tf2.ckpt'
))
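# Minimal usage sketch: these defaults are merged with the parsed command line
# arguments into a single flat Namespace, the same pattern used in main.py.
# The override values below are hypothetical.
if __name__ == "__main__":
    overrides = Namespace(train_batch_size=4, amp=True)
    params = Namespace(**{**vars(CONFIG), **vars(overrides)})
    print(params.image_size, params.train_batch_size, params.amp)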
| DeepLearningExamples-master | TensorFlow2/Segmentation/MaskRCNN/mrcnn_tf2/config.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Command line argument parser """
import argparse
# ===================================================================
# Parser setup
# ===================================================================
class CustomFormatter(argparse.ArgumentDefaultsHelpFormatter, argparse.RawTextHelpFormatter):
pass
# noinspection PyTypeChecker
PARSER = argparse.ArgumentParser(
usage='main.py MODE [arguments...]',
    description='NVIDIA implementation of MaskRCNN for TensorFlow 2.x',
formatter_class=lambda prog: CustomFormatter(prog, max_help_position=100),
add_help=False
)
RUNTIME_GROUP = PARSER.add_argument_group('Runtime')
HYPER_GROUP = PARSER.add_argument_group('Hyperparameters')
LOGGING_GROUP = PARSER.add_argument_group('Logging')
UTILITY_GROUP = PARSER.add_argument_group('Utility')
# ===================================================================
# Runtime arguments
# ===================================================================
RUNTIME_GROUP.add_argument(
'mode',
type=str,
metavar='MODE',
help=(
'One of supported execution modes:'
'\n\ttrain - run in training mode'
'\n\teval - run evaluation on eval data split'
'\n\tinfer - run inference on eval data split'
),
choices=[
'train', 'eval', 'infer'
]
)
RUNTIME_GROUP.add_argument(
'--data_dir',
type=str,
default='/data',
metavar='DIR',
help='Input directory containing the dataset'
)
RUNTIME_GROUP.add_argument(
'--model_dir',
type=str,
default='/results',
metavar='DIR',
help='Output directory for information related to the model'
)
RUNTIME_GROUP.add_argument(
'--backbone_checkpoint',
type=str,
default='/weights/rn50_tf_amp_ckpt_v20.06.0/nvidia_rn50_tf_amp',
metavar='FILE',
help='Pretrained checkpoint for resnet'
)
RUNTIME_GROUP.add_argument(
'--eval_file',
type=str,
default='/data/annotations/instances_val2017.json',
metavar='FILE',
help='Path to the validation json file'
)
RUNTIME_GROUP.add_argument(
'--epochs',
type=int,
default=12,
help='Number of training epochs'
)
RUNTIME_GROUP.add_argument(
'--steps_per_epoch',
type=int,
help='Number of steps (batches) per epoch. Defaults to dataset size divided by batch size.'
)
RUNTIME_GROUP.add_argument(
'--eval_samples',
type=int,
default=None,
metavar='N',
help='Number of evaluation samples'
)
# ===================================================================
# Hyperparameters arguments
# ===================================================================
HYPER_GROUP.add_argument(
'--train_batch_size',
type=int,
default=4,
metavar='N',
help='Batch size (per GPU) used during training'
)
HYPER_GROUP.add_argument(
'--eval_batch_size',
type=int,
default=8,
metavar='N',
help='Batch size used during evaluation'
)
HYPER_GROUP.add_argument(
'--seed',
type=int,
default=None,
metavar='SEED',
help='Set a constant seed for reproducibility'
)
HYPER_GROUP.add_argument(
'--l2_weight_decay',
type=float,
default=1e-4,
metavar='L2D',
help='Weight of l2 regularization'
)
HYPER_GROUP.add_argument(
'--init_learning_rate',
type=float,
default=0.0,
metavar='LR',
help='Initial learning rate'
)
HYPER_GROUP.add_argument(
'--learning_rate_values',
type=float,
nargs='*',
default=[1e-2, 1e-3, 1e-4],
metavar='D',
help='Learning rate decay levels that are then scaled by global batch size'
)
HYPER_GROUP.add_argument(
'--learning_rate_boundaries',
type=float,
nargs='*',
metavar='N',
default=[0.3, 8.0, 10.0],
help='Steps (in epochs) at which learning rate changes'
)
HYPER_GROUP.add_argument(
'--momentum',
type=float,
default=0.9,
help='Optimizer momentum'
)
HYPER_GROUP.add_argument(
'--finetune_bn',
action='store_true',
    help='Fine-tune batch normalization layers during training'
)
HYPER_GROUP.add_argument(
'--use_synthetic_data',
action='store_true',
help='Use synthetic input data, meant for testing only'
)
HYPER_GROUP.add_argument(
'--xla',
action='store_true',
help='Enable XLA JIT Compiler'
)
HYPER_GROUP.add_argument(
'--amp',
action='store_true',
help='Enable automatic mixed precision'
)
# ===================================================================
# Logging arguments
# ===================================================================
LOGGING_GROUP.add_argument(
'--log_file',
type=str,
default='mrcnn-dlll.json',
metavar='FILE',
help='Output file for DLLogger logs'
)
LOGGING_GROUP.add_argument(
'--log_every',
type=int,
default=100,
metavar='N',
help='Log performance every N steps'
)
LOGGING_GROUP.add_argument(
'--log_warmup_steps',
type=int,
default=100,
metavar='N',
help='Number of steps that will be ignored when collecting perf stats'
)
LOGGING_GROUP.add_argument(
'--log_graph',
action='store_true',
help='Print details about TF graph'
)
LOGGING_GROUP.add_argument(
'--log_tensorboard',
type=str,
metavar='PATH',
help='When provided saves tensorboard logs to given dir'
)
# ===================================================================
# Utility arguments
# ===================================================================
UTILITY_GROUP.add_argument(
'-h', '--help',
action='help',
help='Show this help message and exit'
)
UTILITY_GROUP.add_argument(
'-v', '--verbose',
action='store_true',
help='Displays debugging logs'
)
UTILITY_GROUP.add_argument(
'--eagerly',
action='store_true',
help='Runs model in eager mode. Use for debugging only as it reduces performance.'
)
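# Minimal usage sketch: parse a hypothetical command line and read back a few
# of the resulting attributes. `mode` is the only required positional argument.
if __name__ == '__main__':
    example_args = PARSER.parse_args(['train', '--data_dir', '/data', '--amp', '--train_batch_size', '2'])
    print(example_args.mode, example_args.data_dir, example_args.amp, example_args.train_batch_size)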
| DeepLearningExamples-master | TensorFlow2/Segmentation/MaskRCNN/mrcnn_tf2/arguments.py |
| DeepLearningExamples-master | TensorFlow2/Segmentation/MaskRCNN/mrcnn_tf2/__init__.py
from .dataset import Dataset
| DeepLearningExamples-master | TensorFlow2/Segmentation/MaskRCNN/mrcnn_tf2/dataset/__init__.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Data loading and processing.
Defines dataset class that exports input functions of Mask-RCNN
for training and evaluation using Estimator API.
The train_fn includes training data for category classification,
bounding box regression, and number of positive examples to normalize
the loss during training.
"""
import glob
import logging
import os
import tensorflow as tf
from mrcnn_tf2.dataset.dataset_parser import dataset_parser
TRAIN_SPLIT_PATTERN = 'train*.tfrecord'
EVAL_SPLIT_PATTERN = 'val*.tfrecord'
TRAIN_SPLIT_SAMPLES = 118287
class Dataset:
""" Load and preprocess the coco dataset. """
def __init__(self, params):
""" Configures dataset. """
self._params = params
self._train_files = glob.glob(os.path.join(self._params.data_dir, TRAIN_SPLIT_PATTERN))
self._eval_files = glob.glob(os.path.join(self._params.data_dir, EVAL_SPLIT_PATTERN))
self._logger = logging.getLogger('dataset')
def train_fn(self, batch_size):
""" Input function for training. """
data = tf.data.TFRecordDataset(self._train_files)
data = data.cache()
data = data.shuffle(buffer_size=4096, reshuffle_each_iteration=True, seed=self._params.seed)
data = data.repeat()
data = data.map(
lambda x: dataset_parser(
value=x,
mode='train',
params=self._params,
use_instance_mask=self._params.include_mask,
seed=self._params.seed
),
num_parallel_calls=tf.data.experimental.AUTOTUNE
)
data = data.batch(batch_size=batch_size, drop_remainder=True)
if self._params.use_synthetic_data:
self._logger.info("Using fake dataset loop")
data = data.take(1).cache().repeat()
data = data.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
data = data.with_options(self._data_options)
return data
def eval_fn(self, batch_size):
""" Input function for validation. """
data = tf.data.TFRecordDataset(self._eval_files)
if self._params.eval_samples:
self._logger.info(f'Amount of samples limited to {self._params.eval_samples}')
data = data.take(self._params.eval_samples)
data = data.cache()
data = data.map(
lambda x: dataset_parser(
value=x,
                # evaluation samples are parsed with mode='eval'
mode='eval',
params=self._params,
use_instance_mask=self._params.include_mask,
seed=self._params.seed
),
num_parallel_calls=16
)
data = data.batch(batch_size=batch_size, drop_remainder=True)
if self._params.use_synthetic_data:
self._logger.info("Using fake dataset loop")
data = data.take(1).cache().repeat()
data = data.take(5000 // batch_size)
data = data.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
        # FIXME: This is a workaround for a bug and should be removed as soon as the fix is merged
# http://nvbugs/2967052 [V100][JoC][MaskRCNN][TF1] performance regression with 1 GPU
data = data.apply(tf.data.experimental.prefetch_to_device('/gpu:0', buffer_size=1))
return data
@property
def train_size(self):
""" Size of the train dataset. """
return TRAIN_SPLIT_SAMPLES
@property
def _data_options(self):
""" Constructs tf.data.Options for this dataset. """
data_options = tf.data.Options()
data_options.experimental_optimization.parallel_batch = True
data_options.experimental_slack = True
data_options.experimental_threading.max_intra_op_parallelism = 1
data_options.experimental_optimization.map_parallelization = True
map_vectorization_options = tf.data.experimental.MapVectorizationOptions()
map_vectorization_options.enabled = True
map_vectorization_options.use_choose_fastest = True
data_options.experimental_optimization.map_vectorization = map_vectorization_options
return data_options
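# Minimal usage sketch: constructing the Dataset only requires `data_dir`
# (pointing at the train*.tfrecord / val*.tfrecord shards); building the input
# pipelines additionally needs the model/config fields merged in main.py.
# The path below is hypothetical.
if __name__ == "__main__":
    from argparse import Namespace
    dataset = Dataset(Namespace(data_dir='/data'))
    print("train split size:", dataset.train_size)
    # With the full params namespace from main.py:
    #   train_data = dataset.train_fn(batch_size=params.train_batch_size)
    #   eval_data = dataset.eval_fn(batch_size=params.eval_batch_size)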
| DeepLearningExamples-master | TensorFlow2/Segmentation/MaskRCNN/mrcnn_tf2/dataset/dataset.py |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data loader and processing.
Defines input_fn of Mask-RCNN for TF Estimator. The input_fn includes training
data for category classification, bounding box regression, and number of
positive examples to normalize the loss during training.
"""
import tensorflow as tf
from mrcnn_tf2.model import anchors
from mrcnn_tf2.object_detection import tf_example_decoder
from mrcnn_tf2.ops import preprocess_ops
from mrcnn_tf2.utils import coco_utils
MAX_NUM_INSTANCES = 100
MAX_NUM_VERTICES_PER_INSTANCE = 1500
MAX_NUM_POLYGON_LIST_LEN = 2 * MAX_NUM_VERTICES_PER_INSTANCE * MAX_NUM_INSTANCES
POLYGON_PAD_VALUE = coco_utils.POLYGON_PAD_VALUE
__all__ = [
# dataset parser
"dataset_parser",
# common functions
"preprocess_image",
"process_groundtruth_is_crowd",
"process_source_id",
# eval
"prepare_labels_for_eval",
# training
"augment_image",
"process_boxes_classes_indices_for_training",
"process_gt_masks_for_training",
"process_labels_for_training",
"process_targets_for_training"
]
def dataset_parser(value, mode, params, use_instance_mask, seed=None, regenerate_source_id=False):
"""Parse data to a fixed dimension input image and learning targets.
Args:
value: A dictionary contains an image and groundtruth annotations.
Returns:
features: a dictionary that contains the image and auxiliary
information. The following describes {key: value} pairs in the
dictionary.
        image: Image tensor that is preprocessed to have normalized value and
        fixed dimension [image_size, image_size, 3]
        image_info: image information that includes the original height and
        width, the scale of the processed image to the original image, and
        the scaled height and width.
source_ids: Source image id. Default value -1 if the source id is
empty in the groundtruth annotation.
labels: a dictionary that contains auxiliary information plus (optional)
labels. The following describes {key: value} pairs in the dictionary.
`labels` is only for training.
score_targets_dict: ordered dictionary with keys
[min_level, min_level+1, ..., max_level]. The values are tensor with
shape [height_l, width_l, num_anchors]. The height_l and width_l
represent the dimension of objectiveness score at l-th level.
box_targets_dict: ordered dictionary with keys
[min_level, min_level+1, ..., max_level]. The values are tensor with
shape [height_l, width_l, num_anchors * 4]. The height_l and
width_l represent the dimension of bounding box regression output at
l-th level.
gt_boxes: Groundtruth bounding box annotations. The box is represented
        in [y1, x1, y2, x2] format. The tensor is padded with -1 to the
        fixed dimension [MAX_NUM_INSTANCES, 4].
        gt_classes: Groundtruth classes annotations. The tensor is padded
        with -1 to the fixed dimension [MAX_NUM_INSTANCES].
        cropped_gt_masks: groundtruth masks cropped by the bounding box and
resized to a fixed size determined by params.gt_mask_size
regenerate_source_id: `bool`, if True TFExampleParser will use hashed
value of `image/encoded` for `image/source_id`.
"""
if mode not in ['train', 'eval']:
raise ValueError("Unknown execution mode received: %s" % mode)
def create_example_decoder():
return tf_example_decoder.TfExampleDecoder(
use_instance_mask=use_instance_mask,
regenerate_source_id=regenerate_source_id
)
example_decoder = create_example_decoder()
with tf.xla.experimental.jit_scope(compile_ops=True):
with tf.name_scope('parser'):
data = example_decoder.decode(value)
data['groundtruth_is_crowd'] = process_groundtruth_is_crowd(data)
image = tf.image.convert_image_dtype(data['image'], dtype=tf.float32)
source_id = process_source_id(data['source_id'])
if mode == 'eval':
features = {
'source_ids': source_id,
}
features["images"], features["image_info"], _, _ = preprocess_image(
image,
boxes=None,
instance_masks=None,
image_size=params.image_size,
max_level=params.max_level,
augment_input_data=False,
seed=seed
)
return features, {}
elif mode == 'train':
features = {
'source_ids': source_id
}
boxes, classes, indices, instance_masks = process_boxes_classes_indices_for_training(
data,
skip_crowd_during_training=params.skip_crowd_during_training,
use_category=params.use_category,
use_instance_mask=use_instance_mask
)
image, image_info, boxes, instance_masks = preprocess_image(
image,
boxes=boxes,
instance_masks=instance_masks,
image_size=params.image_size,
max_level=params.max_level,
augment_input_data=params.augment_input_data,
seed=seed
)
features.update({
'images': image,
'image_info': image_info,
})
padded_image_size = image.get_shape().as_list()[:2]
if use_instance_mask:
features['cropped_gt_masks'] = process_gt_masks_for_training(
instance_masks,
boxes,
gt_mask_size=params.gt_mask_size,
padded_image_size=padded_image_size,
max_num_instances=MAX_NUM_INSTANCES
)
with tf.xla.experimental.jit_scope(compile_ops=False):
(score_targets, box_targets), input_anchor = process_targets_for_training(
padded_image_size=padded_image_size,
boxes=boxes,
classes=classes,
params=params
)
features['gt_boxes'], features['gt_classes'], additional_labels = process_labels_for_training(
image_info, boxes, classes, score_targets, box_targets,
max_num_instances=MAX_NUM_INSTANCES,
min_level=params.min_level,
max_level=params.max_level
)
features.update(additional_labels)
# Features
# {
# 'source_ids': <tf.Tensor 'parser/StringToNumber:0' shape=() dtype=float32>,
# 'images': <tf.Tensor 'parser/pad_to_bounding_box/Squeeze:0' shape=(1024, 1024, 3) dtype=float32>,
# 'image_info': <tf.Tensor 'parser/stack_1:0' shape=(5,) dtype=float32>,
# 'cropped_gt_masks': <tf.Tensor 'parser/Reshape_4:0' shape=(100, 116, 116) dtype=float32>,
# 'gt_boxes': <tf.Tensor 'parser/Reshape_20:0' shape=(100, 4) dtype=float32>,
# 'gt_classes': <tf.Tensor 'parser/Reshape_22:0' shape=(100, 1) dtype=float32>,
# 'score_targets_2': <tf.Tensor 'parser/Reshape_9:0' shape=(256, 256, 3) dtype=int32>,
# 'box_targets_2': <tf.Tensor 'parser/Reshape_14:0' shape=(256, 256, 12) dtype=float32>,
# 'score_targets_3': <tf.Tensor 'parser/Reshape_10:0' shape=(128, 128, 3) dtype=int32>,
# 'box_targets_3': <tf.Tensor 'parser/Reshape_15:0' shape=(128, 128, 12) dtype=float32>,
# 'score_targets_4': <tf.Tensor 'parser/Reshape_11:0' shape=(64, 64, 3) dtype=int32>,
# 'box_targets_4': <tf.Tensor 'parser/Reshape_16:0' shape=(64, 64, 12) dtype=float32>,
# 'score_targets_5': <tf.Tensor 'parser/Reshape_12:0' shape=(32, 32, 3) dtype=int32>,
# 'box_targets_5': <tf.Tensor 'parser/Reshape_17:0' shape=(32, 32, 12) dtype=float32>,
# 'score_targets_6': <tf.Tensor 'parser/Reshape_13:0' shape=(16, 16, 3) dtype=int32>,
# 'box_targets_6': <tf.Tensor 'parser/Reshape_18:0' shape=(16, 16, 12) dtype=float32>,
# }
# due to the way keras losses work we are passing all the targets as features
# it is impossible to access labels in custom losses that we are using
# Labels
# {
# }
return features, {}
def preprocess_image(image, boxes, instance_masks, image_size, max_level, augment_input_data=False, seed=None):
image = preprocess_ops.normalize_image(image)
if augment_input_data:
image, boxes, instance_masks = augment_image(image=image, boxes=boxes, instance_masks=instance_masks, seed=seed)
# Scaling and padding.
image, image_info, boxes, instance_masks = preprocess_ops.resize_and_pad(
image=image,
target_size=image_size,
stride=2 ** max_level,
boxes=boxes,
masks=instance_masks
)
return image, image_info, boxes, instance_masks
def process_groundtruth_is_crowd(data):
return tf.cond(
pred=tf.greater(tf.size(input=data['groundtruth_is_crowd']), 0),
true_fn=lambda: data['groundtruth_is_crowd'],
false_fn=lambda: tf.zeros_like(data['groundtruth_classes'], dtype=tf.bool)
)
def process_source_id(source_id):
"""Processes source_id to the right format."""
if source_id.dtype == tf.string:
source_id = tf.cast(tf.strings.to_number(source_id), tf.int64)
with tf.control_dependencies([source_id]):
source_id = tf.cond(
pred=tf.equal(tf.size(input=source_id), 0),
true_fn=lambda: tf.cast(tf.constant(-1), tf.int64),
false_fn=lambda: tf.identity(source_id)
)
return source_id
# eval
def prepare_labels_for_eval(
data,
target_num_instances=MAX_NUM_INSTANCES,
target_polygon_list_len=MAX_NUM_POLYGON_LIST_LEN,
use_instance_mask=False
):
"""Create labels dict for infeed from data of tf.Example."""
image = data['image']
height, width = tf.shape(input=image)[:2]
boxes = data['groundtruth_boxes']
classes = tf.cast(data['groundtruth_classes'], dtype=tf.float32)
num_labels = tf.shape(input=classes)[0]
boxes = preprocess_ops.pad_to_fixed_size(boxes, -1, [target_num_instances, 4])
classes = preprocess_ops.pad_to_fixed_size(classes, -1, [target_num_instances, 1])
is_crowd = tf.cast(data['groundtruth_is_crowd'], dtype=tf.float32)
is_crowd = preprocess_ops.pad_to_fixed_size(is_crowd, 0, [target_num_instances, 1])
labels = dict()
labels['width'] = width
labels['height'] = height
labels['groundtruth_boxes'] = boxes
labels['groundtruth_classes'] = classes
labels['num_groundtruth_labels'] = num_labels
labels['groundtruth_is_crowd'] = is_crowd
if use_instance_mask:
data['groundtruth_polygons'] = preprocess_ops.pad_to_fixed_size(
data=data['groundtruth_polygons'],
pad_value=POLYGON_PAD_VALUE,
output_shape=[target_polygon_list_len, 1]
)
if 'groundtruth_area' in data:
labels['groundtruth_area'] = preprocess_ops.pad_to_fixed_size(
data=labels['groundtruth_area'],
pad_value=0,
output_shape=[target_num_instances, 1]
)
return labels
# training
def augment_image(image, boxes, instance_masks, seed):
flipped_results = preprocess_ops.random_horizontal_flip(
image,
boxes=boxes,
masks=instance_masks,
seed=seed
)
if instance_masks is not None:
image, boxes, instance_masks = flipped_results
else:
image, boxes = flipped_results
# image = tf.image.random_brightness(image, max_delta=0.1, seed=seed)
# image = tf.image.random_contrast(image, lower=0.9, upper=1.1, seed=seed)
# image = tf.image.random_saturation(image, lower=0.9, upper=1.1, seed=seed)
# image = tf.image.random_jpeg_quality(image, min_jpeg_quality=80, max_jpeg_quality=100, seed=seed)
return image, boxes, instance_masks
def process_boxes_classes_indices_for_training(data, skip_crowd_during_training, use_category, use_instance_mask):
boxes = data['groundtruth_boxes']
classes = data['groundtruth_classes']
classes = tf.reshape(tf.cast(classes, dtype=tf.float32), [-1, 1])
indices = None
instance_masks = None
if not use_category:
classes = tf.cast(tf.greater(classes, 0), dtype=tf.float32)
if skip_crowd_during_training:
indices = tf.where(tf.logical_not(data['groundtruth_is_crowd']))
classes = tf.gather_nd(classes, indices)
boxes = tf.gather_nd(boxes, indices)
if use_instance_mask:
instance_masks = tf.gather_nd(data['groundtruth_instance_masks'], indices)
return boxes, classes, indices, instance_masks
def process_gt_masks_for_training(instance_masks, boxes, gt_mask_size, padded_image_size, max_num_instances):
cropped_gt_masks = preprocess_ops.crop_gt_masks(
instance_masks=instance_masks,
boxes=boxes,
gt_mask_size=gt_mask_size,
image_size=padded_image_size
)
# cropped_gt_masks = tf.reshape(cropped_gt_masks, [max_num_instances, -1])
cropped_gt_masks = preprocess_ops.pad_to_fixed_size(
data=cropped_gt_masks,
pad_value=-1,
output_shape=[max_num_instances, (gt_mask_size + 4) ** 2]
)
return tf.reshape(cropped_gt_masks, [max_num_instances, gt_mask_size + 4, gt_mask_size + 4])
def process_labels_for_training(
image_info, boxes, classes,
score_targets, box_targets,
max_num_instances, min_level, max_level
):
labels = {}
# Pad groundtruth data.
# boxes *= image_info[2]
boxes = preprocess_ops.pad_to_fixed_size(boxes, -1, [max_num_instances, 4])
classes = preprocess_ops.pad_to_fixed_size(classes, -1, [max_num_instances, 1])
for level in range(min_level, max_level + 1):
labels['score_targets_%d' % level] = score_targets[level]
labels['box_targets_%d' % level] = box_targets[level]
return boxes, classes, labels
def process_targets_for_training(padded_image_size, boxes, classes, params):
input_anchors = anchors.Anchors(
params.min_level,
params.max_level,
params.num_scales,
params.aspect_ratios,
params.anchor_scale,
padded_image_size
)
anchor_labeler = anchors.AnchorLabeler(
input_anchors,
params.num_classes,
params.rpn_positive_overlap,
params.rpn_negative_overlap,
params.rpn_batch_size_per_im,
params.rpn_fg_fraction
)
return anchor_labeler.label_anchors(boxes, classes), input_anchors
| DeepLearningExamples-master | TensorFlow2/Segmentation/MaskRCNN/mrcnn_tf2/dataset/dataset_parser.py |
""" Custom mapping that maps model backbone to weights from NVIDIA ResNet50 v1.5 checkpoint. """
# pylint: disable=line-too-long
WEIGHTS_MAPPING = {
'mrcnn/resnet50/bottleneck_group/bottleneck_block/conv2d_block_1/batch_normalization_2/beta': 'resnet50/btlnck_block_0_0/bottleneck_1/BatchNorm/beta',
'mrcnn/resnet50/bottleneck_group/bottleneck_block/conv2d_block_1/batch_normalization_2/gamma': 'resnet50/btlnck_block_0_0/bottleneck_1/BatchNorm/gamma',
'mrcnn/resnet50/bottleneck_group/bottleneck_block/conv2d_block_1/batch_normalization_2/moving_mean': 'resnet50/btlnck_block_0_0/bottleneck_1/BatchNorm/moving_mean',
'mrcnn/resnet50/bottleneck_group/bottleneck_block/conv2d_block_1/batch_normalization_2/moving_variance': 'resnet50/btlnck_block_0_0/bottleneck_1/BatchNorm/moving_variance',
'mrcnn/resnet50/bottleneck_group/bottleneck_block/conv2d_block_1/conv2d_2/kernel': 'resnet50/btlnck_block_0_0/bottleneck_1/conv2d/kernel',
'mrcnn/resnet50/bottleneck_group/bottleneck_block/conv2d_block_2/batch_normalization_3/beta': 'resnet50/btlnck_block_0_0/bottleneck_2/BatchNorm/beta',
'mrcnn/resnet50/bottleneck_group/bottleneck_block/conv2d_block_2/batch_normalization_3/gamma': 'resnet50/btlnck_block_0_0/bottleneck_2/BatchNorm/gamma',
'mrcnn/resnet50/bottleneck_group/bottleneck_block/conv2d_block_2/batch_normalization_3/moving_mean': 'resnet50/btlnck_block_0_0/bottleneck_2/BatchNorm/moving_mean',
'mrcnn/resnet50/bottleneck_group/bottleneck_block/conv2d_block_2/batch_normalization_3/moving_variance': 'resnet50/btlnck_block_0_0/bottleneck_2/BatchNorm/moving_variance',
'mrcnn/resnet50/bottleneck_group/bottleneck_block/conv2d_block_2/conv2d_3/kernel': 'resnet50/btlnck_block_0_0/bottleneck_2/conv2d/kernel',
'mrcnn/resnet50/bottleneck_group/bottleneck_block/conv2d_block_3/batch_normalization_4/beta': 'resnet50/btlnck_block_0_0/bottleneck_3/BatchNorm/beta',
'mrcnn/resnet50/bottleneck_group/bottleneck_block/conv2d_block_3/batch_normalization_4/gamma': 'resnet50/btlnck_block_0_0/bottleneck_3/BatchNorm/gamma',
'mrcnn/resnet50/bottleneck_group/bottleneck_block/conv2d_block_3/batch_normalization_4/moving_mean': 'resnet50/btlnck_block_0_0/bottleneck_3/BatchNorm/moving_mean',
'mrcnn/resnet50/bottleneck_group/bottleneck_block/conv2d_block_3/batch_normalization_4/moving_variance': 'resnet50/btlnck_block_0_0/bottleneck_3/BatchNorm/moving_variance',
'mrcnn/resnet50/bottleneck_group/bottleneck_block/conv2d_block_3/conv2d_4/kernel': 'resnet50/btlnck_block_0_0/bottleneck_3/conv2d/kernel',
'mrcnn/resnet50/bottleneck_group/bottleneck_block/shortcut/batch_normalization_1/beta': 'resnet50/btlnck_block_0_0/shortcut/conv2d/BatchNorm/beta',
'mrcnn/resnet50/bottleneck_group/bottleneck_block/shortcut/batch_normalization_1/gamma': 'resnet50/btlnck_block_0_0/shortcut/conv2d/BatchNorm/gamma',
'mrcnn/resnet50/bottleneck_group/bottleneck_block/shortcut/batch_normalization_1/moving_mean': 'resnet50/btlnck_block_0_0/shortcut/conv2d/BatchNorm/moving_mean',
'mrcnn/resnet50/bottleneck_group/bottleneck_block/shortcut/batch_normalization_1/moving_variance': 'resnet50/btlnck_block_0_0/shortcut/conv2d/BatchNorm/moving_variance',
'mrcnn/resnet50/bottleneck_group/bottleneck_block/shortcut/conv2d_1/kernel': 'resnet50/btlnck_block_0_0/shortcut/conv2d/conv2d/kernel',
'mrcnn/resnet50/bottleneck_group/bottleneck_block_1/conv2d_block_4/batch_normalization_5/beta': 'resnet50/btlnck_block_0_1/bottleneck_1/BatchNorm/beta',
'mrcnn/resnet50/bottleneck_group/bottleneck_block_1/conv2d_block_4/batch_normalization_5/gamma': 'resnet50/btlnck_block_0_1/bottleneck_1/BatchNorm/gamma',
'mrcnn/resnet50/bottleneck_group/bottleneck_block_1/conv2d_block_4/batch_normalization_5/moving_mean': 'resnet50/btlnck_block_0_1/bottleneck_1/BatchNorm/moving_mean',
'mrcnn/resnet50/bottleneck_group/bottleneck_block_1/conv2d_block_4/batch_normalization_5/moving_variance': 'resnet50/btlnck_block_0_1/bottleneck_1/BatchNorm/moving_variance',
'mrcnn/resnet50/bottleneck_group/bottleneck_block_1/conv2d_block_4/conv2d_5/kernel': 'resnet50/btlnck_block_0_1/bottleneck_1/conv2d/kernel',
'mrcnn/resnet50/bottleneck_group/bottleneck_block_1/conv2d_block_5/batch_normalization_6/beta': 'resnet50/btlnck_block_0_1/bottleneck_2/BatchNorm/beta',
'mrcnn/resnet50/bottleneck_group/bottleneck_block_1/conv2d_block_5/batch_normalization_6/gamma': 'resnet50/btlnck_block_0_1/bottleneck_2/BatchNorm/gamma',
'mrcnn/resnet50/bottleneck_group/bottleneck_block_1/conv2d_block_5/batch_normalization_6/moving_mean': 'resnet50/btlnck_block_0_1/bottleneck_2/BatchNorm/moving_mean',
'mrcnn/resnet50/bottleneck_group/bottleneck_block_1/conv2d_block_5/batch_normalization_6/moving_variance': 'resnet50/btlnck_block_0_1/bottleneck_2/BatchNorm/moving_variance',
'mrcnn/resnet50/bottleneck_group/bottleneck_block_1/conv2d_block_5/conv2d_6/kernel': 'resnet50/btlnck_block_0_1/bottleneck_2/conv2d/kernel',
'mrcnn/resnet50/bottleneck_group/bottleneck_block_1/conv2d_block_6/batch_normalization_7/beta': 'resnet50/btlnck_block_0_1/bottleneck_3/BatchNorm/beta',
'mrcnn/resnet50/bottleneck_group/bottleneck_block_1/conv2d_block_6/batch_normalization_7/gamma': 'resnet50/btlnck_block_0_1/bottleneck_3/BatchNorm/gamma',
'mrcnn/resnet50/bottleneck_group/bottleneck_block_1/conv2d_block_6/batch_normalization_7/moving_mean': 'resnet50/btlnck_block_0_1/bottleneck_3/BatchNorm/moving_mean',
'mrcnn/resnet50/bottleneck_group/bottleneck_block_1/conv2d_block_6/batch_normalization_7/moving_variance': 'resnet50/btlnck_block_0_1/bottleneck_3/BatchNorm/moving_variance',
'mrcnn/resnet50/bottleneck_group/bottleneck_block_1/conv2d_block_6/conv2d_7/kernel': 'resnet50/btlnck_block_0_1/bottleneck_3/conv2d/kernel',
'mrcnn/resnet50/bottleneck_group/bottleneck_block_2/conv2d_block_7/batch_normalization_8/beta': 'resnet50/btlnck_block_0_2/bottleneck_1/BatchNorm/beta',
'mrcnn/resnet50/bottleneck_group/bottleneck_block_2/conv2d_block_7/batch_normalization_8/gamma': 'resnet50/btlnck_block_0_2/bottleneck_1/BatchNorm/gamma',
'mrcnn/resnet50/bottleneck_group/bottleneck_block_2/conv2d_block_7/batch_normalization_8/moving_mean': 'resnet50/btlnck_block_0_2/bottleneck_1/BatchNorm/moving_mean',
'mrcnn/resnet50/bottleneck_group/bottleneck_block_2/conv2d_block_7/batch_normalization_8/moving_variance': 'resnet50/btlnck_block_0_2/bottleneck_1/BatchNorm/moving_variance',
'mrcnn/resnet50/bottleneck_group/bottleneck_block_2/conv2d_block_7/conv2d_8/kernel': 'resnet50/btlnck_block_0_2/bottleneck_1/conv2d/kernel',
'mrcnn/resnet50/bottleneck_group/bottleneck_block_2/conv2d_block_8/batch_normalization_9/beta': 'resnet50/btlnck_block_0_2/bottleneck_2/BatchNorm/beta',
'mrcnn/resnet50/bottleneck_group/bottleneck_block_2/conv2d_block_8/batch_normalization_9/gamma': 'resnet50/btlnck_block_0_2/bottleneck_2/BatchNorm/gamma',
'mrcnn/resnet50/bottleneck_group/bottleneck_block_2/conv2d_block_8/batch_normalization_9/moving_mean': 'resnet50/btlnck_block_0_2/bottleneck_2/BatchNorm/moving_mean',
'mrcnn/resnet50/bottleneck_group/bottleneck_block_2/conv2d_block_8/batch_normalization_9/moving_variance': 'resnet50/btlnck_block_0_2/bottleneck_2/BatchNorm/moving_variance',
'mrcnn/resnet50/bottleneck_group/bottleneck_block_2/conv2d_block_8/conv2d_9/kernel': 'resnet50/btlnck_block_0_2/bottleneck_2/conv2d/kernel',
'mrcnn/resnet50/bottleneck_group/bottleneck_block_2/conv2d_block_9/batch_normalization_10/beta': 'resnet50/btlnck_block_0_2/bottleneck_3/BatchNorm/beta',
'mrcnn/resnet50/bottleneck_group/bottleneck_block_2/conv2d_block_9/batch_normalization_10/gamma': 'resnet50/btlnck_block_0_2/bottleneck_3/BatchNorm/gamma',
'mrcnn/resnet50/bottleneck_group/bottleneck_block_2/conv2d_block_9/batch_normalization_10/moving_mean': 'resnet50/btlnck_block_0_2/bottleneck_3/BatchNorm/moving_mean',
'mrcnn/resnet50/bottleneck_group/bottleneck_block_2/conv2d_block_9/batch_normalization_10/moving_variance': 'resnet50/btlnck_block_0_2/bottleneck_3/BatchNorm/moving_variance',
'mrcnn/resnet50/bottleneck_group/bottleneck_block_2/conv2d_block_9/conv2d_10/kernel': 'resnet50/btlnck_block_0_2/bottleneck_3/conv2d/kernel',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_3/conv2d_block_10/batch_normalization_12/beta': 'resnet50/btlnck_block_1_0/bottleneck_1/BatchNorm/beta',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_3/conv2d_block_10/batch_normalization_12/gamma': 'resnet50/btlnck_block_1_0/bottleneck_1/BatchNorm/gamma',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_3/conv2d_block_10/batch_normalization_12/moving_mean': 'resnet50/btlnck_block_1_0/bottleneck_1/BatchNorm/moving_mean',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_3/conv2d_block_10/batch_normalization_12/moving_variance': 'resnet50/btlnck_block_1_0/bottleneck_1/BatchNorm/moving_variance',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_3/conv2d_block_10/conv2d_12/kernel': 'resnet50/btlnck_block_1_0/bottleneck_1/conv2d/kernel',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_3/conv2d_block_11/batch_normalization_13/beta': 'resnet50/btlnck_block_1_0/bottleneck_2/BatchNorm/beta',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_3/conv2d_block_11/batch_normalization_13/gamma': 'resnet50/btlnck_block_1_0/bottleneck_2/BatchNorm/gamma',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_3/conv2d_block_11/batch_normalization_13/moving_mean': 'resnet50/btlnck_block_1_0/bottleneck_2/BatchNorm/moving_mean',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_3/conv2d_block_11/batch_normalization_13/moving_variance': 'resnet50/btlnck_block_1_0/bottleneck_2/BatchNorm/moving_variance',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_3/conv2d_block_11/conv2d_13/kernel': 'resnet50/btlnck_block_1_0/bottleneck_2/conv2d/kernel',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_3/conv2d_block_12/batch_normalization_14/beta': 'resnet50/btlnck_block_1_0/bottleneck_3/BatchNorm/beta',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_3/conv2d_block_12/batch_normalization_14/gamma': 'resnet50/btlnck_block_1_0/bottleneck_3/BatchNorm/gamma',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_3/conv2d_block_12/batch_normalization_14/moving_mean': 'resnet50/btlnck_block_1_0/bottleneck_3/BatchNorm/moving_mean',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_3/conv2d_block_12/batch_normalization_14/moving_variance': 'resnet50/btlnck_block_1_0/bottleneck_3/BatchNorm/moving_variance',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_3/conv2d_block_12/conv2d_14/kernel': 'resnet50/btlnck_block_1_0/bottleneck_3/conv2d/kernel',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_3/shortcut/batch_normalization_11/beta': 'resnet50/btlnck_block_1_0/shortcut/conv2d/BatchNorm/beta',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_3/shortcut/batch_normalization_11/gamma': 'resnet50/btlnck_block_1_0/shortcut/conv2d/BatchNorm/gamma',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_3/shortcut/batch_normalization_11/moving_mean': 'resnet50/btlnck_block_1_0/shortcut/conv2d/BatchNorm/moving_mean',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_3/shortcut/batch_normalization_11/moving_variance': 'resnet50/btlnck_block_1_0/shortcut/conv2d/BatchNorm/moving_variance',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_3/shortcut/conv2d_11/kernel': 'resnet50/btlnck_block_1_0/shortcut/conv2d/conv2d/kernel',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_4/conv2d_block_13/batch_normalization_15/beta': 'resnet50/btlnck_block_1_1/bottleneck_1/BatchNorm/beta',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_4/conv2d_block_13/batch_normalization_15/gamma': 'resnet50/btlnck_block_1_1/bottleneck_1/BatchNorm/gamma',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_4/conv2d_block_13/batch_normalization_15/moving_mean': 'resnet50/btlnck_block_1_1/bottleneck_1/BatchNorm/moving_mean',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_4/conv2d_block_13/batch_normalization_15/moving_variance': 'resnet50/btlnck_block_1_1/bottleneck_1/BatchNorm/moving_variance',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_4/conv2d_block_13/conv2d_15/kernel': 'resnet50/btlnck_block_1_1/bottleneck_1/conv2d/kernel',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_4/conv2d_block_14/batch_normalization_16/beta': 'resnet50/btlnck_block_1_1/bottleneck_2/BatchNorm/beta',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_4/conv2d_block_14/batch_normalization_16/gamma': 'resnet50/btlnck_block_1_1/bottleneck_2/BatchNorm/gamma',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_4/conv2d_block_14/batch_normalization_16/moving_mean': 'resnet50/btlnck_block_1_1/bottleneck_2/BatchNorm/moving_mean',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_4/conv2d_block_14/batch_normalization_16/moving_variance': 'resnet50/btlnck_block_1_1/bottleneck_2/BatchNorm/moving_variance',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_4/conv2d_block_14/conv2d_16/kernel': 'resnet50/btlnck_block_1_1/bottleneck_2/conv2d/kernel',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_4/conv2d_block_15/batch_normalization_17/beta': 'resnet50/btlnck_block_1_1/bottleneck_3/BatchNorm/beta',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_4/conv2d_block_15/batch_normalization_17/gamma': 'resnet50/btlnck_block_1_1/bottleneck_3/BatchNorm/gamma',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_4/conv2d_block_15/batch_normalization_17/moving_mean': 'resnet50/btlnck_block_1_1/bottleneck_3/BatchNorm/moving_mean',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_4/conv2d_block_15/batch_normalization_17/moving_variance': 'resnet50/btlnck_block_1_1/bottleneck_3/BatchNorm/moving_variance',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_4/conv2d_block_15/conv2d_17/kernel': 'resnet50/btlnck_block_1_1/bottleneck_3/conv2d/kernel',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_5/conv2d_block_16/batch_normalization_18/beta': 'resnet50/btlnck_block_1_2/bottleneck_1/BatchNorm/beta',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_5/conv2d_block_16/batch_normalization_18/gamma': 'resnet50/btlnck_block_1_2/bottleneck_1/BatchNorm/gamma',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_5/conv2d_block_16/batch_normalization_18/moving_mean': 'resnet50/btlnck_block_1_2/bottleneck_1/BatchNorm/moving_mean',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_5/conv2d_block_16/batch_normalization_18/moving_variance': 'resnet50/btlnck_block_1_2/bottleneck_1/BatchNorm/moving_variance',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_5/conv2d_block_16/conv2d_18/kernel': 'resnet50/btlnck_block_1_2/bottleneck_1/conv2d/kernel',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_5/conv2d_block_17/batch_normalization_19/beta': 'resnet50/btlnck_block_1_2/bottleneck_2/BatchNorm/beta',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_5/conv2d_block_17/batch_normalization_19/gamma': 'resnet50/btlnck_block_1_2/bottleneck_2/BatchNorm/gamma',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_5/conv2d_block_17/batch_normalization_19/moving_mean': 'resnet50/btlnck_block_1_2/bottleneck_2/BatchNorm/moving_mean',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_5/conv2d_block_17/batch_normalization_19/moving_variance': 'resnet50/btlnck_block_1_2/bottleneck_2/BatchNorm/moving_variance',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_5/conv2d_block_17/conv2d_19/kernel': 'resnet50/btlnck_block_1_2/bottleneck_2/conv2d/kernel',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_5/conv2d_block_18/batch_normalization_20/beta': 'resnet50/btlnck_block_1_2/bottleneck_3/BatchNorm/beta',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_5/conv2d_block_18/batch_normalization_20/gamma': 'resnet50/btlnck_block_1_2/bottleneck_3/BatchNorm/gamma',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_5/conv2d_block_18/batch_normalization_20/moving_mean': 'resnet50/btlnck_block_1_2/bottleneck_3/BatchNorm/moving_mean',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_5/conv2d_block_18/batch_normalization_20/moving_variance': 'resnet50/btlnck_block_1_2/bottleneck_3/BatchNorm/moving_variance',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_5/conv2d_block_18/conv2d_20/kernel': 'resnet50/btlnck_block_1_2/bottleneck_3/conv2d/kernel',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_6/conv2d_block_19/batch_normalization_21/beta': 'resnet50/btlnck_block_1_3/bottleneck_1/BatchNorm/beta',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_6/conv2d_block_19/batch_normalization_21/gamma': 'resnet50/btlnck_block_1_3/bottleneck_1/BatchNorm/gamma',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_6/conv2d_block_19/batch_normalization_21/moving_mean': 'resnet50/btlnck_block_1_3/bottleneck_1/BatchNorm/moving_mean',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_6/conv2d_block_19/batch_normalization_21/moving_variance': 'resnet50/btlnck_block_1_3/bottleneck_1/BatchNorm/moving_variance',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_6/conv2d_block_19/conv2d_21/kernel': 'resnet50/btlnck_block_1_3/bottleneck_1/conv2d/kernel',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_6/conv2d_block_20/batch_normalization_22/beta': 'resnet50/btlnck_block_1_3/bottleneck_2/BatchNorm/beta',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_6/conv2d_block_20/batch_normalization_22/gamma': 'resnet50/btlnck_block_1_3/bottleneck_2/BatchNorm/gamma',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_6/conv2d_block_20/batch_normalization_22/moving_mean': 'resnet50/btlnck_block_1_3/bottleneck_2/BatchNorm/moving_mean',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_6/conv2d_block_20/batch_normalization_22/moving_variance': 'resnet50/btlnck_block_1_3/bottleneck_2/BatchNorm/moving_variance',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_6/conv2d_block_20/conv2d_22/kernel': 'resnet50/btlnck_block_1_3/bottleneck_2/conv2d/kernel',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_6/conv2d_block_21/batch_normalization_23/beta': 'resnet50/btlnck_block_1_3/bottleneck_3/BatchNorm/beta',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_6/conv2d_block_21/batch_normalization_23/gamma': 'resnet50/btlnck_block_1_3/bottleneck_3/BatchNorm/gamma',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_6/conv2d_block_21/batch_normalization_23/moving_mean': 'resnet50/btlnck_block_1_3/bottleneck_3/BatchNorm/moving_mean',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_6/conv2d_block_21/batch_normalization_23/moving_variance': 'resnet50/btlnck_block_1_3/bottleneck_3/BatchNorm/moving_variance',
'mrcnn/resnet50/bottleneck_group_1/bottleneck_block_6/conv2d_block_21/conv2d_23/kernel': 'resnet50/btlnck_block_1_3/bottleneck_3/conv2d/kernel',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_7/conv2d_block_22/batch_normalization_25/beta': 'resnet50/btlnck_block_2_0/bottleneck_1/BatchNorm/beta',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_7/conv2d_block_22/batch_normalization_25/gamma': 'resnet50/btlnck_block_2_0/bottleneck_1/BatchNorm/gamma',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_7/conv2d_block_22/batch_normalization_25/moving_mean': 'resnet50/btlnck_block_2_0/bottleneck_1/BatchNorm/moving_mean',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_7/conv2d_block_22/batch_normalization_25/moving_variance': 'resnet50/btlnck_block_2_0/bottleneck_1/BatchNorm/moving_variance',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_7/conv2d_block_22/conv2d_25/kernel': 'resnet50/btlnck_block_2_0/bottleneck_1/conv2d/kernel',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_7/conv2d_block_23/batch_normalization_26/beta': 'resnet50/btlnck_block_2_0/bottleneck_2/BatchNorm/beta',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_7/conv2d_block_23/batch_normalization_26/gamma': 'resnet50/btlnck_block_2_0/bottleneck_2/BatchNorm/gamma',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_7/conv2d_block_23/batch_normalization_26/moving_mean': 'resnet50/btlnck_block_2_0/bottleneck_2/BatchNorm/moving_mean',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_7/conv2d_block_23/batch_normalization_26/moving_variance': 'resnet50/btlnck_block_2_0/bottleneck_2/BatchNorm/moving_variance',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_7/conv2d_block_23/conv2d_26/kernel': 'resnet50/btlnck_block_2_0/bottleneck_2/conv2d/kernel',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_7/conv2d_block_24/batch_normalization_27/beta': 'resnet50/btlnck_block_2_0/bottleneck_3/BatchNorm/beta',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_7/conv2d_block_24/batch_normalization_27/gamma': 'resnet50/btlnck_block_2_0/bottleneck_3/BatchNorm/gamma',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_7/conv2d_block_24/batch_normalization_27/moving_mean': 'resnet50/btlnck_block_2_0/bottleneck_3/BatchNorm/moving_mean',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_7/conv2d_block_24/batch_normalization_27/moving_variance': 'resnet50/btlnck_block_2_0/bottleneck_3/BatchNorm/moving_variance',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_7/conv2d_block_24/conv2d_27/kernel': 'resnet50/btlnck_block_2_0/bottleneck_3/conv2d/kernel',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_7/shortcut/batch_normalization_24/beta': 'resnet50/btlnck_block_2_0/shortcut/conv2d/BatchNorm/beta',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_7/shortcut/batch_normalization_24/gamma': 'resnet50/btlnck_block_2_0/shortcut/conv2d/BatchNorm/gamma',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_7/shortcut/batch_normalization_24/moving_mean': 'resnet50/btlnck_block_2_0/shortcut/conv2d/BatchNorm/moving_mean',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_7/shortcut/batch_normalization_24/moving_variance': 'resnet50/btlnck_block_2_0/shortcut/conv2d/BatchNorm/moving_variance',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_7/shortcut/conv2d_24/kernel': 'resnet50/btlnck_block_2_0/shortcut/conv2d/conv2d/kernel',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_8/conv2d_block_25/batch_normalization_28/beta': 'resnet50/btlnck_block_2_1/bottleneck_1/BatchNorm/beta',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_8/conv2d_block_25/batch_normalization_28/gamma': 'resnet50/btlnck_block_2_1/bottleneck_1/BatchNorm/gamma',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_8/conv2d_block_25/batch_normalization_28/moving_mean': 'resnet50/btlnck_block_2_1/bottleneck_1/BatchNorm/moving_mean',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_8/conv2d_block_25/batch_normalization_28/moving_variance': 'resnet50/btlnck_block_2_1/bottleneck_1/BatchNorm/moving_variance',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_8/conv2d_block_25/conv2d_28/kernel': 'resnet50/btlnck_block_2_1/bottleneck_1/conv2d/kernel',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_8/conv2d_block_26/batch_normalization_29/beta': 'resnet50/btlnck_block_2_1/bottleneck_2/BatchNorm/beta',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_8/conv2d_block_26/batch_normalization_29/gamma': 'resnet50/btlnck_block_2_1/bottleneck_2/BatchNorm/gamma',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_8/conv2d_block_26/batch_normalization_29/moving_mean': 'resnet50/btlnck_block_2_1/bottleneck_2/BatchNorm/moving_mean',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_8/conv2d_block_26/batch_normalization_29/moving_variance': 'resnet50/btlnck_block_2_1/bottleneck_2/BatchNorm/moving_variance',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_8/conv2d_block_26/conv2d_29/kernel': 'resnet50/btlnck_block_2_1/bottleneck_2/conv2d/kernel',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_8/conv2d_block_27/batch_normalization_30/beta': 'resnet50/btlnck_block_2_1/bottleneck_3/BatchNorm/beta',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_8/conv2d_block_27/batch_normalization_30/gamma': 'resnet50/btlnck_block_2_1/bottleneck_3/BatchNorm/gamma',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_8/conv2d_block_27/batch_normalization_30/moving_mean': 'resnet50/btlnck_block_2_1/bottleneck_3/BatchNorm/moving_mean',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_8/conv2d_block_27/batch_normalization_30/moving_variance': 'resnet50/btlnck_block_2_1/bottleneck_3/BatchNorm/moving_variance',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_8/conv2d_block_27/conv2d_30/kernel': 'resnet50/btlnck_block_2_1/bottleneck_3/conv2d/kernel',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_9/conv2d_block_28/batch_normalization_31/beta': 'resnet50/btlnck_block_2_2/bottleneck_1/BatchNorm/beta',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_9/conv2d_block_28/batch_normalization_31/gamma': 'resnet50/btlnck_block_2_2/bottleneck_1/BatchNorm/gamma',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_9/conv2d_block_28/batch_normalization_31/moving_mean': 'resnet50/btlnck_block_2_2/bottleneck_1/BatchNorm/moving_mean',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_9/conv2d_block_28/batch_normalization_31/moving_variance': 'resnet50/btlnck_block_2_2/bottleneck_1/BatchNorm/moving_variance',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_9/conv2d_block_28/conv2d_31/kernel': 'resnet50/btlnck_block_2_2/bottleneck_1/conv2d/kernel',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_9/conv2d_block_29/batch_normalization_32/beta': 'resnet50/btlnck_block_2_2/bottleneck_2/BatchNorm/beta',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_9/conv2d_block_29/batch_normalization_32/gamma': 'resnet50/btlnck_block_2_2/bottleneck_2/BatchNorm/gamma',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_9/conv2d_block_29/batch_normalization_32/moving_mean': 'resnet50/btlnck_block_2_2/bottleneck_2/BatchNorm/moving_mean',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_9/conv2d_block_29/batch_normalization_32/moving_variance': 'resnet50/btlnck_block_2_2/bottleneck_2/BatchNorm/moving_variance',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_9/conv2d_block_29/conv2d_32/kernel': 'resnet50/btlnck_block_2_2/bottleneck_2/conv2d/kernel',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_9/conv2d_block_30/batch_normalization_33/beta': 'resnet50/btlnck_block_2_2/bottleneck_3/BatchNorm/beta',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_9/conv2d_block_30/batch_normalization_33/gamma': 'resnet50/btlnck_block_2_2/bottleneck_3/BatchNorm/gamma',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_9/conv2d_block_30/batch_normalization_33/moving_mean': 'resnet50/btlnck_block_2_2/bottleneck_3/BatchNorm/moving_mean',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_9/conv2d_block_30/batch_normalization_33/moving_variance': 'resnet50/btlnck_block_2_2/bottleneck_3/BatchNorm/moving_variance',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_9/conv2d_block_30/conv2d_33/kernel': 'resnet50/btlnck_block_2_2/bottleneck_3/conv2d/kernel',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_10/conv2d_block_31/batch_normalization_34/beta': 'resnet50/btlnck_block_2_3/bottleneck_1/BatchNorm/beta',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_10/conv2d_block_31/batch_normalization_34/gamma': 'resnet50/btlnck_block_2_3/bottleneck_1/BatchNorm/gamma',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_10/conv2d_block_31/batch_normalization_34/moving_mean': 'resnet50/btlnck_block_2_3/bottleneck_1/BatchNorm/moving_mean',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_10/conv2d_block_31/batch_normalization_34/moving_variance': 'resnet50/btlnck_block_2_3/bottleneck_1/BatchNorm/moving_variance',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_10/conv2d_block_31/conv2d_34/kernel': 'resnet50/btlnck_block_2_3/bottleneck_1/conv2d/kernel',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_10/conv2d_block_32/batch_normalization_35/beta': 'resnet50/btlnck_block_2_3/bottleneck_2/BatchNorm/beta',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_10/conv2d_block_32/batch_normalization_35/gamma': 'resnet50/btlnck_block_2_3/bottleneck_2/BatchNorm/gamma',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_10/conv2d_block_32/batch_normalization_35/moving_mean': 'resnet50/btlnck_block_2_3/bottleneck_2/BatchNorm/moving_mean',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_10/conv2d_block_32/batch_normalization_35/moving_variance': 'resnet50/btlnck_block_2_3/bottleneck_2/BatchNorm/moving_variance',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_10/conv2d_block_32/conv2d_35/kernel': 'resnet50/btlnck_block_2_3/bottleneck_2/conv2d/kernel',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_10/conv2d_block_33/batch_normalization_36/beta': 'resnet50/btlnck_block_2_3/bottleneck_3/BatchNorm/beta',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_10/conv2d_block_33/batch_normalization_36/gamma': 'resnet50/btlnck_block_2_3/bottleneck_3/BatchNorm/gamma',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_10/conv2d_block_33/batch_normalization_36/moving_mean': 'resnet50/btlnck_block_2_3/bottleneck_3/BatchNorm/moving_mean',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_10/conv2d_block_33/batch_normalization_36/moving_variance': 'resnet50/btlnck_block_2_3/bottleneck_3/BatchNorm/moving_variance',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_10/conv2d_block_33/conv2d_36/kernel': 'resnet50/btlnck_block_2_3/bottleneck_3/conv2d/kernel',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_11/conv2d_block_34/batch_normalization_37/beta': 'resnet50/btlnck_block_2_4/bottleneck_1/BatchNorm/beta',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_11/conv2d_block_34/batch_normalization_37/gamma': 'resnet50/btlnck_block_2_4/bottleneck_1/BatchNorm/gamma',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_11/conv2d_block_34/batch_normalization_37/moving_mean': 'resnet50/btlnck_block_2_4/bottleneck_1/BatchNorm/moving_mean',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_11/conv2d_block_34/batch_normalization_37/moving_variance': 'resnet50/btlnck_block_2_4/bottleneck_1/BatchNorm/moving_variance',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_11/conv2d_block_34/conv2d_37/kernel': 'resnet50/btlnck_block_2_4/bottleneck_1/conv2d/kernel',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_11/conv2d_block_35/batch_normalization_38/beta': 'resnet50/btlnck_block_2_4/bottleneck_2/BatchNorm/beta',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_11/conv2d_block_35/batch_normalization_38/gamma': 'resnet50/btlnck_block_2_4/bottleneck_2/BatchNorm/gamma',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_11/conv2d_block_35/batch_normalization_38/moving_mean': 'resnet50/btlnck_block_2_4/bottleneck_2/BatchNorm/moving_mean',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_11/conv2d_block_35/batch_normalization_38/moving_variance': 'resnet50/btlnck_block_2_4/bottleneck_2/BatchNorm/moving_variance',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_11/conv2d_block_35/conv2d_38/kernel': 'resnet50/btlnck_block_2_4/bottleneck_2/conv2d/kernel',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_11/conv2d_block_36/batch_normalization_39/beta': 'resnet50/btlnck_block_2_4/bottleneck_3/BatchNorm/beta',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_11/conv2d_block_36/batch_normalization_39/gamma': 'resnet50/btlnck_block_2_4/bottleneck_3/BatchNorm/gamma',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_11/conv2d_block_36/batch_normalization_39/moving_mean': 'resnet50/btlnck_block_2_4/bottleneck_3/BatchNorm/moving_mean',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_11/conv2d_block_36/batch_normalization_39/moving_variance': 'resnet50/btlnck_block_2_4/bottleneck_3/BatchNorm/moving_variance',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_11/conv2d_block_36/conv2d_39/kernel': 'resnet50/btlnck_block_2_4/bottleneck_3/conv2d/kernel',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_12/conv2d_block_37/batch_normalization_40/beta': 'resnet50/btlnck_block_2_5/bottleneck_1/BatchNorm/beta',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_12/conv2d_block_37/batch_normalization_40/gamma': 'resnet50/btlnck_block_2_5/bottleneck_1/BatchNorm/gamma',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_12/conv2d_block_37/batch_normalization_40/moving_mean': 'resnet50/btlnck_block_2_5/bottleneck_1/BatchNorm/moving_mean',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_12/conv2d_block_37/batch_normalization_40/moving_variance': 'resnet50/btlnck_block_2_5/bottleneck_1/BatchNorm/moving_variance',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_12/conv2d_block_37/conv2d_40/kernel': 'resnet50/btlnck_block_2_5/bottleneck_1/conv2d/kernel',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_12/conv2d_block_38/batch_normalization_41/beta': 'resnet50/btlnck_block_2_5/bottleneck_2/BatchNorm/beta',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_12/conv2d_block_38/batch_normalization_41/gamma': 'resnet50/btlnck_block_2_5/bottleneck_2/BatchNorm/gamma',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_12/conv2d_block_38/batch_normalization_41/moving_mean': 'resnet50/btlnck_block_2_5/bottleneck_2/BatchNorm/moving_mean',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_12/conv2d_block_38/batch_normalization_41/moving_variance': 'resnet50/btlnck_block_2_5/bottleneck_2/BatchNorm/moving_variance',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_12/conv2d_block_38/conv2d_41/kernel': 'resnet50/btlnck_block_2_5/bottleneck_2/conv2d/kernel',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_12/conv2d_block_39/batch_normalization_42/beta': 'resnet50/btlnck_block_2_5/bottleneck_3/BatchNorm/beta',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_12/conv2d_block_39/batch_normalization_42/gamma': 'resnet50/btlnck_block_2_5/bottleneck_3/BatchNorm/gamma',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_12/conv2d_block_39/batch_normalization_42/moving_mean': 'resnet50/btlnck_block_2_5/bottleneck_3/BatchNorm/moving_mean',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_12/conv2d_block_39/batch_normalization_42/moving_variance': 'resnet50/btlnck_block_2_5/bottleneck_3/BatchNorm/moving_variance',
'mrcnn/resnet50/bottleneck_group_2/bottleneck_block_12/conv2d_block_39/conv2d_42/kernel': 'resnet50/btlnck_block_2_5/bottleneck_3/conv2d/kernel',
'mrcnn/resnet50/bottleneck_group_3/bottleneck_block_13/conv2d_block_40/batch_normalization_44/beta': 'resnet50/btlnck_block_3_0/bottleneck_1/BatchNorm/beta',
'mrcnn/resnet50/bottleneck_group_3/bottleneck_block_13/conv2d_block_40/batch_normalization_44/gamma': 'resnet50/btlnck_block_3_0/bottleneck_1/BatchNorm/gamma',
'mrcnn/resnet50/bottleneck_group_3/bottleneck_block_13/conv2d_block_40/batch_normalization_44/moving_mean': 'resnet50/btlnck_block_3_0/bottleneck_1/BatchNorm/moving_mean',
'mrcnn/resnet50/bottleneck_group_3/bottleneck_block_13/conv2d_block_40/batch_normalization_44/moving_variance': 'resnet50/btlnck_block_3_0/bottleneck_1/BatchNorm/moving_variance',
'mrcnn/resnet50/bottleneck_group_3/bottleneck_block_13/conv2d_block_40/conv2d_44/kernel': 'resnet50/btlnck_block_3_0/bottleneck_1/conv2d/kernel',
'mrcnn/resnet50/bottleneck_group_3/bottleneck_block_13/conv2d_block_41/batch_normalization_45/beta': 'resnet50/btlnck_block_3_0/bottleneck_2/BatchNorm/beta',
'mrcnn/resnet50/bottleneck_group_3/bottleneck_block_13/conv2d_block_41/batch_normalization_45/gamma': 'resnet50/btlnck_block_3_0/bottleneck_2/BatchNorm/gamma',
'mrcnn/resnet50/bottleneck_group_3/bottleneck_block_13/conv2d_block_41/batch_normalization_45/moving_mean': 'resnet50/btlnck_block_3_0/bottleneck_2/BatchNorm/moving_mean',
'mrcnn/resnet50/bottleneck_group_3/bottleneck_block_13/conv2d_block_41/batch_normalization_45/moving_variance': 'resnet50/btlnck_block_3_0/bottleneck_2/BatchNorm/moving_variance',
'mrcnn/resnet50/bottleneck_group_3/bottleneck_block_13/conv2d_block_41/conv2d_45/kernel': 'resnet50/btlnck_block_3_0/bottleneck_2/conv2d/kernel',
'mrcnn/resnet50/bottleneck_group_3/bottleneck_block_13/conv2d_block_42/batch_normalization_46/beta': 'resnet50/btlnck_block_3_0/bottleneck_3/BatchNorm/beta',
'mrcnn/resnet50/bottleneck_group_3/bottleneck_block_13/conv2d_block_42/batch_normalization_46/gamma': 'resnet50/btlnck_block_3_0/bottleneck_3/BatchNorm/gamma',
'mrcnn/resnet50/bottleneck_group_3/bottleneck_block_13/conv2d_block_42/batch_normalization_46/moving_mean': 'resnet50/btlnck_block_3_0/bottleneck_3/BatchNorm/moving_mean',
'mrcnn/resnet50/bottleneck_group_3/bottleneck_block_13/conv2d_block_42/batch_normalization_46/moving_variance': 'resnet50/btlnck_block_3_0/bottleneck_3/BatchNorm/moving_variance',
'mrcnn/resnet50/bottleneck_group_3/bottleneck_block_13/conv2d_block_42/conv2d_46/kernel': 'resnet50/btlnck_block_3_0/bottleneck_3/conv2d/kernel',
'mrcnn/resnet50/bottleneck_group_3/bottleneck_block_13/shortcut/batch_normalization_43/beta': 'resnet50/btlnck_block_3_0/shortcut/conv2d/BatchNorm/beta',
'mrcnn/resnet50/bottleneck_group_3/bottleneck_block_13/shortcut/batch_normalization_43/gamma': 'resnet50/btlnck_block_3_0/shortcut/conv2d/BatchNorm/gamma',
'mrcnn/resnet50/bottleneck_group_3/bottleneck_block_13/shortcut/batch_normalization_43/moving_mean': 'resnet50/btlnck_block_3_0/shortcut/conv2d/BatchNorm/moving_mean',
'mrcnn/resnet50/bottleneck_group_3/bottleneck_block_13/shortcut/batch_normalization_43/moving_variance': 'resnet50/btlnck_block_3_0/shortcut/conv2d/BatchNorm/moving_variance',
'mrcnn/resnet50/bottleneck_group_3/bottleneck_block_13/shortcut/conv2d_43/kernel': 'resnet50/btlnck_block_3_0/shortcut/conv2d/conv2d/kernel',
'mrcnn/resnet50/bottleneck_group_3/bottleneck_block_14/conv2d_block_43/batch_normalization_47/beta': 'resnet50/btlnck_block_3_1/bottleneck_1/BatchNorm/beta',
'mrcnn/resnet50/bottleneck_group_3/bottleneck_block_14/conv2d_block_43/batch_normalization_47/gamma': 'resnet50/btlnck_block_3_1/bottleneck_1/BatchNorm/gamma',
'mrcnn/resnet50/bottleneck_group_3/bottleneck_block_14/conv2d_block_43/batch_normalization_47/moving_mean': 'resnet50/btlnck_block_3_1/bottleneck_1/BatchNorm/moving_mean',
'mrcnn/resnet50/bottleneck_group_3/bottleneck_block_14/conv2d_block_43/batch_normalization_47/moving_variance': 'resnet50/btlnck_block_3_1/bottleneck_1/BatchNorm/moving_variance',
'mrcnn/resnet50/bottleneck_group_3/bottleneck_block_14/conv2d_block_43/conv2d_47/kernel': 'resnet50/btlnck_block_3_1/bottleneck_1/conv2d/kernel',
'mrcnn/resnet50/bottleneck_group_3/bottleneck_block_14/conv2d_block_44/batch_normalization_48/beta': 'resnet50/btlnck_block_3_1/bottleneck_2/BatchNorm/beta',
'mrcnn/resnet50/bottleneck_group_3/bottleneck_block_14/conv2d_block_44/batch_normalization_48/gamma': 'resnet50/btlnck_block_3_1/bottleneck_2/BatchNorm/gamma',
'mrcnn/resnet50/bottleneck_group_3/bottleneck_block_14/conv2d_block_44/batch_normalization_48/moving_mean': 'resnet50/btlnck_block_3_1/bottleneck_2/BatchNorm/moving_mean',
'mrcnn/resnet50/bottleneck_group_3/bottleneck_block_14/conv2d_block_44/batch_normalization_48/moving_variance': 'resnet50/btlnck_block_3_1/bottleneck_2/BatchNorm/moving_variance',
'mrcnn/resnet50/bottleneck_group_3/bottleneck_block_14/conv2d_block_44/conv2d_48/kernel': 'resnet50/btlnck_block_3_1/bottleneck_2/conv2d/kernel',
'mrcnn/resnet50/bottleneck_group_3/bottleneck_block_14/conv2d_block_45/batch_normalization_49/beta': 'resnet50/btlnck_block_3_1/bottleneck_3/BatchNorm/beta',
'mrcnn/resnet50/bottleneck_group_3/bottleneck_block_14/conv2d_block_45/batch_normalization_49/gamma': 'resnet50/btlnck_block_3_1/bottleneck_3/BatchNorm/gamma',
'mrcnn/resnet50/bottleneck_group_3/bottleneck_block_14/conv2d_block_45/batch_normalization_49/moving_mean': 'resnet50/btlnck_block_3_1/bottleneck_3/BatchNorm/moving_mean',
'mrcnn/resnet50/bottleneck_group_3/bottleneck_block_14/conv2d_block_45/batch_normalization_49/moving_variance': 'resnet50/btlnck_block_3_1/bottleneck_3/BatchNorm/moving_variance',
'mrcnn/resnet50/bottleneck_group_3/bottleneck_block_14/conv2d_block_45/conv2d_49/kernel': 'resnet50/btlnck_block_3_1/bottleneck_3/conv2d/kernel',
'mrcnn/resnet50/bottleneck_group_3/bottleneck_block_15/conv2d_block_46/batch_normalization_50/beta': 'resnet50/btlnck_block_3_2/bottleneck_1/BatchNorm/beta',
'mrcnn/resnet50/bottleneck_group_3/bottleneck_block_15/conv2d_block_46/batch_normalization_50/gamma': 'resnet50/btlnck_block_3_2/bottleneck_1/BatchNorm/gamma',
'mrcnn/resnet50/bottleneck_group_3/bottleneck_block_15/conv2d_block_46/batch_normalization_50/moving_mean': 'resnet50/btlnck_block_3_2/bottleneck_1/BatchNorm/moving_mean',
'mrcnn/resnet50/bottleneck_group_3/bottleneck_block_15/conv2d_block_46/batch_normalization_50/moving_variance': 'resnet50/btlnck_block_3_2/bottleneck_1/BatchNorm/moving_variance',
'mrcnn/resnet50/bottleneck_group_3/bottleneck_block_15/conv2d_block_46/conv2d_50/kernel': 'resnet50/btlnck_block_3_2/bottleneck_1/conv2d/kernel',
'mrcnn/resnet50/bottleneck_group_3/bottleneck_block_15/conv2d_block_47/batch_normalization_51/beta': 'resnet50/btlnck_block_3_2/bottleneck_2/BatchNorm/beta',
'mrcnn/resnet50/bottleneck_group_3/bottleneck_block_15/conv2d_block_47/batch_normalization_51/gamma': 'resnet50/btlnck_block_3_2/bottleneck_2/BatchNorm/gamma',
'mrcnn/resnet50/bottleneck_group_3/bottleneck_block_15/conv2d_block_47/batch_normalization_51/moving_mean': 'resnet50/btlnck_block_3_2/bottleneck_2/BatchNorm/moving_mean',
'mrcnn/resnet50/bottleneck_group_3/bottleneck_block_15/conv2d_block_47/batch_normalization_51/moving_variance': 'resnet50/btlnck_block_3_2/bottleneck_2/BatchNorm/moving_variance',
'mrcnn/resnet50/bottleneck_group_3/bottleneck_block_15/conv2d_block_47/conv2d_51/kernel': 'resnet50/btlnck_block_3_2/bottleneck_2/conv2d/kernel',
'mrcnn/resnet50/bottleneck_group_3/bottleneck_block_15/conv2d_block_48/batch_normalization_52/beta': 'resnet50/btlnck_block_3_2/bottleneck_3/BatchNorm/beta',
'mrcnn/resnet50/bottleneck_group_3/bottleneck_block_15/conv2d_block_48/batch_normalization_52/gamma': 'resnet50/btlnck_block_3_2/bottleneck_3/BatchNorm/gamma',
'mrcnn/resnet50/bottleneck_group_3/bottleneck_block_15/conv2d_block_48/batch_normalization_52/moving_mean': 'resnet50/btlnck_block_3_2/bottleneck_3/BatchNorm/moving_mean',
'mrcnn/resnet50/bottleneck_group_3/bottleneck_block_15/conv2d_block_48/batch_normalization_52/moving_variance': 'resnet50/btlnck_block_3_2/bottleneck_3/BatchNorm/moving_variance',
'mrcnn/resnet50/bottleneck_group_3/bottleneck_block_15/conv2d_block_48/conv2d_52/kernel': 'resnet50/btlnck_block_3_2/bottleneck_3/conv2d/kernel',
'mrcnn/resnet50/conv2d_block/batch_normalization/beta': 'resnet50/conv2d/BatchNorm/beta',
'mrcnn/resnet50/conv2d_block/batch_normalization/gamma': 'resnet50/conv2d/BatchNorm/gamma',
'mrcnn/resnet50/conv2d_block/batch_normalization/moving_mean': 'resnet50/conv2d/BatchNorm/moving_mean',
'mrcnn/resnet50/conv2d_block/batch_normalization/moving_variance': 'resnet50/conv2d/BatchNorm/moving_variance',
'mrcnn/resnet50/conv2d_block/conv2d/kernel': 'resnet50/conv2d/conv2d/kernel',
}
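# Illustrative note (not part of the original file): this dictionary translates
# Keras variable names of the Mask R-CNN backbone to the variable names used in
# a TF1-style pretrained ResNet-50 checkpoint. run.py consumes it roughly as
# WEIGHTS_MAPPING.get(var.name.replace(':0', ''), var.name), e.g.
#   'mrcnn/resnet50/conv2d_block/conv2d/kernel' -> 'resnet50/conv2d/conv2d/kernel'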
| DeepLearningExamples-master | TensorFlow2/Segmentation/MaskRCNN/mrcnn_tf2/runtime/weights_mapping.py |
import logging
import os
import tensorflow as tf
import dllogger
from mrcnn_tf2.model.mask_rcnn import MaskRCNN
from mrcnn_tf2.runtime.callbacks import DLLoggerMetricsCallback, DLLoggerPerfCallback, PretrainedWeightsLoadingCallback
from mrcnn_tf2.runtime.evaluation import evaluate
from mrcnn_tf2.runtime.learning_rate import PiecewiseConstantWithWarmupSchedule
from mrcnn_tf2.runtime.weights_mapping import WEIGHTS_MAPPING
def run_training(dataset, params):
setup(params)
strategy = tf.distribute.MirroredStrategy()
params.replicas = strategy.num_replicas_in_sync
params.global_train_batch_size = params.train_batch_size * params.replicas
logging.info(f'Distributed Strategy is activated for {params.replicas} device(s)')
with strategy.scope():
learning_rate = PiecewiseConstantWithWarmupSchedule(
init_value=params.init_learning_rate,
# scale boundaries from epochs to steps
boundaries=[
int(b * dataset.train_size / params.global_train_batch_size)
for b in params.learning_rate_boundaries
],
values=params.learning_rate_values,
# scale only by local BS as distributed strategy later scales it by number of replicas
scale=params.train_batch_size
)
optimizer = tf.keras.optimizers.SGD(
learning_rate=learning_rate,
momentum=params.momentum
)
mask_rcnn_model = create_model(params)
mask_rcnn_model.compile(
optimizer=optimizer
)
# distributed strategy splits data between instances so we need global BS
train_data = dataset.train_fn(batch_size=params.global_train_batch_size)
if params.eagerly:
mask_rcnn_model.run_eagerly = True
logging.warning('Model is running in eager mode which might reduce performance')
mask_rcnn_model.fit(
x=train_data,
epochs=params.epochs,
steps_per_epoch=params.steps_per_epoch or (dataset.train_size // params.global_train_batch_size),
callbacks=list(create_callbacks(params)),
verbose=0
)
def run_evaluation(dataset, params):
setup(params)
mask_rcnn_model = create_model(params)
if params.eagerly:
mask_rcnn_model.run_eagerly = True
logging.warning('Model is running in eager mode which might reduce performance')
predictions = mask_rcnn_model.predict(
x=dataset.eval_fn(params.eval_batch_size),
callbacks=list(create_callbacks(params))
)
eval_results = evaluate(
predictions=predictions,
eval_file=params.eval_file,
include_mask=params.include_mask
)
dllogger.log(
step=tuple(),
data={k: float(v) for k, v in eval_results.items()}
)
def run_inference(dataset, params):
setup(params)
mask_rcnn_model = create_model(params)
if params.eagerly:
mask_rcnn_model.run_eagerly = True
logging.warning('Model is running in eager mode which might reduce performance')
mask_rcnn_model.predict(
x=dataset.eval_fn(params.eval_batch_size),
callbacks=list(create_callbacks(params))
)
def setup(params):
# enforce that AMP is enabled only via --amp and not via the env var,
# mainly for NGC containers where it is enabled by default
os.environ['TF_ENABLE_AUTO_MIXED_PRECISION'] = '0'
if params.xla:
tf.config.optimizer.set_jit(True)
logging.info('XLA is activated')
if params.amp:
policy = tf.keras.mixed_precision.experimental.Policy("mixed_float16", loss_scale="dynamic")
tf.keras.mixed_precision.experimental.set_policy(policy)
logging.info('AMP is activated')
def create_model(params):
model = MaskRCNN(
params=params,
trainable='train' in params.mode
)
checkpoint_path = tf.train.latest_checkpoint(params.model_dir)
# if there is no checkpoint we are done
if checkpoint_path is None:
logging.info(f"No checkpoint was found in: {params.model_dir}")
return model
model.load_weights(checkpoint_path).expect_partial()
logging.info(f"Loaded weights from checkpoint: {checkpoint_path}")
# don't load backbone weights so they do not override the checkpoint
if params.backbone_checkpoint:
params.backbone_checkpoint = None
logging.info("Pretrained backbone weights will not be loaded")
return model
def create_callbacks(params):
yield DLLoggerMetricsCallback(
dllogger=dllogger,
log_every=params.log_every
)
yield DLLoggerPerfCallback(
dllogger=dllogger,
batch_sizes={
'train': params.train_batch_size * getattr(params, 'replicas', 1),
'test': params.eval_batch_size * getattr(params, 'replicas', 1),
'predict': params.eval_batch_size * getattr(params, 'replicas', 1)
},
warmup_steps=params.log_warmup_steps,
log_every=params.log_every
)
if params.backbone_checkpoint:
yield PretrainedWeightsLoadingCallback(
checkpoint_path=params.backbone_checkpoint,
mapping=lambda name: WEIGHTS_MAPPING.get(name.replace(':0', ''), name)
)
yield tf.keras.callbacks.ModelCheckpoint(
filepath=os.path.join(params.model_dir, params.checkpoint_name_format),
verbose=1
)
if params.log_tensorboard:
yield tf.keras.callbacks.TensorBoard(
log_dir=params.log_tensorboard,
update_freq='batch'
)
| DeepLearningExamples-master | TensorFlow2/Segmentation/MaskRCNN/mrcnn_tf2/runtime/run.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions to perform COCO evaluation."""
import numpy as np
from mrcnn_tf2.utils import coco_utils, coco_metric
def process_predictions(predictions):
""" Process the model predictions for COCO eval.
Converts boxes from [y1, x1, y2, x2] to [x1, y1, w, h] and scales them by image scale.
Flattens source_ids
Args:
predictions (dict): Predictions returned by model
Returns:
Converted prediction.
"""
image_info = predictions['image_info']
detection_boxes = predictions['detection_boxes']
for pred_id, box_id in np.ndindex(*detection_boxes.shape[:2]):
# convert from [y1, x1, y2, x2] to [x1, y1, w, h] * scale
scale = image_info[pred_id, 2]
y1, x1, y2, x2 = detection_boxes[pred_id, box_id, :]
new_box = np.array([x1, y1, x2 - x1, y2 - y1]) * scale
detection_boxes[pred_id, box_id, :] = new_box
# flatten source ids
predictions['source_ids'] = predictions['source_ids'].flatten()
return predictions
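# Worked example (illustrative, not part of the original file): with an image
# scale image_info[pred_id, 2] == 2.0 and a detection box [y1, x1, y2, x2] of
# [10., 20., 50., 80.], the converted box is [x1, y1, w, h] * scale =
# [20., 10., 60., 40.] * 2.0 = [40., 20., 120., 80.].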
def evaluate(predictions, eval_file=None, include_mask=True):
""" Evaluates given iterable of predictions.
Args:
predictions (Iterable): Iterable of predictions returned from the model.
eval_file (Optional(str)): Path to file with eval annotations.
If None then groundtruth extracted from the features will be used.
include_mask (bool): Indicates if eval mask should be included.
Returns:
dict: COCO evaluation metrics, e.g. AP and mask_AP.
"""
# convert from [y1, x1, y2, x2] to [x1, y1, w, h] * scale
predictions = process_predictions(predictions)
# create evaluation metric
eval_metric = coco_metric.EvaluationMetric(filename=eval_file, include_mask=include_mask)
# eval using the file or groundtruth from features
if eval_file is not None:
eval_results = eval_metric.predict_metric_fn(predictions)
else:
images, annotations = coco_utils.extract_coco_groundtruth(predictions, include_mask)
coco_dataset = coco_utils.create_coco_format_dataset(images, annotations)
eval_results = eval_metric.predict_metric_fn(predictions, groundtruth_data=coco_dataset)
return eval_results
| DeepLearningExamples-master | TensorFlow2/Segmentation/MaskRCNN/mrcnn_tf2/runtime/evaluation.py |
import tensorflow as tf
class PiecewiseConstantWithWarmupSchedule(tf.keras.optimizers.schedules.LearningRateSchedule):
"""
Schedule that starts with learning rate at `init_value` and monotonically increases
it up to `values[0]` at step `boundaries[0]`. After that the learning rate changes
on each boundary to the corresponding value.
"""
def __init__(self, init_value, boundaries, values, scale=1.0, name='PiecewiseConstantWithWarmup'):
"""
Constructs piecewise constant learning rate with linear warmup.
Args:
init_value (float): Learning rate at step 0.
boundaries (List[int]): Steps at which the learning rate will change.
values (List[float]): Values to which the learning rate will be changed.
scale (float): Scales the computed lr by given constant.
name (str): Name of the operation.
"""
assert len(values) > 0
assert len(values) == len(boundaries)
self._init_value = float(init_value)
self._values = list(map(float, values))
self._boundaries = list(map(float, boundaries))
self._scale = float(scale)
self._name = name
def __call__(self, step):
with tf.name_scope(self._name):
# linear learning rate before first boundary
warmup_lr = self._init_value + (self._values[0] - self._init_value) * (step / self._boundaries[0])
warmup_pred = (tf.less(step, self._boundaries[0]), lambda: warmup_lr)
# step learning rate after first boundary
boundaries_pred = [
(tf.less(step, limit), lambda v=v: v)
for limit, v in zip(self._boundaries[1:], self._values)
]
learning_rate = tf.case(
pred_fn_pairs=[warmup_pred] + boundaries_pred,
default=lambda: self._values[-1]
)
return learning_rate * self._scale
def get_config(self):
return {
"init_value": self._init_value,
"values": self._values,
"boundaries": self._boundaries,
"name": self._name
}
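# Usage sketch (illustrative, not part of the original file). Assuming
# boundaries/values analogous to the ones computed in run_training(), the
# schedule warms up linearly to values[0] and then steps down at each boundary:
if __name__ == "__main__":
    demo_schedule = PiecewiseConstantWithWarmupSchedule(
        init_value=0.001, boundaries=[500.0, 1000.0], values=[0.01, 0.001]
    )
    # expected: ~0.0055 halfway through warmup, 0.01 after the first boundary,
    # 0.001 (the last value) past the last boundary
    for demo_step in [250.0, 750.0, 2000.0]:
        print(float(demo_schedule(tf.constant(demo_step))))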
| DeepLearningExamples-master | TensorFlow2/Segmentation/MaskRCNN/mrcnn_tf2/runtime/learning_rate.py |
DeepLearningExamples-master | TensorFlow2/Segmentation/MaskRCNN/mrcnn_tf2/runtime/__init__.py |
|
import logging
import time
from collections import defaultdict
import numpy as np
import tensorflow as tf
from mrcnn_tf2.utils.keras import KerasCallback
CONFIDENCE_INTERVAL_Z = {
80.0: 1.282,
85.0: 1.440,
90.0: 1.645,
95.0: 1.960,
99.0: 2.576,
99.5: 2.807,
99.9: 3.291,
}
class DLLoggerMetricsCallback(KerasCallback):
"""
Keras callback that saves metrics using DLLogger.
"""
def __init__(self, dllogger, log_every=10, log_learning_rate=False):
"""
Args:
dllogger (DLLogger): DLLogger instance.
log_every (int): Logging interval.
log_learning_rate (bool): When set to true adds learning rate to metrics.
Cannot be used with AMP enabled, as the hack used to read the learning rate fails with AMP.
"""
super().__init__()
self._dllogger = dllogger
self._log_every = log_every
self._log_learning_rate = log_learning_rate
if not isinstance(log_every, dict):
self._log_every = defaultdict(lambda: log_every)
self._dllogger.metadata('loss', {'unit': None})
self._dllogger.metadata('AP', {'unit': None})
self._dllogger.metadata('mask_AP', {'unit': None})
logging.getLogger('hooks').info('Created metrics logging hook')
def on_any_batch_end(self, mode, epoch, batch, logs):
if (batch + 1) % self._log_every[mode] != 0:
return
step = (None if epoch is None else epoch + 1, batch + 1)
self._log_metrics(mode, logs, step=step)
def on_any_epoch_end(self, mode, epoch, logs):
step = (None if epoch is None else epoch + 1, )
self._log_metrics(mode, logs, step=step)
def on_any_end(self, mode, logs):
self._log_metrics(mode, logs)
def _log_metrics(self, mode, logs, step=tuple()):
logs = logs or {}
# remove outputs that are not in fact a metric
logs.pop('outputs', None)
if mode == 'train' and self._log_learning_rate:
logs['learning_rate'] = float(self.model.optimizer._decayed_lr(tf.float32))
# no point in logging with empty data
if not logs:
return
self._dllogger.log(step=step, data=logs)
class DLLoggerPerfCallback(KerasCallback):
"""
Keras callback that measures performance and logs it using DLLogger.
"""
def __init__(self, dllogger, batch_sizes, warmup_steps=0, log_every=None):
super().__init__()
self._dllogger = dllogger
self._batch_sizes = batch_sizes
self._warmup_steps = warmup_steps
self._log_every = log_every
if not isinstance(batch_sizes, dict):
self._batch_sizes = defaultdict(lambda: batch_sizes)
if not isinstance(warmup_steps, dict):
self._warmup_steps = defaultdict(lambda: warmup_steps)
if not isinstance(log_every, dict):
self._log_every = defaultdict(lambda: log_every)
self._deltas = {}
self._batch_timestamps = {}
self._start_timestamps = {}
for mode in ['train', 'test', 'predict']:
self._dllogger.metadata(f'{mode}_throughput', {'unit': 'images/s'})
self._dllogger.metadata(f'{mode}_latency', {'unit': 's'})
self._dllogger.metadata(f'{mode}_latency_90', {'unit': 's'})
self._dllogger.metadata(f'{mode}_latency_95', {'unit': 's'})
self._dllogger.metadata(f'{mode}_latency_99', {'unit': 's'})
self._dllogger.metadata(f'{mode}_time', {'unit': 's'})
self._logger = logging.getLogger('hooks')
self._logger.info('Created perf logging hooks')
def on_any_begin(self, mode, logs):
self._deltas[mode] = []
self._start_timestamps[mode] = time.time()
def on_any_batch_begin(self, mode, epoch, batch, logs):
self._batch_timestamps[mode] = time.time()
def on_any_batch_end(self, mode, epoch, batch, logs):
self._deltas[mode].append(time.time() - self._batch_timestamps[mode])
if self._log_every[mode] and (batch + 1) % self._log_every[mode] != 0:
return
step = (None if epoch is None else epoch + 1, batch + 1)
self._log_perf(self._deltas[mode][-self._log_every[mode]:], mode, step=step)
def on_any_end(self, mode, logs):
if len(self._deltas[mode]) > self._warmup_steps[mode]:
self._log_perf(self._deltas[mode][self._warmup_steps[mode]:], mode)
else:
self._logger.warning(
f'Number of all {mode} steps was smaller than the number of warmup steps, '
f'no stats were collected.'
)
def _log_perf(self, deltas, mode, step=tuple()):
deltas = np.array(deltas)
self._dllogger.log(
step=step,
data={
f'{mode}_throughput': self._calculate_throughput(deltas, self._batch_sizes[mode]),
f'{mode}_latency': self._calculate_latency(deltas),
f'{mode}_latency_90': self._calculate_latency_confidence(deltas, 90.0),
f'{mode}_latency_95': self._calculate_latency_confidence(deltas, 95.0),
f'{mode}_latency_99': self._calculate_latency_confidence(deltas, 99.0),
f'{mode}_time': self._calculate_total_time(self._start_timestamps[mode], time.time())
}
)
@staticmethod
def _calculate_throughput(deltas, batch_size):
return batch_size / deltas.mean()
@staticmethod
def _calculate_latency(deltas):
return deltas.mean()
@staticmethod
def _calculate_latency_confidence(deltas, confidence_interval):
mean = deltas.mean()
std = deltas.std()
n = len(deltas)
z = CONFIDENCE_INTERVAL_Z[confidence_interval]
return mean + (z * std / np.sqrt(n))
@staticmethod
def _calculate_total_time(start_time, end_time):
return end_time - start_time
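# Worked example (illustrative, not part of the original file): for batch
# latencies deltas = [0.09, 0.10, 0.11] seconds and batch_size = 8:
#   throughput = 8 / 0.10 = 80 images/s
#   latency    = 0.10 s (mean)
#   latency_95 = mean + 1.96 * std / sqrt(n)
#              = 0.10 + 1.96 * 0.00816 / 1.73 ~= 0.109 s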
class PretrainedWeightsLoadingCallback(KerasCallback):
"""
Loads pretrained weights from the given checkpoint after the first training batch.
"""
def __init__(self, checkpoint_path, mapping=None):
"""
Args:
checkpoint_path: Path to the checkpoint, as accepted by `tf.train.load_checkpoint()`
mapping: Callable that takes name of a variable and returns name of a corresponding
entry in the checkpoint.
"""
super().__init__()
self._checkpoint_path = checkpoint_path
self._mapping = mapping or (lambda x: x)
self._loaded = False
self._logger = logging.getLogger('hooks')
self._logger.info(f'Created pretrained backbone weights loading hook that loads from {checkpoint_path}')
def on_train_batch_end(self, batch, logs=None):
super().on_train_batch_end(batch, logs)
if not self._loaded:
self.load_weights()
self._loaded = True
def load_weights(self):
reader = tf.train.load_checkpoint(self._checkpoint_path)
variable_mapping = {
self._mapping(var.name): var
for var in self.model.variables
if reader.has_tensor(self._mapping(var.name))
}
for cp_name, var in variable_mapping.items():
var.assign(reader.get_tensor(cp_name))
self._logger.debug(f'Assigned "{cp_name}" from checkpoint to "{var.name}"')
self._logger.info(f'Loaded {len(variable_mapping)} pretrained backbone variables')
| DeepLearningExamples-master | TensorFlow2/Segmentation/MaskRCNN/mrcnn_tf2/runtime/callbacks.py |
import tensorflow as tf
class KerasCallback(tf.keras.callbacks.Callback):
""" Utility class that simplifies usage of Keras callback across different modes. """
def __init__(self):
super().__init__()
self._current_epoch = None
def on_any_begin(self, mode, logs):
pass
def on_any_end(self, mode, logs):
pass
def on_any_epoch_begin(self, mode, epoch, logs):
pass
def on_any_epoch_end(self, mode, epoch, logs):
pass
def on_any_batch_begin(self, mode, epoch, batch, logs):
pass
def on_any_batch_end(self, mode, epoch, batch, logs):
pass
def on_train_begin(self, logs=None):
self.on_any_begin('train', logs)
def on_test_begin(self, logs=None):
self.on_any_begin('test', logs)
def on_predict_begin(self, logs=None):
self.on_any_begin('predict', logs)
def on_train_end(self, logs=None):
self.on_any_end('train', logs)
def on_test_end(self, logs=None):
self.on_any_end('test', logs)
def on_predict_end(self, logs=None):
self.on_any_end('predict', logs)
def on_epoch_begin(self, epoch, logs=None):
self._current_epoch = epoch
self.on_any_epoch_begin('train', epoch, logs)
def on_epoch_end(self, epoch, logs=None):
self.on_any_epoch_end('train', epoch, logs)
self._current_epoch = None
def on_train_batch_begin(self, batch, logs=None):
self.on_any_batch_begin('train', self._current_epoch, batch, logs)
def on_test_batch_begin(self, batch, logs=None):
self.on_any_batch_begin('test', None, batch, logs)
def on_predict_batch_begin(self, batch, logs=None):
self.on_any_batch_begin('predict', None, batch, logs)
def on_train_batch_end(self, batch, logs=None):
self.on_any_batch_end('train', self._current_epoch, batch, logs)
def on_test_batch_end(self, batch, logs=None):
self.on_any_batch_end('test', None, batch, logs)
def on_predict_batch_end(self, batch, logs=None):
self.on_any_batch_end('predict', None, batch, logs)
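# Minimal usage sketch (illustrative, not part of the original file): a subclass
# only needs to override the unified "on_any_*" hooks to get consistent behaviour
# across fit/evaluate/predict. The class below is a made-up example.
class _ExampleBatchCounter(KerasCallback):
    """Counts processed batches per mode ('train', 'test', 'predict')."""
    def __init__(self):
        super().__init__()
        self.counts = {}
    def on_any_batch_end(self, mode, epoch, batch, logs):
        # `epoch` is None outside of training and `batch` is 0-based
        self.counts[mode] = self.counts.get(mode, 0) + 1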
| DeepLearningExamples-master | TensorFlow2/Segmentation/MaskRCNN/mrcnn_tf2/utils/keras.py |
DeepLearningExamples-master | TensorFlow2/Segmentation/MaskRCNN/mrcnn_tf2/utils/__init__.py |
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Util functions to manipulate boxes."""
from __future__ import absolute_import, division, print_function
import numpy as np
import tensorflow as tf
BBOX_XFORM_CLIP = np.log(1000. / 16.)
NMS_TILE_SIZE = 512
def bbox_overlap(boxes, gt_boxes):
"""Calculates the overlap between proposal and ground truth boxes.
Some `gt_boxes` may have been padded. The returned `iou` tensor for these
boxes will be -1.
Args:
boxes: a tensor with a shape of [batch_size, N, 4]. N is the number of
proposals before groundtruth assignment (e.g., rpn_post_nms_topn). The
last dimension is the pixel coordinates in [ymin, xmin, ymax, xmax] form.
gt_boxes: a tensor with a shape of [batch_size, MAX_NUM_INSTANCES, 4]. This
tensor might have paddings with a negative value.
Returns:
iou: a tensor with as a shape of [batch_size, N, MAX_NUM_INSTANCES].
"""
with tf.name_scope('bbox_overlap'):
bb_y_min, bb_x_min, bb_y_max, bb_x_max = tf.split(value=boxes, num_or_size_splits=4, axis=2)
gt_y_min, gt_x_min, gt_y_max, gt_x_max = tf.split(value=gt_boxes, num_or_size_splits=4, axis=2)
# Calculates the intersection area.
i_xmin = tf.maximum(bb_x_min, tf.transpose(a=gt_x_min, perm=[0, 2, 1]))
i_xmax = tf.minimum(bb_x_max, tf.transpose(a=gt_x_max, perm=[0, 2, 1]))
i_ymin = tf.maximum(bb_y_min, tf.transpose(a=gt_y_min, perm=[0, 2, 1]))
i_ymax = tf.minimum(bb_y_max, tf.transpose(a=gt_y_max, perm=[0, 2, 1]))
i_area = tf.maximum((i_xmax - i_xmin), 0) * tf.maximum((i_ymax - i_ymin), 0)
# Calculates the union area.
bb_area = (bb_y_max - bb_y_min) * (bb_x_max - bb_x_min)
gt_area = (gt_y_max - gt_y_min) * (gt_x_max - gt_x_min)
# Adds a small epsilon to avoid divide-by-zero.
u_area = bb_area + tf.transpose(a=gt_area, perm=[0, 2, 1]) - i_area + 1e-8
# Calculates IoU.
iou = i_area / u_area
# Fills -1 for padded ground truth boxes.
padding_mask = tf.less(i_xmin, tf.zeros_like(i_xmin))
iou = tf.where(padding_mask, -tf.ones_like(iou), iou)
return iou
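# Worked example (illustrative, not part of the original file): for a single
# proposal [0, 0, 10, 10] and a single ground truth box [0, 0, 5, 10]
# (both in [ymin, xmin, ymax, xmax] form), the intersection area is 5 * 10 = 50
# and the union is 100 + 50 - 50 = 100, so the returned iou tensor of shape
# [batch_size, N, MAX_NUM_INSTANCES] = [1, 1, 1] holds ~0.5.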
def top_k(scores, k, boxes_list):
"""A wrapper that returns top-k scores and correponding boxes.
This functions selects the top-k scores and boxes as follows.
indices = argsort(scores)[:k]
scores = scores[indices]
outputs = []
for boxes in boxes_list:
outputs.append(boxes[indices, :])
return scores, outputs
Args:
scores: a tensor with a shape of [batch_size, N]. N is the number of scores.
k: an integer for selecting the top-k elements.
boxes_list: a list containing at least one element. Each element has a shape
of [batch_size, N, 4].
Returns:
scores: the selected top-k scores with a shape of [batch_size, k].
outputs: the list containing the corresponding boxes in the order of the
input `boxes_list`.
"""
assert isinstance(boxes_list, list)
assert boxes_list # not empty list
batch_size, _ = scores.get_shape().as_list()
scores, top_k_indices = tf.nn.top_k(scores, k=k)
outputs = []
for boxes in boxes_list:
if batch_size == 1:
boxes = tf.squeeze(tf.gather(boxes, top_k_indices, axis=1), axis=1)
else:
boxes_index_offsets = tf.range(batch_size) * tf.shape(input=boxes)[1]
boxes_indices = tf.reshape(
top_k_indices + tf.expand_dims(boxes_index_offsets, 1), [-1])
boxes = tf.reshape(
tf.gather(tf.reshape(boxes, [-1, 4]), boxes_indices),
[batch_size, -1, 4])
outputs.append(boxes)
return scores, outputs
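# Shape sketch (illustrative, not part of the original file): with scores of
# shape [2, 1000], k=300 and boxes_list=[boxes] where boxes has shape
# [2, 1000, 4], top_k returns scores of shape [2, 300] and a one-element list
# with a tensor of shape [2, 300, 4] gathered at the same top-k indices.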
def _self_suppression(iou, _, iou_sum):
batch_size = tf.shape(input=iou)[0]
can_suppress_others = tf.cast(
tf.reshape(tf.reduce_max(input_tensor=iou, axis=1) <= 0.5, [batch_size, -1, 1]), iou.dtype)
iou_suppressed = tf.reshape(
tf.cast(tf.reduce_max(input_tensor=can_suppress_others * iou, axis=1) <= 0.5, iou.dtype),
[batch_size, -1, 1]) * iou
iou_sum_new = tf.reduce_sum(input_tensor=iou_suppressed, axis=[1, 2])
return [
iou_suppressed,
tf.reduce_any(input_tensor=iou_sum - iou_sum_new > 0.5), iou_sum_new
]
def _cross_suppression(boxes, box_slice, iou_threshold, inner_idx):
batch_size = tf.shape(input=boxes)[0]
new_slice = tf.slice(boxes, [0, inner_idx * NMS_TILE_SIZE, 0],
[batch_size, NMS_TILE_SIZE, 4])
iou = bbox_overlap(new_slice, box_slice)
ret_slice = tf.expand_dims(
tf.cast(tf.reduce_all(input_tensor=iou < iou_threshold, axis=[1]), box_slice.dtype),
2) * box_slice
return boxes, ret_slice, iou_threshold, inner_idx + 1
def _suppression_loop_body(boxes, iou_threshold, output_size, idx):
"""Process boxes in the range [idx*NMS_TILE_SIZE, (idx+1)*NMS_TILE_SIZE).
Args:
boxes: a tensor with a shape of [batch_size, anchors, 4].
iou_threshold: a float representing the threshold for deciding whether boxes
overlap too much with respect to IOU.
output_size: an int32 tensor of size [batch_size]. Representing the number
of selected boxes for each batch.
idx: an integer scalar representing induction variable.
Returns:
boxes: updated boxes.
iou_threshold: pass down iou_threshold to the next iteration.
output_size: the updated output_size.
idx: the updated induction variable.
"""
num_tiles = tf.shape(input=boxes)[1] // NMS_TILE_SIZE
batch_size = tf.shape(input=boxes)[0]
# Iterates over tiles that can possibly suppress the current tile.
box_slice = tf.slice(boxes, [0, idx * NMS_TILE_SIZE, 0],
[batch_size, NMS_TILE_SIZE, 4])
_, box_slice, _, _ = tf.while_loop(
cond=lambda _boxes, _box_slice, _threshold, inner_idx: inner_idx < idx,
body=_cross_suppression, loop_vars=[boxes, box_slice, iou_threshold,
tf.constant(0)])
# Iterates over the current tile to compute self-suppression.
iou = bbox_overlap(box_slice, box_slice)
mask = tf.expand_dims(
tf.reshape(tf.range(NMS_TILE_SIZE), [1, -1]) > tf.reshape(
tf.range(NMS_TILE_SIZE), [-1, 1]), 0)
iou *= tf.cast(tf.logical_and(mask, iou >= iou_threshold), iou.dtype)
suppressed_iou, _, _ = tf.while_loop(
cond=lambda _iou, loop_condition, _iou_sum: loop_condition, body=_self_suppression,
loop_vars=[iou, tf.constant(True),
tf.reduce_sum(input_tensor=iou, axis=[1, 2])])
suppressed_box = tf.reduce_sum(input_tensor=suppressed_iou, axis=1) > 0
box_slice *= tf.expand_dims(1.0 - tf.cast(suppressed_box, box_slice.dtype), 2)
# Uses box_slice to update the input boxes.
mask = tf.reshape(
tf.cast(tf.equal(tf.range(num_tiles), idx), boxes.dtype), [1, -1, 1, 1])
boxes = tf.tile(tf.expand_dims(
box_slice, [1]), [1, num_tiles, 1, 1]) * mask + tf.reshape(
boxes, [batch_size, num_tiles, NMS_TILE_SIZE, 4]) * (1 - mask)
boxes = tf.reshape(boxes, [batch_size, -1, 4])
# Updates output_size.
output_size += tf.reduce_sum(
input_tensor=tf.cast(tf.reduce_any(input_tensor=box_slice > 0, axis=[2]), tf.int32), axis=[1])
return boxes, iou_threshold, output_size, idx + 1
def sorted_non_max_suppression_padded(scores,
boxes,
max_output_size,
iou_threshold):
"""A wrapper that handles non-maximum suppression.
Assumption:
* The boxes are sorted by scores unless the box is a dot (all coordinates
are zero).
* Boxes with higher scores can be used to suppress boxes with lower scores.
The overall design of the algorithm is to handle boxes tile-by-tile:
boxes = boxes.pad_to_multiple_of(tile_size)
num_tiles = len(boxes) // tile_size
output_boxes = []
for i in range(num_tiles):
box_tile = boxes[i*tile_size : (i+1)*tile_size]
for j in range(i):
suppressing_tile = boxes[j*tile_size : (j+1)*tile_size]
iou = bbox_overlap(box_tile, suppressing_tile)
# if the box is suppressed in iou, clear it to a dot
box_tile *= _update_boxes(iou)
# Iteratively handle the diagonal tile.
iou = bbox_overlap(box_tile, box_tile)
iou_changed = True
while iou_changed:
# boxes that are not suppressed by anything else
suppressing_boxes = _get_suppressing_boxes(iou)
# boxes that are suppressed by suppressing_boxes
suppressed_boxes = _get_suppressed_boxes(iou, suppressing_boxes)
# clear iou to 0 for boxes that are suppressed, as they cannot be used
# to suppress other boxes any more
new_iou = _clear_iou(iou, suppressed_boxes)
iou_changed = (new_iou != iou)
iou = new_iou
# remaining boxes that can still suppress others, are selected boxes.
output_boxes.append(_get_suppressing_boxes(iou))
if len(output_boxes) >= max_output_size:
break
Args:
scores: a tensor with a shape of [batch_size, anchors].
boxes: a tensor with a shape of [batch_size, anchors, 4].
max_output_size: a scalar integer `Tensor` representing the maximum number
of boxes to be selected by non max suppression.
iou_threshold: a float representing the threshold for deciding whether boxes
overlap too much with respect to IOU.
Returns:
nms_scores: a tensor with a shape of [batch_size, max_output_size]. It has the
same dtype as the input scores.
nms_proposals: a tensor with a shape of [batch_size, max_output_size, 4]. It
has the same dtype as the input boxes.
"""
batch_size = tf.shape(input=boxes)[0]
num_boxes = tf.shape(input=boxes)[1]
pad = tf.cast(
tf.math.ceil(tf.cast(num_boxes, tf.float32) / NMS_TILE_SIZE),
tf.int32) * NMS_TILE_SIZE - num_boxes
boxes = tf.pad(tensor=tf.cast(boxes, tf.float32), paddings=[[0, 0], [0, pad], [0, 0]])
scores = tf.pad(tensor=tf.cast(scores, tf.float32), paddings=[[0, 0], [0, pad]])
num_boxes += pad
def _loop_cond(unused_boxes, unused_threshold, output_size, idx):
return tf.logical_and(
tf.reduce_min(input_tensor=output_size) < max_output_size,
idx < num_boxes // NMS_TILE_SIZE)
selected_boxes, _, output_size, _ = tf.while_loop(
cond=_loop_cond, body=_suppression_loop_body, loop_vars=[
boxes, iou_threshold,
tf.zeros([batch_size], tf.int32),
tf.constant(0)
])
idx = num_boxes - tf.cast(
tf.nn.top_k(
tf.cast(tf.reduce_any(input_tensor=selected_boxes > 0, axis=[2]), tf.int32) *
tf.expand_dims(tf.range(num_boxes, 0, -1), 0), max_output_size)[0],
tf.int32)
idx = tf.minimum(idx, num_boxes - 1)
idx = tf.reshape(
idx + tf.reshape(tf.range(batch_size) * num_boxes, [-1, 1]), [-1])
boxes = tf.reshape(
tf.gather(tf.reshape(boxes, [-1, 4]), idx),
[batch_size, max_output_size, 4])
boxes = boxes * tf.cast(
tf.reshape(tf.range(max_output_size), [1, -1, 1]) < tf.reshape(
output_size, [-1, 1, 1]), boxes.dtype)
scores = tf.reshape(
tf.gather(tf.reshape(scores, [-1, 1]), idx),
[batch_size, max_output_size])
scores = scores * tf.cast(
tf.reshape(tf.range(max_output_size), [1, -1]) < tf.reshape(
output_size, [-1, 1]), scores.dtype)
return scores, boxes
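# Minimal usage sketch (assumes scores are already sorted in descending order
# per image, as required above, and that padded entries are all-zero "dot" boxes):
#   scores_demo = tf.constant([[0.9, 0.8, 0.1, 0.0]])
#   boxes_demo = tf.constant([[[0., 0., 10., 10.], [1., 1., 11., 11.],
#                              [50., 50., 60., 60.], [0., 0., 0., 0.]]])
#   nms_scores, nms_boxes = sorted_non_max_suppression_padded(
#       scores_demo, boxes_demo, max_output_size=2, iou_threshold=0.5)
#   # nms_scores has shape [1, 2]; nms_boxes has shape [1, 2, 4].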
def encode_boxes(boxes, anchors, weights=None):
"""Encode boxes to targets.
Args:
boxes: a tensor whose last dimension is 4 representing the coordinates
of boxes in ymin, xmin, ymax, xmax order.
anchors: a tensor whose shape is the same as `boxes` representing the
coordinates of anchors in ymin, xmin, ymax, xmax order.
weights: None or a list of four float numbers used to scale coordinates.
Returns:
encoded_boxes: a tensor whose shape is the same as `boxes` representing the
encoded box targets.
"""
with tf.name_scope('encode_box'):
boxes = tf.cast(boxes, dtype=anchors.dtype)
y_min, x_min, y_max, x_max = tf.split(boxes, 4, axis=-1)
# y_min = boxes[..., 0:1]
# x_min = boxes[..., 1:2]
# y_max = boxes[..., 2:3]
# x_max = boxes[..., 3:4]
box_h = y_max - y_min + 1.0
box_w = x_max - x_min + 1.0
box_yc = y_min + 0.5 * box_h
box_xc = x_min + 0.5 * box_w
anchor_ymin, anchor_xmin, anchor_ymax, anchor_xmax = tf.split(anchors, 4, axis=-1)
# anchor_ymin = anchors[..., 0:1]
# anchor_xmin = anchors[..., 1:2]
# anchor_ymax = anchors[..., 2:3]
# anchor_xmax = anchors[..., 3:4]
anchor_h = anchor_ymax - anchor_ymin + 1.0
anchor_w = anchor_xmax - anchor_xmin + 1.0
anchor_yc = anchor_ymin + 0.5 * anchor_h
anchor_xc = anchor_xmin + 0.5 * anchor_w
encoded_dy = (box_yc - anchor_yc) / anchor_h
encoded_dx = (box_xc - anchor_xc) / anchor_w
encoded_dh = tf.math.log(box_h / anchor_h)
encoded_dw = tf.math.log(box_w / anchor_w)
if weights:
encoded_dy *= weights[0]
encoded_dx *= weights[1]
encoded_dh *= weights[2]
encoded_dw *= weights[3]
encoded_boxes = tf.concat([encoded_dy, encoded_dx, encoded_dh, encoded_dw], axis=-1)
return encoded_boxes
def decode_boxes(encoded_boxes, anchors, weights=None):
"""Decode boxes.
Args:
encoded_boxes: a tensor whose last dimension is 4 representing the
coordinates of encoded boxes in ymin, xmin, ymax, xmax order.
anchors: a tensor whose shape is the same as `encoded_boxes` representing the
coordinates of anchors in ymin, xmin, ymax, xmax order.
weights: None or a list of four float numbers used to scale coordinates.
Returns:
decoded_boxes: a tensor whose shape is the same as `encoded_boxes` representing
the decoded boxes.
"""
with tf.name_scope('decode_box'):
encoded_boxes = tf.cast(encoded_boxes, dtype=anchors.dtype)
dy, dx, dh, dw = tf.split(encoded_boxes, 4, axis=-1)
# dy = encoded_boxes[..., 0:1]
# dx = encoded_boxes[..., 1:2]
# dh = encoded_boxes[..., 2:3]
# dw = encoded_boxes[..., 3:4]
if weights:
dy /= weights[0]
dx /= weights[1]
dh /= weights[2]
dw /= weights[3]
dh = tf.minimum(dh, BBOX_XFORM_CLIP)
dw = tf.minimum(dw, BBOX_XFORM_CLIP)
anchor_ymin, anchor_xmin, anchor_ymax, anchor_xmax = tf.split(anchors, 4, axis=-1)
# anchor_ymin = anchors[..., 0:1]
# anchor_xmin = anchors[..., 1:2]
# anchor_ymax = anchors[..., 2:3]
# anchor_xmax = anchors[..., 3:4]
anchor_h = anchor_ymax - anchor_ymin + 1.0
anchor_w = anchor_xmax - anchor_xmin + 1.0
anchor_yc = anchor_ymin + 0.5 * anchor_h
anchor_xc = anchor_xmin + 0.5 * anchor_w
decoded_boxes_yc = dy * anchor_h + anchor_yc
decoded_boxes_xc = dx * anchor_w + anchor_xc
decoded_boxes_h = tf.exp(dh) * anchor_h
decoded_boxes_w = tf.exp(dw) * anchor_w
decoded_boxes_ymin = decoded_boxes_yc - 0.5 * decoded_boxes_h
decoded_boxes_xmin = decoded_boxes_xc - 0.5 * decoded_boxes_w
decoded_boxes_ymax = decoded_boxes_ymin + decoded_boxes_h - 1.0
decoded_boxes_xmax = decoded_boxes_xmin + decoded_boxes_w - 1.0
decoded_boxes = tf.concat(
[decoded_boxes_ymin, decoded_boxes_xmin, decoded_boxes_ymax, decoded_boxes_xmax],
axis=-1
)
return decoded_boxes
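# Round-trip sketch: decode_boxes inverts encode_boxes (up to the BBOX_XFORM_CLIP
# clamp on dh/dw), so encoding and then decoding against the same anchors
# recovers the original boxes up to floating-point error:
#   anchors_demo = tf.constant([[10., 10., 30., 30.]])
#   boxes_demo = tf.constant([[12., 8., 28., 32.]])
#   recovered = decode_boxes(encode_boxes(boxes_demo, anchors_demo), anchors_demo)
#   # recovered is numerically close to boxes_demo.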
def clip_boxes(boxes, height, width):
"""Clip boxes.
Args:
boxes: a tensor whose last dimension is 4 representing the coordinates
of boxes in ymin, xmin, ymax, xmax order.
height: an integer, a scalar, or a tensor such that all but the last dimension
are the same as `boxes`. The last dimension is 1. It represents the height
of the image.
width: an integer, a scalar, or a tensor such that all but the last dimension
are the same as `boxes`. The last dimension is 1. It represents the width
of the image.
Returns:
clipped_boxes: a tensor whose shape is the same as `boxes` representing the
clipped boxes.
"""
with tf.name_scope('clip_box'):
y_min, x_min, y_max, x_max = tf.split(boxes, 4, axis=-1)
# y_min = boxes[..., 0:1]
# x_min = boxes[..., 1:2]
# y_max = boxes[..., 2:3]
# x_max = boxes[..., 3:4]
height = tf.cast(height, dtype=boxes.dtype)
width = tf.cast(width, dtype=boxes.dtype)
clipped_y_min = tf.maximum(tf.minimum(y_min, height - 1.0), 0.0)
clipped_y_max = tf.maximum(tf.minimum(y_max, height - 1.0), 0.0)
clipped_x_min = tf.maximum(tf.minimum(x_min, width - 1.0), 0.0)
clipped_x_max = tf.maximum(tf.minimum(x_max, width - 1.0), 0.0)
clipped_boxes = tf.concat([clipped_y_min, clipped_x_min, clipped_y_max, clipped_x_max], axis=-1)
return clipped_boxes
def filter_boxes(boxes, scores, min_size, height, width, scale):
"""Filter out boxes that are too small.
Args:
boxes: a tensor whose last dimension is 4 representing the coordinates
of boxes in ymin, xmin, ymax, xmax order.
scores: a tensor such that all but the last dimension are the same as
`boxes`. The last dimension is 1. It represents the scores.
min_size: an integer specifying the minimal size.
height: an integer, a scalar, or a tensor such that all but the last dimension
are the same as `boxes`. The last dimension is 1. It represents the height
of the image.
width: an integer, a scalar, or a tensor such that all but the last dimension
are the same as `boxes`. The last dimension is 1. It represents the width
of the image.
scale: an integer, a scalar, or a tensor such that all but the last dimension
are the same as `boxes`. The last dimension is 1. It represents the scale
of the image.
Returns:
filtered_boxes: a tensor whose shape is the same as `boxes` representing the
filtered boxes.
filtered_scores: a tensor whose shape is the same as `scores` representing
the filtered scores.
"""
with tf.name_scope('filter_box'):
y_min, x_min, y_max, x_max = tf.split(boxes, 4, axis=-1)
# y_min = boxes[..., 0:1]
# x_min = boxes[..., 1:2]
# y_max = boxes[..., 2:3]
# x_max = boxes[..., 3:4]
h = y_max - y_min + 1.0
w = x_max - x_min + 1.0
yc = y_min + h / 2.0
xc = x_min + w / 2.0
height = tf.cast(height, dtype=boxes.dtype)
width = tf.cast(width, dtype=boxes.dtype)
scale = tf.cast(scale, dtype=boxes.dtype)
min_size = tf.cast(tf.maximum(min_size, 1), dtype=boxes.dtype)
size_mask = tf.logical_and(
tf.greater_equal(h, min_size * scale),
tf.greater_equal(w, min_size * scale)
)
center_mask = tf.logical_and(tf.less(yc, height), tf.less(xc, width))
selected_mask = tf.logical_and(size_mask, center_mask)
filtered_scores = tf.where(selected_mask, scores, tf.zeros_like(scores))
filtered_boxes = tf.cast(selected_mask, dtype=boxes.dtype) * boxes
return filtered_boxes, filtered_scores
def to_normalized_coordinates(boxes, height, width):
"""Converted absolute box coordinates to normalized ones.
Args:
boxes: a tensor whose last dimension is 4 representing the coordinates
of boxes in ymin, xmin, ymax, xmax order.
height: an integer, a scalar, or a tensor such that all but the last dimension
are the same as `boxes`. The last dimension is 1. It represents the height
of the image.
width: an integer, a scalar, or a tensor such that all but the last dimension
are the same as `boxes`. The last dimension is 1. It represents the width
of the image.
Returns:
normalized_boxes: a tensor whose shape is the same as `boxes` representing
the boxes in normalized coordinates.
"""
with tf.name_scope('normalize_box'):
height = tf.cast(height, dtype=boxes.dtype)
width = tf.cast(width, dtype=boxes.dtype)
y_min, x_min, y_max, x_max = tf.split(boxes, 4, axis=-1)
y_min = y_min / height
x_min = x_min / width
y_max = y_max / height
x_max = x_max / width
# y_min = boxes[..., 0:1] / height
# x_min = boxes[..., 1:2] / width
# y_max = boxes[..., 2:3] / height
# x_max = boxes[..., 3:4] / width
normalized_boxes = tf.concat([y_min, x_min, y_max, x_max], axis=-1)
return normalized_boxes
def to_absolute_coordinates(boxes, height, width):
"""Converted normalized box coordinates to absolute ones.
Args:
boxes: a tensor whose last dimension is 4 representing the coordinates
of boxes in ymin, xmin, ymax, xmax order.
height: an integer, a scalar, or a tensor such that all but the last dimension
are the same as `boxes`. The last dimension is 1. It represents the height
of the image.
width: an integer, a scalar, or a tensor such that all but the last dimension
are the same as `boxes`. The last dimension is 1. It represents the width
of the image.
Returns:
absolute_boxes: a tensor whose shape is the same as `boxes` representing the
boxes in absolute coordinates.
"""
with tf.name_scope('denormalize_box'):
height = tf.cast(height, dtype=boxes.dtype)
width = tf.cast(width, dtype=boxes.dtype)
y_min, x_min, y_max, x_max = tf.split(boxes, 4, axis=-1)
y_min = y_min * height
x_min = x_min * width
y_max = y_max * height
x_max = x_max * width
# y_min = boxes[..., 0:1] * height
# x_min = boxes[..., 1:2] * width
# y_max = boxes[..., 2:3] * height
# x_max = boxes[..., 3:4] * width
absolute_boxes = tf.concat([y_min, x_min, y_max, x_max], axis=-1)
return absolute_boxes
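# Worked example of the normalize/denormalize pair (values are illustrative):
#   boxes_demo = tf.constant([[10., 20., 110., 220.]])
#   norm = to_normalized_coordinates(boxes_demo, height=200, width=400)
#   # norm == [[0.05, 0.05, 0.55, 0.55]]
#   back = to_absolute_coordinates(norm, height=200, width=400)
#   # back recovers the original absolute coordinates.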
| DeepLearningExamples-master | TensorFlow2/Segmentation/MaskRCNN/mrcnn_tf2/utils/box_utils.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from dllogger import Backend
class LoggingBackend(Backend):
""" Simple DLLogger backend that uses python `logging` library. """
def __init__(self, verbosity, logger_name='dllogger', level=logging.INFO):
""" Creates backend for dllogger that uses `logging` library.
Args:
verbosity: DLLogger verbosity.
logger_name: Name for `logging.Logger`.
level: Logging level that will be passed to `logging.Logger.log`.
"""
super().__init__(verbosity)
self._logger = logging.getLogger(logger_name)
self._level = level
def log(self, timestamp, elapsedtime, step, data):
self._logger.log(
level=self._level,
msg='{step} {data}'.format(
step=step,
data=', '.join(f'{k}: {v}' for k, v in data.items())
)
)
def metadata(self, timestamp, elapsedtime, metric, metadata):
""" For simplicity this logger ignores metadata. """
def flush(self):
pass
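# Minimal usage sketch (assumes the standard dllogger entry points `init`, `log`
# and `Verbosity`; the logged metric names are illustrative):
#   import dllogger
#   dllogger.init(backends=[LoggingBackend(verbosity=dllogger.Verbosity.DEFAULT)])
#   dllogger.log(step=(0, 10), data={'loss': 0.42, 'learning_rate': 1e-3})
#   # Each call is forwarded to logging.getLogger('dllogger') at INFO level.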
| DeepLearningExamples-master | TensorFlow2/Segmentation/MaskRCNN/mrcnn_tf2/utils/dllogger.py |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""COCO-style evaluation metrics.
Implements the interface of COCO API and metric_fn in tf.TPUEstimator.
COCO API: github.com/cocodataset/cocoapi/
"""
from __future__ import absolute_import, division, print_function
import atexit
import copy
import logging
import tempfile
import cv2
import numpy as np
import pycocotools.mask as maskUtils
import tensorflow as tf
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
class MaskCOCO(COCO):
"""COCO object for mask evaluation.
"""
def reset(self, dataset):
"""Reset the dataset and groundtruth data index in this object.
Args:
dataset: dict of groundtruth data. It should have a similar structure to the
COCO groundtruth JSON file. It must contain three keys: {'images',
'annotations', 'categories'}.
'images': list of image information dictionary. Required keys: 'id',
'width' and 'height'.
'annotations': list of dict. Bounding boxes and segmentations related
information. Required keys: {'id', 'image_id', 'category_id', 'bbox',
'iscrowd', 'area', 'segmentation'}.
'categories': list of dict of the category information.
Required key: 'id'.
Refer to http://cocodataset.org/#format-data for more details.
Raises:
AttributeError: If the dataset is empty or not a dict.
"""
assert dataset, 'Groundtruth should not be empty.'
assert isinstance(dataset,
dict), 'annotation file format {} not supported'.format(
type(dataset))
self.anns, self.cats, self.imgs = dict(), dict(), dict()
self.dataset = copy.deepcopy(dataset)
self.createIndex()
def loadRes(self, detection_results, include_mask, is_image_mask=False):
"""Load result file and return a result api object.
Args:
detection_results: a dictionary containing predictions results.
include_mask: a boolean, whether to include mask in detection results.
is_image_mask: a boolean indicating whether the predicted mask is a whole-image mask.
Returns:
res: result MaskCOCO api object
"""
res = MaskCOCO()
res.dataset['images'] = [img for img in self.dataset['images']]
logging.info('Loading and preparing results...')
predictions = self.load_predictions(
detection_results,
include_mask=include_mask,
is_image_mask=is_image_mask)
assert isinstance(predictions, list), 'results is not an array of objects'
if predictions:
image_ids = [pred['image_id'] for pred in predictions]
assert set(image_ids) == (set(image_ids) & set(self.getImgIds())), \
'Results do not correspond to current coco set'
if (predictions and 'bbox' in predictions[0] and predictions[0]['bbox']):
res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
for idx, pred in enumerate(predictions):
bb = pred['bbox']
x1, x2, y1, y2 = [bb[0], bb[0] + bb[2], bb[1], bb[1] + bb[3]]
if 'segmentation' not in pred:
pred['segmentation'] = [[x1, y1, x1, y2, x2, y2, x2, y1]]
pred['area'] = bb[2] * bb[3]
pred['id'] = idx + 1
pred['iscrowd'] = 0
elif 'segmentation' in predictions[0]:
res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
for idx, pred in enumerate(predictions):
# now only support compressed RLE format as segmentation results
pred['area'] = maskUtils.area(pred['segmentation'])
if 'bbox' not in pred:
pred['bbox'] = maskUtils.toBbox(pred['segmentation'])
pred['id'] = idx + 1
pred['iscrowd'] = 0
res.dataset['annotations'] = predictions
res.createIndex()
return res
def load_predictions(self,
detection_results,
include_mask,
is_image_mask=False):
"""Create prediction dictionary list from detection and mask results.
Args:
detection_results: a dictionary containing numpy arrays which corresponds
to prediction results.
include_mask: a boolean, whether to include mask in detection results.
is_image_mask: a boolean indicating whether the predicted mask is a whole-image mask.
Returns:
a list of dictionary including different prediction results from the model
in numpy form.
"""
predictions = []
num_detections = detection_results['detection_scores'].size
current_index = 0
for i, image_id in enumerate(detection_results['source_ids']):
if include_mask:
box_coordinates_in_image = detection_results['detection_boxes'][i]
segments = generate_segmentation_from_masks(
detection_results['detection_masks'][i],
box_coordinates_in_image,
int(detection_results['image_info'][i][3]),
int(detection_results['image_info'][i][4]),
is_image_mask=is_image_mask
)
# Convert the mask to uint8 and then to fortranarray for RLE encoder.
encoded_masks = [
maskUtils.encode(np.asfortranarray(instance_mask.astype(np.uint8)))
for instance_mask in segments
]
for box_index in range(int(detection_results['num_detections'][i])):
if current_index % 1000 == 0:
logging.info('{}/{}'.format(current_index, num_detections))
current_index += 1
prediction = {
'image_id': int(image_id),
'bbox': detection_results['detection_boxes'][i][box_index].tolist(),
'score': detection_results['detection_scores'][i][box_index],
'category_id': int(
detection_results['detection_classes'][i][box_index]),
}
if include_mask:
prediction['segmentation'] = encoded_masks[box_index]
predictions.append(prediction)
return predictions
def generate_segmentation_from_masks(masks,
detected_boxes,
image_height,
image_width,
is_image_mask=False):
"""Generates segmentation result from instance masks.
Args:
masks: a numpy array of shape [N, mask_height, mask_width] representing the
instance masks w.r.t. the `detected_boxes`.
detected_boxes: a numpy array of shape [N, 4] representing the reference
bounding boxes.
image_height: an integer representing the height of the image.
image_width: an integer representing the width of the image.
is_image_mask: bool. True: input masks are whole-image masks. False: input
masks are bounding-box level masks.
Returns:
segms: a numpy array of shape [N, image_height, image_width] representing
the instance masks *pasted* on the image canvas.
"""
def expand_boxes(boxes, scale):
"""Expands an array of boxes by a given scale."""
# Reference: https://github.com/facebookresearch/Detectron/blob/master/detectron/utils/boxes.py#L227
# The `boxes` in the reference implementation is in [x1, y1, x2, y2] form,
# whereas `boxes` here is in [x1, y1, w, h] form
w_half = boxes[:, 2] * .5
h_half = boxes[:, 3] * .5
x_c = boxes[:, 0] + w_half
y_c = boxes[:, 1] + h_half
w_half *= scale
h_half *= scale
boxes_exp = np.zeros(boxes.shape)
boxes_exp[:, 0] = x_c - w_half
boxes_exp[:, 2] = x_c + w_half
boxes_exp[:, 1] = y_c - h_half
boxes_exp[:, 3] = y_c + h_half
return boxes_exp
# Reference: https://github.com/facebookresearch/Detectron/blob/master/detectron/core/test.py#L812
# To work around an issue with cv2.resize (it seems to automatically pad
# with repeated border values), we manually zero-pad the masks by 1 pixel
# prior to resizing back to the original image resolution. This prevents
# "top hat" artifacts. We therefore need to expand the reference boxes by an
# appropriate factor.
_, mask_height, mask_width = masks.shape
scale = max((mask_width + 2.0) / mask_width,
(mask_height + 2.0) / mask_height)
ref_boxes = expand_boxes(detected_boxes, scale)
ref_boxes = ref_boxes.astype(np.int32)
padded_mask = np.zeros((mask_height + 2, mask_width + 2), dtype=np.float32)
segms = []
for mask_ind, mask in enumerate(masks):
im_mask = np.zeros((image_height, image_width), dtype=np.uint8)
if is_image_mask:
# Process whole-image masks.
im_mask[:, :] = mask[:, :]
else:
# Process mask inside bounding boxes.
padded_mask[1:-1, 1:-1] = mask[:, :]
ref_box = ref_boxes[mask_ind, :]
w = ref_box[2] - ref_box[0] + 1
h = ref_box[3] - ref_box[1] + 1
w = np.maximum(w, 1)
h = np.maximum(h, 1)
mask = cv2.resize(padded_mask, (w, h))
mask = np.array(mask > 0.5, dtype=np.uint8)
x_0 = max(ref_box[0], 0)
x_1 = min(ref_box[2] + 1, image_width)
y_0 = max(ref_box[1], 0)
y_1 = min(ref_box[3] + 1, image_height)
im_mask[y_0:y_1, x_0:x_1] = mask[(y_0 - ref_box[1]):(y_1 - ref_box[1]), (
x_0 - ref_box[
0]):(x_1 - ref_box[
0])]
segms.append(im_mask)
segms = np.array(segms)
assert masks.shape[0] == segms.shape[0]
return segms
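# Minimal usage sketch (shapes and values are illustrative; boxes are in
# [x, y, w, h] form as produced by the detection head):
#   masks_demo = np.random.rand(1, 28, 28)
#   boxes_demo = np.array([[10.0, 20.0, 50.0, 40.0]])
#   segms_demo = generate_segmentation_from_masks(
#       masks_demo, boxes_demo, image_height=480, image_width=640)
#   # segms_demo has shape [1, 480, 640] with binary (0/1) values.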
class EvaluationMetric:
"""COCO evaluation metric class."""
def __init__(self, filename, include_mask):
"""Constructs COCO evaluation class.
The class provides the interface to metrics_fn in TPUEstimator. The
_evaluate() loads a JSON file in COCO annotation format as the
groundtruths and runs COCO evaluation.
Args:
filename: Ground truth JSON file name. If filename is None, use
groundtruth data passed from the dataloader for evaluation.
include_mask: boolean to indicate whether or not to include mask eval.
"""
if filename:
if filename.startswith('gs://'):
_, local_val_json = tempfile.mkstemp(suffix='.json')
tf.io.gfile.remove(local_val_json)
tf.io.gfile.copy(filename, local_val_json)
atexit.register(tf.io.gfile.remove, local_val_json)
else:
local_val_json = filename
self.coco_gt = MaskCOCO(local_val_json)
self.filename = filename
self.metric_names = ['AP', 'AP50', 'AP75', 'APs', 'APm', 'APl', 'ARmax1',
'ARmax10', 'ARmax100', 'ARs', 'ARm', 'ARl']
self._include_mask = include_mask
if self._include_mask:
mask_metric_names = ['mask_' + x for x in self.metric_names]
self.metric_names.extend(mask_metric_names)
self._reset()
def _reset(self):
"""Reset COCO API object."""
if self.filename is None and not hasattr(self, 'coco_gt'):
self.coco_gt = MaskCOCO()
def predict_metric_fn(self,
predictions,
is_predict_image_mask=False,
groundtruth_data=None):
"""Generates COCO metrics."""
image_ids = list(set(predictions['source_ids']))
if groundtruth_data is not None:
self.coco_gt.reset(groundtruth_data)
coco_dt = self.coco_gt.loadRes(
predictions, self._include_mask, is_image_mask=is_predict_image_mask)
coco_eval = COCOeval(self.coco_gt, coco_dt, iouType='bbox')
coco_eval.params.imgIds = image_ids
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()
coco_metrics = coco_eval.stats
if self._include_mask:
# Create another object for instance segmentation metric evaluation.
mcoco_eval = COCOeval(self.coco_gt, coco_dt, iouType='segm')
mcoco_eval.params.imgIds = image_ids
mcoco_eval.evaluate()
mcoco_eval.accumulate()
mcoco_eval.summarize()
mask_coco_metrics = mcoco_eval.stats
if self._include_mask:
metrics = np.hstack((coco_metrics, mask_coco_metrics))
else:
metrics = coco_metrics
# clean up after evaluation is done.
self._reset()
metrics = metrics.astype(np.float32)
metrics_dict = {}
for i, name in enumerate(self.metric_names):
metrics_dict[name] = metrics[i]
return metrics_dict
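# Minimal usage sketch (assumes `predictions` follows the key layout produced by
# the evaluation loop: 'source_ids', 'detection_boxes', 'detection_scores',
# 'detection_classes', 'num_detections', plus 'detection_masks' and 'image_info'
# when include_mask=True; the annotation path is illustrative):
#   metric = EvaluationMetric(filename='annotations/instances_val2017.json',
#                             include_mask=False)
#   metrics_dict = metric.predict_metric_fn(predictions)
#   print(metrics_dict['AP'], metrics_dict['AP50'])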
| DeepLearningExamples-master | TensorFlow2/Segmentation/MaskRCNN/mrcnn_tf2/utils/coco_metric.py |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Util functions to manipulate masks."""
from __future__ import absolute_import, division, print_function
import numpy as np
import pycocotools.mask as coco_mask
POLYGON_PAD_VALUE = -3
POLYGON_SEPARATOR = -1
MASK_SEPARATOR = -2
def _np_array_split(a, v):
"""Split numpy array by separator value.
Args:
a: 1-D numpy.array.
v: number. Separator value. e.g -1.
Returns:
2-D list of clean separated arrays.
Example:
a = [1, 2, 3, 4, -1, 5, 6, 7, 8]
b = _np_array_split(a, -1)
# Output: b = [[1, 2, 3, 4], [5, 6, 7, 8]]
"""
a = np.array(a)
arrs = np.split(a, np.where(a[:] == v)[0])
return [e if (len(e) <= 0 or e[0] != v) else e[1:] for e in arrs]
def _unflat_polygons(x):
"""Unflats/recovers 1-d padded polygons to 3-d polygon list.
Args:
x: numpay.array. shape [num_elements, 1], num_elements = num_obj *
num_vertex + padding.
Returns:
A list of three dimensions: [#obj, #polygon, #vertex]
"""
num_segs = _np_array_split(x, MASK_SEPARATOR)
polygons = []
for s in num_segs:
polygons.append(_np_array_split(s, POLYGON_SEPARATOR))
polygons = [[polygon.tolist() for polygon in obj] for obj in polygons]
return polygons
def _denormalize_to_coco_bbox(bbox, height, width):
"""Denormalize bounding box.
Args:
bbox: numpy.array[float]. Normalized bounding box. Format: ['ymin', 'xmin',
'ymax', 'xmax'].
height: int. image height.
width: int. image width.
Returns:
[x, y, width, height]
"""
y1, x1, y2, x2 = bbox
y1 *= height
x1 *= width
y2 *= height
x2 *= width
box_height = y2 - y1
box_width = x2 - x1
return [float(x1), float(y1), float(box_width), float(box_height)]
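# Worked example: a normalized box [0.25, 0.5, 0.75, 1.0] on a 100x200 image
# denormalizes to [x, y, width, height] = [100.0, 25.0, 100.0, 50.0]:
#   _denormalize_to_coco_bbox([0.25, 0.5, 0.75, 1.0], height=100, width=200)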
def _extract_image_info(prediction, b):
return {
'id': int(prediction['source_ids'][b]),
'width': int(prediction['width'][b]),
'height': int(prediction['height'][b]),
}
def _extract_bbox_annotation(prediction, b, obj_i):
"""Constructs COCO format bounding box annotation."""
height = prediction['height'][b]
width = prediction['width'][b]
bbox = _denormalize_to_coco_bbox(
prediction['groundtruth_boxes'][b][obj_i, :], height, width)
if 'groundtruth_area' in prediction:
area = float(prediction['groundtruth_area'][b][obj_i])
else:
# Using the box area to replace the polygon area. This value will not affect
# real evaluation but may fail the unit test.
area = bbox[2] * bbox[3]
annotation = {
'id': b * 1000 + obj_i,  # placeholder for the annotation id.
'image_id': int(prediction['source_ids'][b]), # source_id,
'category_id': int(prediction['groundtruth_classes'][b][obj_i]),
'bbox': bbox,
'iscrowd': int(prediction['groundtruth_is_crowd'][b][obj_i]),
'area': area,
'segmentation': [],
}
return annotation
def _extract_polygon_info(prediction, polygons, b, obj_i):
"""Constructs 'area' and 'segmentation' fields.
Args:
prediction: dict[str, numpy.array]. Model outputs. The value dimension is
[batch_size, #objects, #features, ...]
polygons: list[list[list]]. Dimensions are [#objects, #polygon, #vertex].
b: batch index.
obj_i: object index.
Returns:
dict[str, numpy.array]. COCO format annotation with 'area' and
'segmentation'.
"""
annotation = {}
if 'groundtruth_area' in prediction:
groundtruth_area = float(prediction['groundtruth_area'][b][obj_i])
else:
height = prediction['height'][b]
width = prediction['width'][b]
rles = coco_mask.frPyObjects(polygons[obj_i], height, width)
groundtruth_area = coco_mask.area(rles)
annotation['area'] = groundtruth_area
annotation['segmentation'] = polygons[obj_i]
# Add dummy polygon to is_crowd instance.
if not annotation['segmentation'][0]:
# Adds a dummy polygon in case there is no segmentation.
# Note that this could affect eval number in a very tiny amount since
# for the instance without masks, it creates a fake single pixel mask
# in the center of the box.
height = prediction['height'][b]
width = prediction['width'][b]
bbox = _denormalize_to_coco_bbox(
prediction['groundtruth_boxes'][b][obj_i, :], height, width)
xcenter = bbox[0] + bbox[2] / 2.0
ycenter = bbox[1] + bbox[3] / 2.0
annotation['segmentation'] = [[
xcenter, ycenter, xcenter, ycenter, xcenter, ycenter, xcenter, ycenter
]]
return annotation
def _extract_categories(annotations):
"""Extract categories from annotations."""
categories = {}
for anno in annotations:
category_id = int(anno['category_id'])
categories[category_id] = {'id': category_id}
return list(categories.values())
def extract_coco_groundtruth(prediction, include_mask=False):
"""Extract COCO format groundtruth.
Args:
prediction: dictionary of batch of prediction result. the first dimension
each element is the batch.
include_mask: True for including masks in the output annotations.
Returns:
Tuple of (images, annotations).
images: list[dict].Required keys: 'id', 'width' and 'height'. The values are
image id, width and height.
annotations: list[dict]. Required keys: {'id', 'source_ids', 'category_id',
'bbox', 'iscrowd'} when include_mask=False. If include_mask=True, also
required {'area', 'segmentation'}. The 'id' value is the annotation id
and can be any **positive** number (>=1).
Refer to http://cocodataset.org/#format-data for more details.
Raises:
ValueError: If any groundtruth fields is missing.
"""
required_fields = [
'source_ids', 'width', 'height', 'num_groundtruth_labels',
'groundtruth_boxes', 'groundtruth_classes'
]
if include_mask:
required_fields += ['groundtruth_polygons', 'groundtruth_area']
for key in required_fields:
if key not in prediction.keys():
raise ValueError('Missing groundtruth field: "{}" keys: {}'.format(
key, prediction.keys()))
images = []
annotations = []
for b in range(prediction['source_ids'].shape[0]):
# Constructs image info.
image = _extract_image_info(prediction, b)
images.append(image)
if include_mask:
flatten_padded_polygons = prediction['groundtruth_polygons'][b]
flatten_polygons = np.delete(
flatten_padded_polygons,
np.where(flatten_padded_polygons[:] == POLYGON_PAD_VALUE)[0])
polygons = _unflat_polygons(flatten_polygons)
# Constructs annotations.
num_labels = prediction['num_groundtruth_labels'][b]
for obj_i in range(num_labels):
annotation = _extract_bbox_annotation(prediction, b, obj_i)
if include_mask:
polygon_info = _extract_polygon_info(prediction, polygons, b, obj_i)
annotation.update(polygon_info)
annotations.append(annotation)
return images, annotations
def create_coco_format_dataset(images,
annotations,
regenerate_annotation_id=True):
"""Creates COCO format dataset with COCO format images and annotations."""
if regenerate_annotation_id:
for i in range(len(annotations)):
# WARNING: The annotation id must be positive.
annotations[i]['id'] = i + 1
categories = _extract_categories(annotations)
dataset = {
'images': images,
'annotations': annotations,
'categories': categories,
}
return dataset
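# Minimal usage sketch (assumes `prediction` carries the groundtruth fields listed
# in extract_coco_groundtruth above):
#   images, annotations = extract_coco_groundtruth(prediction, include_mask=False)
#   dataset = create_coco_format_dataset(images, annotations)
#   # `dataset` can then be passed to MaskCOCO.reset() for COCO-style evaluation.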
| DeepLearningExamples-master | TensorFlow2/Segmentation/MaskRCNN/mrcnn_tf2/utils/coco_utils.py |
DeepLearningExamples-master | TensorFlow2/Segmentation/MaskRCNN/mrcnn_tf2/model/__init__.py |
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mask-RCNN anchor definition."""
from __future__ import absolute_import, division, print_function
from collections import OrderedDict
import numpy as np
import tensorflow as tf
from mrcnn_tf2.object_detection import argmax_matcher, faster_rcnn_box_coder, region_similarity_calculator, \
box_list, balanced_positive_negative_sampler, target_assigner
def _generate_anchor_configs(min_level, max_level, num_scales, aspect_ratios):
"""Generates mapping from output level to a list of anchor configurations.
A configuration is a tuple of (num_anchors, scale, aspect_ratio).
Args:
min_level: integer number of minimum level of the output feature pyramid.
max_level: integer number of maximum level of the output feature pyramid.
num_scales: integer number representing intermediate scales added
on each level. For instance, num_scales=2 adds two additional
anchor scales [2^0, 2^0.5] on each level.
aspect_ratios: list of tuples representing the aspect ratio anchors added
on each level. For instance, aspect_ratios =
[(1, 1), (1.4, 0.7), (0.7, 1.4)] adds three anchors on each level.
Returns:
anchor_configs: a dictionary with keys as the levels of anchors and
values as a list of anchor configuration.
"""
anchor_configs = {}
for level in range(min_level, max_level + 1):
anchor_configs[level] = []
for scale_octave in range(num_scales):
for aspect in aspect_ratios:
anchor_configs[level].append(
(2 ** level, scale_octave / float(num_scales), aspect))
return anchor_configs
def _generate_anchor_boxes(image_size, anchor_scale, anchor_configs):
"""Generates multiscale anchor boxes.
Args:
image_size: integer number of input image size. The input image has the
same dimension for width and height. The image_size should be divisible by
the largest feature stride 2^max_level.
anchor_scale: float number representing the scale of size of the base
anchor to the feature stride 2^level.
anchor_configs: a dictionary with keys as the levels of anchors and
values as a list of anchor configuration.
Returns:
anchor_boxes: a numpy array with shape [N, 4], which stacks anchors on all
feature levels.
Raises:
ValueError: input size must be a multiple of the largest feature stride.
"""
boxes_all = []
for _, configs in anchor_configs.items():
boxes_level = []
for config in configs:
stride, octave_scale, aspect = config
if image_size[0] % stride != 0 or image_size[1] % stride != 0:
raise ValueError('input size must be divisible by the stride.')
base_anchor_size = anchor_scale * stride * 2 ** octave_scale
anchor_size_x_2 = base_anchor_size * aspect[0] / 2.0
anchor_size_y_2 = base_anchor_size * aspect[1] / 2.0
x = np.arange(stride / 2, image_size[1], stride)
y = np.arange(stride / 2, image_size[0], stride)
xv, yv = np.meshgrid(x, y)
xv = xv.reshape(-1)
yv = yv.reshape(-1)
boxes = np.vstack((yv - anchor_size_y_2, xv - anchor_size_x_2,
yv + anchor_size_y_2, xv + anchor_size_x_2))
boxes = np.swapaxes(boxes, 0, 1)
boxes_level.append(np.expand_dims(boxes, axis=1))
# concat anchors on the same level to the reshape NxAx4
boxes_level = np.concatenate(boxes_level, axis=1)
boxes_all.append(boxes_level.reshape([-1, 4]))
anchor_boxes = np.vstack(boxes_all)
return anchor_boxes
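# Worked example: with image_size=(512, 512), levels 2..6, num_scales=1 and three
# aspect ratios, level l contributes (512 / 2**l)**2 * 3 anchors, i.e.
# 49152 + 12288 + 3072 + 768 + 192 = 65472 rows in the returned [N, 4] array.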
class Anchors:
"""Mask-RCNN Anchors class."""
def __init__(self, min_level, max_level, num_scales, aspect_ratios, anchor_scale, image_size):
"""Constructs multiscale Mask-RCNN anchors.
Args:
min_level: integer number of minimum level of the output feature pyramid.
max_level: integer number of maximum level of the output feature pyramid.
num_scales: integer number representing intermediate scales added
on each level. For instance, num_scales=2 adds two additional
anchor scales [2^0, 2^0.5] on each level.
aspect_ratios: list of tuples representing the aspect ratio anchors added
on each level. For instance, aspect_ratios =
[(1, 1), (1.4, 0.7), (0.7, 1.4)] adds three anchors on each level.
anchor_scale: float number representing the scale of size of the base
anchor to the feature stride 2^level.
image_size: integer number of input image size. The input image has the
same dimension for width and height. The image_size should be divisible by
the largest feature stride 2^max_level.
"""
self.min_level = min_level
self.max_level = max_level
self.num_scales = num_scales
self.aspect_ratios = aspect_ratios
self.anchor_scale = anchor_scale
self.image_size = image_size
self.config = self._generate_configs()
self.boxes = self._generate_boxes()
def _generate_configs(self):
"""Generate configurations of anchor boxes."""
return _generate_anchor_configs(self.min_level, self.max_level,
self.num_scales, self.aspect_ratios)
def _generate_boxes(self):
"""Generates multiscale anchor boxes."""
boxes = _generate_anchor_boxes(self.image_size, self.anchor_scale,
self.config)
boxes = tf.convert_to_tensor(value=boxes, dtype=tf.float32)
return boxes
def get_anchors_per_location(self):
return self.num_scales * len(self.aspect_ratios)
def get_unpacked_boxes(self):
return self.unpack_labels(self.boxes)
def unpack_labels(self, labels):
"""Unpacks an array of labels into multiscales labels."""
labels_unpacked = OrderedDict()
count = 0
for level in range(self.min_level, self.max_level + 1):
feat_size0 = int(self.image_size[0] / 2 ** level)
feat_size1 = int(self.image_size[1] / 2 ** level)
steps = feat_size0 * feat_size1 * self.get_anchors_per_location()
indices = tf.range(count, count + steps)
count += steps
labels_unpacked[level] = tf.reshape(
tf.gather(labels, indices), [feat_size0, feat_size1, -1])
return labels_unpacked
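# Minimal usage sketch (parameter values are illustrative):
#   all_anchors = Anchors(min_level=2, max_level=6, num_scales=1,
#                         aspect_ratios=[(1.0, 1.0), (1.4, 0.7), (0.7, 1.4)],
#                         anchor_scale=8.0, image_size=(832, 1344))
#   all_anchors.boxes                  # [N, 4] anchors stacked across all levels
#   all_anchors.get_unpacked_boxes()   # dict: level -> [H_l, W_l, anchors_per_loc * 4]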
class AnchorLabeler:
"""Labeler for multiscale anchor boxes."""
def __init__(self, anchors, num_classes, match_threshold=0.7,
unmatched_threshold=0.3, rpn_batch_size_per_im=256,
rpn_fg_fraction=0.5):
"""Constructs anchor labeler to assign labels to anchors.
Args:
anchors: an instance of class Anchors.
num_classes: integer number representing number of classes in the dataset.
match_threshold: a float number between 0 and 1 representing the
lower-bound threshold to assign positive labels for anchors. An anchor
with a score over the threshold is labeled positive.
unmatched_threshold: a float number between 0 and 1 representing the
upper-bound threshold to assign negative labels for anchors. An anchor
with a score below the threshold is labeled negative.
rpn_batch_size_per_im: an integer number that represents the number of
sampled anchors per image in the first stage (region proposal network).
rpn_fg_fraction: a float number between 0 and 1 representing the fraction
of positive anchors (foreground) in the first stage.
"""
similarity_calc = region_similarity_calculator.IouSimilarity()
matcher = argmax_matcher.ArgMaxMatcher(
match_threshold,
unmatched_threshold=unmatched_threshold,
negatives_lower_than_unmatched=True,
force_match_for_each_row=True)
box_coder = faster_rcnn_box_coder.FasterRcnnBoxCoder()
self._target_assigner = target_assigner.TargetAssigner(
similarity_calc, matcher, box_coder)
self._anchors = anchors
self._match_threshold = match_threshold
self._unmatched_threshold = unmatched_threshold
self._rpn_batch_size_per_im = rpn_batch_size_per_im
self._rpn_fg_fraction = rpn_fg_fraction
self._num_classes = num_classes
def _get_rpn_samples(self, match_results):
"""Computes anchor labels.
This function performs subsampling for foreground (fg) and background (bg)
anchors.
Args:
match_results: An integer tensor with shape [N] representing the
matching results of anchors. (1) match_results[i]>=0,
meaning that column i is matched with row match_results[i].
(2) match_results[i]=-1, meaning that column i is not matched.
(3) match_results[i]=-2, meaning that column i is ignored.
Returns:
score_targets: an integer tensor with a shape of [N].
(1) score_targets[i]=1, the anchor is a positive sample.
(2) score_targets[i]=0, the anchor is a negative sample.
(3) score_targets[i]=-1, the anchor should be ignored (don't care).
"""
sampler = (
balanced_positive_negative_sampler.BalancedPositiveNegativeSampler(
positive_fraction=self._rpn_fg_fraction, is_static=False))
# indicator includes both positive and negative labels.
# labels includes only positives labels.
# positives = indicator & labels.
# negatives = indicator & !labels.
# ignore = !indicator.
indicator = tf.greater(match_results, -2)
labels = tf.greater(match_results, -1)
samples = sampler.subsample(
indicator, self._rpn_batch_size_per_im, labels)
positive_labels = tf.where(
tf.logical_and(samples, labels),
tf.constant(2, dtype=tf.int32, shape=match_results.shape),
tf.constant(0, dtype=tf.int32, shape=match_results.shape))
negative_labels = tf.where(
tf.logical_and(samples, tf.logical_not(labels)),
tf.constant(1, dtype=tf.int32, shape=match_results.shape),
tf.constant(0, dtype=tf.int32, shape=match_results.shape))
ignore_labels = tf.fill(match_results.shape, -1)
return (ignore_labels + positive_labels + negative_labels,
positive_labels, negative_labels)
def label_anchors(self, gt_boxes, gt_labels):
"""Labels anchors with ground truth inputs.
Args:
gt_boxes: A float tensor with shape [N, 4] representing groundtruth boxes.
For each row, it stores [y0, x0, y1, x1] for four corners of a box.
gt_labels: An integer tensor with shape [N, 1] representing groundtruth
classes.
Returns:
score_targets_dict: ordered dictionary with keys
[min_level, min_level+1, ..., max_level]. The values are tensor with
shape [height_l, width_l, num_anchors]. The height_l and width_l
represent the dimension of class logits at l-th level.
box_targets_dict: ordered dictionary with keys
[min_level, min_level+1, ..., max_level]. The values are tensor with
shape [height_l, width_l, num_anchors * 4]. The height_l and
width_l represent the dimension of bounding box regression output at
l-th level.
"""
gt_box_list = box_list.BoxList(gt_boxes)
anchor_box_list = box_list.BoxList(self._anchors.boxes)
# cls_targets, cls_weights, box_weights are not used
_, _, box_targets, _, matches = self._target_assigner.assign(
anchor_box_list, gt_box_list, gt_labels)
# score_targets contains the subsampled positive and negative anchors.
score_targets, _, _ = self._get_rpn_samples(matches.match_results)
# Unpack labels.
score_targets_dict = self._anchors.unpack_labels(score_targets)
box_targets_dict = self._anchors.unpack_labels(box_targets)
return score_targets_dict, box_targets_dict
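# Minimal usage sketch (continues the Anchors example; groundtruth shapes are
# illustrative):
#   labeler = AnchorLabeler(all_anchors, num_classes=91)
#   score_targets, box_targets = labeler.label_anchors(gt_boxes, gt_labels)
#   # Both returns are OrderedDicts keyed by pyramid level.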
| DeepLearningExamples-master | TensorFlow2/Segmentation/MaskRCNN/mrcnn_tf2/model/anchors.py |
import tensorflow as tf
from mrcnn_tf2.model import anchors
from mrcnn_tf2.model.losses import MaskRCNNLoss, FastRCNNLoss, RPNLoss
from mrcnn_tf2.model.models.fpn import FPNNetwork
from mrcnn_tf2.model.models.heads import RPNHead, BoxHead, MaskHead
from mrcnn_tf2.model.models.resnet50 import ResNet50
from mrcnn_tf2.ops import roi_ops, spatial_transform_ops, postprocess_ops, training_ops
class MaskRCNN(tf.keras.Model):
def __init__(self, params, name='mrcnn', trainable=True, *args, **kwargs):
super().__init__(name=name, trainable=trainable, *args, **kwargs)
self._params = params
self.backbone = ResNet50()
self.fpn = FPNNetwork(
min_level=self._params.min_level,
max_level=self._params.max_level,
trainable=trainable
)
self.rpn_head = RPNHead(
name="rpn_head",
num_anchors=len(self._params.aspect_ratios * self._params.num_scales),
trainable=trainable
)
self.box_head = BoxHead(
num_classes=self._params.num_classes,
mlp_head_dim=self._params.fast_rcnn_mlp_head_dim,
trainable=trainable
)
self.mask_head = MaskHead(
num_classes=self._params.num_classes,
mrcnn_resolution=self._params.mrcnn_resolution,
trainable=trainable,
name="mask_head"
)
self.mask_rcnn_loss = MaskRCNNLoss()
self.fast_rcnn_loss = FastRCNNLoss(
num_classes=self._params.num_classes
)
self.rpn_loss = RPNLoss(
batch_size=self._params.train_batch_size,
rpn_batch_size_per_im=self._params.rpn_batch_size_per_im,
min_level=self._params.min_level,
max_level=self._params.max_level
)
def call(self, inputs, training=None, mask=None):
batch_size, image_height, image_width, _ = inputs['images'].get_shape().as_list()
if 'source_ids' not in inputs:
inputs['source_ids'] = -1 * tf.ones([batch_size], dtype=tf.float32)
outputs = dict(inputs)
all_anchors = anchors.Anchors(self._params.min_level, self._params.max_level,
self._params.num_scales, self._params.aspect_ratios,
self._params.anchor_scale,
(image_height, image_width))
backbone_feats = self.backbone(inputs['images'], training=training)
fpn_feats = self.fpn(backbone_feats, training=training)
outputs.update({'fpn_features': fpn_feats})
def rpn_head_fn(features, min_level=2, max_level=6):
"""Region Proposal Network (RPN) for Mask-RCNN."""
scores_outputs = dict()
box_outputs = dict()
for level in range(min_level, max_level + 1):
scores_outputs[level], box_outputs[level] = self.rpn_head(features[level], training=training)
return scores_outputs, box_outputs
rpn_score_outputs, rpn_box_outputs = rpn_head_fn(
features=fpn_feats,
min_level=self._params.min_level,
max_level=self._params.max_level
)
if training:
rpn_pre_nms_topn = self._params.train_rpn_pre_nms_topn
rpn_post_nms_topn = self._params.train_rpn_post_nms_topn
rpn_nms_threshold = self._params.train_rpn_nms_threshold
else:
rpn_pre_nms_topn = self._params.test_rpn_pre_nms_topn
rpn_post_nms_topn = self._params.test_rpn_post_nms_topn
rpn_nms_threshold = self._params.test_rpn_nms_thresh
rpn_box_scores, rpn_box_rois = roi_ops.multilevel_propose_rois(
scores_outputs=rpn_score_outputs,
box_outputs=rpn_box_outputs,
all_anchors=all_anchors,
image_info=inputs['image_info'],
rpn_pre_nms_topn=rpn_pre_nms_topn,
rpn_post_nms_topn=rpn_post_nms_topn,
rpn_nms_threshold=rpn_nms_threshold,
rpn_min_size=self._params.rpn_min_size,
bbox_reg_weights=None
)
rpn_box_rois = tf.cast(rpn_box_rois, dtype=tf.float32)
if training:
rpn_box_rois = tf.stop_gradient(rpn_box_rois)
rpn_box_scores = tf.stop_gradient(rpn_box_scores)  # TODO(Jonathan): currently unused downstream; decide whether to keep it.
# Sampling
box_targets, class_targets, rpn_box_rois, proposal_to_label_map = training_ops.proposal_label_op(
rpn_box_rois,
inputs['gt_boxes'],
inputs['gt_classes'],
batch_size_per_im=self._params.batch_size_per_im,
fg_fraction=self._params.fg_fraction,
fg_thresh=self._params.fg_thresh,
bg_thresh_hi=self._params.bg_thresh_hi,
bg_thresh_lo=self._params.bg_thresh_lo
)
# Performs multi-level RoIAlign.
box_roi_features = spatial_transform_ops.multilevel_crop_and_resize(
features=fpn_feats,
boxes=rpn_box_rois,
output_size=7,
training=training
)
class_outputs, box_outputs, _ = self.box_head(inputs=box_roi_features)
if not training:
detections = postprocess_ops.generate_detections_gpu(
class_outputs=class_outputs,
box_outputs=box_outputs,
anchor_boxes=rpn_box_rois,
image_info=inputs['image_info'],
pre_nms_num_detections=self._params.test_rpn_post_nms_topn,
post_nms_num_detections=self._params.test_detections_per_image,
nms_threshold=self._params.test_nms,
bbox_reg_weights=self._params.bbox_reg_weights
)
outputs.update({
'num_detections': detections[0],
'detection_boxes': detections[1],
'detection_classes': detections[2],
'detection_scores': detections[3],
})
else: # is training
encoded_box_targets = training_ops.encode_box_targets(
boxes=rpn_box_rois,
gt_boxes=box_targets,
gt_labels=class_targets,
bbox_reg_weights=self._params.bbox_reg_weights
)
outputs.update({
'rpn_score_outputs': rpn_score_outputs,
'rpn_box_outputs': rpn_box_outputs,
'class_outputs': class_outputs,
'box_outputs': box_outputs,
'class_targets': class_targets,
'box_targets': encoded_box_targets,
'box_rois': rpn_box_rois,
})
# Faster-RCNN mode.
if not self._params.include_mask:
return outputs
# Mask sampling
if not training:
selected_box_rois = outputs['detection_boxes']
class_indices = outputs['detection_classes']
else:
selected_class_targets, selected_box_targets, \
selected_box_rois, proposal_to_label_map = training_ops.select_fg_for_masks(
class_targets=class_targets,
box_targets=box_targets,
boxes=rpn_box_rois,
proposal_to_label_map=proposal_to_label_map,
max_num_fg=int(self._params.batch_size_per_im * self._params.fg_fraction)
)
class_indices = tf.cast(selected_class_targets, dtype=tf.int32)
mask_roi_features = spatial_transform_ops.multilevel_crop_and_resize(
features=fpn_feats,
boxes=selected_box_rois,
output_size=14,
training=training
)
mask_outputs = self.mask_head(
inputs=(mask_roi_features, class_indices),
training=training
)
if training:
mask_targets = training_ops.get_mask_targets(
fg_boxes=selected_box_rois,
fg_proposal_to_label_map=proposal_to_label_map,
fg_box_targets=selected_box_targets,
mask_gt_labels=inputs['cropped_gt_masks'],
output_size=self._params.mrcnn_resolution
)
outputs.update({
'mask_outputs': mask_outputs,
'mask_targets': mask_targets,
'selected_class_targets': selected_class_targets,
})
else:
outputs.update({
'detection_masks': tf.nn.sigmoid(mask_outputs),
})
if training:
self._add_losses(outputs)
# filter out only the needed outputs
model_outputs = [
'source_ids', 'image_info',
'num_detections', 'detection_boxes',
'detection_classes', 'detection_scores',
'detection_masks'
]
return {
name: tf.identity(tensor, name=name)
for name, tensor in outputs.items()
if name in model_outputs
}
def _add_losses(self, model_outputs):
mask_rcnn_loss = self.mask_rcnn_loss(model_outputs)
mask_rcnn_loss *= self._params.mrcnn_weight_loss_mask
self.add_loss(mask_rcnn_loss)
self.add_metric(mask_rcnn_loss, name='mask_rcnn_loss')
fast_rcnn_class_loss, fast_rcnn_box_loss = self.fast_rcnn_loss(model_outputs)
fast_rcnn_box_loss *= self._params.fast_rcnn_box_loss_weight
self.add_loss(fast_rcnn_box_loss)
self.add_metric(fast_rcnn_box_loss, name='fast_rcnn_box_loss')
self.add_loss(fast_rcnn_class_loss)
self.add_metric(fast_rcnn_class_loss, name='fast_rcnn_class_loss')
rpn_score_loss, rpn_box_loss = self.rpn_loss(model_outputs)
rpn_box_loss *= self._params.rpn_box_loss_weight
self.add_loss(rpn_box_loss)
self.add_metric(rpn_box_loss, name='rpn_box_loss')
self.add_loss(rpn_score_loss)
self.add_metric(rpn_score_loss, name='rpn_score_loss')
l2_regularization_loss = tf.add_n([
tf.nn.l2_loss(tf.cast(v, dtype=tf.float32))
for v in self.trainable_variables
if not any([pattern in v.name for pattern in ["batch_normalization", "bias", "beta"]])
])
l2_regularization_loss *= self._params.l2_weight_decay
self.add_loss(l2_regularization_loss)
self.add_metric(l2_regularization_loss, name='l2_regularization_loss')
def get_config(self):
pass
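# Minimal usage sketch (assumes `params` is the hyperparameter namespace used by
# the training scripts, exposing the fields referenced above such as min_level,
# max_level, num_classes, include_mask and the RPN/NMS settings, and that
# `features` is the input dict built by the data loader):
#   model = MaskRCNN(params, trainable=True)
#   outputs = model(features, training=True)
#   # In training mode the detection/mask losses are registered via add_loss().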
| DeepLearningExamples-master | TensorFlow2/Segmentation/MaskRCNN/mrcnn_tf2/model/mask_rcnn.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Losses used for Mask-RCNN."""
import tensorflow as tf
class MaskRCNNLoss(tf.keras.layers.Layer):
"""
Layer that computes the mask loss of Mask-RCNN.
This layer implements the mask loss of Mask-RCNN. As the `mask_outputs`
produces `num_classes` masks for each RoI, the reference model expands
`mask_targets` to match the shape of `mask_outputs` and selects only the
target that the RoI has a maximum overlap.
(Reference: https://github.com/facebookresearch/Detectron/blob/master/detectron/roi_data/mask_rcnn.py)
Instead, this implementation selects the `mask_outputs` by the `class_targets`
so that it doesn't expand `mask_targets`.
"""
def __init__(self):
super().__init__(trainable=False, dtype=tf.float32)
def call(self, inputs, **kwargs):
"""
Args:
inputs: dictionary with model outputs, which has to include:
mask_outputs: a float tensor representing the prediction for each mask,
with a shape of [batch_size, num_masks, mask_height, mask_width].
mask_targets: a float tensor representing the binary mask of ground truth
labels for each mask with a shape of [batch_size, num_masks, mask_height, mask_width].
selected_class_targets: a tensor with a shape of [batch_size, num_masks],
representing the foreground mask targets.
Returns:
mask_loss: a float tensor representing total mask loss.
"""
mask_outputs = inputs['mask_outputs']
mask_targets = inputs['mask_targets']
select_class_targets = inputs['selected_class_targets']
batch_size, num_masks, mask_height, mask_width = mask_outputs.get_shape().as_list()
weights = tf.tile(
tf.reshape(tf.greater(select_class_targets, 0), [batch_size, num_masks, 1, 1]),
[1, 1, mask_height, mask_width]
)
weights = tf.cast(weights, tf.float32)
return _sigmoid_cross_entropy(
multi_class_labels=mask_targets,
logits=mask_outputs,
weights=weights,
sum_by_non_zeros_weights=True
)
class FastRCNNLoss(tf.keras.layers.Layer):
"""
Layer that computes the box and class loss (Fast-RCNN branch) of Mask-RCNN.
This layer implements the classification and box regression loss of the
Fast-RCNN branch in Mask-RCNN. As the `box_outputs` produces `num_classes`
boxes for each RoI, the reference model expands `box_targets` to match the
shape of `box_outputs` and selects only the target that the RoI has a maximum
overlap.
(Reference: https://github.com/facebookresearch/Detectron/blob/master/detectron/roi_data/fast_rcnn.py)
Instead, this function selects the `box_outputs` by the `class_targets` so
that it doesn't expand `box_targets`.
The loss computation has two parts: (1) classification loss is softmax on all
RoIs. (2) box loss is smooth L1-loss on only positive samples of RoIs.
Reference: https://github.com/facebookresearch/Detectron/blob/master/detectron/modeling/fast_rcnn_heads.py
"""
def __init__(self, num_classes):
super().__init__(trainable=False, dtype=tf.float32)
self._num_classes = num_classes
def call(self, inputs, **kwargs):
"""
Args:
inputs: dictionary with model outputs, which has to include:
class_outputs: a float tensor representing the class prediction for each box
with a shape of [batch_size, num_boxes, num_classes].
box_outputs: a float tensor representing the box prediction for each box
with a shape of [batch_size, num_boxes, num_classes * 4].
class_targets: a float tensor representing the class label for each box
with a shape of [batch_size, num_boxes].
box_targets: a float tensor representing the box label for each box
with a shape of [batch_size, num_boxes, 4].
Returns:
cls_loss: a float tensor representing total class loss.
box_loss: a float tensor representing total box regression loss.
"""
class_outputs = inputs['class_outputs']
box_outputs = inputs['box_outputs']
class_targets = inputs['class_targets']
box_targets = inputs['box_targets']
class_targets = tf.cast(class_targets, dtype=tf.int32)
# Selects the box from `box_outputs` based on `class_targets`, with which
# the box has the maximum overlap.
batch_size, num_rois, _ = box_outputs.get_shape().as_list()
box_outputs = tf.reshape(box_outputs, [batch_size, num_rois, self._num_classes, 4])
box_indices = tf.reshape(
class_targets +
tf.tile(tf.expand_dims(tf.range(batch_size) * num_rois * self._num_classes, 1), [1, num_rois]) +
tf.tile(tf.expand_dims(tf.range(num_rois) * self._num_classes, 0), [batch_size, 1]),
[-1]
)
box_outputs = tf.matmul(
tf.one_hot(
box_indices,
batch_size * num_rois * self._num_classes,
dtype=box_outputs.dtype
),
tf.reshape(box_outputs, [-1, 4])
)
box_outputs = tf.reshape(box_outputs, [batch_size, -1, 4])
box_loss = _fast_rcnn_box_loss(
box_outputs=box_outputs,
box_targets=box_targets,
class_targets=class_targets,
normalizer=1.0
)
class_targets = tf.one_hot(class_targets, self._num_classes)
class_loss = _fast_rcnn_class_loss(
class_outputs=class_outputs,
class_targets_one_hot=class_targets,
normalizer=1.0
)
return class_loss, box_loss
class RPNLoss(tf.keras.layers.Layer):
"""
Layer that computes total RPN detection loss.
Computes total RPN detection loss including box and score from all levels.
"""
def __init__(self, batch_size, rpn_batch_size_per_im, min_level, max_level):
super().__init__(trainable=False, dtype=tf.float32)
self._batch_size = batch_size
self._rpn_batch_size_per_im = rpn_batch_size_per_im
self._min_level = min_level
self._max_level = max_level
def call(self, inputs, **kwargs):
"""
Args:
inputs: dictionary with model outputs, which has to include:
score_outputs: an OrderedDict with keys representing levels and values
representing scores in [batch_size, height, width, num_anchors].
box_outputs: an OrderedDict with keys representing levels and values
representing box regression targets in [batch_size, height, width, num_anchors * 4].
score_targets_*: ground truth score targets
box_targets_*: ground truth box targets
Returns:
rpn_score_loss: a float tensor representing total score loss.
rpn_box_loss: a float tensor representing total box regression loss.
"""
score_outputs = inputs['rpn_score_outputs']
box_outputs = inputs['rpn_box_outputs']
score_losses = []
box_losses = []
for level in range(int(self._min_level), int(self._max_level + 1)):
score_targets_at_level = inputs['score_targets_%d' % level]
box_targets_at_level = inputs['box_targets_%d' % level]
score_losses.append(
_rpn_score_loss(
score_outputs=score_outputs[level],
score_targets=score_targets_at_level,
normalizer=tf.cast(self._batch_size * self._rpn_batch_size_per_im, dtype=tf.float32)
)
)
box_losses.append(_rpn_box_loss(
box_outputs=box_outputs[level],
box_targets=box_targets_at_level,
normalizer=1.0
))
# Sum per level losses to total loss.
rpn_score_loss = tf.add_n(score_losses)
rpn_box_loss = tf.add_n(box_losses)
return rpn_score_loss, rpn_box_loss
def _huber_loss(y_true, y_pred, weights, delta):
num_non_zeros = tf.math.count_nonzero(weights, dtype=tf.float32)
huber_keras_loss = tf.keras.losses.Huber(
delta=delta,
reduction=tf.keras.losses.Reduction.SUM,
name='huber_loss'
)
y_true = tf.expand_dims(y_true, axis=-1)
y_pred = tf.expand_dims(y_pred, axis=-1)
huber_loss = huber_keras_loss(
y_true,
y_pred,
sample_weight=weights
)
assert huber_loss.dtype == tf.float32
huber_loss = tf.math.divide_no_nan(huber_loss, num_non_zeros, name="huber_loss")
assert huber_loss.dtype == tf.float32
return huber_loss
def _sigmoid_cross_entropy(multi_class_labels, logits, weights, sum_by_non_zeros_weights=False):
assert weights.dtype == tf.float32
sigmoid_cross_entropy = tf.nn.sigmoid_cross_entropy_with_logits(
labels=multi_class_labels,
logits=logits,
name="x-entropy"
)
assert sigmoid_cross_entropy.dtype == tf.float32
sigmoid_cross_entropy = tf.math.multiply(sigmoid_cross_entropy, weights)
sigmoid_cross_entropy = tf.math.reduce_sum(input_tensor=sigmoid_cross_entropy)
assert sigmoid_cross_entropy.dtype == tf.float32
if sum_by_non_zeros_weights:
num_non_zeros = tf.math.count_nonzero(weights, dtype=tf.float32)
sigmoid_cross_entropy = tf.math.divide_no_nan(
sigmoid_cross_entropy,
num_non_zeros,
name="sum_by_non_zeros_weights"
)
assert sigmoid_cross_entropy.dtype == tf.float32
return sigmoid_cross_entropy
def _softmax_cross_entropy(onehot_labels, logits):
num_non_zeros = tf.math.count_nonzero(onehot_labels, dtype=tf.float32)
softmax_cross_entropy = tf.nn.softmax_cross_entropy_with_logits(
labels=tf.stop_gradient(onehot_labels),
logits=logits
)
assert softmax_cross_entropy.dtype == tf.float32
softmax_cross_entropy = tf.math.reduce_sum(input_tensor=softmax_cross_entropy)
softmax_cross_entropy = tf.math.divide_no_nan(
softmax_cross_entropy,
num_non_zeros,
name="softmax_cross_entropy"
)
assert softmax_cross_entropy.dtype == tf.float32
return softmax_cross_entropy
def _rpn_score_loss(score_outputs, score_targets, normalizer=1.0):
"""Computes score loss."""
with tf.name_scope('rpn_score_loss'):
# score_targets has three values:
# * (1) score_targets[i]=1, the anchor is a positive sample.
# * (2) score_targets[i]=0, the anchor is a negative sample.
# * (3) score_targets[i]=-1, the anchor is ignored (don't care).
mask = tf.math.greater_equal(score_targets, 0)
mask = tf.cast(mask, dtype=tf.float32)
score_targets = tf.maximum(score_targets, tf.zeros_like(score_targets))
score_targets = tf.cast(score_targets, dtype=tf.float32)
assert score_outputs.dtype == tf.float32
assert score_targets.dtype == tf.float32
score_loss = _sigmoid_cross_entropy(
multi_class_labels=score_targets,
logits=score_outputs,
weights=mask,
sum_by_non_zeros_weights=False
)
assert score_loss.dtype == tf.float32
if isinstance(normalizer, tf.Tensor) or normalizer != 1.0:
score_loss /= normalizer
assert score_loss.dtype == tf.float32
return score_loss
def _rpn_box_loss(box_outputs, box_targets, normalizer=1.0, delta=1. / 9):
"""Computes box regression loss."""
# delta is typically around the mean value of regression target.
# For instance, the regression targets of a 512x512 input with 6 anchors on
# the P2-P6 pyramid are about [0.1, 0.1, 0.2, 0.2].
with tf.name_scope('rpn_box_loss'):
mask = tf.not_equal(box_targets, 0.0)
mask = tf.cast(mask, tf.float32)
assert mask.dtype == tf.float32
# The loss is normalized by the sum of non-zero weights before additional
# normalizer provided by the function caller.
box_loss = _huber_loss(y_true=box_targets, y_pred=box_outputs, weights=mask, delta=delta)
assert box_loss.dtype == tf.float32
if isinstance(normalizer, tf.Tensor) or normalizer != 1.0:
box_loss /= normalizer
assert box_loss.dtype == tf.float32
return box_loss
def _fast_rcnn_class_loss(class_outputs, class_targets_one_hot, normalizer=1.0):
"""Computes classification loss."""
with tf.name_scope('fast_rcnn_class_loss'):
# The loss is normalized by the sum of non-zero weights before additional
# normalizer provided by the function caller.
class_loss = _softmax_cross_entropy(onehot_labels=class_targets_one_hot, logits=class_outputs)
if isinstance(normalizer, tf.Tensor) or normalizer != 1.0:
class_loss /= normalizer
return class_loss
def _fast_rcnn_box_loss(box_outputs, box_targets, class_targets, normalizer=1.0, delta=1.):
"""Computes box regression loss."""
# delta is typically around the mean value of regression target.
# For instance, the regression targets of a 512x512 input with 6 anchors on
# the P2-P6 pyramid are about [0.1, 0.1, 0.2, 0.2].
with tf.name_scope('fast_rcnn_box_loss'):
mask = tf.tile(tf.expand_dims(tf.greater(class_targets, 0), axis=2), [1, 1, 4])
# The loss is normalized by the sum of non-zero weights before additional
# normalizer provided by the function caller.
box_loss = _huber_loss(y_true=box_targets, y_pred=box_outputs, weights=mask, delta=delta)
if isinstance(normalizer, tf.Tensor) or normalizer != 1.0:
box_loss /= normalizer
return box_loss
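# Minimal usage sketch of the RPN score-target convention consumed by
# `_rpn_score_loss`: targets of 1/0 mark positive/negative anchors and -1 marks
# anchors that are ignored (masked out of the loss). The logits, targets and
# normalizer below are illustrative toy values only.
if __name__ == '__main__':
    toy_logits = tf.constant([[2.0, -1.0, 3.0, 0.5]], dtype=tf.float32)
    toy_targets = tf.constant([[1, 0, -1, 1]], dtype=tf.int32)  # -1 -> ignored anchor
    toy_loss = _rpn_score_loss(score_outputs=toy_logits, score_targets=toy_targets, normalizer=3.0)
    print('toy rpn score loss (third anchor excluded):', float(toy_loss))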
| DeepLearningExamples-master | TensorFlow2/Segmentation/MaskRCNN/mrcnn_tf2/model/losses.py |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Feature Pyramid Network.
Feature Pyramid Networks were proposed in:
[1] Tsung-Yi Lin, Piotr Dollar, Ross Girshick, Kaiming He, Bharath Hariharan,
and Serge Belongie.
Feature Pyramid Networks for Object Detection. CVPR 2017.
"""
from __future__ import absolute_import, division, print_function
import tensorflow as tf
from mrcnn_tf2.ops import spatial_transform_ops
class FPNNetwork(tf.keras.models.Model):
def __init__(self, min_level=3, max_level=7, filters=256, trainable=True):
"""Generates multiple scale feature pyramid (FPN).
Args:
feats_bottom_up: a dictionary of tensor with level as keys and bottom up
feature tensors as values. They are the features to generate FPN features.
min_level: the minimum level number to generate FPN features.
max_level: the maximum level number to generate FPN features.
filters: the FPN filter size.
Returns:
feats: a dictionary of tensor with level as keys and the generated FPN
features as values.
"""
super().__init__(name="fpn", trainable=trainable)
self._local_layers = dict()
self._min_level = min_level
self._max_level = max_level
self._filters = filters
self._backbone_max_level = 5 # max(feats_bottom_up.keys())
self._upsample_max_level = (
self._backbone_max_level if self._max_level > self._backbone_max_level else self._max_level
)
self._local_layers["stage1"] = dict()
for level in range(self._min_level, self._upsample_max_level + 1):
self._local_layers["stage1"][str(level)] = tf.keras.layers.Conv2D(
filters=self._filters,
kernel_size=(1, 1),
padding='same',
name=f'l{level}',
trainable=trainable
)
self._local_layers["stage2"] = dict()
# add post-hoc 3x3 convolution kernel
for level in range(self._min_level, self._upsample_max_level + 1):
self._local_layers["stage2"][str(level)] = tf.keras.layers.Conv2D(
filters=self._filters,
strides=(1, 1),
kernel_size=(3, 3),
padding='same',
name=f'post_hoc_d{level}',
trainable=trainable
)
self._local_layers["stage3_1"] = dict()
self._local_layers["stage3_2"] = dict()
if self._max_level == self._upsample_max_level + 1:
self._local_layers["stage3_1"] = tf.keras.layers.MaxPool2D(
pool_size=1,
strides=2,
padding='valid',
name='p%d' % self._max_level,
trainable=trainable
)
else:
for level in range(self._upsample_max_level + 1, self._max_level + 1):
self._local_layers["stage3_2"][str(level)] = tf.keras.layers.Conv2D(
filters=self._filters,
strides=(2, 2),
kernel_size=(3, 3),
padding='same',
name=f'p{level}',
trainable=trainable
)
def call(self, inputs, *args, **kwargs):
feats_bottom_up = inputs
# lateral connections
feats_lateral = {}
for level in range(self._min_level, self._upsample_max_level + 1):
feats_lateral[level] = self._local_layers["stage1"][str(level)](feats_bottom_up[level])
# add top-down path
feats = {self._upsample_max_level: feats_lateral[self._upsample_max_level]}
for level in range(self._upsample_max_level - 1, self._min_level - 1, -1):
feats[level] = spatial_transform_ops.nearest_upsampling(
feats[level + 1], 2
) + feats_lateral[level]
# add post-hoc 3x3 convolution kernel
for level in range(self._min_level, self._upsample_max_level + 1):
feats[level] = self._local_layers["stage2"][str(level)](feats[level])
if self._max_level == self._upsample_max_level + 1:
feats[self._max_level] = self._local_layers["stage3_1"](feats[self._max_level - 1])
else:
for level in range(self._upsample_max_level + 1, self._max_level + 1):
feats[level] = self._local_layers["stage3_2"][str(level)](feats[level - 1])
return feats
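# Minimal usage sketch of FPNNetwork: it consumes a dictionary of bottom-up
# features keyed by level (here C2-C5 as produced for an assumed 512x512 input)
# and returns FPN features P3-P7. The zero-valued tensors and shapes are
# illustrative only; running this requires the repo's spatial_transform_ops.
if __name__ == '__main__':
    toy_backbone_feats = {
        2: tf.zeros([1, 128, 128, 256]),
        3: tf.zeros([1, 64, 64, 512]),
        4: tf.zeros([1, 32, 32, 1024]),
        5: tf.zeros([1, 16, 16, 2048]),
    }
    fpn = FPNNetwork(min_level=3, max_level=7, filters=256)
    pyramid = fpn(toy_backbone_feats)
    for level in sorted(pyramid):
        print(f'P{level}:', pyramid[level].shape)  # P3..P7, each with 256 channels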
| DeepLearningExamples-master | TensorFlow2/Segmentation/MaskRCNN/mrcnn_tf2/model/models/fpn.py |
DeepLearningExamples-master | TensorFlow2/Segmentation/MaskRCNN/mrcnn_tf2/model/models/__init__.py |
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions to build various prediction heads in Mask-RCNN."""
from __future__ import absolute_import, division, print_function
import tensorflow as tf
class RPNHead(tf.keras.models.Model):
def __init__(self, name, num_anchors, trainable, *args, **kwargs):
super().__init__(name=name, trainable=trainable, *args, **kwargs)
"""Shared RPN heads."""
self._local_layers = dict()
# TODO(chiachenc): check the channel depth of the first convolution.
self._local_layers["conv1"] = tf.keras.layers.Conv2D(
256,
kernel_size=(3, 3),
strides=(1, 1),
activation=tf.nn.relu,
bias_initializer=tf.keras.initializers.Zeros(),
kernel_initializer=tf.random_normal_initializer(stddev=0.01),
padding='same',
trainable=trainable,
name='rpn'
)
# Proposal classification scores
# scores = tf.keras.layers.Conv2D(
self._local_layers["conv2"] = tf.keras.layers.Conv2D(
num_anchors,
kernel_size=(1, 1),
strides=(1, 1),
bias_initializer=tf.keras.initializers.Zeros(),
kernel_initializer=tf.random_normal_initializer(stddev=0.01),
padding='valid',
trainable=trainable,
name='rpn-class'
)
# Proposal bbox regression deltas
# bboxes = tf.keras.layers.Conv2D(
self._local_layers["conv3"] = tf.keras.layers.Conv2D(
4 * num_anchors,
kernel_size=(1, 1),
strides=(1, 1),
bias_initializer=tf.keras.initializers.Zeros(),
kernel_initializer=tf.random_normal_initializer(stddev=0.01),
padding='valid',
trainable=trainable,
name='rpn-box'
)
def call(self, inputs, *args, **kwargs):
net = self._local_layers["conv1"](inputs)
scores = self._local_layers["conv2"](net)
bboxes = self._local_layers["conv3"](net)
return scores, bboxes
class BoxHead(tf.keras.Model):
def __init__(self, num_classes=91, mlp_head_dim=1024, name="box_head", trainable=True, *args, **kwargs):
"""Box and class branches for the Mask-RCNN model.
Args:
roi_features: A ROI feature tensor of shape
[batch_size, num_rois, height_l, width_l, num_filters].
num_classes: an integer for the number of classes.
mlp_head_dim: an integer that is the hidden dimension in the fully-connected
layers.
"""
super().__init__(name=name, trainable=trainable, *args, **kwargs)
self._num_classes = num_classes
self._mlp_head_dim = mlp_head_dim
self._dense_fc6 = tf.keras.layers.Dense(
units=mlp_head_dim,
activation=tf.nn.relu,
trainable=trainable,
name='fc6'
)
self._dense_fc7 = tf.keras.layers.Dense(
units=mlp_head_dim,
activation=tf.nn.relu,
trainable=trainable,
name='fc7'
)
self._dense_class = tf.keras.layers.Dense(
num_classes,
kernel_initializer=tf.random_normal_initializer(stddev=0.01),
bias_initializer=tf.keras.initializers.Zeros(),
trainable=trainable,
name='class-predict'
)
self._dense_box = tf.keras.layers.Dense(
num_classes * 4,
kernel_initializer=tf.random_normal_initializer(stddev=0.001),
bias_initializer=tf.keras.initializers.Zeros(),
trainable=trainable,
name='box-predict'
)
def call(self, inputs, **kwargs):
"""
Returns:
class_outputs: a tensor with a shape of
[batch_size, num_rois, num_classes], representing the class predictions.
box_outputs: a tensor with a shape of
[batch_size, num_rois, num_classes * 4], representing the box predictions.
box_features: a tensor with a shape of
[batch_size, num_rois, mlp_head_dim], representing the box features.
"""
# reshape inputs before FC.
batch_size, num_rois, height, width, filters = inputs.get_shape().as_list()
net = tf.reshape(inputs, [batch_size, num_rois, height * width * filters])
net = self._dense_fc6(net)
box_features = self._dense_fc7(net)
class_outputs = self._dense_class(box_features)
box_outputs = self._dense_box(box_features)
return class_outputs, box_outputs, box_features
class MaskHead(tf.keras.Model):
@staticmethod
def _get_stddev_equivalent_to_msra_fill(kernel_size, fan_out):
"""Returns the stddev of random normal initialization as MSRAFill."""
# Reference: https://github.com/pytorch/pytorch/blob/master/caffe2/operators/filler_op.h#L445-L463
# For example, kernel size is (3, 3) and fan out is 256, stddev is 0.029.
# stddev = (2/(3*3*256))^0.5 = 0.029
return (2 / (kernel_size[0] * kernel_size[1] * fan_out)) ** 0.5
def __init__(
self,
num_classes=91,
mrcnn_resolution=28,
name="mask_head",
trainable=True,
*args,
**kwargs
):
"""Mask branch for the Mask-RCNN model.
Args:
roi_features: A ROI feature tensor of shape
[batch_size, num_rois, height_l, width_l, num_filters].
num_classes: an integer for the number of classes.
mrcnn_resolution: an integer that is the resolution of masks.
"""
super().__init__(name=name, trainable=trainable, *args, **kwargs)
self._num_classes = num_classes
self._mrcnn_resolution = mrcnn_resolution
self._conv_stage1 = list()
kernel_size = (3, 3)
fan_out = 256
init_stddev = MaskHead._get_stddev_equivalent_to_msra_fill(kernel_size, fan_out)
for conv_id in range(4):
self._conv_stage1.append(tf.keras.layers.Conv2D(
fan_out,
kernel_size=kernel_size,
strides=(1, 1),
padding='same',
dilation_rate=(1, 1),
activation=tf.nn.relu,
kernel_initializer=tf.random_normal_initializer(stddev=init_stddev),
bias_initializer=tf.keras.initializers.Zeros(),
trainable=trainable,
name='mask-conv-l%d' % conv_id
))
kernel_size = (2, 2)
fan_out = 256
init_stddev = MaskHead._get_stddev_equivalent_to_msra_fill(kernel_size, fan_out)
self._conv_stage2 = tf.keras.layers.Conv2DTranspose(
fan_out,
kernel_size=kernel_size,
strides=(2, 2),
padding='valid',
activation=tf.nn.relu,
kernel_initializer=tf.random_normal_initializer(stddev=init_stddev),
bias_initializer=tf.keras.initializers.Zeros(),
trainable=trainable,
name='conv5-mask'
)
kernel_size = (1, 1)
fan_out = self._num_classes
init_stddev = MaskHead._get_stddev_equivalent_to_msra_fill(kernel_size, fan_out)
self._conv_stage3 = tf.keras.layers.Conv2D(
fan_out,
kernel_size=kernel_size,
strides=(1, 1),
padding='valid',
kernel_initializer=tf.random_normal_initializer(stddev=init_stddev),
bias_initializer=tf.keras.initializers.Zeros(),
trainable=trainable,
name='mask_fcn_logits'
)
def call(self, inputs, training=True, **kwargs):
"""
Args:
inputs: tuple of two tensors:
mask_roi_features: a Tensor of shape:
[batch_size, num_boxes, output_size, output_size, num_filters].
class_indices: a Tensor of shape [batch_size, num_rois], indicating
the class of each RoI.
training: whether to build the model for training (or inference).
Returns:
mask_outputs: a tensor with a shape of
[batch_size, num_masks, mask_height, mask_width],
representing the mask predictions.
Raises:
ValueError: If boxes is not a rank-3 tensor or the last dimension of
boxes is not 4.
"""
mask_roi_features, class_indices = inputs
indices_dtype = tf.int32
# Cast class indices to int32 to avoid issues when running with Keras AMP.
class_indices = tf.cast(class_indices, dtype=indices_dtype)
batch_size, num_rois, height, width, filters = mask_roi_features.get_shape().as_list()
net = tf.reshape(mask_roi_features, [-1, height, width, filters])
for conv_id in range(4):
net = self._conv_stage1[conv_id](net)
net = self._conv_stage2(net)
mask_outputs = self._conv_stage3(net)
mask_outputs = tf.reshape(
mask_outputs,
[-1, num_rois, self._mrcnn_resolution, self._mrcnn_resolution, self._num_classes]
)
with tf.name_scope('masks_post_processing'):
mask_outputs = tf.transpose(a=mask_outputs, perm=[0, 1, 4, 2, 3])
if batch_size == 1:
indices = tf.reshape(
tf.reshape(
tf.range(num_rois, dtype=indices_dtype),
[batch_size, num_rois, 1]
) * self._num_classes + tf.expand_dims(class_indices, axis=-1),
[batch_size, -1]
)
mask_outputs = tf.gather(
tf.reshape(mask_outputs,
[batch_size, -1, self._mrcnn_resolution, self._mrcnn_resolution]),
indices,
axis=1
)
mask_outputs = tf.squeeze(mask_outputs, axis=1)
mask_outputs = tf.reshape(
mask_outputs,
[batch_size, num_rois, self._mrcnn_resolution, self._mrcnn_resolution])
else:
batch_indices = (
tf.expand_dims(tf.range(batch_size, dtype=indices_dtype), axis=1) *
tf.ones([1, num_rois], dtype=indices_dtype)
)
mask_indices = (
tf.expand_dims(tf.range(num_rois, dtype=indices_dtype), axis=0) *
tf.ones([batch_size, 1], dtype=indices_dtype)
)
gather_indices = tf.stack([batch_indices, mask_indices, class_indices], axis=2)
mask_outputs = tf.gather_nd(mask_outputs, gather_indices)
return mask_outputs
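# Minimal usage sketch of BoxHead: each pooled RoI feature map is flattened and
# passed through two fully-connected layers to produce per-class scores and
# per-class box deltas. The batch/RoI/feature sizes below are illustrative only.
if __name__ == '__main__':
    toy_roi_features = tf.zeros([2, 8, 7, 7, 256])  # [batch, num_rois, h, w, filters]
    box_head = BoxHead(num_classes=91, mlp_head_dim=1024)
    class_outputs, box_outputs, box_features = box_head(toy_roi_features)
    print(class_outputs.shape)  # (2, 8, 91)
    print(box_outputs.shape)    # (2, 8, 364) == num_classes * 4
    print(box_features.shape)   # (2, 8, 1024)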
| DeepLearningExamples-master | TensorFlow2/Segmentation/MaskRCNN/mrcnn_tf2/model/models/heads.py |
import tensorflow as tf
from mrcnn_tf2.model.models.resnet50 import BottleneckBlock
class BottleneckGroup(tf.keras.layers.Layer):
def __init__(self, blocks, filters, strides, trainable=True):
super().__init__(trainable=trainable)
self.blocks = []
for block_id in range(blocks):
self.blocks.append(
BottleneckBlock(
filters=filters,
strides=strides if block_id == 0 else 1,
expansion=4,
shortcut='conv2d' if block_id == 0 else None
)
)
def call(self, inputs, training=None, **kwargs):
net = inputs
for block in self.blocks:
net = block(net, training=training)
return net
| DeepLearningExamples-master | TensorFlow2/Segmentation/MaskRCNN/mrcnn_tf2/model/models/resnet50/bottleneck_group.py |
import tensorflow as tf
from mrcnn_tf2.model.models.resnet50 import Conv2DBlock
class BottleneckBlock(tf.keras.layers.Layer):
def __init__(self, filters, strides, expansion=1, shortcut='conv2d', trainable=True, *args, **kwargs):
super().__init__(trainable=trainable, *args, **kwargs)
if shortcut == 'conv2d':
self.shortcut = Conv2DBlock(
filters=filters * expansion,
kernel_size=1,
strides=strides,
use_batch_norm=True,
use_relu=False, # Applied at the end after addition with bottleneck
name='shortcut'
)
elif shortcut == 'avg_pool':
self.shortcut = tf.keras.layers.AveragePooling2D(
pool_size=1,
strides=strides,
name='shortcut'
)
else:
self.shortcut = tf.keras.layers.Layer(name='shortcut') # identity
self.conv2d_1 = Conv2DBlock(
filters=filters,
kernel_size=1,
strides=1,
use_batch_norm=True,
use_relu=True
)
self.conv2d_2 = Conv2DBlock(
filters=filters,
kernel_size=3,
strides=strides,
use_batch_norm=True,
use_relu=True
)
self.conv2d_3 = Conv2DBlock(
filters=filters * expansion,
kernel_size=1,
strides=1,
use_batch_norm=True,
use_relu=False # Applied at the end after addition with shortcut
)
self.add = tf.keras.layers.Add()
self.relu = tf.keras.layers.ReLU()
def call(self, inputs, training=None, **kwargs):
shortcut = self.shortcut(inputs)
bottleneck = self.conv2d_1(inputs, training=training)
bottleneck = self.conv2d_2(bottleneck, training=training)
bottleneck = self.conv2d_3(bottleneck, training=training)
net = self.add([bottleneck, shortcut])
net = self.relu(net)
return net
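# Minimal usage sketch of BottleneckBlock: with a 'conv2d' shortcut the block
# expands channels by `expansion` and can downsample spatially via `strides`.
# The input shape below is an illustrative toy value.
if __name__ == '__main__':
    block = BottleneckBlock(filters=64, strides=2, expansion=4, shortcut='conv2d')
    out = block(tf.zeros([1, 56, 56, 64]), training=False)
    print(out.shape)  # (1, 28, 28, 256)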
| DeepLearningExamples-master | TensorFlow2/Segmentation/MaskRCNN/mrcnn_tf2/model/models/resnet50/bottleneck_block.py |
from .conv2d_block import Conv2DBlock
from .bottleneck_block import BottleneckBlock
from .bottleneck_group import BottleneckGroup
from .resnet import ResNet50
| DeepLearningExamples-master | TensorFlow2/Segmentation/MaskRCNN/mrcnn_tf2/model/models/resnet50/__init__.py |
import tensorflow as tf
from mrcnn_tf2.model.models.resnet50 import BottleneckGroup, Conv2DBlock
class ResNet50(tf.keras.Model):
def __init__(self, name='resnet50', *args, **kwargs):
super().__init__(name=name, *args, **kwargs)
self.conv2d = Conv2DBlock(
filters=64,
kernel_size=7,
strides=2,
use_batch_norm=True,
use_relu=True,
trainable=False
)
self.maxpool2d = tf.keras.layers.MaxPool2D(
pool_size=3,
strides=2,
padding='SAME'
)
self.group_1 = BottleneckGroup(
blocks=3,
filters=64,
strides=1,
trainable=False
)
self.group_2 = BottleneckGroup(
blocks=4,
filters=128,
strides=2
)
self.group_3 = BottleneckGroup(
blocks=6,
filters=256,
strides=2
)
self.group_4 = BottleneckGroup(
blocks=3,
filters=512,
strides=2
)
def call(self, inputs, training=None, mask=None):
net = self.conv2d(inputs, training=training)
net = self.maxpool2d(net)
c2 = self.group_1(net, training=training)
c3 = self.group_2(c2, training=training)
c4 = self.group_3(c3, training=training)
c5 = self.group_4(c4, training=training)
return {2: c2, 3: c3, 4: c4, 5: c5}
def get_config(self):
pass
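# Minimal usage sketch of ResNet50: the call returns a dictionary of feature
# maps keyed by level 2-5, each downsampled by 2**level relative to the input.
# The 512x512 input below is an illustrative toy value.
if __name__ == '__main__':
    backbone = ResNet50()
    feats = backbone(tf.zeros([1, 512, 512, 3]), training=False)
    for level in sorted(feats):
        print(f'C{level}:', feats[level].shape)  # C2: 128x128x256 ... C5: 16x16x2048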
| DeepLearningExamples-master | TensorFlow2/Segmentation/MaskRCNN/mrcnn_tf2/model/models/resnet50/resnet.py |
import tensorflow as tf
class Conv2DBlock(tf.keras.layers.Layer):
def __init__(self, filters, kernel_size, strides, padding='SAME',
use_batch_norm=True, use_relu=True, trainable=True,
trainable_batch_norm=False, *args, **kwargs):
super().__init__(trainable=trainable, *args, **kwargs)
self.conv2d = None
self.batch_norm = None
self.relu = None
self.conv2d = tf.keras.layers.Conv2D(
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
use_bias=not use_batch_norm,
trainable=trainable
)
if use_batch_norm:
self.batch_norm = tf.keras.layers.BatchNormalization(
momentum=0.9,
scale=True,
epsilon=1e-05,
trainable=trainable and trainable_batch_norm,
fused=True,
center=True
)
if use_relu:
self.relu = tf.keras.layers.ReLU()
def call(self, inputs, training=None, **kwargs):
net = inputs
net = self.conv2d(net)
if self.batch_norm:
net = self.batch_norm(net, training=training)
if self.relu:
net = self.relu(net)
return net
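# Minimal usage sketch of Conv2DBlock: convolution -> optional batch norm ->
# optional ReLU. The input shape below is an illustrative toy value.
if __name__ == '__main__':
    block = Conv2DBlock(filters=16, kernel_size=3, strides=2)
    out = block(tf.zeros([1, 64, 64, 3]), training=False)
    print(out.shape)  # (1, 32, 32, 16)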
| DeepLearningExamples-master | TensorFlow2/Segmentation/MaskRCNN/mrcnn_tf2/model/models/resnet50/conv2d_block.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base target assigner module.
The job of a TargetAssigner is, for a given set of anchors (bounding boxes) and
groundtruth detections (bounding boxes), to assign classification and regression
targets to each anchor as well as weights to each anchor (specifying, e.g.,
which anchors should not contribute to training loss).
It assigns classification/regression targets by performing the following steps:
1) Computing pairwise similarity between anchors and groundtruth boxes using a
provided RegionSimilarity Calculator
2) Computing a matching based on the similarity matrix using a provided Matcher
3) Assigning regression targets based on the matching and a provided BoxCoder
4) Assigning classification targets based on the matching and groundtruth labels
Note that TargetAssigners only operate on detections from a single
image at a time, so any logic for applying a TargetAssigner to multiple
images must be handled externally.
"""
import tensorflow as tf
from mrcnn_tf2.object_detection import box_list, shape_utils
KEYPOINTS_FIELD_NAME = 'keypoints'
class TargetAssigner:
"""Target assigner to compute classification and regression targets."""
def __init__(self, similarity_calc, matcher, box_coder,
negative_class_weight=1.0, unmatched_cls_target=None):
"""Construct Object Detection Target Assigner.
Args:
similarity_calc: a RegionSimilarityCalculator
matcher: Matcher used to match groundtruth to anchors.
box_coder: BoxCoder used to encode matching groundtruth boxes with
respect to anchors.
negative_class_weight: classification weight to be associated with negative
anchors (default: 1.0). The weight must be in [0., 1.].
unmatched_cls_target: a float32 tensor with shape [d_1, d_2, ..., d_k]
which is consistent with the classification target for each
anchor (and can be empty for scalar targets). This shape must thus be
compatible with the groundtruth labels that are passed to the "assign"
function (which have shape [num_gt_boxes, d_1, d_2, ..., d_k]).
If set to None, unmatched_cls_target is set to be [0] for each anchor.
Raises:
ValueError: if similarity_calc is not a RegionSimilarityCalculator or
if matcher is not a Matcher or if box_coder is not a BoxCoder
"""
self._similarity_calc = similarity_calc
self._matcher = matcher
self._box_coder = box_coder
self._negative_class_weight = negative_class_weight
if unmatched_cls_target is None:
self._unmatched_cls_target = tf.constant([0], tf.float32)
else:
self._unmatched_cls_target = unmatched_cls_target
@property
def box_coder(self):
return self._box_coder
def assign(self, anchors, groundtruth_boxes, groundtruth_labels=None,
groundtruth_weights=None, **params):
"""Assign classification and regression targets to each anchor.
For a given set of anchors and groundtruth detections, match anchors
to groundtruth_boxes and assign classification and regression targets to
each anchor as well as weights based on the resulting match (specifying,
e.g., which anchors should not contribute to training loss).
Anchors that are not matched to anything are given a classification target
of self._unmatched_cls_target which can be specified via the constructor.
Args:
anchors: a BoxList representing N anchors
groundtruth_boxes: a BoxList representing M groundtruth boxes
groundtruth_labels: a tensor of shape [M, d_1, ... d_k]
with labels for each of the ground_truth boxes. The subshape
[d_1, ... d_k] can be empty (corresponding to scalar inputs). When set
to None, groundtruth_labels assumes a binary problem where all
ground_truth boxes get a positive label (of 1).
groundtruth_weights: a float tensor of shape [M] indicating the weight to
assign to all anchors matched to a particular groundtruth box. The weights
must be in [0., 1.]. If None, all weights are set to 1.
**params: Additional keyword arguments for specific implementations of
the Matcher.
Returns:
cls_targets: a float32 tensor with shape [num_anchors, d_1, d_2 ... d_k],
where the subshape [d_1, ..., d_k] is compatible with groundtruth_labels
which has shape [num_gt_boxes, d_1, d_2, ... d_k].
cls_weights: a float32 tensor with shape [num_anchors]
reg_targets: a float32 tensor with shape [num_anchors, box_code_dimension]
reg_weights: a float32 tensor with shape [num_anchors]
match: a matcher.Match object encoding the match between anchors and
groundtruth boxes, with rows corresponding to groundtruth boxes
and columns corresponding to anchors.
Raises:
ValueError: if anchors or groundtruth_boxes are not of type
box_list.BoxList
"""
if not isinstance(anchors, box_list.BoxList):
raise ValueError('anchors must be an BoxList')
if not isinstance(groundtruth_boxes, box_list.BoxList):
raise ValueError('groundtruth_boxes must be an BoxList')
if groundtruth_labels is None:
groundtruth_labels = tf.ones(tf.expand_dims(groundtruth_boxes.num_boxes(),
0))
groundtruth_labels = tf.expand_dims(groundtruth_labels, -1)
if groundtruth_weights is None:
num_gt_boxes = groundtruth_boxes.num_boxes_static()
if not num_gt_boxes:
num_gt_boxes = groundtruth_boxes.num_boxes()
groundtruth_weights = tf.ones([num_gt_boxes], dtype=tf.float32)
match_quality_matrix = self._similarity_calc.compare(groundtruth_boxes, anchors)
match = self._matcher.match(match_quality_matrix, **params)
reg_targets = self._create_regression_targets(anchors, groundtruth_boxes, match)
cls_targets = self._create_classification_targets(groundtruth_labels, match)
reg_weights = self._create_regression_weights(match, groundtruth_weights)
cls_weights = self._create_classification_weights(match, groundtruth_weights)
num_anchors = anchors.num_boxes_static()
if num_anchors is not None:
reg_targets = self._reset_target_shape(reg_targets, num_anchors)
cls_targets = self._reset_target_shape(cls_targets, num_anchors)
reg_weights = self._reset_target_shape(reg_weights, num_anchors)
cls_weights = self._reset_target_shape(cls_weights, num_anchors)
return cls_targets, cls_weights, reg_targets, reg_weights, match
def _reset_target_shape(self, target, num_anchors):
"""Sets the static shape of the target.
Args:
target: the target tensor. Its first dimension will be overwritten.
num_anchors: the number of anchors, which is used to override the target's
first dimension.
Returns:
A tensor with the shape info filled in.
"""
target_shape = target.get_shape().as_list()
target_shape[0] = num_anchors
target.set_shape(target_shape)
return target
def _create_regression_targets(self, anchors, groundtruth_boxes, match):
"""Returns a regression target for each anchor.
Args:
anchors: a BoxList representing N anchors
groundtruth_boxes: a BoxList representing M groundtruth_boxes
match: a matcher.Match object
Returns:
reg_targets: a float32 tensor with shape [N, box_code_dimension]
"""
matched_gt_boxes = match.gather_based_on_match(
groundtruth_boxes.get(),
unmatched_value=tf.zeros(4),
ignored_value=tf.zeros(4)
)
matched_gt_boxlist = box_list.BoxList(matched_gt_boxes)
if groundtruth_boxes.has_field(KEYPOINTS_FIELD_NAME):
groundtruth_keypoints = groundtruth_boxes.get_field(KEYPOINTS_FIELD_NAME)
matched_keypoints = match.gather_based_on_match(
groundtruth_keypoints,
unmatched_value=tf.zeros(groundtruth_keypoints.get_shape()[1:]),
ignored_value=tf.zeros(groundtruth_keypoints.get_shape()[1:])
)
matched_gt_boxlist.add_field(KEYPOINTS_FIELD_NAME, matched_keypoints)
matched_reg_targets = self._box_coder.encode(matched_gt_boxlist, anchors)
match_results_shape = shape_utils.combined_static_and_dynamic_shape(match.match_results)
# Zero out the unmatched and ignored regression targets.
unmatched_ignored_reg_targets = tf.tile(self._default_regression_target(), [match_results_shape[0], 1])
matched_anchors_mask = match.matched_column_indicator()
matched_anchors_mask = tf.expand_dims(matched_anchors_mask, axis=1)
matched_anchors_mask = tf.broadcast_to(matched_anchors_mask, shape=matched_reg_targets.get_shape())
reg_targets = tf.where(matched_anchors_mask, matched_reg_targets, unmatched_ignored_reg_targets)
return reg_targets
def _default_regression_target(self):
"""Returns the default target for anchors to regress to.
Default regression targets are set to zero (though in
this implementation what these targets are set to should
not matter as the regression weight of any box set to
regress to the default target is zero).
Returns:
default_target: a float32 tensor with shape [1, box_code_dimension]
"""
return tf.constant([self._box_coder.code_size * [0]], tf.float32)
def _create_classification_targets(self, groundtruth_labels, match):
"""Create classification targets for each anchor.
Assign a classification target for each anchor to the matching
groundtruth label that is provided by match. Anchors that are not matched
to anything are given the target self._unmatched_cls_target
Args:
groundtruth_labels: a tensor of shape [num_gt_boxes, d_1, ... d_k]
with labels for each of the ground_truth boxes. The subshape
[d_1, ... d_k] can be empty (corresponding to scalar labels).
match: a matcher.Match object that provides a matching between anchors
and groundtruth boxes.
Returns:
a float32 tensor with shape [num_anchors, d_1, d_2 ... d_k], where the
subshape [d_1, ..., d_k] is compatible with groundtruth_labels which has
shape [num_gt_boxes, d_1, d_2, ... d_k].
"""
return match.gather_based_on_match(
groundtruth_labels,
unmatched_value=self._unmatched_cls_target,
ignored_value=self._unmatched_cls_target)
def _create_regression_weights(self, match, groundtruth_weights):
"""Set regression weight for each anchor.
Only positive anchors are set to contribute to the regression loss, so this
method returns a weight of 1 for every positive anchor and 0 for every
negative anchor.
Args:
match: a matcher.Match object that provides a matching between anchors
and groundtruth boxes.
groundtruth_weights: a float tensor of shape [M] indicating the weight to
assign to all anchors matched to a particular groundtruth box.
Returns:
a float32 tensor with shape [num_anchors] representing regression weights.
"""
return match.gather_based_on_match(
groundtruth_weights, ignored_value=0., unmatched_value=0.)
def _create_classification_weights(self,
match,
groundtruth_weights):
"""Create classification weights for each anchor.
Positive (matched) anchors are associated with a weight of
positive_class_weight and negative (unmatched) anchors are associated with
a weight of negative_class_weight. When anchors are ignored, weights are set
to zero. By default, both positive/negative weights are set to 1.0,
but they can be adjusted to handle class imbalance (which is almost always
the case in object detection).
Args:
match: a matcher.Match object that provides a matching between anchors
and groundtruth boxes.
groundtruth_weights: a float tensor of shape [M] indicating the weight to
assign to all anchors matched to a particular groundtruth box.
Returns:
a float32 tensor with shape [num_anchors] representing classification
weights.
"""
return match.gather_based_on_match(
groundtruth_weights,
ignored_value=0.,
unmatched_value=self._negative_class_weight)
def get_box_coder(self):
"""Get BoxCoder of this TargetAssigner.
Returns:
BoxCoder object.
"""
return self._box_coder
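# Minimal sketch of the four-step assignment pipeline described in the module
# docstring (similarity -> matching -> regression targets -> classification
# targets). The IoU calculator and arg-max matcher below are simplified
# stand-ins written only for this sketch, not the repo's own implementations,
# and the BoxList helpers used (get, num_boxes_static,
# get_center_coordinates_and_sizes) are assumed to follow the TF Object
# Detection API. All box values are illustrative toys.
if __name__ == '__main__':
    from mrcnn_tf2.object_detection import faster_rcnn_box_coder, matcher as matcher_lib
    class _ToyIouSimilarity:
        """Pairwise IoU between the boxes of two BoxLists (simplified)."""
        def compare(self, boxlist1, boxlist2):
            y1a, x1a, y2a, x2a = tf.split(boxlist1.get(), 4, axis=1)
            y1b, x1b, y2b, x2b = tf.split(boxlist2.get(), 4, axis=1)
            inter_h = tf.maximum(0.0, tf.minimum(y2a, tf.transpose(y2b)) - tf.maximum(y1a, tf.transpose(y1b)))
            inter_w = tf.maximum(0.0, tf.minimum(x2a, tf.transpose(x2b)) - tf.maximum(x1a, tf.transpose(x1b)))
            intersection = inter_h * inter_w
            union = (y2a - y1a) * (x2a - x1a) + tf.transpose((y2b - y1b) * (x2b - x1b)) - intersection
            return tf.math.divide_no_nan(intersection, union)
    class _ToyArgMaxMatcher(matcher_lib.Matcher):
        """Matches every anchor (column) to its best row; IoU < 0.5 -> unmatched."""
        def _match(self, similarity_matrix, **params):
            best_rows = tf.cast(tf.argmax(similarity_matrix, axis=0), tf.int32)
            best_ious = tf.reduce_max(similarity_matrix, axis=0)
            return tf.where(best_ious >= 0.5, best_rows, -tf.ones_like(best_rows))
    anchors = box_list.BoxList(tf.constant([[0., 0., 10., 10.], [20., 20., 30., 30.]]))
    groundtruth = box_list.BoxList(tf.constant([[1., 1., 9., 9.]]))
    assigner = TargetAssigner(_ToyIouSimilarity(), _ToyArgMaxMatcher(),
                              faster_rcnn_box_coder.FasterRcnnBoxCoder())
    cls_targets, cls_weights, reg_targets, reg_weights, match = assigner.assign(anchors, groundtruth)
    # The first anchor overlaps the groundtruth box (positive); the second does not (negative).
    print('cls_targets:', cls_targets.numpy().ravel())  # [1. 0.]
    print('reg_weights:', reg_weights.numpy())          # [1. 0.]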
| DeepLearningExamples-master | TensorFlow2/Segmentation/MaskRCNN/mrcnn_tf2/object_detection/target_assigner.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Matcher interface and Match class.
This module defines the Matcher interface and the Match object. The job of the
matcher is to match row and column indices based on the similarity matrix and
other optional parameters. Each column is matched to at most one row. There
are three possibilities for the matching:
1) match: A column matches a row.
2) no_match: A column does not match any row.
3) ignore: A column that is neither 'match' nor no_match.
The ignore case is regularly encountered in object detection: when an anchor has
a relatively small overlap with a ground-truth box, one neither wants to
consider this box a positive example (match) nor a negative example (no match).
The Match class is used to store the match results and it provides simple apis
to query the results.
"""
from abc import ABCMeta, abstractmethod
import tensorflow as tf
class Match:
"""Class to store results from the matcher.
This class is used to store the results from the matcher. It provides
convenient methods to query the matching results.
"""
def __init__(self, match_results):
"""Constructs a Match object.
Args:
match_results: Integer tensor of shape [N] with (1) match_results[i]>=0,
meaning that column i is matched with row match_results[i].
(2) match_results[i]=-1, meaning that column i is not matched.
(3) match_results[i]=-2, meaning that column i is ignored.
Raises:
ValueError: if match_results does not have rank 1 or is not an
int32 tensor
"""
if match_results.shape.ndims != 1:
raise ValueError('match_results should have rank 1')
if match_results.dtype != tf.int32:
raise ValueError('match_results should be an int32 or int64 scalar '
'tensor')
self._match_results = match_results
@property
def match_results(self):
"""The accessor for match results.
Returns:
the tensor which encodes the match results.
"""
return self._match_results
def matched_column_indices(self):
"""Returns column indices that match to some row.
The indices returned by this op are always sorted in increasing order.
Returns:
column_indices: int32 tensor of shape [K] with column indices.
"""
return self._reshape_and_cast(tf.where(tf.greater(self._match_results, -1)))
def matched_column_indicator(self):
"""Returns column indices that are matched.
Returns:
column_indices: int32 tensor of shape [K] with column indices.
"""
return tf.greater_equal(self._match_results, 0)
def num_matched_columns(self):
"""Returns number (int32 scalar tensor) of matched columns."""
return tf.size(input=self.matched_column_indices())
def unmatched_column_indices(self):
"""Returns column indices that do not match any row.
The indices returned by this op are always sorted in increasing order.
Returns:
column_indices: int32 tensor of shape [K] with column indices.
"""
return self._reshape_and_cast(tf.where(tf.equal(self._match_results, -1)))
def unmatched_column_indicator(self):
"""Returns column indices that are unmatched.
Returns:
column_indices: int32 tensor of shape [K] with column indices.
"""
return tf.equal(self._match_results, -1)
def num_unmatched_columns(self):
"""Returns number (int32 scalar tensor) of unmatched columns."""
return tf.size(input=self.unmatched_column_indices())
def ignored_column_indices(self):
"""Returns column indices that are ignored (neither Matched nor Unmatched).
The indices returned by this op are always sorted in increasing order.
Returns:
column_indices: int32 tensor of shape [K] with column indices.
"""
return self._reshape_and_cast(tf.where(self.ignored_column_indicator()))
def ignored_column_indicator(self):
"""Returns boolean column indicator where True means the colum is ignored.
Returns:
column_indicator: boolean vector which is True for all ignored column
indices.
"""
return tf.equal(self._match_results, -2)
def num_ignored_columns(self):
"""Returns number (int32 scalar tensor) of matched columns."""
return tf.size(input=self.ignored_column_indices())
def unmatched_or_ignored_column_indices(self):
"""Returns column indices that are unmatched or ignored.
The indices returned by this op are always sorted in increasing order.
Returns:
column_indices: int32 tensor of shape [K] with column indices.
"""
return self._reshape_and_cast(tf.where(tf.greater(0, self._match_results)))
def matched_row_indices(self):
"""Returns row indices that match some column.
The indices returned by this op are ordered so as to be in correspondence
with the output of matched_column_indicator(). For example if
self.matched_column_indicator() is [0,2], and self.matched_row_indices() is
[7, 3], then we know that column 0 was matched to row 7 and column 2 was
matched to row 3.
Returns:
row_indices: int32 tensor of shape [K] with row indices.
"""
return self._reshape_and_cast(
tf.gather(self._match_results, self.matched_column_indices()))
def _reshape_and_cast(self, t):
return tf.cast(tf.reshape(t, [-1]), tf.int32)
def gather_based_on_match(self, input_tensor, unmatched_value,
ignored_value):
"""Gathers elements from `input_tensor` based on match results.
For columns that are matched to a row, gathered_tensor[col] is set to
input_tensor[match_results[col]]. For columns that are unmatched,
gathered_tensor[col] is set to unmatched_value. Finally, for columns that
are ignored gathered_tensor[col] is set to ignored_value.
Note that the input_tensor.shape[1:] must match with unmatched_value.shape
and ignored_value.shape
Args:
input_tensor: Tensor to gather values from.
unmatched_value: Constant tensor value for unmatched columns.
ignored_value: Constant tensor value for ignored columns.
Returns:
gathered_tensor: A tensor containing values gathered from input_tensor.
The shape of the gathered tensor is [match_results.shape[0]] +
input_tensor.shape[1:].
"""
input_tensor = tf.concat([tf.stack([ignored_value, unmatched_value]),
input_tensor], axis=0)
gather_indices = tf.maximum(self.match_results + 2, 0)
gathered_tensor = tf.gather(input_tensor, gather_indices)
return gathered_tensor
class Matcher:
"""Abstract base class for matcher.
"""
__metaclass__ = ABCMeta
def match(self, similarity_matrix, scope=None, **params):
"""Computes matches among row and column indices and returns the result.
Computes matches among the row and column indices based on the similarity
matrix and optional arguments.
Args:
similarity_matrix: Float tensor of shape [N, M] with pairwise similarity
where higher value means more similar.
scope: Op scope name. Defaults to 'Match' if None.
**params: Additional keyword arguments for specific implementations of
the Matcher.
Returns:
A Match object with the results of matching.
"""
return Match(self._match(similarity_matrix, **params))
@abstractmethod
def _match(self, similarity_matrix, **params):
"""Method to be overridden by implementations.
Args:
similarity_matrix: Float tensor of shape [N, M] with pairwise similarity
where higher value means more similar.
**params: Additional keyword arguments for specific implementations of
the Matcher.
Returns:
match_results: Integer tensor of shape [M]: match_results[i]>=0 means
that column i is matched to row match_results[i], match_results[i]=-1
means that the column is not matched. match_results[i]=-2 means that
the column is ignored (usually this happens when there is a very weak
match which one neither wants as positive nor negative example).
"""
| DeepLearningExamples-master | TensorFlow2/Segmentation/MaskRCNN/mrcnn_tf2/object_detection/matcher.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Faster RCNN box coder.
Faster RCNN box coder follows the coding schema described below:
ty = (y - ya) / ha
tx = (x - xa) / wa
th = log(h / ha)
tw = log(w / wa)
where x, y, w, h denote the box's center coordinates, width and height
respectively. Similarly, xa, ya, wa, ha denote the anchor's center
coordinates, width and height. tx, ty, tw and th denote the anchor-encoded
center, width and height respectively.
See http://arxiv.org/abs/1506.01497 for details.
"""
import tensorflow as tf
from mrcnn_tf2.object_detection import box_coder, box_list
EPSILON = 1e-8
class FasterRcnnBoxCoder(box_coder.BoxCoder):
"""Faster RCNN box coder."""
def __init__(self, scale_factors=None):
"""Constructor for FasterRcnnBoxCoder.
Args:
scale_factors: List of 4 positive scalars to scale ty, tx, th and tw.
If set to None, does not perform scaling. For Faster RCNN,
the open-source implementation recommends using [10.0, 10.0, 5.0, 5.0].
"""
if scale_factors is not None:
assert len(scale_factors) == 4
assert all([scalar > 0 for scalar in scale_factors])
self._scale_factors = scale_factors
@property
def code_size(self):
return 4
def _encode(self, boxes, anchors):
"""Encode a box collection with respect to anchor collection.
Args:
boxes: BoxList holding N boxes to be encoded.
anchors: BoxList of anchors.
Returns:
a tensor representing N anchor-encoded boxes of the format
[ty, tx, th, tw].
"""
# Convert anchors to the center coordinate representation.
ycenter_a, xcenter_a, ha, wa = anchors.get_center_coordinates_and_sizes()
ycenter, xcenter, h, w = boxes.get_center_coordinates_and_sizes()
# Avoid NaN in division and log below.
ha += EPSILON
wa += EPSILON
h += EPSILON
w += EPSILON
tx = (xcenter - xcenter_a) / wa
ty = (ycenter - ycenter_a) / ha
tw = tf.math.log(w / wa)
th = tf.math.log(h / ha)
# Scales location targets as used in paper for joint training.
if self._scale_factors:
ty *= self._scale_factors[0]
tx *= self._scale_factors[1]
th *= self._scale_factors[2]
tw *= self._scale_factors[3]
return tf.transpose(a=tf.stack([ty, tx, th, tw]))
def _decode(self, rel_codes, anchors):
"""Decode relative codes to boxes.
Args:
rel_codes: a tensor representing N anchor-encoded boxes.
anchors: BoxList of anchors.
Returns:
boxes: BoxList holding N bounding boxes.
"""
ycenter_a, xcenter_a, ha, wa = anchors.get_center_coordinates_and_sizes()
ty, tx, th, tw = tf.unstack(tf.transpose(a=rel_codes))
if self._scale_factors:
ty /= self._scale_factors[0]
tx /= self._scale_factors[1]
th /= self._scale_factors[2]
tw /= self._scale_factors[3]
w = tf.exp(tw) * wa
h = tf.exp(th) * ha
ycenter = ty * ha + ycenter_a
xcenter = tx * wa + xcenter_a
ymin = ycenter - h / 2.
xmin = xcenter - w / 2.
ymax = ycenter + h / 2.
xmax = xcenter + w / 2.
return box_list.BoxList(tf.transpose(a=tf.stack([ymin, xmin, ymax, xmax])))
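# Minimal worked example of the coding schema from the module docstring: for a
# box and an anchor that share the center (5, 5), with box size 6x6 and anchor
# size 10x10, ty = tx = 0 and th = tw = log(6 / 10) ~= -0.51 (before any
# scaling). The box values are illustrative toys and the BoxList helpers are
# assumed to follow the TF Object Detection API.
if __name__ == '__main__':
    coder = FasterRcnnBoxCoder()  # scale_factors=None -> no scaling
    anchors = box_list.BoxList(tf.constant([[0., 0., 10., 10.]]))  # [ymin, xmin, ymax, xmax]
    boxes = box_list.BoxList(tf.constant([[2., 2., 8., 8.]]))
    rel_codes = coder.encode(boxes, anchors)
    print(rel_codes.numpy())                                # ~[[0. 0. -0.51 -0.51]]
    print(coder.decode(rel_codes, anchors).get().numpy())   # recovers ~[[2. 2. 8. 8.]]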
| DeepLearningExamples-master | TensorFlow2/Segmentation/MaskRCNN/mrcnn_tf2/object_detection/faster_rcnn_box_coder.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utils used to manipulate tensor shapes."""
import tensorflow as tf
def combined_static_and_dynamic_shape(tensor):
"""Returns a list containing static and dynamic values for the dimensions.
Returns a list of static and dynamic values for shape dimensions. This is
useful to preserve static shapes when available in reshape operation.
Args:
tensor: A tensor of any type.
Returns:
A list of size tensor.shape.ndims containing integers or a scalar tensor.
"""
static_tensor_shape = tensor.shape.as_list()
dynamic_tensor_shape = tf.shape(input=tensor)
combined_shape = []
for index, dim in enumerate(static_tensor_shape):
if dim is not None:
combined_shape.append(dim)
else:
combined_shape.append(dynamic_tensor_shape[index])
return combined_shape
def pad_or_clip_nd(tensor, output_shape):
"""Pad or Clip given tensor to the output shape.
Args:
tensor: Input tensor to pad or clip.
output_shape: A list of integers / scalar tensors (or None for dynamic dim)
representing the size to pad or clip each dimension of the input tensor.
Returns:
Input tensor padded and clipped to the output shape.
"""
tensor_shape = tf.shape(input=tensor)
clip_size = [
tf.where(tensor_shape[i] - shape > 0, shape, -1)
if shape is not None else -1 for i, shape in enumerate(output_shape)
]
clipped_tensor = tf.slice(
tensor,
begin=tf.zeros(len(clip_size), dtype=tf.int32),
size=clip_size)
# Pad tensor if the shape of clipped tensor is smaller than the expected
# shape.
clipped_tensor_shape = tf.shape(input=clipped_tensor)
trailing_paddings = [
shape - clipped_tensor_shape[i] if shape is not None else 0
for i, shape in enumerate(output_shape)
]
paddings = tf.stack(
[
tf.zeros(len(trailing_paddings), dtype=tf.int32),
trailing_paddings
],
axis=1)
padded_tensor = tf.pad(tensor=clipped_tensor, paddings=paddings)
output_static_shape = [
dim if not isinstance(dim, tf.Tensor) else None for dim in output_shape
]
padded_tensor.set_shape(output_static_shape)
return padded_tensor
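# Minimal usage sketch: pad_or_clip_nd clips dimensions that exceed the target
# shape and zero-pads dimensions that fall short of it. The tensor below is an
# illustrative toy value.
if __name__ == '__main__':
    toy = tf.constant([[1, 2, 3],
                       [4, 5, 6]])
    out = pad_or_clip_nd(toy, output_shape=[3, 2])
    print(out.numpy())  # [[1 2] [4 5] [0 0]]: second dim clipped, first dim padded
    print(combined_static_and_dynamic_shape(out))  # [3, 2] (fully static here)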
| DeepLearningExamples-master | TensorFlow2/Segmentation/MaskRCNN/mrcnn_tf2/object_detection/shape_utils.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base box coder.
Box coders convert between coordinate frames, namely image-centric
(with (0,0) on the top left of image) and anchor-centric (with (0,0) being
defined by a specific anchor).
Users of a BoxCoder can call two methods:
encode: which encodes a box with respect to a given anchor
(or rather, a tensor of boxes wrt a corresponding tensor of anchors) and
decode: which inverts this encoding with a decode operation.
In both cases, the arguments are assumed to be in 1-1 correspondence already;
it is not the job of a BoxCoder to perform matching.
"""
from abc import ABCMeta, abstractmethod, abstractproperty
import tensorflow as tf
# Box coder types.
FASTER_RCNN = 'faster_rcnn'
KEYPOINT = 'keypoint'
MEAN_STDDEV = 'mean_stddev'
SQUARE = 'square'
class BoxCoder:
"""Abstract base class for box coder."""
__metaclass__ = ABCMeta
@abstractproperty
def code_size(self):
"""Return the size of each code.
This number is a constant and should agree with the output of the `encode`
op (e.g. if rel_codes is the output of self.encode(...), then it should have
shape [N, code_size()]). This abstractproperty should be overridden by
implementations.
Returns:
an integer constant
"""
def encode(self, boxes, anchors):
"""Encode a box list relative to an anchor collection.
Args:
boxes: BoxList holding N boxes to be encoded
anchors: BoxList of N anchors
Returns:
a tensor representing N relative-encoded boxes
"""
return self._encode(boxes, anchors)
def decode(self, rel_codes, anchors):
"""Decode boxes that are encoded relative to an anchor collection.
Args:
rel_codes: a tensor representing N relative-encoded boxes
anchors: BoxList of anchors
Returns:
boxlist: BoxList holding N boxes encoded in the ordinary way (i.e.,
with corners y_min, x_min, y_max, x_max)
"""
return self._decode(rel_codes, anchors)
@abstractmethod
def _encode(self, boxes, anchors):
"""Method to be overriden by implementations.
Args:
boxes: BoxList holding N boxes to be encoded
anchors: BoxList of N anchors
Returns:
a tensor representing N relative-encoded boxes
"""
@abstractmethod
def _decode(self, rel_codes, anchors):
"""Method to be overriden by implementations.
Args:
rel_codes: a tensor representing N relative-encoded boxes
anchors: BoxList of anchors
Returns:
boxlist: BoxList holding N boxes encoded in the ordinary way (i.e.,
with corners y_min, x_min, y_max, x_max)
"""
def batch_decode(encoded_boxes, box_coder, anchors):
"""Decode a batch of encoded boxes.
This op takes a batch of encoded bounding boxes and transforms
them to a batch of bounding boxes specified by their corners in
the order of [y_min, x_min, y_max, x_max].
Args:
encoded_boxes: a float32 tensor of shape [batch_size, num_anchors,
code_size] representing the location of the objects.
box_coder: a BoxCoder object.
anchors: a BoxList of anchors used to encode `encoded_boxes`.
Returns:
decoded_boxes: a float32 tensor of shape [batch_size, num_anchors,
coder_size] representing the corners of the objects in the order
of [y_min, x_min, y_max, x_max].
Raises:
ValueError: if batch sizes of the inputs are inconsistent, or if
the number of anchors inferred from encoded_boxes and anchors are
inconsistent.
"""
if encoded_boxes.get_shape()[1].value != anchors.num_boxes_static():
raise ValueError('The number of anchors inferred from encoded_boxes'
' and anchors are inconsistent: shape[1] of encoded_boxes'
' %s should be equal to the number of anchors: %s.' %
(
encoded_boxes.get_shape()[1].value,
anchors.num_boxes_static()
)
)
decoded_boxes = tf.stack([
box_coder.decode(boxes, anchors).get()
for boxes in tf.unstack(encoded_boxes)
])
return decoded_boxes
| DeepLearningExamples-master | TensorFlow2/Segmentation/MaskRCNN/mrcnn_tf2/object_detection/box_coder.py |
DeepLearningExamples-master | TensorFlow2/Segmentation/MaskRCNN/mrcnn_tf2/object_detection/__init__.py |
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base minibatch sampler module.
The job of the minibatch_sampler is to subsample a minibatch based on some
criterion.
The main function call is:
subsample(indicator, batch_size, **params).
Indicator is a 1d boolean tensor where True denotes which examples can be
sampled. It returns a boolean indicator where True denotes an example has been
sampled.
Subclasses should implement the Subsample function and can make use of the
@staticmethod SubsampleIndicator.
This is originally implemented in TensorFlow Object Detection API.
"""
from abc import ABCMeta, abstractmethod
import tensorflow as tf
from mrcnn_tf2.object_detection import ops
class MinibatchSampler:
"""Abstract base class for subsampling minibatches."""
__metaclass__ = ABCMeta
def __init__(self):
"""Constructs a minibatch sampler."""
@abstractmethod
def subsample(self, indicator, batch_size, **params):
"""Returns subsample of entries in indicator.
Args:
indicator: boolean tensor of shape [N] whose True entries can be sampled.
batch_size: desired batch size.
**params: additional keyword arguments for specific implementations of
the MinibatchSampler.
Returns:
sample_indicator: boolean tensor of shape [N] whose True entries have been
sampled. If sum(indicator) >= batch_size, sum(is_sampled) = batch_size
"""
@staticmethod
def subsample_indicator(indicator, num_samples):
"""Subsample indicator vector.
Given a boolean indicator vector with M elements set to `True`, the function
assigns all but `num_samples` of these previously `True` elements to
`False`. If `num_samples` is greater than M, the original indicator vector
is returned.
Args:
indicator: a 1-dimensional boolean tensor indicating which elements
are allowed to be sampled and which are not.
num_samples: int32 scalar tensor
Returns:
a boolean tensor with the same shape as input (indicator) tensor
"""
indices = tf.where(indicator)
indices = tf.random.shuffle(indices)
indices = tf.reshape(indices, [-1])
num_samples = tf.minimum(tf.size(input=indices), num_samples)
selected_indices = tf.slice(indices, [0], tf.reshape(num_samples, [1]))
selected_indicator = ops.indices_to_dense_vector(selected_indices,
tf.shape(input=indicator)[0])
return tf.equal(selected_indicator, 1)
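# Illustrative usage sketch added for documentation; not part of the original
# file and never called by the library code. The indicator values and the
# sample count of 2 are arbitrary assumptions; assumes TF2 eager execution.
def _example_subsample_indicator():
    indicator = tf.constant([True, True, False, True, True])
    # Returns a boolean vector of shape [5] with exactly two True entries,
    # all drawn from positions that were True in the input indicator.
    return MinibatchSampler.subsample_indicator(indicator, 2)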
| DeepLearningExamples-master | TensorFlow2/Segmentation/MaskRCNN/mrcnn_tf2/object_detection/minibatch_sampler.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Preprocess images and bounding boxes for detection.
We perform two sets of operations in preprocessing stage:
(a) operations that are applied to both training and testing data,
(b) operations that are applied only to training data for the purpose of
data augmentation.
A preprocessing function receives a set of inputs,
e.g. an image and bounding boxes,
performs an operation on them, and returns them.
Some examples are: randomly cropping the image, randomly mirroring the image,
randomly changing the brightness, contrast, hue and
randomly jittering the bounding boxes.
The image is a rank 4 tensor: [1, height, width, channels] with
dtype=tf.float32. The groundtruth_boxes is a rank 2 tensor: [N, 4] where
in each row there is a box with [ymin xmin ymax xmax].
Boxes are in normalized coordinates meaning
their coordinate values range in [0, 1]
Important Note: In tensor_dict, images is a rank 4 tensor, but preprocessing
functions receive a rank 3 tensor for processing the image. Thus, inside the
preprocess function we squeeze the image to become a rank 3 tensor and then
we pass it to the functions. At the end of the preprocess we expand the image
back to rank 4.
"""
import tensorflow as tf
from mrcnn_tf2.object_detection import box_list
def _flip_boxes_left_right(boxes):
"""Left-right flip the boxes.
Args:
boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4].
Boxes are in normalized form meaning their coordinates vary
between [0, 1].
Each row is in the form of [ymin, xmin, ymax, xmax].
Returns:
Flipped boxes.
"""
ymin, xmin, ymax, xmax = tf.split(value=boxes, num_or_size_splits=4, axis=1)
flipped_xmin = tf.subtract(1.0, xmax)
flipped_xmax = tf.subtract(1.0, xmin)
flipped_boxes = tf.concat([ymin, flipped_xmin, ymax, flipped_xmax], 1)
return flipped_boxes
def _flip_masks_left_right(masks):
"""Left-right flip masks.
Args:
masks: rank 3 float32 tensor with shape
[num_instances, height, width] representing instance masks.
Returns:
flipped masks: rank 3 float32 tensor with shape
[num_instances, height, width] representing instance masks.
"""
return masks[:, :, ::-1]
def keypoint_flip_horizontal(keypoints, flip_point, flip_permutation,
scope=None):
"""Flips the keypoints horizontally around the flip_point.
This operation flips the x coordinate for each keypoint around the flip_point
and also permutes the keypoints in a manner specified by flip_permutation.
Args:
keypoints: a tensor of shape [num_instances, num_keypoints, 2]
flip_point: (float) scalar tensor representing the x coordinate to flip the
keypoints around.
flip_permutation: rank 1 int32 tensor containing the keypoint flip
permutation. This specifies the mapping from original keypoint indices
to the flipped keypoint indices. This is used primarily for keypoints
that are not reflection invariant. E.g. Suppose there are 3 keypoints
representing ['head', 'right_eye', 'left_eye'], then a logical choice for
flip_permutation might be [0, 2, 1] since we want to swap the 'left_eye'
and 'right_eye' after a horizontal flip.
scope: name scope.
Returns:
new_keypoints: a tensor of shape [num_instances, num_keypoints, 2]
"""
keypoints = tf.transpose(a=keypoints, perm=[1, 0, 2])
keypoints = tf.gather(keypoints, flip_permutation)
v, u = tf.split(value=keypoints, num_or_size_splits=2, axis=2)
u = flip_point * 2.0 - u
new_keypoints = tf.concat([v, u], 2)
new_keypoints = tf.transpose(a=new_keypoints, perm=[1, 0, 2])
return new_keypoints
def random_horizontal_flip(image,
boxes=None,
masks=None,
keypoints=None,
keypoint_flip_permutation=None,
seed=None):
"""Randomly flips the image and detections horizontally.
The probability of flipping the image is 50%.
Args:
image: rank 3 float32 tensor with shape [height, width, channels].
boxes: (optional) rank 2 float32 tensor with shape [N, 4]
containing the bounding boxes.
Boxes are in normalized form meaning their coordinates vary
between [0, 1].
Each row is in the form of [ymin, xmin, ymax, xmax].
masks: (optional) rank 3 float32 tensor with shape
[num_instances, height, width] containing instance masks. The masks
are of the same height, width as the input `image`.
keypoints: (optional) rank 3 float32 tensor with shape
[num_instances, num_keypoints, 2]. The keypoints are in y-x
normalized coordinates.
keypoint_flip_permutation: rank 1 int32 tensor containing the keypoint flip
permutation.
seed: random seed
Returns:
image: image which is the same shape as input image.
If boxes, masks, keypoints, and keypoint_flip_permutation are not None,
the function also returns the following tensors.
boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4].
Boxes are in normalized form meaning their coordinates vary
between [0, 1].
masks: rank 3 float32 tensor with shape [num_instances, height, width]
containing instance masks.
keypoints: rank 3 float32 tensor with shape
[num_instances, num_keypoints, 2]
Raises:
ValueError: if keypoints are provided but keypoint_flip_permutation is not.
"""
def _flip_image(image):
# flip image
image_flipped = tf.image.flip_left_right(image)
return image_flipped
if keypoints is not None and keypoint_flip_permutation is None:
raise ValueError(
'keypoints are provided but keypoint_flip_permutation is not provided')
result = []
# random variable defining whether to do flip or not
do_a_flip_random = tf.greater(tf.random.uniform([], seed=seed), 0.5)
# flip image
image = tf.cond(pred=do_a_flip_random, true_fn=lambda: _flip_image(image), false_fn=lambda: image)
result.append(image)
# flip boxes
if boxes is not None:
boxes = tf.cond(pred=do_a_flip_random, true_fn=lambda: _flip_boxes_left_right(boxes),
false_fn=lambda: boxes)
result.append(boxes)
# flip masks
if masks is not None:
masks = tf.cond(pred=do_a_flip_random, true_fn=lambda: _flip_masks_left_right(masks),
false_fn=lambda: masks)
result.append(masks)
# flip keypoints
if keypoints is not None and keypoint_flip_permutation is not None:
permutation = keypoint_flip_permutation
keypoints = tf.cond(
pred=do_a_flip_random,
true_fn=lambda: keypoint_flip_horizontal(keypoints, 0.5, permutation),
false_fn=lambda: keypoints)
result.append(keypoints)
return tuple(result)
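# Illustrative usage sketch added for documentation; not part of the original
# file. Shows how an image and its boxes are flipped together; the dummy
# shapes and the seed value are arbitrary assumptions.
def _example_random_horizontal_flip():
    image = tf.zeros([8, 8, 3], dtype=tf.float32)
    boxes = tf.constant([[0.1, 0.2, 0.5, 0.6]], dtype=tf.float32)
    # Returns (image, boxes); either both are flipped or both are unchanged.
    return random_horizontal_flip(image, boxes=boxes, seed=17)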
def _compute_new_static_size(image, min_dimension, max_dimension):
"""Compute new static shape for resize_to_range method."""
image_shape = image.get_shape().as_list()
orig_height = image_shape[0]
orig_width = image_shape[1]
num_channels = image_shape[2]
orig_min_dim = min(orig_height, orig_width)
# Calculates the larger of the possible sizes
large_scale_factor = min_dimension / float(orig_min_dim)
# Scaling orig_(height|width) by large_scale_factor will make the smaller
# dimension equal to min_dimension, save for floating point rounding errors.
# For reasonably-sized images, taking the nearest integer will reliably
# eliminate this error.
large_height = int(round(orig_height * large_scale_factor))
large_width = int(round(orig_width * large_scale_factor))
large_size = [large_height, large_width]
if max_dimension:
# Calculates the smaller of the possible sizes, use that if the larger
# is too big.
orig_max_dim = max(orig_height, orig_width)
small_scale_factor = max_dimension / float(orig_max_dim)
# Scaling orig_(height|width) by small_scale_factor will make the larger
# dimension equal to max_dimension, save for floating point rounding
# errors. For reasonably-sized images, taking the nearest integer will
# reliably eliminate this error.
small_height = int(round(orig_height * small_scale_factor))
small_width = int(round(orig_width * small_scale_factor))
small_size = [small_height, small_width]
new_size = large_size
if max(large_size) > max_dimension:
new_size = small_size
else:
new_size = large_size
return tf.constant(new_size + [num_channels])
def _compute_new_dynamic_size(image, min_dimension, max_dimension):
"""Compute new dynamic shape for resize_to_range method."""
image_shape = tf.shape(input=image)
orig_height = tf.cast(image_shape[0], dtype=tf.float32)
orig_width = tf.cast(image_shape[1], dtype=tf.float32)
num_channels = image_shape[2]
orig_min_dim = tf.minimum(orig_height, orig_width)
# Calculates the larger of the possible sizes
min_dimension = tf.constant(min_dimension, dtype=tf.float32)
large_scale_factor = min_dimension / orig_min_dim
# Scaling orig_(height|width) by large_scale_factor will make the smaller
# dimension equal to min_dimension, save for floating point rounding errors.
# For reasonably-sized images, taking the nearest integer will reliably
# eliminate this error.
large_height = tf.cast(tf.round(orig_height * large_scale_factor), dtype=tf.int32)
large_width = tf.cast(tf.round(orig_width * large_scale_factor), dtype=tf.int32)
large_size = tf.stack([large_height, large_width])
if max_dimension:
# Calculates the smaller of the possible sizes, use that if the larger
# is too big.
orig_max_dim = tf.maximum(orig_height, orig_width)
max_dimension = tf.constant(max_dimension, dtype=tf.float32)
small_scale_factor = max_dimension / orig_max_dim
# Scaling orig_(height|width) by small_scale_factor will make the larger
# dimension equal to max_dimension, save for floating point rounding
# errors. For reasonably-sized images, taking the nearest integer will
# reliably eliminate this error.
small_height = tf.cast(tf.round(orig_height * small_scale_factor), dtype=tf.int32)
small_width = tf.cast(tf.round(orig_width * small_scale_factor), dtype=tf.int32)
small_size = tf.stack([small_height, small_width])
new_size = tf.cond(
pred=tf.cast(tf.reduce_max(input_tensor=large_size), dtype=tf.float32) > max_dimension,
true_fn=lambda: small_size, false_fn=lambda: large_size)
else:
new_size = large_size
return tf.stack(tf.unstack(new_size) + [num_channels])
def resize_to_range(image,
masks=None,
min_dimension=None,
max_dimension=None,
method=tf.image.ResizeMethod.BILINEAR,
align_corners=False,
pad_to_max_dimension=False):
"""Resizes an image so its dimensions are within the provided value.
The output size can be described by two cases:
1. If the image can be rescaled so its minimum dimension is equal to the
provided value without the other dimension exceeding max_dimension,
then do so.
2. Otherwise, resize so the largest dimension is equal to max_dimension.
Args:
image: A 3D tensor of shape [height, width, channels]
masks: (optional) rank 3 float32 tensor with shape
[num_instances, height, width] containing instance masks.
min_dimension: (optional) (scalar) desired size of the smaller image
dimension.
max_dimension: (optional) (scalar) maximum allowed size
of the larger image dimension.
method: (optional) interpolation method used in resizing. Defaults to
BILINEAR.
align_corners: bool. If true, exactly align all 4 corners of the input
and output. Defaults to False.
pad_to_max_dimension: Whether to resize the image and pad it with zeros
so the resulting image is of the spatial size
[max_dimension, max_dimension]. If masks are included they are padded
similarly.
Returns:
Note that the position of the resized_image_shape changes based on whether
masks are present.
resized_image: A 3D tensor of shape [new_height, new_width, channels],
where the image has been resized (with bilinear interpolation) so that
min(new_height, new_width) == min_dimension or
max(new_height, new_width) == max_dimension.
resized_masks: If masks is not None, also outputs masks. A 3D tensor of
shape [num_instances, new_height, new_width].
resized_image_shape: A 1D tensor of shape [3] containing shape of the
resized image.
Raises:
ValueError: if the image is not a 3D tensor.
"""
if len(image.get_shape()) != 3:
raise ValueError('Image should be 3D tensor')
if image.get_shape().is_fully_defined():
new_size = _compute_new_static_size(image, min_dimension, max_dimension)
else:
new_size = _compute_new_dynamic_size(image, min_dimension, max_dimension)
new_image = tf.image.resize(
image, new_size[:-1], method=method)
if pad_to_max_dimension:
new_image = tf.image.pad_to_bounding_box(
new_image, 0, 0, max_dimension, max_dimension)
result = [new_image]
if masks is not None:
new_masks = tf.expand_dims(masks, 3)
new_masks = tf.image.resize(
new_masks,
new_size[:-1],
method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
new_masks = tf.squeeze(new_masks, 3)
if pad_to_max_dimension:
new_masks = tf.image.pad_to_bounding_box(
new_masks, 0, 0, max_dimension, max_dimension)
result.append(new_masks)
result.append(new_size)
return result
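# Illustrative usage sketch added for documentation; not part of the original
# file. The input shape and the 64/128 limits are arbitrary assumptions.
def _example_resize_to_range():
    image = tf.zeros([100, 300, 3], dtype=tf.float32)
    # Scaling the short side to 64 would make the long side 192 > 128, so the
    # long side is clamped to 128 instead (case 2 in the docstring above).
    resized_image, new_size = resize_to_range(image, min_dimension=64, max_dimension=128)
    return resized_image, new_size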
def _copy_extra_fields(boxlist_to_copy_to, boxlist_to_copy_from):
"""Copies the extra fields of boxlist_to_copy_from to boxlist_to_copy_to.
Args:
boxlist_to_copy_to: BoxList to which extra fields are copied.
boxlist_to_copy_from: BoxList from which fields are copied.
Returns:
boxlist_to_copy_to with extra fields.
"""
for field in boxlist_to_copy_from.get_extra_fields():
boxlist_to_copy_to.add_field(field, boxlist_to_copy_from.get_field(field))
return boxlist_to_copy_to
def box_list_scale(boxlist, y_scale, x_scale, scope=None):
"""scale box coordinates in x and y dimensions.
Args:
boxlist: BoxList holding N boxes
y_scale: (float) scalar tensor
x_scale: (float) scalar tensor
scope: name scope.
Returns:
boxlist: BoxList holding N boxes
"""
y_scale = tf.cast(y_scale, tf.float32)
x_scale = tf.cast(x_scale, tf.float32)
y_min, x_min, y_max, x_max = tf.split(
value=boxlist.get(), num_or_size_splits=4, axis=1)
y_min = y_scale * y_min
y_max = y_scale * y_max
x_min = x_scale * x_min
x_max = x_scale * x_max
scaled_boxlist = box_list.BoxList(
tf.concat([y_min, x_min, y_max, x_max], 1))
return _copy_extra_fields(scaled_boxlist, boxlist)
def keypoint_scale(keypoints, y_scale, x_scale, scope=None):
"""Scales keypoint coordinates in x and y dimensions.
Args:
keypoints: a tensor of shape [num_instances, num_keypoints, 2]
y_scale: (float) scalar tensor
x_scale: (float) scalar tensor
scope: name scope.
Returns:
new_keypoints: a tensor of shape [num_instances, num_keypoints, 2]
"""
y_scale = tf.cast(y_scale, tf.float32)
x_scale = tf.cast(x_scale, tf.float32)
new_keypoints = keypoints * [[[y_scale, x_scale]]]
return new_keypoints
def scale_boxes_to_pixel_coordinates(image, boxes, keypoints=None):
"""Scales boxes from normalized to pixel coordinates.
Args:
image: A 3D float32 tensor of shape [height, width, channels].
boxes: A 2D float32 tensor of shape [num_boxes, 4] containing the bounding
boxes in normalized coordinates. Each row is of the form
[ymin, xmin, ymax, xmax].
keypoints: (optional) rank 3 float32 tensor with shape
[num_instances, num_keypoints, 2]. The keypoints are in y-x normalized
coordinates.
Returns:
image: unchanged input image.
scaled_boxes: a 2D float32 tensor of shape [num_boxes, 4] containing the
bounding boxes in pixel coordinates.
scaled_keypoints: a 3D float32 tensor with shape
[num_instances, num_keypoints, 2] containing the keypoints in pixel
coordinates.
"""
boxlist = box_list.BoxList(boxes)
image_height = tf.shape(input=image)[0]
image_width = tf.shape(input=image)[1]
scaled_boxes = box_list_scale(boxlist, image_height, image_width).get()
result = [image, scaled_boxes]
if keypoints is not None:
scaled_keypoints = keypoint_scale(keypoints, image_height, image_width)
result.append(scaled_keypoints)
return tuple(result)
| DeepLearningExamples-master | TensorFlow2/Segmentation/MaskRCNN/mrcnn_tf2/object_detection/preprocessor.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A module for helper tensorflow ops.
This is originally implemented in TensorFlow Object Detection API.
"""
import tensorflow as tf
from mrcnn_tf2.object_detection import shape_utils
def indices_to_dense_vector(indices,
size,
indices_value=1.,
default_value=0,
dtype=tf.float32):
"""Creates dense vector with indices set to specific value and rest to zeros.
This function exists because it is unclear if it is safe to use
tf.sparse_to_dense(indices, [size], 1, validate_indices=False)
with indices which are not ordered.
This function accepts a dynamic size (e.g. tf.shape(tensor)[0])
Args:
indices: 1d Tensor with integer indices which are to be set to
indices_values.
size: scalar with size (integer) of output Tensor.
indices_value: values of elements specified by indices in the output vector
default_value: values of other elements in the output vector.
dtype: data type.
Returns:
dense 1D Tensor of shape [size] with indices set to indices_values and the
rest set to default_value.
"""
size = tf.cast(size, dtype=tf.int32)
zeros = tf.ones([size], dtype=dtype) * default_value
values = tf.ones_like(indices, dtype=dtype) * indices_value
return tf.dynamic_stitch([tf.range(size), tf.cast(indices, dtype=tf.int32)],
[zeros, values])
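# Illustrative usage sketch added for documentation; not part of the original
# file. Demonstrates the scatter behaviour of indices_to_dense_vector.
def _example_indices_to_dense_vector():
    dense = indices_to_dense_vector(tf.constant([1, 3]), 5)
    return dense  # -> [0., 1., 0., 1., 0.]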
def matmul_gather_on_zeroth_axis(params, indices, scope=None):
"""Matrix multiplication based implementation of tf.gather on zeroth axis.
TODO(rathodv, jonathanhuang): enable sparse matmul option.
Args:
params: A float32 Tensor. The tensor from which to gather values.
Must be at least rank 1.
indices: A Tensor. Must be one of the following types: int32, int64.
Must be in range [0, params.shape[0])
scope: A name for the operation (optional).
Returns:
A Tensor. Has the same type as params. Values from params gathered
from indices given by indices, with shape indices.shape + params.shape[1:].
"""
params_shape = shape_utils.combined_static_and_dynamic_shape(params)
indices_shape = shape_utils.combined_static_and_dynamic_shape(indices)
params2d = tf.reshape(params, [params_shape[0], -1])
indicator_matrix = tf.one_hot(indices, params_shape[0])
gathered_result_flattened = tf.matmul(indicator_matrix, params2d)
return tf.reshape(gathered_result_flattened,
tf.stack(indices_shape + params_shape[1:]))
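# Illustrative usage sketch added for documentation; not part of the original
# file. The matmul-based gather matches tf.gather(params, indices) on axis 0.
def _example_matmul_gather_on_zeroth_axis():
    params = tf.constant([[1., 2.], [3., 4.], [5., 6.]])
    indices = tf.constant([2, 0], dtype=tf.int32)
    return matmul_gather_on_zeroth_axis(params, indices)  # -> [[5., 6.], [1., 2.]]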
| DeepLearningExamples-master | TensorFlow2/Segmentation/MaskRCNN/mrcnn_tf2/object_detection/ops.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Class to subsample minibatches by balancing positives and negatives.
Subsamples minibatches based on a pre-specified positive fraction in range
[0,1]. The class presumes there are many more negatives than positive examples:
if the desired batch_size cannot be achieved with the pre-specified positive
fraction, it fills the rest with negative examples. If this is not sufficient
for obtaining the desired batch_size, it returns fewer examples.
The main function to call is Subsample(self, indicator, labels). For convenience
one can also call SubsampleWeights(self, weights, labels) which is defined in
the minibatch_sampler base class.
When is_static is True, it implements a method that guarantees static shapes.
It also ensures that the output of the subsample always has length batch_size,
even when the number of examples set to True in indicator is less than
batch_size.
This is originally implemented in TensorFlow Object Detection API.
"""
import tensorflow as tf
from mrcnn_tf2.object_detection import minibatch_sampler, ops
class BalancedPositiveNegativeSampler(minibatch_sampler.MinibatchSampler):
"""Subsamples minibatches to a desired balance of positives and negatives."""
def __init__(self, positive_fraction=0.5, is_static=False):
"""Constructs a minibatch sampler.
Args:
positive_fraction: desired fraction of positive examples (scalar in [0,1])
in the batch.
is_static: If True, uses an implementation with static shape guarantees.
Raises:
ValueError: if positive_fraction < 0, or positive_fraction > 1
"""
if positive_fraction < 0 or positive_fraction > 1:
raise ValueError('positive_fraction should be in range [0,1]. '
'Received: %s.' % positive_fraction)
self._positive_fraction = positive_fraction
self._is_static = is_static
def _get_num_pos_neg_samples(self, sorted_indices_tensor, sample_size):
"""Counts the number of positives and negatives numbers to be sampled.
Args:
sorted_indices_tensor: A sorted int32 tensor of shape [N] which contains
the signed indices of the examples where the sign is based on the label
value. The examples that cannot be sampled are set to 0. It samples
at most sample_size * positive_fraction positive examples and the remaining
from negative examples.
sample_size: Size of subsamples.
Returns:
A tuple containing the number of positive and negative labels in the
subsample.
"""
input_length = tf.shape(input=sorted_indices_tensor)[0]
valid_positive_index = tf.greater(sorted_indices_tensor,
tf.zeros(input_length, tf.int32))
num_sampled_pos = tf.reduce_sum(input_tensor=tf.cast(valid_positive_index, tf.int32))
max_num_positive_samples = tf.constant(
int(sample_size * self._positive_fraction), tf.int32)
num_positive_samples = tf.minimum(max_num_positive_samples, num_sampled_pos)
num_negative_samples = tf.constant(sample_size,
tf.int32) - num_positive_samples
return num_positive_samples, num_negative_samples
def _get_values_from_start_and_end(self, input_tensor, num_start_samples,
num_end_samples, total_num_samples):
"""slices num_start_samples and last num_end_samples from input_tensor.
Args:
input_tensor: An int32 tensor of shape [N] to be sliced.
num_start_samples: Number of examples to be sliced from the beginning
of the input tensor.
num_end_samples: Number of examples to be sliced from the end of the
input tensor.
total_num_samples: Sum of num_start_samples and num_end_samples. This
should be a scalar.
Returns:
A tensor containing the first num_start_samples and last num_end_samples
from input_tensor.
"""
input_length = tf.shape(input=input_tensor)[0]
start_positions = tf.less(tf.range(input_length), num_start_samples)
end_positions = tf.greater_equal(
tf.range(input_length), input_length - num_end_samples)
selected_positions = tf.logical_or(start_positions, end_positions)
selected_positions = tf.cast(selected_positions, tf.float32)
indexed_positions = tf.multiply(tf.cumsum(selected_positions),
selected_positions)
one_hot_selector = tf.one_hot(tf.cast(indexed_positions, tf.int32) - 1,
total_num_samples,
dtype=tf.float32)
return tf.cast(tf.tensordot(tf.cast(input_tensor, tf.float32),
one_hot_selector, axes=[0, 0]), tf.int32)
def _static_subsample(self, indicator, batch_size, labels):
"""Returns subsampled minibatch.
Args:
indicator: boolean tensor of shape [N] whose True entries can be sampled.
N should be a compile-time constant.
batch_size: desired batch size. This scalar cannot be None.
labels: boolean tensor of shape [N] denoting positive(=True) and negative
(=False) examples. N should be a compile-time constant.
Returns:
sampled_idx_indicator: boolean tensor of shape [N], True for entries which
are sampled. The output always has length batch_size, even when the number
of examples set to True in indicator is
less than batch_size.
Raises:
ValueError: if labels and indicator are not 1D boolean tensors.
"""
# Check if indicator and labels have a static size.
if not indicator.shape.is_fully_defined():
raise ValueError('indicator must be static in shape when is_static is'
' True')
if not labels.shape.is_fully_defined():
raise ValueError('labels must be static in shape when is_static is'
' True')
if not isinstance(batch_size, int):
raise ValueError('batch_size has to be an integer when is_static is'
' True.')
input_length = tf.shape(input=indicator)[0]
# Set the number of examples set True in indicator to be at least
# batch_size.
num_true_sampled = tf.reduce_sum(input_tensor=tf.cast(indicator, tf.float32))
additional_false_sample = tf.less_equal(
tf.cumsum(tf.cast(tf.logical_not(indicator), tf.float32)),
batch_size - num_true_sampled)
indicator = tf.logical_or(indicator, additional_false_sample)
# Shuffle indicator and label. Need to store the permutation to restore the
# order post sampling.
permutation = tf.random.shuffle(tf.range(input_length))
indicator = ops.matmul_gather_on_zeroth_axis(
tf.cast(indicator, tf.float32), permutation)
labels = ops.matmul_gather_on_zeroth_axis(
tf.cast(labels, tf.float32), permutation)
# index (starting from 1) when indicator is True, 0 when False
indicator_idx = tf.where(
tf.cast(indicator, tf.bool), tf.range(1, input_length + 1),
tf.zeros(input_length, tf.int32))
# Replace -1 for negative, +1 for positive labels
signed_label = tf.where(
tf.cast(labels, tf.bool), tf.ones(input_length, tf.int32),
tf.scalar_mul(-1, tf.ones(input_length, tf.int32)))
# negative of index for negative label, positive index for positive label,
# 0 when indicator is False.
signed_indicator_idx = tf.multiply(indicator_idx, signed_label)
sorted_signed_indicator_idx = tf.nn.top_k(
signed_indicator_idx, input_length, sorted=True).values
[num_positive_samples,
num_negative_samples] = self._get_num_pos_neg_samples(
sorted_signed_indicator_idx, batch_size)
sampled_idx = self._get_values_from_start_and_end(
sorted_signed_indicator_idx, num_positive_samples,
num_negative_samples, batch_size)
# Shift the indices to start from 0 and remove any samples that are set as
# False.
sampled_idx = tf.abs(sampled_idx) - tf.ones(batch_size, tf.int32)
sampled_idx = tf.multiply(
tf.cast(tf.greater_equal(sampled_idx, tf.constant(0)), tf.int32),
sampled_idx)
sampled_idx_indicator = tf.cast(tf.reduce_sum(
input_tensor=tf.one_hot(sampled_idx, depth=input_length),
axis=0), tf.bool)
# project back the order based on stored permutations
reprojections = tf.one_hot(permutation, depth=input_length,
dtype=tf.float32)
return tf.cast(tf.tensordot(
tf.cast(sampled_idx_indicator, tf.float32),
reprojections, axes=[0, 0]), tf.bool)
def subsample(self, indicator, batch_size, labels, scope=None):
"""Returns subsampled minibatch.
Args:
indicator: boolean tensor of shape [N] whose True entries can be sampled.
batch_size: desired batch size. If None, keeps all positive samples and
randomly selects negative samples so that the positive sample fraction
matches self._positive_fraction. It cannot be None if is_static is True.
labels: boolean tensor of shape [N] denoting positive(=True) and negative
(=False) examples.
scope: name scope.
Returns:
sampled_idx_indicator: boolean tensor of shape [N], True for entries which
are sampled.
Raises:
ValueError: if labels and indicator are not 1D boolean tensors.
"""
if len(indicator.get_shape().as_list()) != 1:
raise ValueError('indicator must be 1 dimensional, got a tensor of '
'shape %s' % indicator.get_shape())
if len(labels.get_shape().as_list()) != 1:
raise ValueError('labels must be 1 dimensional, got a tensor of '
'shape %s' % labels.get_shape())
if labels.dtype != tf.bool:
raise ValueError('labels should be of type bool. Received: %s' %
labels.dtype)
if indicator.dtype != tf.bool:
raise ValueError('indicator should be of type bool. Received: %s' %
indicator.dtype)
if self._is_static:
return self._static_subsample(indicator, batch_size, labels)
else:
# Only sample from indicated samples
negative_idx = tf.logical_not(labels)
positive_idx = tf.logical_and(labels, indicator)
negative_idx = tf.logical_and(negative_idx, indicator)
# Sample positive and negative samples separately
if batch_size is None:
max_num_pos = tf.reduce_sum(input_tensor=tf.cast(positive_idx, dtype=tf.int32))
else:
max_num_pos = int(self._positive_fraction * batch_size)
sampled_pos_idx = self.subsample_indicator(positive_idx, max_num_pos)
num_sampled_pos = tf.reduce_sum(input_tensor=tf.cast(sampled_pos_idx, tf.int32))
if batch_size is None:
negative_positive_ratio = (
1 - self._positive_fraction) / self._positive_fraction
max_num_neg = tf.cast(
negative_positive_ratio * tf.cast(num_sampled_pos, dtype=tf.float32), dtype=tf.int32)
else:
max_num_neg = batch_size - num_sampled_pos
sampled_neg_idx = self.subsample_indicator(negative_idx, max_num_neg)
return tf.logical_or(sampled_pos_idx, sampled_neg_idx)
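# Illustrative usage sketch added for documentation; not part of the original
# file. The indicator/label values and the batch size of 4 are arbitrary
# assumptions; assumes TF2 eager execution.
def _example_balanced_subsample():
    sampler = BalancedPositiveNegativeSampler(positive_fraction=0.5)
    indicator = tf.constant([True, True, True, True, True, True])
    labels = tf.constant([True, True, True, False, False, False])
    # At most 2 positives (0.5 * 4) are kept and negatives fill the rest.
    return sampler.subsample(indicator, 4, labels)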
| DeepLearningExamples-master | TensorFlow2/Segmentation/MaskRCNN/mrcnn_tf2/object_detection/balanced_positive_negative_sampler.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Bounding Box List definition.
BoxList represents a list of bounding boxes as tensorflow
tensors, where each bounding box is represented as a row of 4 numbers,
[y_min, x_min, y_max, x_max]. It is assumed that all bounding boxes
within a given list correspond to a single image. See also
box_list_ops.py for common box related operations (such as area, iou, etc).
Optionally, users can add additional related fields (such as weights).
We assume the following things to be true about fields:
* they correspond to boxes in the box_list along the 0th dimension
* they have inferrable rank at graph construction time
* all dimensions except for possibly the 0th can be inferred
(i.e., not None) at graph construction time.
Some other notes:
* Following tensorflow conventions, we use height, width ordering,
and correspondingly, y,x (or ymin, xmin, ymax, xmax) ordering
* Tensors are always provided as (flat) [N, 4] tensors.
"""
import tensorflow as tf
class BoxList(object):
"""Box collection."""
def __init__(self, boxes):
"""Constructs box collection.
Args:
boxes: a tensor of shape [N, 4] representing box corners
Raises:
ValueError: if invalid dimensions for bbox data or if bbox data is not in
float32 format.
"""
if len(boxes.get_shape()) != 2 or boxes.get_shape()[-1] != 4:
raise ValueError('Invalid dimensions for box data.')
if boxes.dtype != tf.float32:
raise ValueError('Invalid tensor type: should be tf.float32')
self.data = {'boxes': boxes}
def num_boxes(self):
"""Returns number of boxes held in collection.
Returns:
a tensor representing the number of boxes held in the collection.
"""
return tf.shape(input=self.data['boxes'])[0]
def num_boxes_static(self):
"""Returns number of boxes held in collection.
This number is inferred at graph construction time rather than run-time.
Returns:
Number of boxes held in collection (integer) or None if this is not
inferrable at graph construction time.
"""
try:
return self.data['boxes'].get_shape()[0].value
except AttributeError:
return self.data['boxes'].get_shape()[0]
def get_all_fields(self):
"""Returns all fields."""
return self.data.keys()
def get_extra_fields(self):
"""Returns all non-box fields (i.e., everything not named 'boxes')."""
return [k for k in self.data if k != 'boxes']
def add_field(self, field, field_data):
"""Add field to box list.
This method can be used to add related box data such as
weights/labels, etc.
Args:
field: a string key to access the data via `get`
field_data: a tensor containing the data to store in the BoxList
"""
self.data[field] = field_data
def has_field(self, field):
return field in self.data
def get(self):
"""Convenience function for accessing box coordinates.
Returns:
a tensor with shape [N, 4] representing box coordinates.
"""
return self.get_field('boxes')
def set(self, boxes):
"""Convenience function for setting box coordinates.
Args:
boxes: a tensor of shape [N, 4] representing box corners
Raises:
ValueError: if invalid dimensions for bbox data
"""
if len(boxes.get_shape()) != 2 or boxes.get_shape()[-1] != 4:
raise ValueError('Invalid dimensions for box data.')
self.data['boxes'] = boxes
def get_field(self, field):
"""Accesses a box collection and associated fields.
This function returns specified field with object; if no field is specified,
it returns the box coordinates.
Args:
field: this optional string parameter can be used to specify
a related field to be accessed.
Returns:
a tensor representing the box collection or an associated field.
Raises:
ValueError: if invalid field
"""
if not self.has_field(field):
raise ValueError('field ' + str(field) + ' does not exist')
return self.data[field]
def set_field(self, field, value):
"""Sets the value of a field.
Updates the field of a box_list with a given value.
Args:
field: (string) name of the field to set value.
value: the value to assign to the field.
Raises:
ValueError: if the box_list does not have specified field.
"""
if not self.has_field(field):
raise ValueError('field %s does not exist' % field)
self.data[field] = value
def get_center_coordinates_and_sizes(self, scope=None):
"""Computes the center coordinates, height and width of the boxes.
Args:
scope: name scope of the function.
Returns:
a list of 4 1-D tensors [ycenter, xcenter, height, width].
"""
box_corners = self.get()
ymin, xmin, ymax, xmax = tf.unstack(tf.transpose(a=box_corners))
width = xmax - xmin
height = ymax - ymin
ycenter = ymin + height / 2.
xcenter = xmin + width / 2.
return [ycenter, xcenter, height, width]
def transpose_coordinates(self, scope=None):
"""Transpose the coordinate representation in a boxlist.
Args:
scope: name scope of the function.
"""
y_min, x_min, y_max, x_max = tf.split(
value=self.get(), num_or_size_splits=4, axis=1)
self.set(tf.concat([x_min, y_min, x_max, y_max], 1))
def as_tensor_dict(self, fields=None):
"""Retrieves specified fields as a dictionary of tensors.
Args:
fields: (optional) list of fields to return in the dictionary.
If None (default), all fields are returned.
Returns:
tensor_dict: A dictionary of tensors specified by fields.
Raises:
ValueError: if specified field is not contained in boxlist.
"""
tensor_dict = {}
if fields is None:
fields = self.get_all_fields()
for field in fields:
if not self.has_field(field):
raise ValueError('boxlist must contain all specified fields')
tensor_dict[field] = self.get_field(field)
return tensor_dict
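# Illustrative usage sketch added for documentation; not part of the original
# file. The box coordinates and the 'scores' field are arbitrary assumptions.
def _example_box_list_usage():
    boxes = tf.constant([[0., 0., 1., 1.], [0.25, 0.25, 0.75, 0.75]], dtype=tf.float32)
    boxlist = BoxList(boxes)
    boxlist.add_field('scores', tf.constant([0.9, 0.1]))
    # Returns [ycenter, xcenter, height, width], each a 1-D tensor of shape [2].
    return boxlist.get_center_coordinates_and_sizes()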
| DeepLearningExamples-master | TensorFlow2/Segmentation/MaskRCNN/mrcnn_tf2/object_detection/box_list.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Region Similarity Calculators for BoxLists.
Region Similarity Calculators compare a pairwise measure of similarity
between the boxes in two BoxLists.
"""
from abc import ABCMeta, abstractmethod
import tensorflow as tf
def area(boxlist, scope=None):
"""Computes area of boxes.
Args:
boxlist: BoxList holding N boxes
scope: name scope.
Returns:
a tensor with shape [N] representing box areas.
"""
y_min, x_min, y_max, x_max = tf.split(
value=boxlist.get(), num_or_size_splits=4, axis=1)
return tf.squeeze((y_max - y_min) * (x_max - x_min), [1])
def intersection(boxlist1, boxlist2, scope=None):
"""Compute pairwise intersection areas between boxes.
Args:
boxlist1: BoxList holding N boxes
boxlist2: BoxList holding M boxes
scope: name scope.
Returns:
a tensor with shape [N, M] representing pairwise intersections
"""
y_min1, x_min1, y_max1, x_max1 = tf.split(
value=boxlist1.get(), num_or_size_splits=4, axis=1)
y_min2, x_min2, y_max2, x_max2 = tf.split(
value=boxlist2.get(), num_or_size_splits=4, axis=1)
all_pairs_min_ymax = tf.minimum(y_max1, tf.transpose(a=y_max2))
all_pairs_max_ymin = tf.maximum(y_min1, tf.transpose(a=y_min2))
intersect_heights = tf.maximum(0.0, all_pairs_min_ymax - all_pairs_max_ymin)
all_pairs_min_xmax = tf.minimum(x_max1, tf.transpose(a=x_max2))
all_pairs_max_xmin = tf.maximum(x_min1, tf.transpose(a=x_min2))
intersect_widths = tf.maximum(0.0, all_pairs_min_xmax - all_pairs_max_xmin)
return intersect_heights * intersect_widths
def iou(boxlist1, boxlist2, scope=None):
"""Computes pairwise intersection-over-union between box collections.
Args:
boxlist1: BoxList holding N boxes
boxlist2: BoxList holding M boxes
scope: name scope.
Returns:
a tensor with shape [N, M] representing pairwise iou scores.
"""
intersections = intersection(boxlist1, boxlist2)
areas1 = area(boxlist1)
areas2 = area(boxlist2)
unions = (
tf.expand_dims(areas1, 1) + tf.expand_dims(areas2, 0) - intersections)
return tf.where(
tf.equal(intersections, 0.0),
tf.zeros_like(intersections), tf.truediv(intersections, unions))
class RegionSimilarityCalculator:
"""Abstract base class for region similarity calculator."""
__metaclass__ = ABCMeta
def compare(self, boxlist1, boxlist2, scope=None):
"""Computes matrix of pairwise similarity between BoxLists.
This op (to be overridden) computes a measure of pairwise similarity between
the boxes in the given BoxLists. Higher values indicate more similarity.
Note that this method simply measures similarity and does not explicitly
perform a matching.
Args:
boxlist1: BoxList holding N boxes.
boxlist2: BoxList holding M boxes.
scope: Op scope name. Defaults to 'Compare' if None.
Returns:
a (float32) tensor of shape [N, M] with pairwise similarity score.
"""
return self._compare(boxlist1, boxlist2)
@abstractmethod
def _compare(self, boxlist1, boxlist2):
pass
class IouSimilarity(RegionSimilarityCalculator):
"""Class to compute similarity based on Intersection over Union (IOU) metric.
This class computes pairwise similarity between two BoxLists based on IOU.
"""
def _compare(self, boxlist1, boxlist2):
"""Compute pairwise IOU similarity between the two BoxLists.
Args:
boxlist1: BoxList holding N boxes.
boxlist2: BoxList holding M boxes.
Returns:
A tensor with shape [N, M] representing pairwise iou scores.
"""
return iou(boxlist1, boxlist2)
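# Illustrative usage sketch added for documentation; not part of the original
# file. box_list is assumed importable from the same package; the coordinates
# below are arbitrary.
def _example_iou_similarity():
    from mrcnn_tf2.object_detection import box_list
    boxes1 = box_list.BoxList(tf.constant([[0., 0., 1., 1.]], dtype=tf.float32))
    boxes2 = box_list.BoxList(
        tf.constant([[0., 0., 1., 1.], [2., 2., 3., 3.]], dtype=tf.float32))
    # Identical boxes score 1.0 and disjoint boxes score 0.0 -> [[1., 0.]].
    return IouSimilarity().compare(boxes1, boxes2)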
| DeepLearningExamples-master | TensorFlow2/Segmentation/MaskRCNN/mrcnn_tf2/object_detection/region_similarity_calculator.py |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tensorflow Example proto decoder for object detection.
A decoder to decode string tensors containing serialized tensorflow.Example
protos for object detection.
"""
import tensorflow as tf
def _get_source_id_from_encoded_image(parsed_tensors):
return tf.strings.as_string(tf.strings.to_hash_bucket_fast(parsed_tensors['image/encoded'], 2 ** 63 - 1))
class TfExampleDecoder:
"""Tensorflow Example proto decoder."""
def __init__(self, use_instance_mask=False, regenerate_source_id=False):
self._include_mask = use_instance_mask
self._regenerate_source_id = regenerate_source_id
self._keys_to_features = {
'image/encoded': tf.io.FixedLenFeature((), tf.string),
'image/source_id': tf.io.FixedLenFeature((), tf.string),
'image/height': tf.io.FixedLenFeature((), tf.int64),
'image/width': tf.io.FixedLenFeature((), tf.int64),
'image/object/bbox/xmin': tf.io.VarLenFeature(tf.float32),
'image/object/bbox/xmax': tf.io.VarLenFeature(tf.float32),
'image/object/bbox/ymin': tf.io.VarLenFeature(tf.float32),
'image/object/bbox/ymax': tf.io.VarLenFeature(tf.float32),
'image/object/class/label': tf.io.VarLenFeature(tf.int64),
'image/object/area': tf.io.VarLenFeature(tf.float32),
'image/object/is_crowd': tf.io.VarLenFeature(tf.int64),
}
if use_instance_mask:
self._keys_to_features.update({
'image/object/mask': tf.io.VarLenFeature(tf.string),
})
def _decode_image(self, parsed_tensors):
"""Decodes the image and set its static shape."""
image = tf.io.decode_image(parsed_tensors['image/encoded'], channels=3)
image.set_shape([None, None, 3])
return image
def _decode_boxes(self, parsed_tensors):
"""Concat box coordinates in the format of [ymin, xmin, ymax, xmax]."""
xmin = parsed_tensors['image/object/bbox/xmin']
xmax = parsed_tensors['image/object/bbox/xmax']
ymin = parsed_tensors['image/object/bbox/ymin']
ymax = parsed_tensors['image/object/bbox/ymax']
return tf.stack([ymin, xmin, ymax, xmax], axis=-1)
def _decode_masks(self, parsed_tensors):
"""Decode a set of PNG masks to the tf.float32 tensors."""
def _decode_png_mask(png_bytes):
mask = tf.squeeze(tf.io.decode_png(png_bytes, channels=1, dtype=tf.uint8), axis=-1)
mask = tf.cast(mask, dtype=tf.float32)
mask.set_shape([None, None])
return mask
height = parsed_tensors['image/height']
width = parsed_tensors['image/width']
masks = parsed_tensors['image/object/mask']
return tf.cond(
pred=tf.greater(tf.size(input=masks), 0),
true_fn=lambda: tf.map_fn(_decode_png_mask, masks, dtype=tf.float32),
false_fn=lambda: tf.zeros([0, height, width], dtype=tf.float32)
)
def decode(self, serialized_example):
"""Decode the serialized example.
Args:
serialized_example: a single serialized tf.Example string.
Returns:
decoded_tensors: a dictionary of tensors with the following fields:
- image: a uint8 tensor of shape [None, None, 3].
- source_id: a string scalar tensor.
- height: an integer scalar tensor.
- width: an integer scalar tensor.
- groundtruth_classes: a int64 tensor of shape [None].
- groundtruth_is_crowd: a bool tensor of shape [None].
- groundtruth_area: a float32 tensor of shape [None].
- groundtruth_boxes: a float32 tensor of shape [None, 4].
- groundtruth_instance_masks: a float32 tensor of shape
[None, None, None].
- groundtruth_instance_masks_png: a string tensor of shape [None].
"""
parsed_tensors = tf.io.parse_single_example(
serialized=serialized_example, features=self._keys_to_features)
for k in parsed_tensors:
if isinstance(parsed_tensors[k], tf.SparseTensor):
if parsed_tensors[k].dtype == tf.string:
parsed_tensors[k] = tf.sparse.to_dense(
parsed_tensors[k], default_value='')
else:
parsed_tensors[k] = tf.sparse.to_dense(
parsed_tensors[k], default_value=0)
image = self._decode_image(parsed_tensors)
boxes = self._decode_boxes(parsed_tensors)
is_crowd = tf.cast(parsed_tensors['image/object/is_crowd'], dtype=tf.bool)
if self._include_mask:
masks = self._decode_masks(parsed_tensors)
if self._regenerate_source_id:
source_id = _get_source_id_from_encoded_image(parsed_tensors)
else:
source_id = tf.cond(
pred=tf.greater(tf.strings.length(input=parsed_tensors['image/source_id']), 0),
true_fn=lambda: parsed_tensors['image/source_id'],
false_fn=lambda: _get_source_id_from_encoded_image(parsed_tensors)
)
decoded_tensors = {
'image': image,
# 'source_id': parsed_tensors['image/source_id'],
'source_id': source_id,
'height': parsed_tensors['image/height'],
'width': parsed_tensors['image/width'],
'groundtruth_classes': parsed_tensors['image/object/class/label'],
'groundtruth_is_crowd': is_crowd,
'groundtruth_area': parsed_tensors['image/object/area'],
'groundtruth_boxes': boxes,
}
if self._include_mask:
decoded_tensors.update({
'groundtruth_instance_masks': masks,
'groundtruth_instance_masks_png': parsed_tensors['image/object/mask'],
})
return decoded_tensors
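# Illustrative usage sketch added for documentation; not part of the original
# file. `tfrecord_path` is a hypothetical path to a TFRecord of serialized
# tf.Examples that follow the feature keys in _keys_to_features above.
def _example_decode_pipeline(tfrecord_path):
    decoder = TfExampleDecoder(use_instance_mask=True)
    dataset = tf.data.TFRecordDataset(tfrecord_path)
    # Each element becomes the dictionary documented in decode() above.
    return dataset.map(decoder.decode, num_parallel_calls=tf.data.experimental.AUTOTUNE)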
| DeepLearningExamples-master | TensorFlow2/Segmentation/MaskRCNN/mrcnn_tf2/object_detection/tf_example_decoder.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Argmax matcher implementation.
This class takes a similarity matrix and matches columns to rows based on the
maximum value per column. One can specify matched_threshold to prevent
low-similarity columns from matching to rows (generally resulting in a negative
training example) and unmatched_threshold to ignore the match (generally
resulting in neither a positive nor a negative training example).
This matcher is used in Fast(er)-RCNN.
Note: matchers are used in TargetAssigners. There is a create_target_assigner
factory function for popular implementations.
"""
import tensorflow as tf
from mrcnn_tf2.object_detection import matcher, shape_utils
class ArgMaxMatcher(matcher.Matcher):
"""Matcher based on highest value.
This class computes matches from a similarity matrix. Each column is matched
to a single row.
To support object detection target assignment this class enables setting both
matched_threshold (upper threshold) and unmatched_threshold (lower thresholds)
defining three categories of similarity which define whether examples are
positive, negative, or ignored:
(1) similarity >= matched_threshold: Highest similarity. Matched/Positive!
(2) matched_threshold > similarity >= unmatched_threshold: Medium similarity.
Depending on negatives_lower_than_unmatched, this is either
Unmatched/Negative OR Ignore.
(3) unmatched_threshold > similarity: Lowest similarity. Depending on flag
negatives_lower_than_unmatched, either Unmatched/Negative OR Ignore.
For ignored matches this class sets the values in the Match object to -2.
"""
def __init__(self,
matched_threshold,
unmatched_threshold=None,
negatives_lower_than_unmatched=True,
force_match_for_each_row=False):
"""Construct ArgMaxMatcher.
Args:
matched_threshold: Threshold for positive matches. Positive if
sim >= matched_threshold, where sim is the maximum value of the
similarity matrix for a given column. Set to None for no threshold.
unmatched_threshold: Threshold for negative matches. Negative if
sim < unmatched_threshold. Defaults to matched_threshold
when set to None.
negatives_lower_than_unmatched: Boolean which defaults to True. If True
then negative matches are the ones below the unmatched_threshold,
whereas ignored matches are in between the matched and unmatched
threshold. If False, then negative matches are in between the matched
and unmatched threshold, and everything lower than unmatched is ignored.
force_match_for_each_row: If True, ensures that each row is matched to
at least one column (which is not guaranteed otherwise if the
matched_threshold is high). Defaults to False. See
argmax_matcher_test.testMatcherForceMatch() for an example.
Raises:
ValueError: if unmatched_threshold is set but matched_threshold is not set
or if unmatched_threshold > matched_threshold.
"""
if (matched_threshold is None) and (unmatched_threshold is not None):
raise ValueError('Need to also define matched_threshold when'
' unmatched_threshold is defined')
self._matched_threshold = matched_threshold
if unmatched_threshold is None:
self._unmatched_threshold = matched_threshold
else:
if unmatched_threshold > matched_threshold:
raise ValueError('unmatched_threshold needs to be smaller or equal'
' to matched_threshold')
self._unmatched_threshold = unmatched_threshold
if not negatives_lower_than_unmatched:
if self._unmatched_threshold == self._matched_threshold:
raise ValueError('When negatives are in between matched and '
'unmatched thresholds, these cannot be of equal '
'value. matched: %s, unmatched: %s',
self._matched_threshold, self._unmatched_threshold)
self._force_match_for_each_row = force_match_for_each_row
self._negatives_lower_than_unmatched = negatives_lower_than_unmatched
def _match(self, similarity_matrix):
"""Tries to match each column of the similarity matrix to a row.
Args:
similarity_matrix: tensor of shape [N, M] representing any similarity
metric.
Returns:
Match object with corresponding matches for each of M columns.
"""
def _match_when_rows_are_empty():
"""Performs matching when the rows of similarity matrix are empty.
When the rows are empty, all detections are false positives. So we return
a tensor of -1's to indicate that the columns do not match to any rows.
Returns:
matches: int32 tensor indicating the row each column matches to.
"""
similarity_matrix_shape = shape_utils.combined_static_and_dynamic_shape(
similarity_matrix)
return -1 * tf.ones([similarity_matrix_shape[1]], dtype=tf.int32)
def _match_when_rows_are_non_empty():
"""Performs matching when the rows of similarity matrix are non empty.
Returns:
matches: int32 tensor indicating the row each column matches to.
"""
# Matches for each column
matches = tf.argmax(input=similarity_matrix, axis=0, output_type=tf.int32)
# Deal with matched and unmatched threshold
if self._matched_threshold is not None:
# Get logical indices of ignored and unmatched columns as tf.int64
matched_vals = tf.reduce_max(input_tensor=similarity_matrix, axis=0)
below_unmatched_threshold = tf.greater(self._unmatched_threshold,
matched_vals)
between_thresholds = tf.logical_and(
tf.greater_equal(matched_vals, self._unmatched_threshold),
tf.greater(self._matched_threshold, matched_vals))
if self._negatives_lower_than_unmatched:
matches = self._set_values_using_indicator(matches,
below_unmatched_threshold,
-1)
matches = self._set_values_using_indicator(matches,
between_thresholds,
-2)
else:
matches = self._set_values_using_indicator(matches,
below_unmatched_threshold,
-2)
matches = self._set_values_using_indicator(matches,
between_thresholds,
-1)
if self._force_match_for_each_row:
similarity_matrix_shape = shape_utils.combined_static_and_dynamic_shape(
similarity_matrix)
force_match_column_ids = tf.argmax(input=similarity_matrix, axis=1,
output_type=tf.int32)
force_match_column_indicators = tf.one_hot(
force_match_column_ids, depth=similarity_matrix_shape[1])
force_match_row_ids = tf.argmax(input=force_match_column_indicators, axis=0,
output_type=tf.int32)
force_match_column_mask = tf.cast(
tf.reduce_max(input_tensor=force_match_column_indicators, axis=0), tf.bool)
final_matches = tf.where(force_match_column_mask,
force_match_row_ids, matches)
return final_matches
else:
return matches
if similarity_matrix.shape.is_fully_defined():
if similarity_matrix.shape[0].value == 0:
return _match_when_rows_are_empty()
else:
return _match_when_rows_are_non_empty()
else:
return tf.cond(
pred=tf.greater(tf.shape(input=similarity_matrix)[0], 0),
true_fn=_match_when_rows_are_non_empty, false_fn=_match_when_rows_are_empty)
def _set_values_using_indicator(self, x, indicator, val):
"""Set the indicated fields of x to val.
Args:
x: tensor.
indicator: boolean with same shape as x.
val: scalar with value to set.
Returns:
modified tensor.
"""
indicator = tf.cast(indicator, x.dtype)
return tf.add(tf.multiply(x, 1 - indicator), val * indicator)
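# Illustrative usage sketch added for documentation; not part of the original
# file. The threshold and the tensors below are arbitrary assumptions.
def _example_set_values_using_indicator():
    arg_matcher = ArgMaxMatcher(matched_threshold=0.5)
    matches = tf.constant([2, 1, 0], dtype=tf.int32)
    indicator = tf.constant([True, False, True])
    # Indicated entries are overwritten with -1: [2, 1, 0] -> [-1, 1, -1].
    return arg_matcher._set_values_using_indicator(matches, indicator, -1)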
| DeepLearningExamples-master | TensorFlow2/Segmentation/MaskRCNN/mrcnn_tf2/object_detection/argmax_matcher.py |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Training specific ops, including sampling, building targets, etc."""
from __future__ import absolute_import, division, print_function
import tensorflow as tf
from mrcnn_tf2.object_detection import balanced_positive_negative_sampler
from mrcnn_tf2.ops import spatial_transform_ops
from mrcnn_tf2.utils import box_utils
_EPSILON = 1e-8
def _add_class_assignments(iou, gt_boxes, gt_labels):
"""Computes object category assignment for each box.
Args:
iou: a tensor for the iou matrix with a shape of
[batch_size, K, MAX_NUM_INSTANCES]. K is the number of post-nms RoIs
(i.e., rpn_post_nms_topn).
gt_boxes: a tensor with a shape of [batch_size, MAX_NUM_INSTANCES, 4].
This tensor might have paddings with negative values. The coordinates
of gt_boxes are in the pixel coordinates of the scaled image scale.
gt_labels: a tensor with a shape of [batch_size, MAX_NUM_INSTANCES]. This
tensor might have paddings with a value of -1.
Returns:
max_boxes: a tensor with a shape of [batch_size, K, 4], representing
the ground truth coordinates of each roi.
max_classes: an int32 tensor with a shape of [batch_size, K], representing
the ground truth class of each roi.
max_overlap: a tensor with a shape of [batch_size, K], representing
the maximum overlap of each roi.
argmax_iou: a tensor with a shape of [batch_size, K], representing the iou
argmax.
"""
with tf.name_scope('add_class_assignments'):
batch_size, _, _ = iou.get_shape().as_list()
argmax_iou = tf.argmax(input=iou, axis=2, output_type=tf.int32)
indices = tf.reshape(
argmax_iou + tf.expand_dims(tf.range(batch_size) * tf.shape(input=gt_labels)[1], 1),
shape=[-1]
)
max_classes = tf.reshape(tf.gather(tf.reshape(gt_labels, [-1, 1]), indices), [batch_size, -1])
max_overlap = tf.reduce_max(input_tensor=iou, axis=2)
bg_mask = tf.equal(max_overlap, tf.zeros_like(max_overlap))
max_classes = tf.where(bg_mask, tf.zeros_like(max_classes), max_classes)
max_boxes = tf.reshape(
tf.gather(tf.reshape(gt_boxes, [-1, 4]), indices),
[batch_size, -1, 4]
)
max_boxes = tf.where(
tf.tile(tf.expand_dims(bg_mask, axis=2), [1, 1, 4]),
tf.zeros_like(max_boxes),
max_boxes
)
return max_boxes, max_classes, max_overlap, argmax_iou
def encode_box_targets(boxes, gt_boxes, gt_labels, bbox_reg_weights):
"""Encodes predicted boxes with respect to ground truth boxes."""
with tf.name_scope('encode_box_targets'):
box_targets = box_utils.encode_boxes(boxes=gt_boxes, anchors=boxes, weights=bbox_reg_weights)
# If a target is background, the encoded box target should be zeros.
mask = tf.tile(tf.expand_dims(tf.equal(gt_labels, tf.zeros_like(gt_labels)), axis=2), [1, 1, 4])
box_targets = tf.where(mask, tf.zeros_like(box_targets), box_targets)
return box_targets
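# Editor's note: hedged usage sketch, not part of the original file. The box and
# label values below are made up; the sketch only illustrates the expected shapes
# of encode_box_targets and that background rows come back as zeros.
def _example_encode_box_targets():
    import tensorflow as tf
    boxes = tf.constant([[[10., 10., 50., 50.], [0., 0., 20., 20.]]])   # [1, 2, 4] sampled RoIs
    gt_boxes = tf.constant([[[12., 8., 48., 52.], [0., 0., 0., 0.]]])   # [1, 2, 4] matched ground truth
    gt_labels = tf.constant([[1, 0]])                                   # second RoI is background
    targets = encode_box_targets(boxes, gt_boxes, gt_labels,
                                 bbox_reg_weights=(10., 10., 5., 5.))
    # targets has shape [1, 2, 4]; the background row is all zeros.
    return targets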
def proposal_label_op(boxes, gt_boxes, gt_labels,
batch_size_per_im=512, fg_fraction=0.25, fg_thresh=0.5,
bg_thresh_hi=0.5, bg_thresh_lo=0.):
"""Assigns the proposals with ground truth labels and performs subsmpling.
Given proposal `boxes`, `gt_boxes`, and `gt_labels`, the function uses the
following algorithm to generate the final `batch_size_per_im` RoIs.
1. Calculates the IoU between each proposal box and each gt_boxes.
2. Assigns each proposal box with a ground truth class and box label by
choosing the largest overlap.
3. Samples `batch_size_per_im` boxes from all proposal boxes, and returns
box_targets, class_targets, and RoIs.
The reference implementations of #1 and #2 are here:
https://github.com/facebookresearch/Detectron/blob/master/detectron/datasets/json_dataset.py
The reference implementation of #3 is here:
https://github.com/facebookresearch/Detectron/blob/master/detectron/roi_data/fast_rcnn.py
Args:
boxes: a tensor with a shape of [batch_size, N, 4]. N is the number of
proposals before groundtruth assignment (e.g., rpn_post_nms_topn). The
last dimension is the pixel coordinates of scaled images in
[ymin, xmin, ymax, xmax] form.
gt_boxes: a tensor with a shape of [batch_size, MAX_NUM_INSTANCES, 4]. This
tensor might have paddings with a value of -1. The coordinates of gt_boxes
are in the pixel coordinates of the scaled image.
gt_labels: a tensor with a shape of [batch_size, MAX_NUM_INSTANCES]. This
tensor might have paddings with a value of -1.
batch_size_per_im: an integer representing the RoI minibatch size per image.
fg_fraction: a float representing the target fraction of the RoI minibatch that
is labeled foreground (i.e., class > 0).
fg_thresh: a float representing the overlap threshold for an RoI to be
considered foreground (if >= fg_thresh).
bg_thresh_hi: a float representing the overlap threshold for an RoI to be
considered background (class = 0 if overlap in [LO, HI)).
bg_thresh_lo: a float representing the overlap threshold for an RoI to be
considered background (class = 0 if overlap in [LO, HI)).
Returns:
box_targets: a tensor with a shape of [batch_size, K, 4]. The tensor
contains the ground truth pixel coordinates of the scaled images for each
roi. K is the number of sample RoIs (e.g., batch_size_per_im).
class_targets: an integer tensor with a shape of [batch_size, K]. The tensor
contains the ground truth class for each roi.
rois: a tensor with a shape of [batch_size, K, 4], representing the
coordinates of the selected RoI.
proposal_to_label_map: a tensor with a shape of [batch_size, K]. This tensor
keeps the mapping between proposal to labels. proposal_to_label_map[i]
means the index of the ground truth instance for the i-th proposal.
"""
with tf.name_scope('proposal_label'):
batch_size = boxes.shape[0]
# fixed problems when running with Keras AMP
gt_boxes = tf.cast(gt_boxes, dtype=tf.float32)
# The reference implementation intentionally includes ground truth boxes in
# the proposals.
# see:
# https://github.com/facebookresearch/Detectron/blob/master/detectron/datasets/json_dataset.py#L359
boxes = tf.concat([boxes, gt_boxes], axis=1)
iou = box_utils.bbox_overlap(boxes, gt_boxes)
(pre_sample_box_targets, pre_sample_class_targets, max_overlap,
proposal_to_label_map) = _add_class_assignments(iou, gt_boxes, gt_labels)
# Generates a random sample of RoIs comprising foreground and background
# examples.
# reference:
# https://github.com/facebookresearch/Detectron/blob/master/detectron/roi_data/fast_rcnn.py#L132
positives = tf.greater(max_overlap,
fg_thresh * tf.ones_like(max_overlap))
negatives = tf.logical_and(
tf.greater_equal(max_overlap, bg_thresh_lo * tf.ones_like(max_overlap)),
tf.less(max_overlap, bg_thresh_hi * tf.ones_like(max_overlap))
)
pre_sample_class_targets = tf.where(
negatives,
tf.zeros_like(pre_sample_class_targets),
pre_sample_class_targets
)
proposal_to_label_map = tf.where(
negatives,
tf.zeros_like(proposal_to_label_map),
proposal_to_label_map
)
# Handles ground truth paddings.
ignore_mask = tf.less(tf.reduce_min(input_tensor=iou, axis=2), tf.zeros_like(max_overlap))
# indicator includes both positive and negative labels.
# labels includes only positives labels.
# positives = indicator & labels.
# negatives = indicator & !labels.
# ignore = !indicator.
labels = positives
pos_or_neg = tf.logical_or(positives, negatives)
indicator = tf.logical_and(pos_or_neg, tf.logical_not(ignore_mask))
all_samples = []
sampler = balanced_positive_negative_sampler.BalancedPositiveNegativeSampler(
positive_fraction=fg_fraction,
is_static=True
)
# Batch-unroll the sub-sampling process.
for i in range(batch_size):
samples = sampler.subsample(indicator[i], batch_size_per_im, labels[i])
all_samples.append(samples)
all_samples = tf.stack([all_samples], axis=0)[0]
# A workaround to get the indices from the boolean tensors.
_, samples_indices = tf.nn.top_k(
tf.cast(all_samples, dtype=tf.int32),
k=batch_size_per_im,
sorted=True
)
# Constructs indices for gather.
samples_indices = tf.reshape(
samples_indices + tf.expand_dims(tf.range(batch_size) * tf.shape(input=boxes)[1], 1),
[-1]
)
rois = tf.reshape(
tf.gather(tf.reshape(boxes, [-1, 4]), samples_indices),
[batch_size, -1, 4]
)
class_targets = tf.reshape(
tf.gather(tf.reshape(pre_sample_class_targets, [-1, 1]), samples_indices),
[batch_size, -1]
)
sample_box_targets = tf.reshape(
tf.gather(tf.reshape(pre_sample_box_targets, [-1, 4]), samples_indices),
[batch_size, -1, 4]
)
sample_proposal_to_label_map = tf.reshape(
tf.gather(tf.reshape(proposal_to_label_map, [-1, 1]), samples_indices),
[batch_size, -1]
)
return sample_box_targets, class_targets, rois, sample_proposal_to_label_map
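# Editor's note: illustrative sketch, not part of the original file. It isolates the
# foreground/background partition used inside proposal_label_op above: RoIs whose
# max overlap exceeds fg_thresh are foreground, overlaps in [bg_thresh_lo,
# bg_thresh_hi) are background, and everything else is ignored by the sampler.
def _example_fg_bg_partition():
    import tensorflow as tf
    max_overlap = tf.constant([0.9, 0.3, 0.05, 0.7])
    fg_thresh, bg_thresh_hi, bg_thresh_lo = 0.5, 0.5, 0.0
    positives = tf.greater(max_overlap, fg_thresh)
    negatives = tf.logical_and(
        tf.greater_equal(max_overlap, bg_thresh_lo),
        tf.less(max_overlap, bg_thresh_hi)
    )
    # positives == [True, False, False, True], negatives == [False, True, True, False]
    return positives, negatives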
def select_fg_for_masks(class_targets, box_targets, boxes, proposal_to_label_map, max_num_fg=128):
"""Selects the fore ground objects for mask branch during training.
Args:
class_targets: a tensor of shape [batch_size, num_boxes] representing the
class label for each box.
box_targets: a tensor with a shape of [batch_size, num_boxes, 4]. The tensor
contains the ground truth pixel coordinates of the scaled images for each
roi.
boxes: A 3-D Tensor of shape [batch_size, num_boxes, 4]. Each row
represents a box with [y1, x1, y2, x2] in un-normalized coordinates.
proposal_to_label_map: a tensor with a shape of [batch_size, num_boxes].
This tensor keeps the mapping between proposal to labels.
proposal_to_label_map[i] means the index of the ground truth instance for
the i-th proposal.
max_num_fg: an integer representing the number of masks per image.
Returns:
class_targets, boxes, proposal_to_label_map, box_targets that have
foreground objects.
"""
# Masks are for positive (fg) objects only.
# Reference: https://github.com/facebookresearch/Detectron/blob/master/detectron/roi_data/mask_rcnn.py
batch_size = boxes.shape[0]
_, fg_indices = tf.nn.top_k(tf.cast(tf.greater(class_targets, 0), dtype=tf.float32), k=max_num_fg)
# Constructs indices for gather.
indices = tf.reshape(
fg_indices + tf.expand_dims(tf.range(batch_size) * tf.shape(input=class_targets)[1], 1),
shape=[-1]
)
fg_class_targets = tf.reshape(
tf.gather(tf.reshape(class_targets, [-1, 1]), indices),
[batch_size, -1]
)
fg_box_targets = tf.reshape(
tf.gather(tf.reshape(box_targets, [-1, 4]), indices),
[batch_size, -1, 4]
)
fg_box_rois = tf.reshape(
tf.gather(tf.reshape(boxes, [-1, 4]), indices), [batch_size, -1, 4]
)
fg_proposal_to_label_map = tf.reshape(
tf.gather(tf.reshape(proposal_to_label_map, [-1, 1]), indices),
[batch_size, -1]
)
return (fg_class_targets, fg_box_targets, fg_box_rois,
fg_proposal_to_label_map)
def get_mask_targets(fg_boxes, fg_proposal_to_label_map, fg_box_targets, mask_gt_labels, output_size=28):
"""Crop and resize on multilevel feature pyramid.
Args:
fg_boxes: A 3-D tensor of shape [batch_size, num_masks, 4]. Each row
represents a box with [y1, x1, y2, x2] in un-normalized coordinates.
fg_proposal_to_label_map: A tensor of shape [batch_size, num_masks].
fg_box_targets: a float tensor representing the box label for each box
with a shape of [batch_size, num_masks, 4].
mask_gt_labels: A tensor with a shape of [batch_size, M, H+4, W+4]. M is
NUM_MAX_INSTANCES (i.e., 100 in this implementation) in each image, while
H and W are the ground truth mask size. The `+4` comes from padding two
zeros on each side of the height and width dimensions.
output_size: A scalar to indicate the output crop size.
Returns:
A 4-D tensor representing feature crop of shape
[batch_size, num_boxes, output_size, output_size].
"""
_, _, max_feature_height, max_feature_width = mask_gt_labels.get_shape().as_list()
# proposal_to_label_map might have a -1 paddings.
levels = tf.maximum(fg_proposal_to_label_map, 0)
# Projects box location and sizes to corresponding cropped ground truth
# mask coordinates.
bb_y_min, bb_x_min, bb_y_max, bb_x_max = tf.split(value=fg_boxes, num_or_size_splits=4, axis=2)
gt_y_min, gt_x_min, gt_y_max, gt_x_max = tf.split(value=fg_box_targets, num_or_size_splits=4, axis=2)
valid_feature_width = max_feature_width - 4
valid_feature_height = max_feature_height - 4
y_transform = (bb_y_min - gt_y_min) * valid_feature_height / (gt_y_max - gt_y_min + _EPSILON) + 2
x_transform = (bb_x_min - gt_x_min) * valid_feature_width / (gt_x_max - gt_x_min + _EPSILON) + 2
h_transform = (bb_y_max - bb_y_min) * valid_feature_height / (gt_y_max - gt_y_min + _EPSILON)
w_transform = (bb_x_max - bb_x_min) * valid_feature_width / (gt_x_max - gt_x_min + _EPSILON)
boundaries = tf.concat(
[
tf.cast(tf.ones_like(y_transform) * (max_feature_height - 1), dtype=tf.float32),
tf.cast(tf.ones_like(x_transform) * (max_feature_width - 1), dtype=tf.float32)
],
axis=-1
)
features_per_box = spatial_transform_ops.selective_crop_and_resize(
tf.expand_dims(mask_gt_labels, -1),
tf.concat([y_transform, x_transform, h_transform, w_transform], -1),
tf.expand_dims(levels, -1),
boundaries,
output_size
)
features_per_box = tf.squeeze(features_per_box, axis=-1)
# Masks are binary outputs.
features_per_box = tf.where(
tf.greater_equal(features_per_box, 0.5),
tf.ones_like(features_per_box),
tf.zeros_like(features_per_box)
)
# mask_targets depend on box RoIs, which have gradients. This stop_gradient
# prevents the flow of gradient to box RoIs.
features_per_box = tf.stop_gradient(features_per_box)
return features_per_box
| DeepLearningExamples-master | TensorFlow2/Segmentation/MaskRCNN/mrcnn_tf2/ops/training_ops.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
import tensorflow as tf
def nearest_upsampling(data, scale):
"""Nearest neighbor upsampling implementation.
Args:
data: A tensor with a shape of [batch, height_in, width_in, channels].
scale: An integer multiple to scale resolution of input data.
Returns:
data_up: A tensor with a shape of
[batch, height_in*scale, width_in*scale, channels]. Same dtype as input
data.
"""
with tf.name_scope('nearest_upsampling'):
bs, h, w, c = tf.unstack(tf.shape(input=data))
# Use reshape to quickly upsample the input.
# The nearest pixel is selected implicitly via broadcasting.
# data = tf.reshape(data, [bs, h, 1, w, 1, c]) * tf.ones([1, 1, scale, 1, scale, 1], dtype=data.dtype)
# Instead of broadcasting with a 6-d tensor, we're using stacking here
# for TfLite compatibility.
output = tf.stack([data] * scale, axis=3)
output = tf.stack([output] * scale, axis=2)
return tf.reshape(output, [bs, h * scale, w * scale, c])
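# Editor's note: illustrative sketch, not part of the original file. It runs
# nearest_upsampling on a tiny tensor so the stacking-based upsampling above can be
# checked by eye: every input pixel is repeated scale x scale times.
def _example_nearest_upsampling():
    import tensorflow as tf
    data = tf.reshape(tf.constant([[1., 2.], [3., 4.]]), [1, 2, 2, 1])
    up = nearest_upsampling(data, scale=2)
    # up[0, :, :, 0] ==
    # [[1., 1., 2., 2.],
    #  [1., 1., 2., 2.],
    #  [3., 3., 4., 4.],
    #  [3., 3., 4., 4.]]
    return up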
def selective_crop_and_resize(features,
boxes,
box_levels,
boundaries,
output_size=7,
training=True):
"""Crop and resize boxes on a set of feature maps.
Given multiple feature maps indexed by different levels, and a set of boxes
where each box is mapped to a certain level, it selectively crops and resizes
boxes from the corresponding feature maps to generate the box features.
We follow the ROIAlign technique (see https://arxiv.org/pdf/1703.06870.pdf,
figure 3 for reference). Specifically, for each feature map, we select an
(output_size, output_size) set of pixels corresponding to the box location,
and then use bilinear interpolation to select the feature value for each
pixel.
For performance, we perform the gather and interpolation on all layers as a
single operation: the multi-level features are first stacked and
gathered into [2*output_size, 2*output_size] feature points. Then bilinear
interpolation is performed on the gathered feature points to generate
[output_size, output_size] RoIAlign feature map.
Here is the step-by-step algorithm:
1. The multi-level features are gathered into a
[batch_size, num_boxes, output_size*2, output_size*2, num_filters]
Tensor. The Tensor contains four neighboring feature points for each
vertex in the output grid.
2. Compute the interpolation kernel of shape
[batch_size, num_boxes, output_size*2, output_size*2]. The last 2 axis
can be seen as stacking 2x2 interpolation kernels for all vertices in the
output grid.
3. Element-wise multiply the gathered features and interpolation kernel.
Then apply 2x2 average pooling to reduce spatial dimension to
output_size.
Args:
features: a 5-D tensor of shape
[batch_size, num_levels, max_height, max_width, num_filters] on which
cropping and resizing are based.
boxes: a 3-D tensor of shape [batch_size, num_boxes, 4] encoding the
information of each box w.r.t. the corresponding feature map.
boxes[:, :, 0:2] are the grid position in (y, x) (float) of the top-left
corner of each box. boxes[:, :, 2:4] are the box sizes in (h, w) (float)
in terms of the number of pixels of the corresponding feature map size.
box_levels: a 3-D tensor of shape [batch_size, num_boxes, 1] representing
the 0-based corresponding feature level index of each box.
boundaries: a 3-D tensor of shape [batch_size, num_boxes, 2] representing
the boundary (in (y, x)) of the corresponding feature map for each box.
Any resampled grid points that go beyond the boundary will be clipped.
output_size: a scalar indicating the output crop size.
training: whether to build the model for training (or inference).
Returns:
features_per_box: a 5-D tensor of shape
[batch_size, num_boxes, output_size, output_size, num_filters]
representing the cropped features.
"""
(batch_size, num_levels, max_feature_height, max_feature_width,
num_filters) = features.get_shape().as_list()
_, num_boxes, _ = boxes.get_shape().as_list()
# Compute the grid position w.r.t. the corresponding feature map.
box_grid_x = []
box_grid_y = []
for i in range(output_size):
box_grid_x.append(boxes[:, :, 1:2] +
(i + 0.5) * boxes[:, :, 3:4] / output_size)
box_grid_y.append(boxes[:, :, 0:1] +
(i + 0.5) * boxes[:, :, 2:3] / output_size)
box_grid_x = tf.concat(box_grid_x, axis=-1)
box_grid_y = tf.concat(box_grid_y, axis=-1)
# Compute indices for gather operation.
box_grid_y0 = tf.floor(box_grid_y)
box_grid_x0 = tf.floor(box_grid_x)
box_grid_x0 = tf.maximum(0., box_grid_x0)
box_grid_y0 = tf.maximum(0., box_grid_y0)
box_gridx0x1 = tf.stack([
tf.minimum(box_grid_x0, boundaries[:, :, 1:2]),
tf.minimum(box_grid_x0 + 1, boundaries[:, :, 1:2])
],
axis=3)
box_gridy0y1 = tf.stack([
tf.minimum(box_grid_y0, boundaries[:, :, 0:1]),
tf.minimum(box_grid_y0 + 1, boundaries[:, :, 0:1])
],
axis=3)
x_indices = tf.reshape(box_gridx0x1, [batch_size, num_boxes, output_size * 2])
y_indices = tf.reshape(box_gridy0y1, [batch_size, num_boxes, output_size * 2])
# If using GPU for inference, delay the cast until when Gather ops show up
# since GPU inference supports float point better.
# TODO(laigd): revisit this when newer versions of GPU libraries are released.
indices_dtype = tf.float32 if not training else tf.int32
if training:
x_indices = tf.cast(x_indices, tf.int32)
y_indices = tf.cast(y_indices, tf.int32)
height_dim_offset = max_feature_width
level_dim_offset = max_feature_height * height_dim_offset
batch_dim_offset = num_levels * level_dim_offset
batch_dim_indices = (
tf.reshape(tf.range(batch_size, dtype=indices_dtype) * batch_dim_offset, [batch_size, 1, 1, 1]) *
tf.ones([1, num_boxes, output_size * 2, output_size * 2], dtype=indices_dtype)
)
box_level_indices = (
tf.reshape(box_levels * level_dim_offset, [batch_size, num_boxes, 1, 1]) *
tf.ones([1, 1, output_size * 2, output_size * 2], dtype=indices_dtype)
)
height_indices = (
tf.reshape(y_indices * height_dim_offset, [batch_size, num_boxes, output_size * 2, 1]) *
tf.ones([1, 1, 1, output_size * 2], dtype=indices_dtype)
)
width_indices = (
tf.reshape(x_indices, [batch_size, num_boxes, 1, output_size * 2]) *
tf.ones([1, 1, output_size * 2, 1], dtype=indices_dtype)
)
batch_dim_indices = tf.cast(batch_dim_indices, tf.float32)
box_level_indices = tf.cast(box_level_indices, tf.float32)
height_indices = tf.cast(height_indices, tf.float32)
width_indices = tf.cast(width_indices, tf.float32)
indices = tf.add_n([
batch_dim_indices,
box_level_indices,
height_indices,
width_indices,
])
indices = tf.cast(indices, tf.int32)
if batch_size == 1:
# Special handling for single batch input to make it friendly for GPU
# inference.
indices = tf.reshape(indices, [1, -1])
if not training:
indices = tf.cast(indices, dtype=tf.int32)
features = tf.reshape(features, [1, -1, num_filters])
# Cast should happen at last since GPU has better support for floating point
# operations.
features_per_box = tf.gather(features, indices, axis=1)
else:
indices = tf.reshape(indices, [-1])
if not training:
indices = tf.cast(indices, dtype=tf.int32)
features = tf.reshape(features, [-1, num_filters])
features_per_box = tf.gather(features, indices)
features_per_box = tf.reshape(
features_per_box,
[batch_size, num_boxes, output_size * 2, output_size * 2, num_filters]
)
# The RoIAlign feature f can be computed by bilinear interpolation of four
# neighboring feature points f0, f1, f2, and f3.
# f(y, x) = [hy, ly] * [[f00, f01], * [hx, lx]^T
# [f10, f11]]
# f(y, x) = (hy*hx)f00 + (hy*lx)f01 + (ly*hx)f10 + (lx*ly)f11
# f(y, x) = w00*f00 + w01*f01 + w10*f10 + w11*f11
ly = box_grid_y - box_grid_y0
lx = box_grid_x - box_grid_x0
hy = 1.0 - ly
hx = 1.0 - lx
kernel_x = tf.reshape(tf.stack([hx, lx], axis=3), [batch_size, num_boxes, 1, output_size * 2])
kernel_y = tf.reshape(tf.stack([hy, ly], axis=3), [batch_size, num_boxes, output_size * 2, 1])
# Use implicit broadcast to generate the interpolation kernel. The
# multiplier `4` is for avg pooling.
interpolation_kernel = kernel_y * kernel_x * 4
# Interpolate the gathered features with computed interpolation kernels.
features_per_box *= tf.cast(tf.expand_dims(interpolation_kernel, axis=4), dtype=features_per_box.dtype)
features_per_box = tf.reshape(
features_per_box,
[batch_size * num_boxes, output_size * 2, output_size * 2, num_filters]
)
features_per_box = tf.nn.avg_pool2d(input=features_per_box, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
padding='VALID')
features_per_box = tf.reshape(features_per_box,
[batch_size, num_boxes, output_size, output_size, num_filters])
return features_per_box
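# Editor's note: illustrative sketch, not part of the original file. It spells out
# the bilinear-interpolation weights referenced in the comments above for a single
# sampling point: with fractional offsets (ly, lx) from the top-left neighbor, the
# four corner weights are hy*hx, hy*lx, ly*hx and ly*lx.
def _example_bilinear_weights():
    f00, f01, f10, f11 = 1.0, 2.0, 3.0, 4.0  # four neighboring feature values
    ly, lx = 0.25, 0.5                       # fractional offsets of the sample point
    hy, hx = 1.0 - ly, 1.0 - lx
    value = hy * hx * f00 + hy * lx * f01 + ly * hx * f10 + ly * lx * f11
    # value == 0.375 * 1 + 0.375 * 2 + 0.125 * 3 + 0.125 * 4 == 2.0
    return value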
def multilevel_crop_and_resize(features,
boxes,
output_size=7,
training=True):
"""Crop and resize on multilevel feature pyramid.
Generate the (output_size, output_size) set of pixels for each input box
by first locating the box in the correct feature level, and then cropping
and resizing it using the corresponding feature map of that level.
Args:
features: A dictionary with key as pyramid level and value as features. The
features are in shape of [batch_size, height_l, width_l, num_filters].
boxes: A 3-D Tensor of shape [batch_size, num_boxes, 4]. Each row represents
a box with [y1, x1, y2, x2] in un-normalized coordinates.
output_size: A scalar to indicate the output crop size.
training: whether to build the model for training (or inference).
Returns:
A 5-D tensor representing feature crop of shape
[batch_size, num_boxes, output_size, output_size, num_filters].
"""
with tf.name_scope('multilevel_crop_and_resize'):
levels = features.keys()
min_level = min(levels)
max_level = max(levels)
_, max_feature_height, max_feature_width, _ = (
features[min_level].get_shape().as_list())
# Stack feature pyramid into a features_all of shape
# [batch_size, levels, height, width, num_filters].
features_all = []
for level in range(min_level, max_level + 1):
features_all.append(
tf.image.pad_to_bounding_box(features[level], 0, 0, max_feature_height, max_feature_width))
features_all = tf.stack(features_all, axis=1)
# Assign boxes to the right level.
box_width = tf.squeeze(boxes[:, :, 3:4] - boxes[:, :, 1:2], axis=-1)
box_height = tf.squeeze(boxes[:, :, 2:3] - boxes[:, :, 0:1], axis=-1)
areas_sqrt = tf.sqrt(box_height * box_width)
levels = tf.math.floordiv(tf.math.log(tf.divide(areas_sqrt, 224.0)), tf.math.log(2.0)) + 4.0
if training:
levels = tf.cast(levels, dtype=tf.int32)
# Map levels between [min_level, max_level].
levels = tf.minimum(
float(max_level) if not training else max_level,
tf.maximum(levels, float(min_level) if not training else min_level)
)
# Project box location and sizes to corresponding feature levels.
scale_to_level = tf.cast(
tf.pow(tf.constant(2.0), levels if not training else tf.cast(levels, tf.float32)),
dtype=boxes.dtype
)
boxes /= tf.expand_dims(scale_to_level, axis=2)
box_width /= scale_to_level
box_height /= scale_to_level
boxes = tf.concat(
[boxes[:, :, 0:2],
tf.expand_dims(box_height, -1),
tf.expand_dims(box_width, -1)],
axis=-1
)
# Map levels to [0, max_level-min_level].
levels -= min_level
level_strides = tf.pow([[2.0]], levels if not training else tf.cast(levels, tf.float32))
boundary = tf.cast(
tf.concat(
[
tf.expand_dims([[tf.cast(max_feature_height, tf.float32)]] / level_strides - 1, axis=-1),
tf.expand_dims([[tf.cast(max_feature_width, tf.float32)]] / level_strides - 1, axis=-1),
],
axis=-1
),
boxes.dtype
)
return selective_crop_and_resize(
features=features_all,
boxes=boxes,
box_levels=levels,
boundaries=boundary,
output_size=output_size,
training=training
)
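# Editor's note: illustrative sketch, not part of the original file. It evaluates
# the level-assignment rule used in multilevel_crop_and_resize above,
# level = floor(log2(sqrt(area) / 224)) + 4, for a few made-up box sizes; clamping
# to [min_level, max_level] happens afterwards, as in the function.
def _example_fpn_level_assignment():
    import tensorflow as tf
    areas_sqrt = tf.constant([100.0, 224.0, 500.0, 1000.0])
    levels = tf.math.floordiv(
        tf.math.log(areas_sqrt / 224.0), tf.math.log(2.0)) + 4.0
    # levels == [2., 4., 5., 6.] before clamping to the available pyramid levels.
    return levels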
| DeepLearningExamples-master | TensorFlow2/Segmentation/MaskRCNN/mrcnn_tf2/ops/spatial_transform_ops.py |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Ops used to post-process raw detections."""
from __future__ import absolute_import, division, print_function
import tensorflow as tf
from mrcnn_tf2.utils import box_utils
def generate_detections_per_image_tpu(cls_outputs,
box_outputs,
anchor_boxes,
image_info,
pre_nms_num_detections=1000,
post_nms_num_detections=100,
nms_threshold=0.3,
bbox_reg_weights=(10., 10., 5., 5.)):
"""Generate the final detections per image given the model outputs.
Args:
cls_outputs: a tensor with shape [N, num_classes], which stacks class
logit outputs on all feature levels. The N is the number of total anchors
on all levels. The num_classes is the number of classes predicted by the
model. Note that the cls_outputs should be the output of softmax().
box_outputs: a tensor with shape [N, num_classes*4], which stacks box
regression outputs on all feature levels. The N is the number of total
anchors on all levels.
anchor_boxes: a tensor with shape [N, 4], which stacks anchors on all
feature levels. The N is the number of total anchors on all levels.
image_info: a tensor of shape [5] which encodes the input image's [height,
width, scale, original_height, original_width]
pre_nms_num_detections: an integer that specifies the number of candidates
before NMS.
post_nms_num_detections: an integer that specifies the number of candidates
after NMS.
nms_threshold: a float number to specify the IOU threshold of NMS.
bbox_reg_weights: a list of 4 float scalars, which are default weights on
(dx, dy, dw, dh) for normalizing bbox regression targets.
Returns:
detections: Tuple of tensors corresponding to number of valid boxes,
box coordinates, object categories for each boxes, and box scores
-- respectively.
"""
num_boxes, num_classes = cls_outputs.get_shape().as_list()
# Remove background class scores.
cls_outputs = cls_outputs[:, 1:num_classes]
top_k_scores, top_k_indices_with_classes = tf.nn.top_k(
tf.reshape(cls_outputs, [-1]),
k=pre_nms_num_detections,
sorted=False
)
classes = tf.math.mod(top_k_indices_with_classes, num_classes - 1)
top_k_indices = tf.math.floordiv(top_k_indices_with_classes, num_classes - 1)
anchor_boxes = tf.gather(anchor_boxes, top_k_indices)
box_outputs = tf.reshape(box_outputs, [num_boxes, num_classes, 4])[:, 1:num_classes, :]
class_indices = classes
box_outputs = tf.gather_nd(box_outputs, tf.stack([top_k_indices, class_indices], axis=1))
# apply bounding box regression to anchors
boxes = box_utils.decode_boxes(box_outputs, anchor_boxes, bbox_reg_weights)
boxes = box_utils.clip_boxes(boxes, image_info[0], image_info[1])
list_of_all_boxes = []
list_of_all_scores = []
list_of_all_classes = []
# Skip background class.
for class_i in range(num_classes):
# Compute bitmask for the given classes.
class_i_bitmask = tf.cast(tf.equal(classes, class_i), top_k_scores.dtype)
# This works because score is in [0, 1].
class_i_scores = top_k_scores * class_i_bitmask
# The TPU and CPU have different behaviors for
# tf.image.non_max_suppression_padded (b/116754376).
class_i_post_nms_indices, class_i_nms_num_valid = tf.image.non_max_suppression_padded(
tf.cast(boxes, dtype=tf.float32),
tf.cast(class_i_scores, dtype=tf.float32),
post_nms_num_detections,
iou_threshold=nms_threshold,
score_threshold=0.05,
pad_to_max_output_size=True,
name='nms_detections_' + str(class_i)
)
class_i_post_nms_boxes = tf.gather(boxes, class_i_post_nms_indices)
class_i_post_nms_scores = tf.gather(class_i_scores, class_i_post_nms_indices)
mask = tf.less(tf.range(post_nms_num_detections), [class_i_nms_num_valid])
class_i_post_nms_scores = tf.where(
mask, class_i_post_nms_scores, tf.zeros_like(class_i_post_nms_scores)
)
class_i_classes = tf.fill(tf.shape(input=class_i_post_nms_scores), class_i + 1)
list_of_all_boxes.append(class_i_post_nms_boxes)
list_of_all_scores.append(class_i_post_nms_scores)
list_of_all_classes.append(class_i_classes)
post_nms_boxes = tf.concat(list_of_all_boxes, axis=0)
post_nms_scores = tf.concat(list_of_all_scores, axis=0)
post_nms_classes = tf.concat(list_of_all_classes, axis=0)
# sort all results.
post_nms_scores, sorted_indices = tf.nn.top_k(
tf.cast(post_nms_scores, dtype=tf.float32),
k=post_nms_num_detections,
sorted=True
)
post_nms_boxes = tf.gather(post_nms_boxes, sorted_indices)
post_nms_classes = tf.gather(post_nms_classes, sorted_indices)
valid_mask = tf.where(
tf.greater(post_nms_scores, 0), tf.ones_like(post_nms_scores),
tf.zeros_like(post_nms_scores)
)
num_valid_boxes = tf.reduce_sum(input_tensor=valid_mask, axis=-1)
box_classes = tf.cast(post_nms_classes, dtype=tf.float32)
return num_valid_boxes, post_nms_boxes, box_classes, post_nms_scores
def generate_detections_tpu(class_outputs,
box_outputs,
anchor_boxes,
image_info,
pre_nms_num_detections=1000,
post_nms_num_detections=100,
nms_threshold=0.3,
bbox_reg_weights=(10., 10., 5., 5.)
):
"""Generate the final detections given the model outputs (TPU version).
Args:
class_outputs: a tensor with shape [batch_size, N, num_classes], which
stacks class logit outputs on all feature levels. The N is the number of
total anchors on all levels. The num_classes is the number of classes
predicted by the model. Note that the class_outputs here is the raw score.
box_outputs: a tensor with shape [batch_size, N, num_classes*4], which
stacks box regression outputs on all feature levels. The N is the number
of total anchors on all levels.
anchor_boxes: a tensor with shape [batch_size, N, 4], which stacks anchors
on all feature levels. The N is the number of total anchors on all levels.
image_info: a tensor of shape [batch_size, 5] which encodes each image's
[height, width, scale, original_height, original_width].
pre_nms_num_detections: an integer that specifies the number of candidates
before NMS.
post_nms_num_detections: an integer that specifies the number of candidates
after NMS.
nms_threshold: a float number to specify the IOU threshold of NMS.
bbox_reg_weights: a list of 4 float scalars, which are default weights on
(dx, dy, dw, dh) for normalizing bbox regression targets.
Returns:
a tuple of tensors corresponding to number of valid boxes,
box coordinates, object categories for each boxes, and box scores stacked
in batch_size.
"""
with tf.name_scope('generate_detections'):
batch_size, _, _ = class_outputs.get_shape().as_list()
softmax_class_outputs = tf.nn.softmax(class_outputs)
num_valid_boxes, box_coordinates, box_classes, box_scores = ([], [], [], [])
for i in range(batch_size):
result = generate_detections_per_image_tpu(
softmax_class_outputs[i], box_outputs[i], anchor_boxes[i],
image_info[i], pre_nms_num_detections, post_nms_num_detections,
nms_threshold, bbox_reg_weights)
num_valid_boxes.append(result[0])
box_coordinates.append(result[1])
box_classes.append(result[2])
box_scores.append(result[3])
num_valid_boxes = tf.stack(num_valid_boxes)
box_coordinates = tf.stack(box_coordinates)
box_classes = tf.stack(box_classes)
box_scores = tf.stack(box_scores)
return num_valid_boxes, box_coordinates, box_classes, box_scores
def generate_detections_gpu(class_outputs,
box_outputs,
anchor_boxes,
image_info,
pre_nms_num_detections=1000,
post_nms_num_detections=100,
nms_threshold=0.3,
bbox_reg_weights=(10., 10., 5., 5.)
):
"""Generate the final detections given the model outputs (GPU version).
Args:
class_outputs: a tensor with shape [batch_size, N, num_classes], which
stacks class logit outputs on all feature levels. The N is the number of
total anchors on all levels. The num_classes is the number of classes
predicted by the model. Note that the class_outputs here is the raw score.
box_outputs: a tensor with shape [batch_size, N, num_classes*4], which
stacks box regression outputs on all feature levels. The N is the number
of total anchors on all levels.
anchor_boxes: a tensor with shape [batch_size, N, 4], which stacks anchors
on all feature levels. The N is the number of total anchors on all levels.
image_info: a tensor of shape [batch_size, 5] which encodes each image's
[height, width, scale, original_height, original_width].
pre_nms_num_detections: an integer that specifies the number of candidates
before NMS.
post_nms_num_detections: an integer that specifies the number of candidates
after NMS.
nms_threshold: a float number to specify the IOU threshold of NMS.
bbox_reg_weights: a list of 4 float scalars, which are default weights on
(dx, dy, dw, dh) for normalizing bbox regression targets.
Returns:
a tuple of tensors corresponding to number of valid boxes,
box coordinates, object categories for each boxes, and box scores stacked
in batch_size.
"""
with tf.name_scope('generate_detections'):
batch_size, num_boxes, num_classes = class_outputs.get_shape().as_list()
softmax_class_outputs = tf.nn.softmax(class_outputs)
# Remove background
scores = tf.slice(softmax_class_outputs, [0, 0, 1], [-1, -1, -1])
boxes = tf.slice(
tf.reshape(box_outputs, [batch_size, num_boxes, num_classes, 4]),
[0, 0, 1, 0], [-1, -1, -1, -1]
)
anchor_boxes = tf.expand_dims(anchor_boxes, axis=2) * tf.ones([1, 1, num_classes - 1, 1])
num_detections = num_boxes * (num_classes - 1)
boxes = tf.reshape(boxes, [batch_size, num_detections, 4])
scores = tf.reshape(scores, [batch_size, num_detections, 1])
anchor_boxes = tf.reshape(anchor_boxes, [batch_size, num_detections, 4])
# Decode
boxes = box_utils.decode_boxes(boxes, anchor_boxes, bbox_reg_weights)
# Clip boxes
height = tf.expand_dims(image_info[:, 0:1], axis=-1)
width = tf.expand_dims(image_info[:, 1:2], axis=-1)
boxes = box_utils.clip_boxes(boxes, height, width)
# NMS
pre_nms_boxes = box_utils.to_normalized_coordinates(boxes, height, width)
pre_nms_boxes = tf.reshape(pre_nms_boxes, [batch_size, num_boxes, num_classes - 1, 4])
pre_nms_scores = tf.reshape(scores, [batch_size, num_boxes, num_classes - 1])
# fixed problems when running with Keras AMP
pre_nms_boxes = tf.cast(pre_nms_boxes, dtype=tf.float32)
pre_nms_scores = tf.cast(pre_nms_scores, dtype=tf.float32)
post_nms_boxes, post_nms_scores, post_nms_classes, \
post_nms_num_valid_boxes = tf.image.combined_non_max_suppression(
pre_nms_boxes,
pre_nms_scores,
max_output_size_per_class=pre_nms_num_detections,
max_total_size=post_nms_num_detections,
iou_threshold=nms_threshold,
score_threshold=0.0,
pad_per_class=False
)
post_nms_classes = post_nms_classes + 1
post_nms_boxes = box_utils.to_absolute_coordinates(post_nms_boxes, height, width)
return post_nms_num_valid_boxes, post_nms_boxes, tf.cast(post_nms_classes, dtype=tf.float32), post_nms_scores
| DeepLearningExamples-master | TensorFlow2/Segmentation/MaskRCNN/mrcnn_tf2/ops/postprocess_ops.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ROI-related ops."""
from __future__ import absolute_import, division, print_function
import tensorflow as tf
from mrcnn_tf2.utils import box_utils
def _propose_rois_gpu(scores,
boxes,
anchor_boxes,
height,
width,
scale,
rpn_pre_nms_topn,
rpn_post_nms_topn,
rpn_nms_threshold,
rpn_min_size,
bbox_reg_weights):
"""Proposes RoIs giva group of candidates (GPU version).
Args:
scores: a tensor with a shape of [batch_size, num_boxes].
boxes: a tensor with a shape of [batch_size, num_boxes, 4],
in the encoded form.
anchor_boxes: a tensor with a shape of [batch_size, num_boxes, 4]
representing the anchor boxes.
height: a tensor of shape [batch_size, 1, 1] representing the image height.
width: a tensor of shape [batch_size, 1, 1] representing the image width.
scale: a tensor of shape [batch_size, 1, 1] representing the image scale.
rpn_pre_nms_topn: an integer number of top scoring RPN proposals to keep
before applying NMS. This is *per FPN level* (not total).
rpn_post_nms_topn: an integer number of top scoring RPN proposals to keep
after applying NMS. This is the total number of RPN proposals produced.
rpn_nms_threshold: a float number between 0 and 1 as the NMS threshold
used on RPN proposals.
rpn_min_size: an integer number as the minimum proposal height and width;
both need to be greater than this number. Note that this number is at the
original image scale (not the scale used during training or inference).
bbox_reg_weights: None or a list of four numbers specifying the weights used
when decoding the box.
Returns:
scores: a tensor with a shape of [batch_size, rpn_post_nms_topn, 1]
representing the scores of the proposals. It has same dtype as input
scores.
boxes: a tensor with a shape of [batch_size, rpn_post_nms_topn, 4]
represneting the boxes of the proposals. The boxes are in normalized
coordinates with a form of [ymin, xmin, ymax, xmax]. It has same dtype as
input boxes.
"""
batch_size, num_boxes = scores.get_shape().as_list()
topk_limit = min(num_boxes, rpn_pre_nms_topn)
boxes = box_utils.decode_boxes(boxes, anchor_boxes, bbox_reg_weights)
boxes = box_utils.clip_boxes(boxes, height, width)
if rpn_min_size > 0.0:
boxes, scores = box_utils.filter_boxes(
boxes,
tf.expand_dims(scores, axis=-1),
rpn_min_size,
height,
width,
scale
)
scores = tf.squeeze(scores, axis=-1)
post_nms_topk_limit = topk_limit if topk_limit < rpn_post_nms_topn else rpn_post_nms_topn
if rpn_nms_threshold > 0:
# Normalize coordinates as combined_non_max_suppression currently
# only support normalized coordinates.
pre_nms_boxes = box_utils.to_normalized_coordinates(boxes, height, width)
pre_nms_boxes = tf.reshape(pre_nms_boxes, [batch_size, num_boxes, 1, 4])
pre_nms_scores = tf.reshape(scores, [batch_size, num_boxes, 1])
# fixed problems when running with Keras AMP
pre_nms_boxes = tf.cast(pre_nms_boxes, dtype=tf.float32)
pre_nms_scores = tf.cast(pre_nms_scores, dtype=tf.float32)
with tf.device('CPU:0'):
boxes, scores, _, _ = tf.image.combined_non_max_suppression(
pre_nms_boxes,
pre_nms_scores,
max_output_size_per_class=topk_limit,
max_total_size=post_nms_topk_limit,
iou_threshold=rpn_nms_threshold,
score_threshold=0.0,
pad_per_class=False
)
boxes = box_utils.to_absolute_coordinates(boxes, height, width)
else:
scores, boxes = box_utils.top_k(scores, k=post_nms_topk_limit, boxes_list=[boxes])
boxes = boxes[0]
return scores, boxes
def multilevel_propose_rois(scores_outputs,
box_outputs,
all_anchors,
image_info,
rpn_pre_nms_topn,
rpn_post_nms_topn,
rpn_nms_threshold,
rpn_min_size,
bbox_reg_weights):
"""Proposes RoIs given a group of candidates from different FPN levels.
Args:
scores_outputs: an OrderedDict with keys representing levels and values
representing logits in [batch_size, height, width, num_anchors].
box_outputs: an OrderedDict with keys representing levels and values
representing box regression targets in
[batch_size, height, width, num_anchors * 4]
all_anchors: an Anchors object that contains all of the anchors.
image_info: a tensor of shape [batch_size, 5] whose columns encode the
input image's [height, width, scale, original_height, original_width].
Height and width are for the input to the network, not the original image;
scale is the scale factor used to scale the network input size to the
original image size. The last two are the original height and width.
See dataloader.DetectionInputProcessor for details.
rpn_pre_nms_topn: an integer number of top scoring RPN proposals to keep
before applying NMS. This is *per FPN level* (not total).
rpn_post_nms_topn: an integer number of top scoring RPN proposals to keep
after applying NMS. This is the total number of RPN proposals produced.
rpn_nms_threshold: a float number between 0 and 1 as the NMS threshold
used on RPN proposals.
rpn_min_size: an integer number as the minimum proposal height and width;
both need to be greater than this number. Note that this number is at the
original image scale (not the scale used during training or inference).
bbox_reg_weights: None or a list of four numbers specifying the weights used
when decoding the box.
Returns:
scores: a tensor with a shape of [batch_size, rpn_post_nms_topn, 1]
representing the scores of the proposals.
rois: a tensor with a shape of [batch_size, rpn_post_nms_topn, 4]
representing the boxes of the proposals. The boxes are in normalized
coordinates with a form of [ymin, xmin, ymax, xmax].
"""
with tf.name_scope('multilevel_propose_rois'):
levels = scores_outputs.keys()
scores = []
rois = []
anchor_boxes = all_anchors.get_unpacked_boxes()
height = tf.expand_dims(image_info[:, 0:1], axis=-1)
width = tf.expand_dims(image_info[:, 1:2], axis=-1)
scale = tf.expand_dims(image_info[:, 2:3], axis=-1)
for level in levels:
with tf.name_scope('level_%d' % level) as scope:
batch_size, feature_h, feature_w, num_anchors_per_location = scores_outputs[level].get_shape().as_list()
num_boxes = feature_h * feature_w * num_anchors_per_location
this_level_scores = tf.reshape(scores_outputs[level], [batch_size, num_boxes])
this_level_scores = tf.sigmoid(this_level_scores)
this_level_boxes = tf.reshape(box_outputs[level], [batch_size, num_boxes, 4])
this_level_anchors = tf.cast(
tf.reshape(
tf.expand_dims(anchor_boxes[level], axis=0) *
tf.ones([batch_size, 1, 1, 1]),
[batch_size, num_boxes, 4]
),
dtype=this_level_scores.dtype
)
this_level_scores, this_level_boxes = _propose_rois_gpu(
this_level_scores,
this_level_boxes,
this_level_anchors,
height,
width,
scale,
rpn_pre_nms_topn,
rpn_post_nms_topn,
rpn_nms_threshold,
rpn_min_size,
bbox_reg_weights
)
scores.append(this_level_scores)
rois.append(this_level_boxes)
scores = tf.concat(scores, axis=1)
rois = tf.concat(rois, axis=1)
with tf.name_scope('roi_post_nms_topk'):
post_nms_num_anchors = scores.shape[1]
post_nms_topk_limit = min(post_nms_num_anchors, rpn_post_nms_topn)
top_k_scores, top_k_rois = box_utils.top_k(
scores,
k=post_nms_topk_limit,
boxes_list=[rois]
)
top_k_rois = top_k_rois[0]
return top_k_scores, top_k_rois
| DeepLearningExamples-master | TensorFlow2/Segmentation/MaskRCNN/mrcnn_tf2/ops/roi_ops.py |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Preprocessing ops."""
import math
import tensorflow as tf
from mrcnn_tf2.object_detection import preprocessor
def normalize_image(image):
"""Normalize the image.
Args:
image: a tensor of shape [height, width, 3] in dtype=tf.float32.
Returns:
normalized_image: a tensor which has the same shape and dtype as image,
with pixel values normalized.
"""
offset = tf.constant([0.485, 0.456, 0.406])
offset = tf.reshape(offset, shape=(1, 1, 3))
scale = tf.constant([0.229, 0.224, 0.225])
scale = tf.reshape(scale, shape=(1, 1, 3))
normalized_image = (image - offset) / scale
return normalized_image
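# Editor's note: illustrative sketch, not part of the original file. It applies
# normalize_image to a constant-gray image to show the per-channel
# (pixel - mean) / std transform with the ImageNet statistics used above.
def _example_normalize_image():
    import tensorflow as tf
    image = tf.fill([2, 2, 3], 0.5)
    normalized = normalize_image(image)
    # Channel 0 becomes (0.5 - 0.485) / 0.229 ~= 0.066 everywhere; channels 1 and 2
    # use their own offset/scale entries in the same way.
    return normalized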
def random_horizontal_flip(image, boxes=None, masks=None, seed=None):
"""Random horizontal flip the image, boxes, and masks.
Args:
image: a tensor of shape [height, width, 3] representing the image.
boxes: (Optional) a tensor of shape [num_boxes, 4] representing the box
corners in normalized coordinates.
masks: (Optional) a tensor of shape [num_masks, height, width]
representing the object masks. Note that the size of the mask is the
same as the image.
Returns:
image: the processed image tensor after being randomly flipped.
boxes: None or the processed box tensor after being randomly flipped.
masks: None or the processed mask tensor after being randomly flipped.
"""
return preprocessor.random_horizontal_flip(image, boxes, masks, seed=seed)
def resize_and_pad(image, target_size, stride, boxes=None, masks=None):
"""Resize and pad images, boxes and masks.
Resize and pad images, (optionally boxes and masks) given the desired output
size of the image and stride size.
Here are the preprocessing steps.
1. For a given image, keep its aspect ratio and rescale the image so that it
becomes the largest rectangle bounded by the rectangle specified by the
`target_size`.
2. Pad the rescaled image such that the height and width of the image become
the smallest multiple of the stride that is larger than or equal to the
desired output dimension.
Args:
image: an image tensor of shape [original_height, original_width, 3].
target_size: a tuple of two integers indicating the desired output
image size. Note that the actual output size could be different from this.
stride: the stride of the backbone network. Each of the output image sides
must be the multiple of this.
boxes: (Optional) a tensor of shape [num_boxes, 4] representing the box
corners in normalized coordinates.
masks: (Optional) a tensor of shape [num_masks, height, width]
representing the object masks. Note that the size of the mask is the
same as the image.
Returns:
image: the processed image tensor after being resized and padded.
image_info: a tensor of shape [5] which encodes the height, width before
and after resizing and the scaling factor.
boxes: None or the processed box tensor after being resized and padded.
After the processing, boxes will be in the absolute coordinates w.r.t.
the scaled image.
masks: None or the processed mask tensor after being resized and padded.
"""
input_height, input_width, _ = tf.unstack(
tf.cast(tf.shape(input=image), dtype=tf.float32),
axis=0
)
target_height, target_width = target_size
scale_if_resize_height = target_height / input_height
scale_if_resize_width = target_width / input_width
scale = tf.minimum(scale_if_resize_height, scale_if_resize_width)
scaled_height = tf.cast(scale * input_height, dtype=tf.int32)
scaled_width = tf.cast(scale * input_width, dtype=tf.int32)
image = tf.image.resize(image, [scaled_height, scaled_width], method=tf.image.ResizeMethod.BILINEAR)
padded_height = int(math.ceil(target_height * 1.0 / stride) * stride)
padded_width = int(math.ceil(target_width * 1.0 / stride) * stride)
image = tf.image.pad_to_bounding_box(image, 0, 0, padded_height, padded_width)
image.set_shape([padded_height, padded_width, 3])
image_info = tf.stack([
tf.cast(scaled_height, dtype=tf.float32),
tf.cast(scaled_width, dtype=tf.float32),
1.0 / scale,
input_height,
input_width]
)
if boxes is not None:
normalized_box_list = preprocessor.box_list.BoxList(boxes)
scaled_boxes = preprocessor.box_list_scale(normalized_box_list, scaled_height, scaled_width).get()
else:
scaled_boxes = None
if masks is not None:
scaled_masks = tf.image.resize(
tf.expand_dims(masks, -1),
[scaled_height, scaled_width],
method=tf.image.ResizeMethod.NEAREST_NEIGHBOR
)
# Check if there is any instance in this image or not.
num_masks = tf.shape(input=scaled_masks)[0]
scaled_masks = tf.cond(
pred=tf.greater(num_masks, 0),
true_fn=lambda: tf.image.pad_to_bounding_box(scaled_masks, 0, 0, padded_height, padded_width),
false_fn=lambda: tf.zeros([0, padded_height, padded_width, 1])
)
else:
scaled_masks = None
return image, image_info, scaled_boxes, scaled_masks
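# Editor's note: illustrative sketch, not part of the original file. It works
# through the scale/padding arithmetic of resize_and_pad for made-up sizes:
# a 600x800 image resized towards a (1024, 1024) target with stride 32.
def _example_resize_and_pad_arithmetic():
    import math
    input_height, input_width = 600.0, 800.0
    target_height, target_width = 1024, 1024
    stride = 32
    scale = min(target_height / input_height, target_width / input_width)  # 1.28
    scaled_height = int(scale * input_height)                              # 768
    scaled_width = int(scale * input_width)                                # 1024
    padded_height = int(math.ceil(target_height * 1.0 / stride) * stride)  # 1024
    padded_width = int(math.ceil(target_width * 1.0 / stride) * stride)    # 1024
    return scale, scaled_height, scaled_width, padded_height, padded_width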
def crop_gt_masks(instance_masks, boxes, gt_mask_size, image_size):
"""Crops the ground truth binary masks and resize to fixed-size masks."""
num_masks = tf.shape(input=instance_masks)[0]
scale_sizes = tf.convert_to_tensor(value=[image_size[0], image_size[1]] * 2, dtype=tf.float32)
boxes = boxes / scale_sizes
cropped_gt_masks = tf.image.crop_and_resize(
image=instance_masks,
boxes=boxes,
box_indices=tf.range(num_masks, dtype=tf.int32),
crop_size=[gt_mask_size, gt_mask_size],
method='bilinear')[:, :, :, 0]
cropped_gt_masks = tf.pad(
tensor=cropped_gt_masks,
paddings=tf.constant([[0, 0], [2, 2], [2, 2]]),
mode='CONSTANT',
constant_values=0.
)
return cropped_gt_masks
def pad_to_fixed_size(data, pad_value, output_shape):
"""Pad data to a fixed length at the first dimension.
Args:
data: Tensor to be padded to output_shape.
pad_value: A constant value assigned to the paddings.
output_shape: The output shape of a 2D tensor.
Returns:
The Padded tensor with output_shape [max_num_instances, dimension].
"""
max_num_instances = output_shape[0]
dimension = output_shape[1]
data = tf.reshape(data, [-1, dimension])
num_instances = tf.shape(input=data)[0]
pad_length = max_num_instances - num_instances
paddings = pad_value * tf.ones([pad_length, dimension])
padded_data = tf.reshape(tf.concat([data, paddings], axis=0), output_shape)
return padded_data
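# Editor's note: illustrative sketch, not part of the original file. It pads a
# two-instance box tensor up to a fixed first dimension with -1, mirroring the way
# variable-length ground truth is padded in the docstrings above.
def _example_pad_to_fixed_size():
    import tensorflow as tf
    boxes = tf.constant([[1., 2., 3., 4.], [5., 6., 7., 8.]])
    padded = pad_to_fixed_size(boxes, pad_value=-1, output_shape=[4, 4])
    # padded has shape [4, 4]: the two real boxes followed by two rows of -1.
    return padded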
| DeepLearningExamples-master | TensorFlow2/Segmentation/MaskRCNN/mrcnn_tf2/ops/preprocess_ops.py |
DeepLearningExamples-master | TensorFlow2/Segmentation/MaskRCNN/mrcnn_tf2/ops/__init__.py |
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for bounding box processing."""
from __future__ import absolute_import, division, print_function
import numpy as np
import tensorflow as tf
EPSILON = 1e-8
BBOX_XFORM_CLIP = np.log(1000. / 16.)
def jitter_boxes(boxes, noise_scale=0.025):
"""Jitter the box coordinates by some noise distribution.
Args:
boxes: a tensor whose last dimension is 4 representing the coordinates
of boxes in ymin, xmin, ymax, xmax order.
noise_scale: a python float which specifies the magnitude of noise. The
rule of thumb is to set this between (0, 0.1]. The default value is found
to mimic the noisy detections best empirically.
Returns:
jittered_boxes: a tensor whose shape is the same as `boxes` representing
the jittered boxes.
Raises:
ValueError: If the last dimension of boxes is not 4.
"""
if boxes.shape[-1] != 4:
raise ValueError(
'boxes.shape[-1] is {:d}, but must be 4.'.format(boxes.shape[-1]))
with tf.name_scope('jitter_boxes'):
bbox_jitters = tf.random.normal(boxes.get_shape(), stddev=noise_scale)
ymin = boxes[..., 0:1]
xmin = boxes[..., 1:2]
ymax = boxes[..., 2:3]
xmax = boxes[..., 3:4]
width = xmax - xmin
height = ymax - ymin
new_center_x = (xmin + xmax) / 2.0 + bbox_jitters[..., 0:1] * width
new_center_y = (ymin + ymax) / 2.0 + bbox_jitters[..., 1:2] * height
new_width = width * tf.exp(bbox_jitters[..., 2:3])
new_height = height * tf.exp(bbox_jitters[..., 3:4])
jittered_boxes = tf.concat([
new_center_y - new_height * 0.5,
new_center_x - new_width * 0.5,
new_center_y + new_height * 0.5,
new_center_x + new_width * 0.5], axis=-1)
return jittered_boxes
def normalize_boxes(boxes, image_shape):
"""Converts boxes to the normalized coordinates.
Args:
boxes: a tensor whose last dimension is 4 representing the coordinates
of boxes in ymin, xmin, ymax, xmax order.
image_shape: a list of two integers, a two-element vector or a tensor such
that all but the last dimensions are `broadcastable` to `boxes`. The last
dimension is 2, which represents [height, width].
Returns:
normalized_boxes: a tensor whose shape is the same as `boxes` representing
the normalized boxes.
Raises:
ValueError: If the last dimension of boxes is not 4.
"""
if boxes.shape[-1] != 4:
raise ValueError(
'boxes.shape[-1] is {:d}, but must be 4.'.format(boxes.shape[-1]))
with tf.name_scope('normalize_boxes'):
if isinstance(image_shape, (list, tuple)):
height, width = image_shape
else:
image_shape = tf.cast(image_shape, dtype=boxes.dtype)
height = image_shape[..., 0:1]
width = image_shape[..., 1:2]
ymin = boxes[..., 0:1] / height
xmin = boxes[..., 1:2] / width
ymax = boxes[..., 2:3] / height
xmax = boxes[..., 3:4] / width
normalized_boxes = tf.concat([ymin, xmin, ymax, xmax], axis=-1)
return normalized_boxes
def denormalize_boxes(boxes, image_shape):
"""Converts boxes normalized by [height, width] to pixel coordinates.
Args:
boxes: a tensor whose last dimension is 4 representing the coordinates
of boxes in ymin, xmin, ymax, xmax order.
image_shape: a list of two integers, a two-element vector or a tensor such
that all but the last dimensions are `broadcastable` to `boxes`. The last
dimension is 2, which represents [height, width].
Returns:
denormalized_boxes: a tensor whose shape is the same as `boxes` representing
the denormalized boxes.
Raises:
ValueError: If the last dimension of boxes is not 4.
"""
with tf.name_scope('denormalize_boxes'):
if isinstance(image_shape, (list, tuple)):
height, width = image_shape
else:
image_shape = tf.cast(image_shape, dtype=boxes.dtype)
height, width = tf.split(image_shape, 2, axis=-1)
ymin, xmin, ymax, xmax = tf.split(boxes, 4, axis=-1)
ymin = ymin * height
xmin = xmin * width
ymax = ymax * height
xmax = xmax * width
denormalized_boxes = tf.concat([ymin, xmin, ymax, xmax], axis=-1)
return denormalized_boxes
def clip_boxes(boxes, image_shape):
"""Clips boxes to image boundaries.
Args:
boxes: a tensor whose last dimension is 4 representing the coordinates
of boxes in ymin, xmin, ymax, xmax order.
image_shape: a list of two integers, a two-element vector or a tensor such
that all but the last dimensions are `broadcastable` to `boxes`. The last
dimension is 2, which represents [height, width].
Returns:
clipped_boxes: a tensor whose shape is the same as `boxes` representing the
clipped boxes.
Raises:
ValueError: If the last dimension of boxes is not 4.
"""
if boxes.shape[-1] != 4:
raise ValueError(
'boxes.shape[-1] is {:d}, but must be 4.'.format(boxes.shape[-1]))
with tf.name_scope('clip_boxes'):
if isinstance(image_shape, (list, tuple)):
height, width = image_shape
else:
image_shape = tf.cast(image_shape, dtype=boxes.dtype)
height = image_shape[..., 0:1]
width = image_shape[..., 1:2]
ymin = boxes[..., 0:1]
xmin = boxes[..., 1:2]
ymax = boxes[..., 2:3]
xmax = boxes[..., 3:4]
clipped_ymin = tf.maximum(tf.minimum(ymin, height - 1.0), 0.0)
clipped_ymax = tf.maximum(tf.minimum(ymax, height - 1.0), 0.0)
clipped_xmin = tf.maximum(tf.minimum(xmin, width - 1.0), 0.0)
clipped_xmax = tf.maximum(tf.minimum(xmax, width - 1.0), 0.0)
clipped_boxes = tf.concat(
[clipped_ymin, clipped_xmin, clipped_ymax, clipped_xmax],
axis=-1)
return clipped_boxes
def compute_outer_boxes(boxes, image_shape, scale=1.0):
"""Compute outer box encloses an object with a margin.
Args:
boxes: a tensor whose last dimension is 4 representing the coordinates
of boxes in ymin, xmin, ymax, xmax order.
image_shape: a list of two integers, a two-element vector or a tensor such
that all but the last dimensions are `broadcastable` to `boxes`. The last
dimension is 2, which represents [height, width].
scale: a float number specifying the scale of output outer boxes to input
`boxes`.
Returns:
outer_boxes: a tensor whose shape is the same as `boxes` representing the
outer boxes.
"""
if scale < 1.0:
raise ValueError(
'scale is {}, but outer box scale must be greater than 1.0.'.format(
scale))
centers_y = (boxes[..., 0] + boxes[..., 2]) / 2.0
centers_x = (boxes[..., 1] + boxes[..., 3]) / 2.0
box_height = (boxes[..., 2] - boxes[..., 0]) * scale
box_width = (boxes[..., 3] - boxes[..., 1]) * scale
outer_boxes = tf.stack([centers_y - box_height / 2.0,
centers_x - box_width / 2.0,
centers_y + box_height / 2.0,
centers_x + box_width / 2.0], axis=1)
outer_boxes = clip_boxes(outer_boxes, image_shape)
return outer_boxes
def encode_boxes(boxes, anchors, weights=None):
"""Encode boxes to targets.
Args:
boxes: a tensor whose last dimension is 4 representing the coordinates
of boxes in ymin, xmin, ymax, xmax order.
anchors: a tensor whose shape is the same as, or `broadcastable` to `boxes`,
representing the coordinates of anchors in ymin, xmin, ymax, xmax order.
weights: None or a list of four float numbers used to scale coordinates.
Returns:
encoded_boxes: a tensor whose shape is the same as `boxes` representing the
encoded box targets.
Raises:
ValueError: If the last dimension of boxes is not 4.
"""
if boxes.shape[-1] != 4:
raise ValueError(
'boxes.shape[-1] is {:d}, but must be 4.'.format(boxes.shape[-1]))
with tf.name_scope('encode_boxes'):
boxes = tf.cast(boxes, dtype=anchors.dtype)
ymin = boxes[..., 0:1]
xmin = boxes[..., 1:2]
ymax = boxes[..., 2:3]
xmax = boxes[..., 3:4]
box_h = ymax - ymin + 1.0
box_w = xmax - xmin + 1.0
box_yc = ymin + 0.5 * box_h
box_xc = xmin + 0.5 * box_w
anchor_ymin = anchors[..., 0:1]
anchor_xmin = anchors[..., 1:2]
anchor_ymax = anchors[..., 2:3]
anchor_xmax = anchors[..., 3:4]
anchor_h = anchor_ymax - anchor_ymin + 1.0
anchor_w = anchor_xmax - anchor_xmin + 1.0
anchor_yc = anchor_ymin + 0.5 * anchor_h
anchor_xc = anchor_xmin + 0.5 * anchor_w
encoded_dy = (box_yc - anchor_yc) / anchor_h
encoded_dx = (box_xc - anchor_xc) / anchor_w
encoded_dh = tf.math.log(box_h / anchor_h)
encoded_dw = tf.math.log(box_w / anchor_w)
if weights:
encoded_dy *= weights[0]
encoded_dx *= weights[1]
encoded_dh *= weights[2]
encoded_dw *= weights[3]
encoded_boxes = tf.concat(
[encoded_dy, encoded_dx, encoded_dh, encoded_dw],
axis=-1)
return encoded_boxes
def decode_boxes(encoded_boxes, anchors, weights=None):
"""Decode boxes.
Args:
encoded_boxes: a tensor whose last dimension is 4 representing the
coordinates of encoded boxes in ymin, xmin, ymax, xmax order.
anchors: a tensor whose shape is the same as, or `broadcastable` to `boxes`,
representing the coordinates of anchors in ymin, xmin, ymax, xmax order.
weights: None or a list of four float numbers used to scale coordinates.
Returns:
decoded_boxes: a tensor whose shape is the same as `encoded_boxes`
representing the decoded boxes.
"""
if encoded_boxes.shape[-1] != 4:
raise ValueError(
'encoded_boxes.shape[-1] is {:d}, but must be 4.'
.format(encoded_boxes.shape[-1]))
with tf.name_scope('decode_boxes'):
encoded_boxes = tf.cast(encoded_boxes, dtype=anchors.dtype)
dy = encoded_boxes[..., 0:1]
dx = encoded_boxes[..., 1:2]
dh = encoded_boxes[..., 2:3]
dw = encoded_boxes[..., 3:4]
if weights:
dy /= weights[0]
dx /= weights[1]
dh /= weights[2]
dw /= weights[3]
dh = tf.minimum(dh, BBOX_XFORM_CLIP)
dw = tf.minimum(dw, BBOX_XFORM_CLIP)
anchor_ymin = anchors[..., 0:1]
anchor_xmin = anchors[..., 1:2]
anchor_ymax = anchors[..., 2:3]
anchor_xmax = anchors[..., 3:4]
anchor_h = anchor_ymax - anchor_ymin + 1.0
anchor_w = anchor_xmax - anchor_xmin + 1.0
anchor_yc = anchor_ymin + 0.5 * anchor_h
anchor_xc = anchor_xmin + 0.5 * anchor_w
decoded_boxes_yc = dy * anchor_h + anchor_yc
decoded_boxes_xc = dx * anchor_w + anchor_xc
decoded_boxes_h = tf.exp(dh) * anchor_h
decoded_boxes_w = tf.exp(dw) * anchor_w
decoded_boxes_ymin = decoded_boxes_yc - 0.5 * decoded_boxes_h
decoded_boxes_xmin = decoded_boxes_xc - 0.5 * decoded_boxes_w
decoded_boxes_ymax = decoded_boxes_ymin + decoded_boxes_h - 1.0
decoded_boxes_xmax = decoded_boxes_xmin + decoded_boxes_w - 1.0
decoded_boxes = tf.concat(
[decoded_boxes_ymin, decoded_boxes_xmin,
decoded_boxes_ymax, decoded_boxes_xmax],
axis=-1)
return decoded_boxes
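# Hedged, illustrative sketch (not part of the original module) showing that
# decode_boxes inverts encode_boxes when the same anchors and weights are
# used; the values are made up.
def _example_encode_decode_round_trip():
  anchors = tf.constant([[0.0, 0.0, 63.0, 63.0]])
  boxes = tf.constant([[10.0, 12.0, 50.0, 48.0]])
  weights = [10.0, 10.0, 5.0, 5.0]
  encoded = encode_boxes(boxes, anchors, weights=weights)
  # Up to floating point error, `decoded` equals `boxes`.
  decoded = decode_boxes(encoded, anchors, weights=weights)
  return decoded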
def filter_boxes(boxes, scores, image_shape, min_size_threshold):
"""Filter and remove boxes that are too small or fall outside the image.
Args:
boxes: a tensor whose last dimension is 4 representing the
coordinates of boxes in ymin, xmin, ymax, xmax order.
scores: a tensor whose shape is the same as tf.shape(boxes)[:-1]
representing the original scores of the boxes.
image_shape: a tensor whose shape is the same as, or `broadcastable` to
`boxes` except the last dimension, which is 2, representing
[height, width] of the scaled image.
min_size_threshold: a float representing the minimal box size on each
side (w.r.t. the scaled image). Boxes whose sides are smaller than this
threshold will be filtered out.
Returns:
filtered_boxes: a tensor whose shape is the same as `boxes` but with
the positions of the filtered boxes filled with 0.
filtered_scores: a tensor whose shape is the same as `scores` but with
the positions of the filtered boxes filled with 0.
"""
if boxes.shape[-1] != 4:
raise ValueError(
'boxes.shape[-1] is {:d}, but must be 4.'.format(boxes.shape[-1]))
with tf.name_scope('filter_boxes'):
if isinstance(image_shape, (list, tuple)):
height, width = image_shape
else:
image_shape = tf.cast(image_shape, dtype=boxes.dtype)
height = image_shape[..., 0]
width = image_shape[..., 1]
ymin = boxes[..., 0]
xmin = boxes[..., 1]
ymax = boxes[..., 2]
xmax = boxes[..., 3]
h = ymax - ymin + 1.0
w = xmax - xmin + 1.0
yc = ymin + 0.5 * h
xc = xmin + 0.5 * w
min_size = tf.cast(tf.maximum(min_size_threshold, 1.0), dtype=boxes.dtype)
filtered_size_mask = tf.logical_and(
tf.greater(h, min_size), tf.greater(w, min_size))
filtered_center_mask = tf.logical_and(
tf.logical_and(tf.greater(yc, 0.0), tf.less(yc, height)),
tf.logical_and(tf.greater(xc, 0.0), tf.less(xc, width)))
filtered_mask = tf.logical_and(filtered_size_mask, filtered_center_mask)
filtered_scores = tf.where(filtered_mask, scores, tf.zeros_like(scores))
filtered_boxes = tf.cast(
tf.expand_dims(filtered_mask, axis=-1), dtype=boxes.dtype) * boxes
return filtered_boxes, filtered_scores
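# Hedged, illustrative usage sketch (not part of the original module); the
# values are made up. The second box is smaller than the threshold, so its
# score and coordinates are zeroed out rather than removed.
def _example_filter_boxes():
  boxes = tf.constant([[[10.0, 10.0, 60.0, 60.0],
                        [20.0, 20.0, 22.0, 22.0]]])
  scores = tf.constant([[0.9, 0.8]])
  # Expected scores: [[0.9, 0.0]]
  return filter_boxes(boxes, scores, image_shape=[100.0, 100.0],
                      min_size_threshold=5.0)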
def filter_boxes_by_scores(boxes, scores, min_score_threshold):
"""Filter and remove boxes whose scores are smaller than the threshold.
Args:
boxes: a tensor whose last dimension is 4 representing the
coordinates of boxes in ymin, xmin, ymax, xmax order.
scores: a tensor whose shape is the same as tf.shape(boxes)[:-1]
representing the original scores of the boxes.
min_score_threshold: a float representing the minimal box score threshold.
Boxes whose score are smaller than it will be filtered out.
Returns:
filtered_boxes: a tensor whose shape is the same as `boxes` but with
the positions of the filtered boxes filled with 0.
filtered_scores: a tensor whose shape is the same as `scores` but with
the positions of the filtered boxes filled with 0.
"""
if boxes.shape[-1] != 4:
raise ValueError(
'boxes.shape[-1] is {:d}, but must be 4.'.format(boxes.shape[-1]))
with tf.name_scope('filter_boxes_by_scores'):
filtered_mask = tf.greater(scores, min_score_threshold)
filtered_scores = tf.where(filtered_mask, scores, tf.zeros_like(scores))
filtered_boxes = tf.cast(
tf.expand_dims(filtered_mask, axis=-1), dtype=boxes.dtype) * boxes
return filtered_boxes, filtered_scores
def top_k_boxes(boxes, scores, k):
"""Sort and select top k boxes according to the scores.
Args:
boxes: a tensor of shape [batch_size, N, 4] representing the coordinates of
the boxes. N is the number of boxes per image.
scores: a tensor of shape [batch_size, N] representing the scores of the
boxes.
k: an integer or a tensor indicating the top k number.
Returns:
selected_boxes: a tensor of shape [batch_size, k, 4] representing the
selected top k box coordinates.
selected_scores: a tensor of shape [batch_size, k] representing the selected
top k box scores.
"""
with tf.name_scope('top_k_boxes'):
selected_scores, top_k_indices = tf.nn.top_k(scores, k=k, sorted=True)
batch_size, _ = scores.get_shape().as_list()
if batch_size == 1:
selected_boxes = tf.squeeze(
tf.gather(boxes, top_k_indices, axis=1), axis=1)
else:
top_k_indices_shape = tf.shape(input=top_k_indices)
batch_indices = (
tf.expand_dims(tf.range(top_k_indices_shape[0]), axis=-1) *
tf.ones([1, top_k_indices_shape[-1]], dtype=tf.int32))
gather_nd_indices = tf.stack([batch_indices, top_k_indices], axis=-1)
selected_boxes = tf.gather_nd(boxes, gather_nd_indices)
return selected_boxes, selected_scores
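# Hedged, illustrative usage sketch (not part of the original module); the
# values are made up. Boxes are reordered so the highest-scoring ones come
# first.
def _example_top_k_boxes():
  boxes = tf.constant([[[0.0, 0.0, 1.0, 1.0],
                        [2.0, 2.0, 3.0, 3.0],
                        [4.0, 4.0, 5.0, 5.0]]])
  scores = tf.constant([[0.2, 0.9, 0.5]])
  # Returns the boxes with scores 0.9 and 0.5, in that order.
  return top_k_boxes(boxes, scores, k=2)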
def bbox_overlap(boxes, gt_boxes):
"""Calculates the overlap between proposal and ground truth boxes.
Some `gt_boxes` may have been padded. The returned `iou` tensor for these
boxes will be -1.
Args:
boxes: a tensor with a shape of [batch_size, N, 4]. N is the number of
proposals before groundtruth assignment (e.g., rpn_post_nms_topn). The
last dimension is the pixel coordinates in [ymin, xmin, ymax, xmax] form.
gt_boxes: a tensor with a shape of [batch_size, MAX_NUM_INSTANCES, 4]. This
tensor might have paddings with a negative value.
Returns:
iou: a tensor with a shape of [batch_size, N, MAX_NUM_INSTANCES].
"""
with tf.name_scope('bbox_overlap'):
bb_y_min, bb_x_min, bb_y_max, bb_x_max = tf.split(
value=boxes, num_or_size_splits=4, axis=2)
gt_y_min, gt_x_min, gt_y_max, gt_x_max = tf.split(
value=gt_boxes, num_or_size_splits=4, axis=2)
# Calculates the intersection area.
i_xmin = tf.maximum(bb_x_min, tf.transpose(a=gt_x_min, perm=[0, 2, 1]))
i_xmax = tf.minimum(bb_x_max, tf.transpose(a=gt_x_max, perm=[0, 2, 1]))
i_ymin = tf.maximum(bb_y_min, tf.transpose(a=gt_y_min, perm=[0, 2, 1]))
i_ymax = tf.minimum(bb_y_max, tf.transpose(a=gt_y_max, perm=[0, 2, 1]))
i_area = tf.maximum((i_xmax - i_xmin), 0) * tf.maximum((i_ymax - i_ymin), 0)
# Calculates the union area.
bb_area = (bb_y_max - bb_y_min) * (bb_x_max - bb_x_min)
gt_area = (gt_y_max - gt_y_min) * (gt_x_max - gt_x_min)
# Adds a small epsilon to avoid divide-by-zero.
u_area = bb_area + tf.transpose(a=gt_area, perm=[0, 2, 1]) - i_area + 1e-8
# Calculates IoU.
iou = i_area / u_area
# Fills -1 for padded ground truth boxes.
padding_mask = tf.less(i_xmin, tf.zeros_like(i_xmin))
iou = tf.where(padding_mask, -tf.ones_like(iou), iou)
return iou
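# Hedged, illustrative usage sketch (not part of the original module); the
# values are made up. The function returns the pairwise IoU between each
# proposal and each ground truth box.
def _example_bbox_overlap():
  boxes = tf.constant([[[0.0, 0.0, 10.0, 10.0],
                        [0.0, 0.0, 20.0, 20.0]]])
  gt_boxes = tf.constant([[[0.0, 0.0, 10.0, 10.0]]])
  # Expected result (approximately): [[[1.0], [0.25]]]
  return bbox_overlap(boxes, gt_boxes)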
| DeepLearningExamples-master | TensorFlow2/Segmentation/MaskRCNN/mrcnn_tf2/ops/box_utils.py |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Convert raw COCO dataset to TFRecord for object_detection.
Example usage:
python create_coco_tf_record.py --logtostderr \
--train_image_dir="${TRAIN_IMAGE_DIR}" \
--val_image_dir="${VAL_IMAGE_DIR}" \
--test_image_dir="${TEST_IMAGE_DIR}" \
--train_annotations_file="${TRAIN_ANNOTATIONS_FILE}" \
--val_annotations_file="${VAL_ANNOTATIONS_FILE}" \
--testdev_annotations_file="${TESTDEV_ANNOTATIONS_FILE}" \
--output_dir="${OUTPUT_DIR}"
"""
from __future__ import absolute_import, division, print_function
import collections
import hashlib
import io
import json
import logging
import multiprocessing
import os
import PIL.Image
import numpy as np
import tensorflow as tf
from absl import app, flags
from pycocotools import mask
from research.object_detection.utils import dataset_util, label_map_util
flags.DEFINE_boolean('include_masks', False,
'Whether to include instance segmentations masks '
'(PNG encoded) in the result. default: False.')
flags.DEFINE_string('train_image_dir', '', 'Training image directory.')
flags.DEFINE_string('val_image_dir', '', 'Validation image directory.')
flags.DEFINE_string('test_image_dir', '', 'Test image directory.')
flags.DEFINE_string('train_object_annotations_file', '', '')
flags.DEFINE_string('val_object_annotations_file', '', '')
flags.DEFINE_string('train_caption_annotations_file', '', '')
flags.DEFINE_string('val_caption_annotations_file', '', '')
flags.DEFINE_string('testdev_annotations_file', '',
'Test-dev annotations JSON file.')
flags.DEFINE_string('output_dir', '/tmp/', 'Output data directory.')
FLAGS = flags.FLAGS
def create_tf_example(image,
bbox_annotations,
caption_annotations,
image_dir,
category_index,
include_masks=False):
"""Converts image and annotations to a tf.Example proto.
Args:
image: dict with keys:
[u'license', u'file_name', u'coco_url', u'height', u'width',
u'date_captured', u'flickr_url', u'id']
bbox_annotations:
list of dicts with keys:
[u'segmentation', u'area', u'iscrowd', u'image_id',
u'bbox', u'category_id', u'id']
Notice that bounding box coordinates in the official COCO dataset are
given as [x, y, width, height] tuples using absolute coordinates where
x, y represent the top-left (0-indexed) corner. This function converts
to the format expected by the Tensorflow Object Detection API (which is
[ymin, xmin, ymax, xmax] with coordinates normalized relative
to image size).
image_dir: directory containing the image files.
category_index: a dict containing COCO category information keyed
by the 'id' field of each category. See the
label_map_util.create_category_index function.
include_masks: Whether to include instance segmentations masks
(PNG encoded) in the result. default: False.
Returns:
example: The converted tf.Example
num_annotations_skipped: Number of (invalid) annotations that were ignored.
Raises:
ValueError: if the image pointed to by data['filename'] is not a valid JPEG
"""
image_height = image['height']
image_width = image['width']
filename = image['file_name']
image_id = image['id']
full_path = os.path.join(image_dir, filename)
with tf.io.gfile.GFile(full_path, 'rb') as fid:
encoded_jpg = fid.read()
encoded_jpg_io = io.BytesIO(encoded_jpg)
image = PIL.Image.open(encoded_jpg_io)
key = hashlib.sha256(encoded_jpg).hexdigest()
xmin = []
xmax = []
ymin = []
ymax = []
is_crowd = []
category_names = []
category_ids = []
area = []
encoded_mask_png = []
num_annotations_skipped = 0
for object_annotations in bbox_annotations:
(x, y, width, height) = tuple(object_annotations['bbox'])
if width <= 0 or height <= 0:
num_annotations_skipped += 1
continue
if x + width > image_width or y + height > image_height:
num_annotations_skipped += 1
continue
xmin.append(float(x) / image_width)
xmax.append(float(x + width) / image_width)
ymin.append(float(y) / image_height)
ymax.append(float(y + height) / image_height)
is_crowd.append(object_annotations['iscrowd'])
category_id = int(object_annotations['category_id'])
category_ids.append(category_id)
category_names.append(category_index[category_id]['name'].encode('utf8'))
area.append(object_annotations['area'])
if include_masks:
run_len_encoding = mask.frPyObjects(object_annotations['segmentation'],
image_height, image_width)
binary_mask = mask.decode(run_len_encoding)
if not object_annotations['iscrowd']:
binary_mask = np.amax(binary_mask, axis=2)
pil_image = PIL.Image.fromarray(binary_mask)
output_io = io.BytesIO()
pil_image.save(output_io, format='PNG')
encoded_mask_png.append(output_io.getvalue())
captions = []
for caption_annotation in caption_annotations:
captions.append(caption_annotation['caption'].encode('utf8'))
feature_dict = {
'image/height':
dataset_util.int64_feature(image_height),
'image/width':
dataset_util.int64_feature(image_width),
'image/filename':
dataset_util.bytes_feature(filename.encode('utf8')),
'image/source_id':
dataset_util.bytes_feature(str(image_id).encode('utf8')),
'image/key/sha256':
dataset_util.bytes_feature(key.encode('utf8')),
'image/encoded':
dataset_util.bytes_feature(encoded_jpg),
'image/caption':
dataset_util.bytes_list_feature(captions),
'image/format':
dataset_util.bytes_feature('jpeg'.encode('utf8')),
'image/object/bbox/xmin':
dataset_util.float_list_feature(xmin),
'image/object/bbox/xmax':
dataset_util.float_list_feature(xmax),
'image/object/bbox/ymin':
dataset_util.float_list_feature(ymin),
'image/object/bbox/ymax':
dataset_util.float_list_feature(ymax),
'image/object/class/text':
dataset_util.bytes_list_feature(category_names),
'image/object/class/label':
dataset_util.int64_list_feature(category_ids),
'image/object/is_crowd':
dataset_util.int64_list_feature(is_crowd),
'image/object/area':
dataset_util.float_list_feature(area),
}
if include_masks:
feature_dict['image/object/mask'] = (
dataset_util.bytes_list_feature(encoded_mask_png))
example = tf.train.Example(features=tf.train.Features(feature=feature_dict))
return key, example, num_annotations_skipped
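# Hedged, illustrative sketch (not part of the original script) of the
# COCO [x, y, width, height] -> normalized [ymin, xmin, ymax, xmax]
# conversion performed inside create_tf_example; the numbers are made up.
def _example_bbox_conversion():
  image_width, image_height = 640, 480
  x, y, width, height = 64.0, 48.0, 128.0, 96.0  # COCO-style absolute bbox
  xmin, xmax = x / image_width, (x + width) / image_width
  ymin, ymax = y / image_height, (y + height) / image_height
  # -> ymin=0.1, xmin=0.1, ymax=0.3, xmax=0.3
  return ymin, xmin, ymax, xmax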
def _pool_create_tf_example(args):
return create_tf_example(*args)
def _load_object_annotations(object_annotations_file):
with tf.io.gfile.GFile(object_annotations_file, 'r') as fid:
obj_annotations = json.load(fid)
images = obj_annotations['images']
category_index = label_map_util.create_category_index(
obj_annotations['categories'])
img_to_obj_annotation = collections.defaultdict(list)
logging.info('Building bounding box index.')
for annotation in obj_annotations['annotations']:
image_id = annotation['image_id']
img_to_obj_annotation[image_id].append(annotation)
missing_annotation_count = 0
for image in images:
image_id = image['id']
if image_id not in img_to_obj_annotation:
missing_annotation_count += 1
logging.info('%d images are missing bboxes.', missing_annotation_count)
return images, img_to_obj_annotation, category_index
def _load_caption_annotations(caption_annotations_file):
with tf.io.gfile.GFile(caption_annotations_file, 'r') as fid:
caption_annotations = json.load(fid)
img_to_caption_annotation = collections.defaultdict(list)
logging.info('Building caption index.')
for annotation in caption_annotations['annotations']:
image_id = annotation['image_id']
img_to_caption_annotation[image_id].append(annotation)
missing_annotation_count = 0
images = caption_annotations['images']
for image in images:
image_id = image['id']
if image_id not in img_to_caption_annotation:
missing_annotation_count += 1
logging.info('%d images are missing captions.', missing_annotation_count)
return img_to_caption_annotation
def _create_tf_record_from_coco_annotations(
object_annotations_file,
caption_annotations_file,
image_dir, output_path, include_masks, num_shards):
"""Loads COCO annotation json files and converts to tf.Record format.
Args:
object_annotations_file: JSON file containing bounding box annotations.
caption_annotations_file: JSON file containing caption annotations.
image_dir: Directory containing the image files.
output_path: Path to output tf.Record file.
include_masks: Whether to include instance segmentations masks
(PNG encoded) in the result. default: False.
num_shards: Number of output files to create.
"""
logging.info('writing to output path: %s', output_path)
writers = [
tf.io.TFRecordWriter(output_path + '-%05d-of-%05d.tfrecord' %
(i, num_shards)) for i in range(num_shards)
]
images, img_to_obj_annotation, category_index = (
_load_object_annotations(object_annotations_file))
img_to_caption_annotation = (
_load_caption_annotations(caption_annotations_file))
pool = multiprocessing.Pool()
total_num_annotations_skipped = 0
for idx, (_, tf_example, num_annotations_skipped) in enumerate(
pool.imap(_pool_create_tf_example,
[(image,
img_to_obj_annotation[image['id']],
img_to_caption_annotation[image['id']],
image_dir,
category_index,
include_masks)
for image in images])):
if idx % 100 == 0:
logging.info('On image %d of %d', idx, len(images))
total_num_annotations_skipped += num_annotations_skipped
writers[idx % num_shards].write(tf_example.SerializeToString())
pool.close()
pool.join()
for writer in writers:
writer.close()
logging.info('Finished writing, skipped %d annotations.',
total_num_annotations_skipped)
def main(_):
assert FLAGS.train_image_dir, '`train_image_dir` missing.'
assert FLAGS.val_image_dir, '`val_image_dir` missing.'
assert FLAGS.test_image_dir, '`test_image_dir` missing.'
if not tf.io.gfile.isdir(FLAGS.output_dir):
tf.io.gfile.makedirs(FLAGS.output_dir)
train_output_path = os.path.join(FLAGS.output_dir, 'train')
val_output_path = os.path.join(FLAGS.output_dir, 'val')
testdev_output_path = os.path.join(FLAGS.output_dir, 'test-dev')
_create_tf_record_from_coco_annotations(
FLAGS.train_object_annotations_file,
FLAGS.train_caption_annotations_file,
FLAGS.train_image_dir,
train_output_path,
FLAGS.include_masks,
num_shards=256)
_create_tf_record_from_coco_annotations(
FLAGS.val_object_annotations_file,
FLAGS.val_caption_annotations_file,
FLAGS.val_image_dir,
val_output_path,
FLAGS.include_masks,
num_shards=32)
if __name__ == '__main__':
app.run(main)
| DeepLearningExamples-master | TensorFlow2/Segmentation/MaskRCNN/dataset/create_coco_tf_record.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Script that simplifies running training benchmark """
import argparse
import os
import shutil
import subprocess
from pathlib import Path
LOCK_FILE = Path('/tmp/mrcnn_tf2.lock')
class CustomFormatter(argparse.ArgumentDefaultsHelpFormatter, argparse.RawTextHelpFormatter):
pass
if __name__ == '__main__':
# CLI flags
# noinspection PyTypeChecker
parser = argparse.ArgumentParser(
description=(
'NVIDIA MaskRCNN TF2 train benchmark'
'\n\nNote: Any additional flags not specified below will be passed to main.py'
),
formatter_class=lambda prog: CustomFormatter(prog, max_help_position=100)
)
parser.add_argument('--gpus', type=int, metavar='N',
help='Number of GPUs. Defaults to all available')
parser.add_argument('--batch_size', type=int, required=True,
help='Batch size used during training')
parser.add_argument('--amp', action='store_true',
help='Enable automatic mixed precision')
parser.add_argument('--no_xla', action='store_true',
help='Disables XLA - accelerated linear algebra')
parser.add_argument('--data_dir', type=str, metavar='DIR', default='/data',
help='Input directory containing the dataset')
parser.add_argument('--weights_dir', type=str, metavar='DIR', default='/weights',
help='Directory containing pre-trained resnet weights')
parser.add_argument('--slurm_lock', action='store_true',
help='Prevent this script from being launched multiple times when used in multi-gpu slurm setup')
flags, remainder = parser.parse_known_args()
main_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '../main.py'))
checkpoint_path = os.path.join(flags.weights_dir, "rn50_tf_amp_ckpt_v20.06.0/nvidia_rn50_tf_amp")
# build command
cmd = (
f'python {main_path}'
f' train'
f' --data_dir "{flags.data_dir}"'
f' --backbone_checkpoint "{checkpoint_path}"'
f' --epochs 1'
f' --steps_per_epoch 200'
f' --log_every 10'
f' --train_batch_size {flags.batch_size}'
)
if not flags.no_xla:
cmd += ' --xla'
if flags.amp:
cmd += ' --amp'
if remainder:
cmd += ' ' + ' '.join(remainder)
if flags.gpus is not None:
cmd = f'CUDA_VISIBLE_DEVICES={",".join(map(str, range(flags.gpus)))} ' + cmd
# print command
line = '-' * shutil.get_terminal_size()[0]
print(line, cmd, line, sep='\n', flush=True)
# acquire lock if --slurm_lock is provided
try:
flags.slurm_lock and LOCK_FILE.touch(exist_ok=False)
except FileExistsError:
print(f'Failed to acquire lock ({LOCK_FILE}) - skipping')
exit(0)
# run model
code = subprocess.call(cmd, shell=True)
flags.slurm_lock and LOCK_FILE.unlink()
exit(code)
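# Hedged, illustrative invocation (not part of the original script); the flag
# values and paths below are made up:
#   python scripts/benchmark_training.py --gpus 8 --batch_size 4 --amp \
#       --data_dir /data --weights_dir /weights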
| DeepLearningExamples-master | TensorFlow2/Segmentation/MaskRCNN/scripts/benchmark_training.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Script that simplifies running evaluation benchmark """
import argparse
import os
import shutil
import subprocess
class CustomFormatter(argparse.ArgumentDefaultsHelpFormatter, argparse.RawTextHelpFormatter):
pass
if __name__ == '__main__':
# CLI flags
# noinspection PyTypeChecker
parser = argparse.ArgumentParser(
description=(
'NVIDIA MaskRCNN TF2 evaluation benchmark'
'\n\nNote: Any additional flags not specified below will be passed to main.py'
),
formatter_class=lambda prog: CustomFormatter(prog, max_help_position=100)
)
parser.add_argument('--batch_size', type=int, required=True,
help='Batch size used during training')
parser.add_argument('--amp', action='store_true',
help='Enable automatic mixed precision')
parser.add_argument('--no_xla', action='store_true',
help='Disables XLA - accelerated linear algebra')
parser.add_argument('--data_dir', type=str, metavar='DIR', default='/data',
help='Input directory containing the dataset')
parser.add_argument('--weights_dir', type=str, metavar='DIR', default='/weights',
help='Directory containing pre-trained resnet weights')
flags, remainder = parser.parse_known_args()
main_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '../main.py'))
checkpoint_path = os.path.join(flags.weights_dir, "rn50_tf_amp_ckpt_v20.06.0/nvidia_rn50_tf_amp")
# build command
cmd = (
f'python {main_path}'
f' infer'
f' --data_dir "{flags.data_dir}"'
f' --backbone_checkpoint "{checkpoint_path}"'
f' --eval_samples {200 * flags.batch_size}'
f' --log_warmup_steps 100'
f' --log_every 10'
f' --eval_batch_size {flags.batch_size}'
)
if not flags.no_xla:
cmd += ' --xla'
if flags.amp:
cmd += ' --amp'
if remainder:
cmd += ' ' + ' '.join(remainder)
# print command
line = '-' * shutil.get_terminal_size()[0]
print(line, cmd, line, sep='\n', flush=True)
# run model
exit(subprocess.call(cmd, shell=True))
| DeepLearningExamples-master | TensorFlow2/Segmentation/MaskRCNN/scripts/benchmark_inference.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Scripts that downloads pretrained weights for ResNet50 backbone. """
import argparse
from os import path
from io import BytesIO
from urllib.request import urlopen
from zipfile import ZipFile
RESNET_NAME = 'NVIDIA ResNet50 v1.5'
RESNET_URL = 'https://api.ngc.nvidia.com/v2/models/nvidia/rn50_tf_amp_ckpt/versions/20.06.0/zip'
RESNET_DIR = 'rn50_tf_amp_ckpt_v20.06.0'
if __name__ == '__main__':
# cli
parser = argparse.ArgumentParser(
description='NVIDIA MaskRCNN TF2 backbone checkpoint download and conversion'
)
parser.add_argument('--save_dir', type=str, default='/weights',
help='Directory to which the checkpoint will be saved')
parser.add_argument('--download_url', type=str, default=RESNET_URL,
help='Override checkpoint download url')
params = parser.parse_args()
resnet_dir = path.join(params.save_dir, RESNET_DIR)
# download and extract
print(f'Downloading and extracting {RESNET_NAME} checkpoint from {params.download_url}')
with urlopen(params.download_url) as zip_stream:
with ZipFile(BytesIO(zip_stream.read())) as zip_file:
zip_file.extractall(resnet_dir)
print(f'{RESNET_NAME} checkpoint was extracted to {resnet_dir}')
| DeepLearningExamples-master | TensorFlow2/Segmentation/MaskRCNN/scripts/download_weights.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Script that simplifies model training followed by evaluation. """
import argparse
import os
import shutil
import subprocess
from pathlib import Path
LOCK_FILE = Path('/tmp/mrcnn_tf2.lock')
class CustomFormatter(argparse.ArgumentDefaultsHelpFormatter, argparse.RawTextHelpFormatter):
pass
if __name__ == '__main__':
# CLI flags
# noinspection PyTypeChecker
parser = argparse.ArgumentParser(
description=(
'NVIDIA MaskRCNN TF2 train'
'\n\nNote: Any additional flags not specified below will be passed to main.py'
),
formatter_class=lambda prog: CustomFormatter(prog, max_help_position=100)
)
parser.add_argument('--gpus', type=int, metavar='N',
help='Number of GPUs. Defaults to all available')
parser.add_argument('--batch_size', type=int, metavar='N', default=4,
help='Batch size used during training')
parser.add_argument('--amp', action='store_true',
help='Enable automatic mixed precision')
parser.add_argument('--no_xla', action='store_true',
help='Disables XLA - accelerated linear algebra')
parser.add_argument('--data_dir', type=str, metavar='DIR', default='/data',
help='Input directory containing the dataset')
parser.add_argument('--weights_dir', type=str, metavar='DIR', default='/weights',
help='Directory containing pre-trained resnet weights')
parser.add_argument('--slurm_lock', action='store_true',
help='Prevent this script from being launched multiple times when used in multi-gpu slurm setup')
parser.add_argument('--no_eval', action='store_true', help='Disables evaluation after training.')
flags, remainder = parser.parse_known_args()
main_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '../main.py'))
checkpoint_path = os.path.join(flags.weights_dir, "rn50_tf_amp_ckpt_v20.06.0/nvidia_rn50_tf_amp")
# build commands
cmd_train = (
f'python {main_path}'
f' train'
f' --data_dir "{flags.data_dir}"'
f' --backbone_checkpoint "{checkpoint_path}"'
f' --train_batch_size {flags.batch_size}'
)
cmd_eval = (
f'python {main_path}'
f' eval'
f' --data_dir "{flags.data_dir}"'
f' --eval_file "{os.path.join(flags.data_dir, "annotations/instances_val2017.json")}"'
)
if not flags.no_xla:
cmd_train += ' --xla'
cmd_eval += ' --xla'
if flags.amp:
cmd_train += ' --amp'
cmd_eval += ' --amp'
if remainder:
cmd_train += ' ' + ' '.join(remainder)
cmd_eval += ' ' + ' '.join(remainder)
if flags.gpus is not None:
cmd_train = f'CUDA_VISIBLE_DEVICES={",".join(map(str, range(flags.gpus)))} ' + cmd_train
# print command
line = '-' * shutil.get_terminal_size()[0]
print(line, cmd_train, line, sep='\n', flush=True)
# acquire lock if --slurm_lock is provided
try:
flags.slurm_lock and LOCK_FILE.touch(exist_ok=False)
except FileExistsError:
print(f'Failed to acquire lock ({LOCK_FILE}) - skipping')
exit(0)
# run training
code = subprocess.call(cmd_train, shell=True)
# evaluation
if not code and not flags.no_eval:
print(line, cmd_eval, line, sep='\n', flush=True)
code = subprocess.call(cmd_eval, shell=True)
flags.slurm_lock and LOCK_FILE.unlink()
exit(code)
| DeepLearningExamples-master | TensorFlow2/Segmentation/MaskRCNN/scripts/train.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Script that simplifies inference. """
import argparse
import os
import shutil
import subprocess
class CustomFormatter(argparse.ArgumentDefaultsHelpFormatter, argparse.RawTextHelpFormatter):
pass
if __name__ == '__main__':
# CLI flags
# noinspection PyTypeChecker
parser = argparse.ArgumentParser(
description=(
'NVIDIA MaskRCNN TF2 inference'
'\n\nNote: Any additional flags not specified below will be passed to main.py'
),
formatter_class=lambda prog: CustomFormatter(prog, max_help_position=100)
)
parser.add_argument('--batch_size', type=int, metavar='N', default=8,
help='Batch size used during inference')
parser.add_argument('--amp', action='store_true',
help='Enable automatic mixed precision')
parser.add_argument('--no_xla', action='store_true',
help='Disables XLA - accelerated linear algebra')
parser.add_argument('--data_dir', type=str, metavar='DIR', default='/data',
help='Input directory containing the dataset')
parser.add_argument('--weights_dir', type=str, metavar='DIR', default='/weights',
help='Directory containing pre-trained resnet weights')
flags, remainder = parser.parse_known_args()
main_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '../main.py'))
checkpoint_path = os.path.join(flags.weights_dir, "rn50_tf_amp_ckpt_v20.06.0/nvidia_rn50_tf_amp")
# build command
cmd = (
f'python {main_path}'
f' infer'
f' --data_dir "{flags.data_dir}"'
f' --backbone_checkpoint "{checkpoint_path}"'
f' --eval_batch_size {flags.batch_size}'
)
if not flags.no_xla:
cmd += ' --xla'
if flags.amp:
cmd += ' --amp'
if remainder:
cmd += ' ' + ' '.join(remainder)
# print command
line = '-' * shutil.get_terminal_size()[0]
print(line, cmd, line, sep='\n', flush=True)
# run model
exit(subprocess.call(cmd, shell=True))
| DeepLearningExamples-master | TensorFlow2/Segmentation/MaskRCNN/scripts/inference.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Script that simplifies evaluation. """
import argparse
import os
import shutil
import subprocess
class CustomFormatter(argparse.ArgumentDefaultsHelpFormatter, argparse.RawTextHelpFormatter):
pass
if __name__ == '__main__':
# CLI flags
# noinspection PyTypeChecker
parser = argparse.ArgumentParser(
description=(
'NVIDIA MaskRCNN TF2 evaluation'
'\n\nNote: Any additional flags not specified below will be passed to main.py'
),
formatter_class=lambda prog: CustomFormatter(prog, max_help_position=100)
)
parser.add_argument('--batch_size', type=int, metavar='N', default=8,
help='Batch size used during evaluation')
parser.add_argument('--amp', action='store_true',
help='Enable automatic mixed precision')
parser.add_argument('--no_xla', action='store_true',
help='Disables XLA - accelerated linear algebra')
parser.add_argument('--data_dir', type=str, metavar='DIR', default='/data',
help='Input directory containing the dataset')
parser.add_argument('--weights_dir', type=str, metavar='DIR', default='/weights',
help='Directory containing pre-trained resnet weights')
flags, remainder = parser.parse_known_args()
main_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '../main.py'))
checkpoint_path = os.path.join(flags.weights_dir, "rn50_tf_amp_ckpt_v20.06.0/nvidia_rn50_tf_amp")
# build command
cmd = (
f'python {main_path}'
f' eval'
f' --data_dir "{flags.data_dir}"'
f' --eval_file "{os.path.join(flags.data_dir, "annotations/instances_val2017.json")}"'
f' --backbone_checkpoint "{checkpoint_path}"'
f' --eval_batch_size {flags.batch_size}'
)
if not flags.no_xla:
cmd += ' --xla'
if flags.amp:
cmd += ' --amp'
if remainder:
cmd += ' ' + ' '.join(remainder)
# print command
line = '-' * shutil.get_terminal_size()[0]
print(line, cmd, line, sep='\n', flush=True)
# run model
exit(subprocess.call(cmd, shell=True))
| DeepLearningExamples-master | TensorFlow2/Segmentation/MaskRCNN/scripts/evaluate.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
PARSER = argparse.ArgumentParser(description="U-Net medical")
PARSER.add_argument('--data_dir',
type=str,
default='./data',
help="""Directory where to download the dataset""")
def main():
FLAGS = PARSER.parse_args()
if not os.path.exists(FLAGS.data_dir):
os.makedirs(FLAGS.data_dir)
os.system('wget http://brainiac2.mit.edu/isbi_challenge/sites/default/files/train-volume.tif -P {}'.format(FLAGS.data_dir))
os.system('wget http://brainiac2.mit.edu/isbi_challenge/sites/default/files/train-labels.tif -P {}'.format(FLAGS.data_dir))
os.system('wget http://brainiac2.mit.edu/isbi_challenge/sites/default/files/test-volume.tif -P {}'.format(FLAGS.data_dir))
print("Finished downloading files for U-Net medical to {}".format(FLAGS.data_dir))
if __name__ == '__main__':
main()
| DeepLearningExamples-master | TensorFlow2/Segmentation/UNet_Medical/download_dataset.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Entry point of the application.
This file serves as the entry point for running UNet for segmentation of neuronal processes.
Example:
Training can be adjusted by modifying the arguments specified below::
$ python main.py --exec_mode train --model_dir /dataset ...
"""
import horovod.tensorflow as hvd
from model.unet import Unet
from runtime.run import train, evaluate, predict
from runtime.setup import get_logger, set_flags, prepare_model_dir
from runtime.arguments import PARSER, parse_args
from data_loading.data_loader import Dataset
def main():
"""
Starting point of the application
"""
hvd.init()
params = parse_args(PARSER.parse_args())
set_flags(params)
model_dir = prepare_model_dir(params)
params.model_dir = model_dir
logger = get_logger(params)
model = Unet()
dataset = Dataset(data_dir=params.data_dir,
batch_size=params.batch_size,
fold=params.fold,
augment=params.augment,
gpu_id=hvd.rank(),
num_gpus=hvd.size(),
seed=params.seed,
amp=params.use_amp)
if 'train' in params.exec_mode:
train(params, model, dataset, logger)
if 'evaluate' in params.exec_mode:
if hvd.rank() == 0:
evaluate(params, model, dataset, logger)
if 'predict' in params.exec_mode:
if hvd.rank() == 0:
predict(params, model, dataset, logger)
if __name__ == '__main__':
main()
| DeepLearningExamples-master | TensorFlow2/Segmentation/UNet_Medical/main.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Dataset class encapsulates the data loading"""
import multiprocessing
import os
from collections import deque
import numpy as np
import tensorflow as tf
from PIL import Image, ImageSequence
class Dataset:
"""Load, separate and prepare the data for training and prediction"""
def __init__(self, data_dir, batch_size, fold, augment=False, gpu_id=0, num_gpus=1, seed=0, amp=False):
if not os.path.exists(data_dir):
raise FileNotFoundError('Cannot find data dir: {}'.format(data_dir))
self._data_dir = data_dir
self._batch_size = batch_size
self._augment = augment
self.precision = tf.float16 if amp else tf.float32
self._seed = seed
images = self._load_multipage_tiff(os.path.join(self._data_dir, 'train-volume.tif'))
masks = self._load_multipage_tiff(os.path.join(self._data_dir, 'train-labels.tif'))
self._test_images = \
self._load_multipage_tiff(os.path.join(self._data_dir, 'test-volume.tif'))
train_indices, val_indices = self._get_val_train_indices(len(images), fold)
self._train_images = images[train_indices]
self._train_masks = masks[train_indices]
self._val_images = images[val_indices]
self._val_masks = masks[val_indices]
self._num_gpus = num_gpus
self._gpu_id = gpu_id
@property
def train_size(self):
return len(self._train_images)
@property
def eval_size(self):
return len(self._val_images)
@property
def test_size(self):
return len(self._test_images)
def _load_multipage_tiff(self, path):
"""Load tiff images containing many images in the channel dimension"""
return np.array([np.array(p) for p in ImageSequence.Iterator(Image.open(path))])
def _get_val_train_indices(self, length, fold, ratio=0.8):
assert 0 < ratio <= 1, "Train/total data ratio must be in range (0.0, 1.0]"
np.random.seed(self._seed)
indices = np.arange(0, length, 1, dtype=np.int64)
np.random.shuffle(indices)
if fold is not None:
indices = deque(indices)
indices.rotate(fold * int((1.0 - ratio) * length))
indices = np.array(indices)
train_indices = indices[:int(ratio * len(indices))]
val_indices = indices[int(ratio * len(indices)):]
else:
train_indices = indices
val_indices = []
return train_indices, val_indices
def _normalize_inputs(self, inputs):
"""Normalize inputs"""
inputs = tf.expand_dims(tf.cast(inputs, tf.float32), -1)
# Center around zero
inputs = tf.divide(inputs, 127.5) - 1
# Resize to match output size
inputs = tf.image.resize(inputs, (388, 388))
return tf.image.resize_with_crop_or_pad(inputs, 572, 572)
def _normalize_labels(self, labels):
"""Normalize labels"""
labels = tf.expand_dims(tf.cast(labels, tf.float32), -1)
labels = tf.divide(labels, 255)
# Resize to match output size
labels = tf.image.resize(labels, (388, 388))
labels = tf.image.resize_with_crop_or_pad(labels, 572, 572)
cond = tf.less(labels, 0.5 * tf.ones(tf.shape(input=labels)))
labels = tf.where(cond, tf.zeros(tf.shape(input=labels)), tf.ones(tf.shape(input=labels)))
return tf.one_hot(tf.squeeze(tf.cast(labels, tf.int32)), 2)
@tf.function
def _preproc_samples(self, inputs, labels, augment=True):
"""Preprocess samples and perform random augmentations"""
inputs = self._normalize_inputs(inputs)
labels = self._normalize_labels(labels)
if self._augment and augment:
# Horizontal flip
h_flip = tf.random.uniform([]) > 0.5
inputs = tf.cond(pred=h_flip, true_fn=lambda: tf.image.flip_left_right(inputs), false_fn=lambda: inputs)
labels = tf.cond(pred=h_flip, true_fn=lambda: tf.image.flip_left_right(labels), false_fn=lambda: labels)
# Vertical flip
v_flip = tf.random.uniform([]) > 0.5
inputs = tf.cond(pred=v_flip, true_fn=lambda: tf.image.flip_up_down(inputs), false_fn=lambda: inputs)
labels = tf.cond(pred=v_flip, true_fn=lambda: tf.image.flip_up_down(labels), false_fn=lambda: labels)
# Prepare for batched transforms
inputs = tf.expand_dims(inputs, 0)
labels = tf.expand_dims(labels, 0)
# Random crop and resize
left = tf.random.uniform([]) * 0.3
right = 1 - tf.random.uniform([]) * 0.3
top = tf.random.uniform([]) * 0.3
bottom = 1 - tf.random.uniform([]) * 0.3
inputs = tf.image.crop_and_resize(inputs, [[top, left, bottom, right]], [0], (572, 572))
labels = tf.image.crop_and_resize(labels, [[top, left, bottom, right]], [0], (572, 572))
# Gray value variations
# Adjust brightness and keep values in range
inputs = tf.image.random_brightness(inputs, max_delta=0.2)
inputs = tf.clip_by_value(inputs, clip_value_min=-1, clip_value_max=1)
inputs = tf.squeeze(inputs, 0)
labels = tf.squeeze(labels, 0)
# Bring back labels to network's output size and remove interpolation artifacts
labels = tf.image.resize_with_crop_or_pad(labels, target_width=388, target_height=388)
cond = tf.less(labels, 0.5 * tf.ones(tf.shape(input=labels)))
labels = tf.where(cond, tf.zeros(tf.shape(input=labels)), tf.ones(tf.shape(input=labels)))
return tf.cast(inputs, self.precision), labels
@tf.function
def _preproc_eval_samples(self, inputs, labels):
"""Preprocess samples and perform random augmentations"""
inputs = self._normalize_inputs(inputs)
labels = self._normalize_labels(labels)
# Bring back labels to network's output size and remove interpolation artifacts
labels = tf.image.resize_with_crop_or_pad(labels, target_width=388, target_height=388)
cond = tf.less(labels, 0.5 * tf.ones(tf.shape(input=labels)))
labels = tf.where(cond, tf.zeros(tf.shape(input=labels)), tf.ones(tf.shape(input=labels)))
return tf.cast(inputs, self.precision), labels
@tf.function
def _preproc_test_samples(self, inputs):
inputs = self._normalize_inputs(inputs)
return tf.cast(inputs, self.precision)
def train_fn(self, drop_remainder=False):
"""Input function for training"""
dataset = tf.data.Dataset.from_tensor_slices(
(self._train_images, self._train_masks))
dataset = dataset.shard(self._num_gpus, self._gpu_id)
dataset = dataset.repeat()
dataset = dataset.shuffle(self._batch_size * 3)
dataset = dataset.map(self._preproc_samples,
num_parallel_calls=multiprocessing.cpu_count()//self._num_gpus)
dataset = dataset.batch(self._batch_size, drop_remainder=drop_remainder)
dataset = dataset.prefetch(self._batch_size)
return dataset
def eval_fn(self, count, drop_remainder=False):
"""Input function for validation"""
dataset = tf.data.Dataset.from_tensor_slices(
(self._val_images, self._val_masks))
dataset = dataset.repeat(count=count)
dataset = dataset.map(self._preproc_eval_samples,
num_parallel_calls=multiprocessing.cpu_count())
dataset = dataset.batch(self._batch_size, drop_remainder=drop_remainder)
dataset = dataset.prefetch(self._batch_size)
return dataset
def test_fn(self, count, drop_remainder=False):
"""Input function for testing"""
dataset = tf.data.Dataset.from_tensor_slices(
self._test_images)
dataset = dataset.repeat(count=count)
dataset = dataset.map(self._preproc_test_samples)
dataset = dataset.batch(self._batch_size, drop_remainder=drop_remainder)
dataset = dataset.prefetch(self._batch_size)
return dataset
def synth_fn(self):
"""Synthetic data function for testing"""
inputs = tf.random.truncated_normal((572, 572, 1), dtype=tf.float32, mean=127.5, stddev=1, seed=self._seed,
name='synth_inputs')
masks = tf.random.truncated_normal((388, 388, 2), dtype=tf.float32, mean=0.01, stddev=0.1, seed=self._seed,
name='synth_masks')
dataset = tf.data.Dataset.from_tensors((inputs, masks))
dataset = dataset.cache()
dataset = dataset.repeat()
dataset = dataset.batch(self._batch_size)
dataset = dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
return dataset
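# Hedged, illustrative usage sketch (not part of the original module). It
# assumes `data_dir` already contains the ISBI 2012 files fetched by
# download_dataset.py (train-volume.tif, train-labels.tif, test-volume.tif).
def _example_dataset_usage(data_dir='./data'):
    dataset = Dataset(data_dir=data_dir, batch_size=2, fold=0, augment=True)
    for images, labels in dataset.train_fn().take(1):
        # images: [2, 572, 572, 1], labels: [2, 388, 388, 2]
        return images.shape, labels.shape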
| DeepLearningExamples-master | TensorFlow2/Segmentation/UNet_Medical/data_loading/data_loader.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from time import time
import numpy as np
from PIL import Image
import horovod.tensorflow as hvd
import tensorflow as tf
from runtime.losses import partial_losses
from runtime.parse_results import process_performance_stats
from model.tf_trt import export_model, TFTRTModel
def train(params, model, dataset, logger):
np.random.seed(params.seed)
tf.random.set_seed(params.seed)
max_steps = params.max_steps // hvd.size()
optimizer = tf.keras.optimizers.Adam(learning_rate=params.learning_rate)
if params.use_amp:
optimizer = tf.keras.mixed_precision.LossScaleOptimizer(optimizer, dynamic=True)
ce_loss = tf.keras.metrics.Mean(name='ce_loss')
f1_loss = tf.keras.metrics.Mean(name='dice_loss')
checkpoint = tf.train.Checkpoint(optimizer=optimizer, model=model)
if params.resume_training and params.model_dir:
checkpoint.restore(tf.train.latest_checkpoint(params.model_dir))
@tf.function
def train_step(features, labels, warmup_batch=False):
with tf.GradientTape() as tape:
output_map = model(features)
crossentropy_loss, dice_loss = partial_losses(output_map, labels)
added_losses = tf.add(crossentropy_loss, dice_loss, name="total_loss_ref")
loss = added_losses + params.weight_decay * tf.add_n(
[tf.nn.l2_loss(v) for v in model.trainable_variables
if 'batch_normalization' not in v.name])
if params.use_amp:
loss = optimizer.get_scaled_loss(loss)
tape = hvd.DistributedGradientTape(tape)
gradients = tape.gradient(loss, model.trainable_variables)
if params.use_amp:
gradients = optimizer.get_unscaled_gradients(gradients)
optimizer.apply_gradients(zip(gradients, model.trainable_variables))
# Note: broadcast should be done after the first gradient step to ensure optimizer
# initialization.
if warmup_batch:
hvd.broadcast_variables(model.variables, root_rank=0)
hvd.broadcast_variables(optimizer.variables(), root_rank=0)
ce_loss(crossentropy_loss)
f1_loss(dice_loss)
return loss
if params.benchmark:
assert max_steps * hvd.size() > params.warmup_steps, \
"max_steps value has to be greater than warmup_steps"
timestamps = []
for iteration, (images, labels) in enumerate(dataset.train_fn(drop_remainder=True)):
loss = train_step(images, labels, warmup_batch=iteration == 0).numpy()
if iteration > params.warmup_steps:
timestamps.append(time())
if iteration >= max_steps * hvd.size():
break
if hvd.rank() == 0:
deltas = np.array([timestamps[i + 1] - timestamps[i] for i in range(len(timestamps) - 1)])
stats = process_performance_stats(deltas, hvd.size() * params.batch_size, mode="train")
logger.log(step=(), data=stats)
else:
for iteration, (images, labels) in enumerate(dataset.train_fn()):
train_step(images, labels, warmup_batch=iteration == 0)
if hvd.rank() == 0:
if iteration % params.log_every == 0:
logger.log(step=(iteration, max_steps),
data={"train_ce_loss": float(ce_loss.result()),
"train_dice_loss": float(f1_loss.result()),
"train_total_loss": float(f1_loss.result() + ce_loss.result())})
if (params.evaluate_every > 0) and (iteration % params.evaluate_every == 0):
evaluate(params, model, dataset, logger, restore_checkpoint=False)
f1_loss.reset_states()
ce_loss.reset_states()
if iteration >= max_steps:
break
if hvd.rank() == 0:
checkpoint.save(file_prefix=os.path.join(params.model_dir, "checkpoint"))
if params.use_savedmodel:
prec = 'amp' if params.use_amp else 'fp32'
model.save(os.path.join(params.model_dir, f'saved_model_{prec}'))
if params.use_tftrt:
export_model(params.model_dir, prec, os.path.join(params.model_dir, f'tf-trt_model_{prec}'))
logger.flush()
def evaluate(params, model, dataset, logger, restore_checkpoint=True):
if params.fold is None:
print("No fold specified for evaluation. Please use --fold [int] to select a fold.")
ce_loss = tf.keras.metrics.Mean(name='ce_loss')
f1_loss = tf.keras.metrics.Mean(name='dice_loss')
if params.model_dir and restore_checkpoint:
prec = 'amp' if params.use_amp else 'fp32'
if params.use_savedmodel:
model = tf.keras.models.load_model(os.path.join(params.model_dir, f'saved_model_{prec}'))
elif params.use_tftrt:
model = TFTRTModel(model_dir=params.model_dir, precision=prec)
else:
checkpoint = tf.train.Checkpoint(model=model)
checkpoint.restore(tf.train.latest_checkpoint(params.model_dir)).expect_partial()
def validation_step(features, labels):
output_map = model(features, training=False)
crossentropy_loss, dice_loss = partial_losses(output_map, labels)
ce_loss(crossentropy_loss)
f1_loss(dice_loss)
for iteration, (images, labels) in enumerate(dataset.eval_fn(count=1)):
validation_step(images, labels)
if iteration >= dataset.eval_size // params.batch_size:
break
if dataset.eval_size > 0:
logger.log(step=(),
data={"eval_ce_loss": float(ce_loss.result()),
"eval_dice_loss": float(f1_loss.result()),
"eval_total_loss": float(f1_loss.result() + ce_loss.result()),
"eval_dice_score": 1.0 - float(f1_loss.result())})
logger.flush()
def predict(params, model, dataset, logger):
prec = 'amp' if params.use_amp else 'fp32'
if params.model_dir:
if params.use_savedmodel:
model = tf.keras.models.load_model(os.path.join(params.model_dir, f'saved_model_{prec}'))
elif params.use_tftrt:
model = TFTRTModel(model_dir=params.model_dir, precision=prec)
else:
checkpoint = tf.train.Checkpoint(model=model)
checkpoint.restore(tf.train.latest_checkpoint(params.model_dir)).expect_partial()
@tf.function
def prediction_step(features):
return tf.nn.softmax(model(features, training=False), axis=-1)
if params.benchmark:
assert params.max_steps > params.warmup_steps, \
"max_steps value has to be greater than warmup_steps"
timestamps = []
for iteration, images in enumerate(dataset.test_fn(count=None, drop_remainder=True)):
prediction_step(images)
if iteration > params.warmup_steps:
timestamps.append(time())
if iteration >= params.max_steps:
break
deltas = np.array([timestamps[i + 1] - timestamps[i] for i in range(len(timestamps) - 1)])
stats = process_performance_stats(deltas, params.batch_size, mode="test")
logger.log(step=(), data=stats)
else:
predictions = np.concatenate([prediction_step(images).numpy()
for images in dataset.test_fn(count=1)], axis=0)
binary_masks = [np.argmax(p, axis=-1).astype(np.uint8) * 255 for p in predictions]
multipage_tif = [Image.fromarray(mask).resize(size=(512, 512), resample=Image.BILINEAR)
for mask in binary_masks]
output_dir = os.path.join(params.model_dir, 'predictions')
if not os.path.exists(output_dir):
os.makedirs(output_dir)
multipage_tif[0].save(os.path.join(output_dir, 'test-masks.tif'),
compression="tiff_deflate",
save_all=True,
append_images=multipage_tif[1:])
print("Predictions saved at {}".format(output_dir))
logger.flush()
| DeepLearningExamples-master | TensorFlow2/Segmentation/UNet_Medical/runtime/run.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
import argparse
def process_performance_stats(timestamps, batch_size, mode):
""" Get confidence intervals
:param timestamps: Collection of timestamps
:param batch_size: Number of samples per batch
:param mode: Estimator's execution mode
:return: Stats
"""
timestamps_ms = 1000 * timestamps
throughput_imgps = (1000.0 * batch_size / timestamps_ms).mean()
stats = {f"throughput_{mode}": throughput_imgps,
f"latency_{mode}_mean": timestamps_ms.mean()}
for level in [90, 95, 99]:
stats.update({f"latency_{mode}_{level}": np.percentile(timestamps_ms, level)})
return stats
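# Hedged, illustrative usage sketch (not part of the original module); the
# timing values are made up.
def _example_process_performance_stats():
    deltas = np.array([0.050, 0.051, 0.049, 0.052])  # seconds per batch
    stats = process_performance_stats(deltas, batch_size=8, mode="train")
    # stats contains throughput_train, latency_train_mean and the
    # latency_train_{90,95,99} percentiles (images/s and milliseconds).
    return stats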
def parse_convergence_results(path, environment):
dice_scores = []
ce_scores = []
logfiles = [f for f in os.listdir(path) if "log" in f and environment in f]
if not logfiles:
raise FileNotFoundError("No logfile found at {}".format(path))
for logfile in logfiles:
with open(os.path.join(path, logfile), "r") as f:
content = f.readlines()[-1]
if "eval_dice_score" not in content:
print("Evaluation score not found. The file", logfile, "might be corrupted.")
continue
dice_scores.append(float([val for val in content.split(" ")
if "eval_dice_score" in val][0].split()[-1]))
ce_scores.append(float([val for val in content.split(" ")
if "eval_ce_loss" in val][0].split()[-1]))
if dice_scores:
print("Evaluation dice score:", sum(dice_scores) / len(dice_scores))
print("Evaluation cross-entropy loss:", sum(ce_scores) / len(ce_scores))
else:
print("All logfiles were corrupted, no loss was obtained.")
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="UNet-medical-utils")
parser.add_argument('--exec_mode',
choices=['convergence', 'benchmark'],
type=str,
help="""Which execution mode to run the model into""")
parser.add_argument('--model_dir',
type=str,
required=True)
parser.add_argument('--env',
choices=['FP32_1GPU', 'FP32_8GPU', 'TF-AMP_1GPU', 'TF-AMP_8GPU'],
type=str,
required=True)
args = parser.parse_args()
if args.exec_mode == 'convergence':
parse_convergence_results(path=args.model_dir, environment=args.env)
elif args.exec_mode == 'benchmark':
pass
print()
| DeepLearningExamples-master | TensorFlow2/Segmentation/UNet_Medical/runtime/parse_results.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command line argument parsing"""
import argparse
from munch import Munch
PARSER = argparse.ArgumentParser(description="UNet-medical")
PARSER.add_argument('--exec_mode',
choices=['train', 'train_and_predict', 'predict', 'evaluate', 'train_and_evaluate'],
type=str,
default='train_and_evaluate',
help="""Execution mode of running the model""")
PARSER.add_argument('--model_dir',
type=str,
default='/results',
help="""Output directory for information related to the model""")
PARSER.add_argument('--data_dir',
type=str,
required=True,
help="""Input directory containing the dataset for training the model""")
PARSER.add_argument('--log_dir',
type=str,
default=None,
help="""Output directory for training logs""")
PARSER.add_argument('--batch_size',
type=int,
default=1,
help="""Size of each minibatch per GPU""")
PARSER.add_argument('--learning_rate',
type=float,
default=0.0001,
help="""Learning rate coefficient for AdamOptimizer""")
PARSER.add_argument('--fold',
type=int,
default=None,
help="""Chosen fold for cross-validation. Use None to disable cross-validation""")
PARSER.add_argument('--max_steps',
type=int,
default=1000,
help="""Maximum number of steps (batches) used for training""")
PARSER.add_argument('--weight_decay',
type=float,
default=0.0005,
help="""Weight decay coefficient""")
PARSER.add_argument('--log_every',
type=int,
default=100,
help="""Log performance every n steps""")
PARSER.add_argument('--evaluate_every',
type=int,
default=0,
help="""Log performance every n steps""")
PARSER.add_argument('--warmup_steps',
type=int,
default=200,
help="""Number of warmup steps""")
PARSER.add_argument('--seed',
type=int,
default=0,
help="""Random seed""")
PARSER.add_argument('--augment', dest='augment', action='store_true',
help="""Perform data augmentation during training""")
PARSER.add_argument('--no-augment', dest='augment', action='store_false')
PARSER.set_defaults(augment=False)
PARSER.add_argument('--benchmark', dest='benchmark', action='store_true',
help="""Collect performance metrics during training""")
PARSER.add_argument('--no-benchmark', dest='benchmark', action='store_false')
PARSER.set_defaults(benchmark=False)
PARSER.add_argument('--use_amp', '--amp', dest='use_amp', action='store_true',
help="""Train using TF-AMP""")
PARSER.add_argument('--use_xla', '--xla', dest='use_xla', action='store_true',
help="""Train using XLA""")
PARSER.add_argument('--use_tftrt', dest='use_tftrt', action='store_true',
help="""Use TF-TRT""")
PARSER.add_argument('--use_savedmodel', dest='use_savedmodel', action='store_true',
help="""Use SavedModel""")
PARSER.add_argument('--resume_training', dest='resume_training', action='store_true',
help="""Resume training from a checkpoint""")
def parse_args(flags):
return Munch({
'exec_mode': flags.exec_mode,
'model_dir': flags.model_dir,
'data_dir': flags.data_dir,
'log_dir': flags.log_dir,
'batch_size': flags.batch_size,
'learning_rate': flags.learning_rate,
'fold': flags.fold,
'max_steps': flags.max_steps,
'weight_decay': flags.weight_decay,
'log_every': flags.log_every,
'evaluate_every': flags.evaluate_every,
'warmup_steps': flags.warmup_steps,
'augment': flags.augment,
'benchmark': flags.benchmark,
'seed': flags.seed,
'use_amp': flags.use_amp,
'use_tftrt': flags.use_tftrt,
'use_savedmodel': flags.use_savedmodel,
'use_xla': flags.use_xla,
'resume_training': flags.resume_training,
})
| DeepLearningExamples-master | TensorFlow2/Segmentation/UNet_Medical/runtime/arguments.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import multiprocessing
import numpy as np
import tensorflow as tf
import horovod.tensorflow as hvd
import dllogger as logger
from dllogger import StdOutBackend, Verbosity, JSONStreamBackend
def set_flags(params):
os.environ['CUDA_CACHE_DISABLE'] = '1'
os.environ['HOROVOD_GPU_ALLREDUCE'] = 'NCCL'
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
os.environ['TF_GPU_THREAD_MODE'] = 'gpu_private'
os.environ['TF_USE_CUDNN_BATCHNORM_SPATIAL_PERSISTENT'] = '0'
os.environ['TF_ADJUST_HUE_FUSED'] = '1'
os.environ['TF_ADJUST_SATURATION_FUSED'] = '1'
os.environ['TF_ENABLE_WINOGRAD_NONFUSED'] = '1'
os.environ['TF_SYNC_ON_FINISH'] = '0'
os.environ['TF_AUTOTUNE_THRESHOLD'] = '2'
os.environ['TF_ENABLE_AUTO_MIXED_PRECISION'] = '0'
np.random.seed(params.seed)
tf.random.set_seed(params.seed)
if params.use_xla:
tf.config.optimizer.set_jit(True)
gpus = tf.config.experimental.list_physical_devices('GPU')
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
if gpus:
tf.config.experimental.set_visible_devices(gpus[hvd.local_rank()], 'GPU')
tf.config.optimizer.set_experimental_options({'remapping': False})
tf.config.threading.set_intra_op_parallelism_threads(1)
tf.config.threading.set_inter_op_parallelism_threads(max(2, (multiprocessing.cpu_count() // hvd.size()) - 2))
if params.use_amp:
tf.keras.mixed_precision.experimental.set_policy('mixed_float16')
def prepare_model_dir(params):
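    # Only rank 0 (and only outside benchmark mode) gets a model directory; its contents are cleared when training from scratch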
# model_dir = os.path.join(params.model_dir, "model_checkpoint")
model_dir = params.model_dir if (hvd.rank() == 0 and not params.benchmark) else None
if model_dir is not None:
os.makedirs(model_dir, exist_ok=True)
if ('train' in params.exec_mode) and (not params.resume_training):
os.system('rm -rf {}/*'.format(model_dir))
return model_dir
def get_logger(params):
backends = []
if hvd.rank() == 0:
backends += [StdOutBackend(Verbosity.VERBOSE)]
if params.log_dir:
backends += [JSONStreamBackend(Verbosity.VERBOSE, params.log_dir)]
logger.init(backends=backends)
logger.metadata("eval_dice_score", {"unit": None})
logger.metadata("throughput_test", {"unit": "images/s"})
logger.metadata("throughput_train", {"unit": "images/s"})
return logger
| DeepLearningExamples-master | TensorFlow2/Segmentation/UNet_Medical/runtime/setup.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Training and evaluation losses"""
import tensorflow as tf
# Class Dice coefficient averaged over batch
def dice_coef(predict, target, axis=1, eps=1e-6):
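    # Soft Dice over the given axis: (2*sum(p*t) + eps) / (sum(p^2) + sum(t^2) + eps), averaged over the batch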
intersection = tf.reduce_sum(input_tensor=predict * target, axis=axis)
union = tf.reduce_sum(input_tensor=predict * predict + target * target, axis=axis)
dice = (2. * intersection + eps) / (union + eps)
return tf.reduce_mean(input_tensor=dice, axis=0) # average over batch
def partial_losses(predict, target):
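    # Flatten logits/labels to (batch, pixels, classes) and return (cross-entropy loss, soft Dice loss)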
n_classes = predict.shape[-1]
flat_logits = tf.reshape(tf.cast(predict, tf.float32),
[tf.shape(input=predict)[0], -1, n_classes])
flat_labels = tf.reshape(target,
[tf.shape(input=predict)[0], -1, n_classes])
crossentropy_loss = tf.reduce_mean(input_tensor=tf.nn.softmax_cross_entropy_with_logits(logits=flat_logits,
labels=flat_labels),
name='cross_loss_ref')
dice_loss = tf.reduce_mean(input_tensor=1 - dice_coef(tf.keras.activations.softmax(flat_logits, axis=-1),
flat_labels), name='dice_loss_ref')
return crossentropy_loss, dice_loss
| DeepLearningExamples-master | TensorFlow2/Segmentation/UNet_Medical/runtime/losses.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Model construction utils
This module provides a convenient way to create different topologies
based around UNet.
"""
import tensorflow as tf
from model.layers import InputBlock, DownsampleBlock, BottleneckBlock, UpsampleBlock, OutputBlock
class Unet(tf.keras.Model):
""" U-Net: Convolutional Networks for Biomedical Image Segmentation
Source:
https://arxiv.org/pdf/1505.04597
"""
def __init__(self):
super().__init__(self)
self.input_block = InputBlock(filters=64)
self.bottleneck = BottleneckBlock(1024)
self.output_block = OutputBlock(filters=64, n_classes=2)
self.down_blocks = [DownsampleBlock(filters, idx)
for idx, filters in enumerate([128, 256, 512])]
self.up_blocks = [UpsampleBlock(filters, idx)
for idx, filters in enumerate([512, 256, 128])]
def call(self, x, training=True):
skip_connections = []
out, residual = self.input_block(x)
skip_connections.append(residual)
for down_block in self.down_blocks:
out, residual = down_block(out)
skip_connections.append(residual)
out = self.bottleneck(out, training)
for up_block in self.up_blocks:
out = up_block(out, skip_connections.pop())
out = self.output_block(out, skip_connections.pop())
return out
| DeepLearningExamples-master | TensorFlow2/Segmentation/UNet_Medical/model/unet.py |
import os
from operator import itemgetter
import tensorflow as tf
from tensorflow.python.compiler.tensorrt import trt_convert as trt
from tensorflow.compat.v1.saved_model import tag_constants, signature_constants
def export_model(model_dir, prec, tf_trt_model_dir=None):
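    # Load the exported Keras SavedModel and convert it to a TF-TRT SavedModel at FP32 or FP16 precision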
model = tf.keras.models.load_model(os.path.join(model_dir, f'saved_model_{prec}'))
input_shape = [1, 572, 572, 1]
dummy_input = tf.constant(tf.zeros(input_shape, dtype=tf.float32 if prec=="fp32" else tf.float16))
_ = model(dummy_input, training=False)
trt_prec = trt.TrtPrecisionMode.FP32 if prec == "fp32" else trt.TrtPrecisionMode.FP16
converter = trt.TrtGraphConverterV2(
input_saved_model_dir=os.path.join(model_dir, f'saved_model_{prec}'),
conversion_params=trt.TrtConversionParams(precision_mode=trt_prec),
)
converter.convert()
tf_trt_model_dir = tf_trt_model_dir or f'/tmp/tf-trt_model_{prec}'
converter.save(tf_trt_model_dir)
print(f"TF-TRT model saved at {tf_trt_model_dir}")
def _force_gpu_resync(func):
p = tf.constant(0.) # Create small tensor to force GPU resync
def wrapper(*args, **kwargs):
rslt = func(*args, **kwargs)
(p + 1.).numpy() # Sync the GPU
return rslt
return wrapper
class TFTRTModel:
def __init__(self, model_dir, precision, output_tensor_name="output_1"):
temp_tftrt_dir = f"/tmp/tf-trt_model_{precision}"
export_model(model_dir, precision, temp_tftrt_dir)
saved_model_loaded = tf.saved_model.load(temp_tftrt_dir, tags=[tag_constants.SERVING])
print(f"TF-TRT model loaded from {temp_tftrt_dir}")
self.graph_func = saved_model_loaded.signatures[signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY]
self.output_tensor_name = output_tensor_name
self.precision = tf.float16 if precision == "amp" else tf.float32
def __call__(self, x, **kwargs):
return self.infer_step(x)
#@_force_gpu_resync
@tf.function(jit_compile=False)
def infer_step(self, batch_x):
if batch_x.dtype != self.precision:
batch_x = tf.cast(batch_x, self.precision)
output = self.graph_func(batch_x)
return itemgetter(self.output_tensor_name)(output)
| DeepLearningExamples-master | TensorFlow2/Segmentation/UNet_Medical/model/tf_trt.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -*- coding: utf-8 -*-
""" Contains a set of utilities that allow building the UNet model
"""
import tensorflow as tf
def _crop_and_concat(inputs, residual_input):
""" Perform a central crop of ``residual_input`` and concatenate to ``inputs``
Args:
inputs (tf.Tensor): Tensor with input
residual_input (tf.Tensor): Residual input
Return:
Concatenated tf.Tensor with the size of ``inputs``
"""
factor = inputs.shape[1] / residual_input.shape[1]
return tf.concat([inputs, tf.image.central_crop(residual_input, factor)], axis=-1)
class InputBlock(tf.keras.Model):
def __init__(self, filters):
""" UNet input block
        Perform two unpadded convolutions with a specified number of filters and downsample
        through max-pooling.
Args:
filters (int): Number of filters in convolution
"""
super().__init__(self)
with tf.name_scope('input_block'):
self.conv1 = tf.keras.layers.Conv2D(filters=filters,
kernel_size=(3, 3),
activation=tf.nn.relu)
self.conv2 = tf.keras.layers.Conv2D(filters=filters,
kernel_size=(3, 3),
activation=tf.nn.relu)
self.maxpool = tf.keras.layers.MaxPool2D(pool_size=(2, 2), strides=2)
def call(self, inputs):
out = self.conv1(inputs)
out = self.conv2(out)
mp = self.maxpool(out)
return mp, out
class DownsampleBlock(tf.keras.Model):
def __init__(self, filters, idx):
""" UNet downsample block
Perform two unpadded convolutions with a specified number of filters and downsample
through max-pooling
Args:
filters (int): Number of filters in convolution
idx (int): Index of block
Return:
Tuple of convolved ``inputs`` after and before downsampling
"""
super().__init__(self)
with tf.name_scope('downsample_block_{}'.format(idx)):
self.conv1 = tf.keras.layers.Conv2D(filters=filters,
kernel_size=(3, 3),
activation=tf.nn.relu)
self.conv2 = tf.keras.layers.Conv2D(filters=filters,
kernel_size=(3, 3),
activation=tf.nn.relu)
self.maxpool = tf.keras.layers.MaxPool2D(pool_size=(2, 2), strides=2)
def call(self, inputs):
out = self.conv1(inputs)
out = self.conv2(out)
mp = self.maxpool(out)
return mp, out
class BottleneckBlock(tf.keras.Model):
def __init__(self, filters):
""" UNet central block
Perform two unpadded convolutions with a specified number of filters and upsample
including dropout before upsampling for training
Args:
filters (int): Number of filters in convolution
"""
super().__init__(self)
with tf.name_scope('bottleneck_block'):
self.conv1 = tf.keras.layers.Conv2D(filters=filters,
kernel_size=(3, 3),
activation=tf.nn.relu)
self.conv2 = tf.keras.layers.Conv2D(filters=filters,
kernel_size=(3, 3),
activation=tf.nn.relu)
self.dropout = tf.keras.layers.Dropout(rate=0.5)
self.conv_transpose = tf.keras.layers.Conv2DTranspose(filters=filters // 2,
kernel_size=(3, 3),
strides=(2, 2),
padding='same',
activation=tf.nn.relu)
def call(self, inputs, training):
out = self.conv1(inputs)
out = self.conv2(out)
out = self.dropout(out, training=training)
out = self.conv_transpose(out)
return out
class UpsampleBlock(tf.keras.Model):
def __init__(self, filters, idx):
""" UNet upsample block
Perform two unpadded convolutions with a specified number of filters and upsample
Args:
filters (int): Number of filters in convolution
idx (int): Index of block
"""
super().__init__(self)
with tf.name_scope('upsample_block_{}'.format(idx)):
self.conv1 = tf.keras.layers.Conv2D(filters=filters,
kernel_size=(3, 3),
activation=tf.nn.relu)
self.conv2 = tf.keras.layers.Conv2D(filters=filters,
kernel_size=(3, 3),
activation=tf.nn.relu)
self.conv_transpose = tf.keras.layers.Conv2DTranspose(filters=filters // 2,
kernel_size=(3, 3),
strides=(2, 2),
padding='same',
activation=tf.nn.relu)
def call(self, inputs, residual_input):
out = _crop_and_concat(inputs, residual_input)
out = self.conv1(out)
out = self.conv2(out)
out = self.conv_transpose(out)
return out
class OutputBlock(tf.keras.Model):
def __init__(self, filters, n_classes):
""" UNet output block
Perform three unpadded convolutions, the last one with the same number
of channels as classes we want to classify
Args:
filters (int): Number of filters in convolution
n_classes (int): Number of output classes
"""
super().__init__(self)
with tf.name_scope('output_block'):
self.conv1 = tf.keras.layers.Conv2D(filters=filters,
kernel_size=(3, 3),
activation=tf.nn.relu)
self.conv2 = tf.keras.layers.Conv2D(filters=filters,
kernel_size=(3, 3),
activation=tf.nn.relu)
self.conv3 = tf.keras.layers.Conv2D(filters=n_classes,
kernel_size=(1, 1),
activation=None)
def call(self, inputs, residual_input):
out = _crop_and_concat(inputs, residual_input)
out = self.conv1(out)
out = self.conv2(out)
out = self.conv3(out)
return out
| DeepLearningExamples-master | TensorFlow2/Segmentation/UNet_Medical/model/layers.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser
from pathlib import Path
from data_preprocessing.preprocessor import Preprocessor
from runtime.utils import get_task_code
parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument("--data", type=Path, default=Path("/data"), help="Path to data directory")
parser.add_argument("--results", type=Path, default=Path("/data"), help="Path for saving results directory")
parser.add_argument("-f", "--force", action="store_true", help="Force remove colliding files")
parser.add_argument(
"--exec_mode",
type=str,
default="training",
choices=["training", "val", "test"],
help="Mode for data preprocessing",
)
parser.add_argument("--task", type=str, help="Number of task to be run. MSD uses numbers 01-10")
parser.add_argument("--dim", type=int, default=3, choices=[2, 3], help="Data dimension to prepare")
parser.add_argument("--n_jobs", type=int, default=-1, help="Number of parallel jobs for data preprocessing")
if __name__ == "__main__":
args = parser.parse_args()
start = time.time()
Preprocessor(args).run()
end = time.time()
print(f"Preprocessing time: {(end - start):.2f}")
| DeepLearningExamples-master | TensorFlow2/Segmentation/nnUNet/preprocess.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser
from subprocess import call
from data_preprocessing.configs import task
parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument("--task", type=str, required=True, help="Task to download")
parser.add_argument("--results", type=str, default="/data", help="Directory for data storage")
if __name__ == "__main__":
args = parser.parse_args()
tar_file = task[args.task] + ".tar"
file_path = os.path.join(args.results, tar_file)
call(f"aws s3 cp s3://msd-for-monai-eu/{tar_file} --no-sign-request {args.results}", shell=True)
call(f"tar -xf {file_path} -C {args.results}", shell=True)
call(f"rm -rf {file_path}", shell=True)
| DeepLearningExamples-master | TensorFlow2/Segmentation/nnUNet/download.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import os
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser
import nibabel
import numpy as np
from tqdm import tqdm
parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument("--preds", type=str, required=True, help="Path to predictions")
parser.add_argument("--lbls", type=str, required=True, help="Path to labels")
def get_stats(pred, targ, class_idx):
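    # Per-class confusion counts: true positives, false negatives and false positives for class_idx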
tp_ = np.logical_and(pred == class_idx, targ == class_idx).sum()
fn_ = np.logical_and(pred != class_idx, targ == class_idx).sum()
fp_ = np.logical_and(pred == class_idx, targ != class_idx).sum()
return tp_, fn_, fp_
if __name__ == "__main__":
args = parser.parse_args()
y_pred = sorted(glob.glob(os.path.join(args.preds, "*.npy")))
y_true = [os.path.join(args.lbls, os.path.basename(pred).replace("npy", "nii.gz")) for pred in y_pred]
assert len(y_pred) > 0
n_class = np.load(y_pred[0]).shape[0] - 1
dice = [[] for _ in range(n_class)]
for pr, lb in tqdm(zip(y_pred, y_true), total=len(y_pred)):
prd = np.transpose(np.argmax(np.load(pr), axis=0), (2, 1, 0))
lbl = nibabel.load(lb).get_fdata().astype(np.uint8)
for i in range(1, n_class + 1):
counts = np.count_nonzero(lbl == i) + np.count_nonzero(prd == i)
if counts == 0: # no foreground class
dice[i - 1].append(1)
else:
tp, fn, fp = get_stats(prd, lbl, i)
denum = 2 * tp + fp + fn
dice[i - 1].append(2 * tp / denum if denum != 0 else 0)
dice_score = np.mean(np.array(dice), axis=-1)
    dice_cls = " ".join([f"L{i+1} {round(score, 4)}" for i, score in enumerate(dice_score)])
print(f"mean dice: {round(np.mean(dice_score), 4)} - {dice_cls}") | DeepLearningExamples-master | TensorFlow2/Segmentation/nnUNet/evaluate.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from data_loading.data_module import DataModule
from models.nn_unet import NNUnet
from runtime.args import get_main_args
from runtime.checkpoint import load_model
from runtime.logging import get_logger
from runtime.run import evaluate, export_model, predict, train
from runtime.utils import hvd_init, set_seed, set_tf_flags
def main(args):
hvd_init()
if args.seed is not None:
set_seed(args.seed)
set_tf_flags(args)
data = DataModule(args)
data.setup()
logger = get_logger(args)
logger.log_hyperparams(vars(args))
logger.log_metadata("dice_score", {"unit": None})
logger.log_metadata("eval_dice_nobg", {"unit": None})
logger.log_metadata("throughput_predict", {"unit": "images/s"})
logger.log_metadata("throughput_train", {"unit": "images/s"})
logger.log_metadata("latency_predict_mean", {"unit": "ms"})
logger.log_metadata("latency_train_mean", {"unit": "ms"})
if args.exec_mode == "train":
model = NNUnet(args)
train(args, model, data, logger)
elif args.exec_mode == "evaluate":
model = load_model(args)
evaluate(args, model, data, logger)
elif args.exec_mode == "predict":
model = NNUnet(args) if args.benchmark else load_model(args)
predict(args, model, data, logger)
elif args.exec_mode == "export":
# Export model
model = load_model(args)
export_model(args, model)
suffix = "amp" if args.amp else "fp32"
sm = f"{args.results}/saved_model_task_{args.task}_dim_{args.dim}_" + suffix
trt = f"{args.results}/trt_saved_model_task_{args.task}_dim_{args.dim}_" + suffix
args.saved_model_dir = sm if args.load_sm else trt
args.exec_mode = "evaluate" if args.validate else "predict"
# Run benchmarking
model = load_model(args)
data = DataModule(args)
data.setup()
if args.validate:
evaluate(args, model, data, logger)
else:
predict(args, model, data, logger)
else:
raise NotImplementedError
if __name__ == "__main__":
args = get_main_args()
main(args)
| DeepLearningExamples-master | TensorFlow2/Segmentation/nnUNet/main.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import horovod.tensorflow as hvd
from runtime.utils import get_config_file, is_main_process
from sklearn.model_selection import KFold
from data_loading.dali_loader import fetch_dali_loader
from data_loading.utils import get_path, get_split, get_test_fnames, load_data
class DataModule:
def __init__(self, args):
super().__init__()
self.args = args
self.train_imgs = []
self.train_lbls = []
self.val_imgs = []
self.val_lbls = []
self.test_imgs = []
self.kfold = KFold(n_splits=self.args.nfolds, shuffle=True, random_state=12345)
self.data_path = get_path(args)
configs = get_config_file(self.args)
self.patch_size = configs["patch_size"]
self.kwargs = {
"dim": self.args.dim,
"patch_size": self.patch_size,
"seed": self.args.seed,
"gpus": hvd.size(),
"num_workers": self.args.num_workers,
"oversampling": self.args.oversampling,
"benchmark": self.args.benchmark,
"nvol": self.args.nvol,
"bench_steps": self.args.bench_steps,
"meta": load_data(self.data_path, "*_meta.npy"),
}
def setup(self, stage=None):
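        # Build the k-fold train/val split; validation volumes are then sharded across Horovod ranks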
imgs = load_data(self.data_path, "*_x.npy")
lbls = load_data(self.data_path, "*_y.npy")
self.test_imgs, self.kwargs["meta"] = get_test_fnames(self.args, self.data_path, self.kwargs["meta"])
if self.args.exec_mode != "predict" or self.args.benchmark:
train_idx, val_idx = list(self.kfold.split(imgs))[self.args.fold]
self.train_imgs = get_split(imgs, train_idx)
self.train_lbls = get_split(lbls, train_idx)
self.val_imgs = get_split(imgs, val_idx)
self.val_lbls = get_split(lbls, val_idx)
if is_main_process():
ntrain, nval = len(self.train_imgs), len(self.val_imgs)
print(f"Number of examples: Train {ntrain} - Val {nval}")
# Shard the validation data
self.val_imgs = self.val_imgs[hvd.rank() :: hvd.size()]
self.val_lbls = self.val_lbls[hvd.rank() :: hvd.size()]
self.cached_val_loader = None
elif is_main_process():
print(f"Number of test examples: {len(self.test_imgs)}")
def train_dataset(self):
return fetch_dali_loader(
self.train_imgs,
self.train_lbls,
self.args.batch_size,
"train",
**self.kwargs,
)
def train_size(self):
return len(self.train_imgs)
def val_dataset(self):
if self.cached_val_loader is None:
self.cached_val_loader = fetch_dali_loader(self.val_imgs, self.val_lbls, 1, "eval", **self.kwargs)
return self.cached_val_loader
def val_size(self):
return len(self.val_imgs)
def test_dataset(self):
if self.kwargs["benchmark"]:
return fetch_dali_loader(
self.train_imgs,
self.train_lbls,
self.args.batch_size,
"test",
**self.kwargs,
)
return fetch_dali_loader(self.test_imgs, None, 1, "test", **self.kwargs)
def test_size(self):
return len(self.test_imgs)
def test_fname(self, idx):
return self.test_imgs[idx]
| DeepLearningExamples-master | TensorFlow2/Segmentation/nnUNet/data_loading/data_module.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import horovod.tensorflow as hvd
import numpy as np
import nvidia.dali.fn as fn
import nvidia.dali.ops as ops
import nvidia.dali.plugin.tf as dali_tf
import nvidia.dali.types as types
import tensorflow as tf
from nvidia.dali.pipeline import Pipeline
def get_numpy_reader(files, shard_id, num_shards, seed, shuffle):
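    # DALI Numpy reader sharded across GPUs; the last batch is padded so every shard yields the same number of iterations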
return ops.readers.Numpy(
seed=seed,
files=files,
device="cpu",
read_ahead=True,
shard_id=shard_id,
pad_last_batch=True,
num_shards=num_shards,
dont_use_mmap=True,
shuffle_after_epoch=shuffle,
)
def random_augmentation(probability, augmented, original):
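    # Per-sample coin flip: return the augmented tensor with the given probability, otherwise the original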
condition = fn.cast(fn.random.coin_flip(probability=probability), dtype=types.DALIDataType.BOOL)
neg_condition = condition ^ True
return condition * augmented + neg_condition * original
class GenericPipeline(Pipeline):
def __init__(
self,
batch_size,
num_threads,
shard_id,
seed,
num_gpus,
dim,
shuffle_input=True,
input_x_files=None,
input_y_files=None,
):
super().__init__(
batch_size=batch_size,
num_threads=num_threads,
device_id=hvd.rank(),
seed=seed,
)
if input_x_files is not None:
self.input_x = get_numpy_reader(
files=input_x_files,
shard_id=shard_id,
seed=seed,
num_shards=num_gpus,
shuffle=shuffle_input,
)
if input_y_files is not None:
self.input_y = get_numpy_reader(
files=input_y_files,
shard_id=shard_id,
seed=seed,
num_shards=num_gpus,
shuffle=shuffle_input,
)
self.dim = dim
self.internal_seed = seed
class TrainPipeline(GenericPipeline):
def __init__(self, imgs, lbls, oversampling, patch_size, batch_size_2d=None, **kwargs):
super().__init__(input_x_files=imgs, input_y_files=lbls, shuffle_input=True, **kwargs)
self.oversampling = oversampling
self.patch_size = patch_size
if self.dim == 2 and batch_size_2d is not None:
self.patch_size = [batch_size_2d] + self.patch_size
self.crop_shape = types.Constant(np.array(self.patch_size), dtype=types.INT64)
self.crop_shape_float = types.Constant(np.array(self.patch_size), dtype=types.FLOAT)
def load_data(self):
img, lbl = self.input_x(name="ReaderX"), self.input_y(name="ReaderY")
img, lbl = fn.reshape(img, layout="DHWC"), fn.reshape(lbl, layout="DHWC")
return img, lbl
@staticmethod
def slice_fn(img):
return fn.slice(img, 1, 3, axes=[0])
def biased_crop_fn(self, img, lbl):
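        # Crop a patch whose anchor is biased towards foreground objects, controlled by the oversampling probability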
roi_start, roi_end = fn.segmentation.random_object_bbox(
lbl,
format="start_end",
foreground_prob=self.oversampling,
k_largest=2,
device="cpu",
cache_objects=True,
)
anchor = fn.roi_random_crop(
lbl,
roi_start=roi_start,
roi_end=roi_end,
crop_shape=[*self.patch_size, 1],
)
anchor = fn.slice(anchor, 0, 3, axes=[0])
img, lbl = fn.slice(
[img, lbl],
anchor,
self.crop_shape,
axis_names="DHW",
out_of_bounds_policy="pad",
device="cpu",
)
img, lbl = img.gpu(), lbl.gpu()
return img, lbl
def zoom_fn(self, img, lbl):
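        # Random zoom: occasionally crop to a 70-100% window and resize back to the full patch size (cubic for image, nearest for label)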
scale = random_augmentation(0.15, fn.random.uniform(range=(0.7, 1.0)), 1.0)
d, h, w = [scale * x for x in self.patch_size]
if self.dim == 2:
d = self.patch_size[0]
img, lbl = fn.crop(img, crop_h=h, crop_w=w, crop_d=d), fn.crop(lbl, crop_h=h, crop_w=w, crop_d=d)
img = fn.resize(
img,
interp_type=types.DALIInterpType.INTERP_CUBIC,
size=self.crop_shape_float,
)
lbl = fn.resize(lbl, interp_type=types.DALIInterpType.INTERP_NN, size=self.crop_shape_float)
return img, lbl
def noise_fn(self, img):
img_noised = fn.noise.gaussian(img, stddev=fn.random.uniform(range=(0.0, 0.3)))
return random_augmentation(0.15, img_noised, img)
def blur_fn(self, img):
img_blurred = fn.gaussian_blur(img, sigma=fn.random.uniform(range=(0.5, 1.5)))
return random_augmentation(0.15, img_blurred, img)
def brightness_contrast_fn(self, img):
img_transformed = fn.brightness_contrast(
img, brightness=fn.random.uniform(range=(0.7, 1.3)), contrast=fn.random.uniform(range=(0.65, 1.5))
)
return random_augmentation(0.15, img_transformed, img)
def flips_fn(self, img, lbl):
kwargs = {
"horizontal": fn.random.coin_flip(probability=0.5),
"vertical": fn.random.coin_flip(probability=0.5),
}
if self.dim == 3:
kwargs.update({"depthwise": fn.random.coin_flip(probability=0.5)})
return fn.flip(img, **kwargs), fn.flip(lbl, **kwargs)
def define_graph(self):
img, lbl = self.load_data()
img, lbl = self.biased_crop_fn(img, lbl)
img, lbl = self.zoom_fn(img, lbl)
img, lbl = self.flips_fn(img, lbl)
img = self.noise_fn(img)
img = self.blur_fn(img)
img = self.brightness_contrast_fn(img)
return img, lbl
class EvalPipeline(GenericPipeline):
def __init__(self, imgs, lbls, patch_size, **kwargs):
super().__init__(input_x_files=imgs, input_y_files=lbls, shuffle_input=False, **kwargs)
self.patch_size = patch_size
def define_graph(self):
img, lbl = self.input_x(name="ReaderX").gpu(), self.input_y(name="ReaderY").gpu()
img, lbl = fn.reshape(img, layout="DHWC"), fn.reshape(lbl, layout="DHWC")
return img, lbl
class TestPipeline(GenericPipeline):
def __init__(self, imgs, meta, **kwargs):
super().__init__(input_x_files=imgs, input_y_files=meta, shuffle_input=False, **kwargs)
def define_graph(self):
img, meta = self.input_x(name="ReaderX").gpu(), self.input_y(name="ReaderY").gpu()
img = fn.reshape(img, layout="DHWC")
return img, meta
class BenchmarkPipeline(GenericPipeline):
def __init__(self, imgs, lbls, patch_size, batch_size_2d=None, **kwargs):
super().__init__(input_x_files=imgs, input_y_files=lbls, shuffle_input=False, **kwargs)
self.patch_size = patch_size
if self.dim == 2 and batch_size_2d is not None:
self.patch_size = [batch_size_2d] + self.patch_size
def crop_fn(self, img, lbl):
img = fn.crop(img, crop=self.patch_size, out_of_bounds_policy="pad")
lbl = fn.crop(lbl, crop=self.patch_size, out_of_bounds_policy="pad")
return img, lbl
def define_graph(self):
img, lbl = self.input_x(name="ReaderX").gpu(), self.input_y(name="ReaderY").gpu()
img, lbl = self.crop_fn(img, lbl)
img, lbl = fn.reshape(img, layout="DHWC"), fn.reshape(lbl, layout="DHWC")
return img, lbl
def fetch_dali_loader(imgs, lbls, batch_size, mode, **kwargs):
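    # Build the DALI pipeline matching `mode` (train/eval/test/benchmark) and wrap it as a tf.data-compatible DALIDataset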
assert len(imgs) > 0, "No images found"
if lbls is not None:
        assert len(imgs) == len(lbls), f"Got {len(imgs)} images but {len(lbls)} labels"
gpus = hvd.size()
device_id = hvd.rank()
if kwargs["benchmark"]:
# Just to make sure the number of examples is large enough for benchmark run.
nbs = kwargs["bench_steps"]
if kwargs["dim"] == 3:
nbs *= batch_size
imgs = list(itertools.chain(*(100 * [imgs])))[: nbs * gpus]
lbls = list(itertools.chain(*(100 * [lbls])))[: nbs * gpus]
pipe_kwargs = {
"dim": kwargs["dim"],
"num_gpus": gpus,
"seed": kwargs["seed"],
"batch_size": batch_size,
"num_threads": kwargs["num_workers"],
"shard_id": device_id,
}
if kwargs["dim"] == 2:
if kwargs["benchmark"]:
pipe_kwargs.update({"batch_size_2d": batch_size})
batch_size = 1
elif mode == "train":
pipe_kwargs.update({"batch_size_2d": batch_size // kwargs["nvol"]})
batch_size = kwargs["nvol"]
if mode == "eval": # Validation data is manually sharded beforehand.
pipe_kwargs["shard_id"] = 0
pipe_kwargs["num_gpus"] = 1
output_dtypes = (tf.float32, tf.uint8)
if kwargs["benchmark"]:
pipeline = BenchmarkPipeline(imgs, lbls, kwargs["patch_size"], **pipe_kwargs)
elif mode == "train":
pipeline = TrainPipeline(imgs, lbls, kwargs["oversampling"], kwargs["patch_size"], **pipe_kwargs)
elif mode == "eval":
pipeline = EvalPipeline(imgs, lbls, kwargs["patch_size"], **pipe_kwargs)
else:
pipeline = TestPipeline(imgs, kwargs["meta"], **pipe_kwargs)
output_dtypes = (tf.float32, tf.int64)
tf_pipe = dali_tf.DALIDataset(pipeline, batch_size=batch_size, device_id=device_id, output_dtypes=output_dtypes)
return tf_pipe
| DeepLearningExamples-master | TensorFlow2/Segmentation/nnUNet/data_loading/dali_loader.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import os
import numpy as np
from runtime.utils import get_task_code
from sklearn.model_selection import KFold
def get_split(data, idx):
return list(np.array(data)[idx])
def load_data(path, files_pattern):
return sorted(glob.glob(os.path.join(path, files_pattern)))
def get_path(args):
data_path = str(args.data)
if data_path != "/data":
return data_path
data_path = os.path.join(data_path, get_task_code(args))
if args.exec_mode == "predict" and not args.benchmark:
data_path = os.path.join(data_path, "test")
return data_path
def get_test_fnames(args, data_path, meta=None):
kfold = KFold(n_splits=args.nfolds, shuffle=True, random_state=12345)
test_imgs = load_data(data_path, "*_x.npy")
if args.exec_mode == "predict" and "val" in data_path:
_, val_idx = list(kfold.split(test_imgs))[args.fold]
test_imgs = sorted(get_split(test_imgs, val_idx))
if meta is not None:
meta = sorted(get_split(meta, val_idx))
return test_imgs, meta
| DeepLearningExamples-master | TensorFlow2/Segmentation/nnUNet/data_loading/utils.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pathlib
from abc import ABC, abstractmethod
from typing import Callable
import dllogger
from dllogger import Verbosity
from runtime.utils import rank_zero_only
class Logger(ABC):
@rank_zero_only
@abstractmethod
def log_hyperparams(self, params):
pass
@rank_zero_only
@abstractmethod
def log_metadata(self, metric, metadata):
pass
@rank_zero_only
@abstractmethod
def log_metrics(self, metrics, step=None):
pass
@staticmethod
def _sanitize_params(params):
def _sanitize(val):
if isinstance(val, Callable):
try:
_val = val()
if isinstance(_val, Callable):
return val.__name__
return _val
except Exception:
return getattr(val, "__name__", None)
elif isinstance(val, pathlib.Path):
return str(val)
return val
return {key: _sanitize(val) for key, val in params.items()}
@rank_zero_only
def flush(self):
pass
class LoggerCollection(Logger):
def __init__(self, loggers):
super().__init__()
self.loggers = loggers
def __getitem__(self, index):
return [logger for logger in self.loggers][index]
@rank_zero_only
def log_metrics(self, metrics, step=None):
for logger in self.loggers:
logger.log_metrics(metrics, step)
@rank_zero_only
def log_hyperparams(self, params):
for logger in self.loggers:
logger.log_hyperparams(params)
@rank_zero_only
def log_metadata(self, metric, metadata):
for logger in self.loggers:
logger.log_metadata(metric, metadata)
@rank_zero_only
def flush(self):
for logger in self.loggers:
logger.flush()
class DLLogger(Logger):
def __init__(self, save_dir, filename, append, quiet):
super().__init__()
self._initialize_dllogger(save_dir, filename, append, quiet)
@rank_zero_only
def _initialize_dllogger(self, save_dir, filename, append, quiet):
save_dir.mkdir(parents=True, exist_ok=True)
backends = [
dllogger.JSONStreamBackend(Verbosity.DEFAULT, str(save_dir / filename), append=append),
]
if not quiet:
backends.append(dllogger.StdOutBackend(Verbosity.VERBOSE, step_format=lambda step: f"Step: {step} "))
dllogger.init(backends=backends)
@rank_zero_only
def log_hyperparams(self, params):
params = self._sanitize_params(params)
dllogger.log(step="PARAMETER", data=params)
@rank_zero_only
def log_metadata(self, metric, metadata):
dllogger.metadata(metric, metadata)
@rank_zero_only
def log_metrics(self, metrics, step=None):
if step is None:
step = tuple()
dllogger.log(step=step, data=metrics)
@rank_zero_only
def flush(self):
dllogger.flush()
def get_logger(args):
loggers = []
if args.use_dllogger:
loggers.append(
DLLogger(save_dir=args.results, filename=args.logname, append=args.resume_training, quiet=args.quiet)
)
return LoggerCollection(loggers)
| DeepLearningExamples-master | TensorFlow2/Segmentation/nnUNet/runtime/logging.py |