# Gilt_posture_dataset/code/RGBD/yolo_rgbd_model_inference.py
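"""Run a trained 4-channel (RGB-D) YOLO posture model over a folder of test
images and save class-colored detections drawn on both the RGB view and a
colormapped depth view."""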
import os
import glob
import cv2
import numpy as np
import torch
from ultralytics import YOLO
from tqdm import tqdm

# ---- CONFIGURATION ----
model_path = 'path/to/your/model/weights/best.pt'
test_img_dir = 'path/to/your/rgbd/test/images'
output_rgb_dir = 'outputs/rgb'
output_depth_dir = 'outputs/depth'
class_names = ['Feeding', 'Lateral_lying', 'Sitting', 'Standing', 'Sternal_lying']
confidence_threshold = 0.65
input_size = 640 # Model input size
os.makedirs(output_rgb_dir, exist_ok=True)
os.makedirs(output_depth_dir, exist_ok=True)

# ---- Per-class box colors, in OpenCV's (B, G, R) order ----
COLORS = {
    'Feeding': (255, 0, 0),         # Blue
    'Lateral_lying': (0, 255, 0),   # Green
    'Sitting': (0, 0, 255),         # Red
    'Standing': (255, 255, 0),      # Cyan
    'Sternal_lying': (255, 0, 255)  # Magenta
}

# ---- LOAD MODEL ----
device = 'cuda' if torch.cuda.is_available() else 'cpu'  # fall back to CPU when no GPU is available
model = YOLO(model_path).to(device)  # predict() handles eval mode internally

# ---- INFERENCE LOOP ----
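# Assumes each frame is a 4-channel PNG with depth stored in the fourth (alpha) slot.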
image_paths = sorted(glob.glob(os.path.join(test_img_dir, '*.png')))
for img_path in tqdm(image_paths, desc="Visualizing Predictions"):
    base = os.path.splitext(os.path.basename(img_path))[0]

    # Load the original 4-channel image (BGR + depth)
    img = cv2.imread(img_path, cv2.IMREAD_UNCHANGED)
    if img is None or img.ndim != 3 or img.shape[2] != 4:
        print(f"Skipping {img_path}: not a 4-channel image.")
        continue
    rgb = img[:, :, :3]   # BGR channels (OpenCV order)
    depth = img[:, :, 3]  # depth channel
    orig_h, orig_w = rgb.shape[:2]

    # Resize to the model input size; boxes are rescaled back afterwards
    img_resized = cv2.resize(img, (input_size, input_size))
    input_tensor = torch.from_numpy(img_resized).permute(2, 0, 1).float() / 255.0
    input_tensor = input_tensor.unsqueeze(0).to(device)
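
    # NOTE: the input tensor is built manually because Ultralytics' default
    # loader reads images as 3-channel BGR, which would drop the depth channel.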

    # Inference; with a tensor source, boxes come back in resized-image coordinates
    results = model.predict(input_tensor, imgsz=input_size, conf=confidence_threshold, verbose=False)[0]
    boxes = results.boxes
    classes = boxes.cls.cpu().numpy()
    confidences = boxes.conf.cpu().numpy()
    xyxy_resized = boxes.xyxy.cpu().numpy()

    # Scale boxes back to the original image size
    scale_x = orig_w / input_size
    scale_y = orig_h / input_size
    xyxy_orig = np.copy(xyxy_resized)
    xyxy_orig[:, [0, 2]] *= scale_x
    xyxy_orig[:, [1, 3]] *= scale_y
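
    # This plain per-axis rescale is valid because the resize above used no
    # letterbox padding; with letterboxing, the pad offsets would need subtracting.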

    # Normalize depth to uint8 for visualization
    depth_norm = cv2.normalize(depth, None, 0, 255, cv2.NORM_MINMAX)
    depth_uint8 = depth_norm.astype('uint8')

    rgb_draw = rgb.copy()
    # Apply a colormap for better visualization (COLORMAP_JET, COLORMAP_INFERNO, etc. also work);
    # applyColorMap returns BGR, which is what cv2.imwrite expects below
    depth_draw = cv2.applyColorMap(depth_uint8, cv2.COLORMAP_VIRIDIS)

    # ---- Draw boxes on the original-size RGB and depth images ----
    for box, cls, conf in zip(xyxy_orig, classes, confidences):
        x1, y1, x2, y2 = map(int, box)
        label = f"{class_names[int(cls)]} {conf:.2f}"
        color = COLORS.get(class_names[int(cls)], (255, 255, 255))  # Default to white

        # RGB
        cv2.rectangle(rgb_draw, (x1, y1), (x2, y2), color, 2)
        cv2.putText(rgb_draw, label, (x1, y1 - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.6, color, 2)

        # Depth
        cv2.rectangle(depth_draw, (x1, y1), (x2, y2), color, 2)
        cv2.putText(depth_draw, label, (x1, y1 - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.6, color, 2)

    # Save the annotated images
    cv2.imwrite(os.path.join(output_rgb_dir, f"{base}.png"), rgb_draw)
    cv2.imwrite(os.path.join(output_depth_dir, f"{base}.png"), depth_draw)
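
# Usage: set model_path and test_img_dir above, then run
#   python yolo_rgbd_model_inference.py
# Annotated frames are written to outputs/rgb/ and outputs/depth/.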