import cv2 as cv
import numpy as np
import gradio as gr
from huggingface_hub import hf_hub_download
from pphumanseg import PPHumanSeg

# Download the ONNX model from Hugging Face Hub
model_path = hf_hub_download(
    repo_id="opencv/human_segmentation_pphumanseg",
    filename="human_segmentation_pphumanseg_2023mar.onnx"
)

# Initialize the PPHumanSeg model with the OpenCV DNN backend on CPU
model = PPHumanSeg(
    modelPath=model_path,
    backendId=cv.dnn.DNN_BACKEND_OPENCV,
    targetId=cv.dnn.DNN_TARGET_CPU
)


def get_color_map_list(num_classes):
    """Generate a PASCAL VOC-style color map for up to num_classes labels."""
    num_classes += 1
    color_map = num_classes * [0, 0, 0]
    for i in range(num_classes):
        j = 0
        lab = i
        while lab:
            color_map[i * 3] |= (((lab >> 0) & 1) << (7 - j))
            color_map[i * 3 + 1] |= (((lab >> 1) & 1) << (7 - j))
            color_map[i * 3 + 2] |= (((lab >> 2) & 1) << (7 - j))
            j += 1
            lab >>= 3
    return color_map[3:]


def visualize(image, result, weight=0.6):
    """Blend the segmentation label map over the image as a pseudo-color overlay."""
    color_map = get_color_map_list(256)
    color_map = np.array(color_map).reshape(256, 3).astype(np.uint8)
    # Map each label value to its pseudo-color, channel by channel
    c1 = cv.LUT(result, color_map[:, 0])
    c2 = cv.LUT(result, color_map[:, 1])
    c3 = cv.LUT(result, color_map[:, 2])
    pseudo_img = np.dstack((c1, c2, c3))
    vis_result = cv.addWeighted(image, weight, pseudo_img, 1 - weight, 0)
    return vis_result


def segment_person(input_image):
    # Gradio provides an RGB array; convert to BGR for OpenCV processing
    image = cv.cvtColor(input_image, cv.COLOR_RGB2BGR)
    h, w, _ = image.shape

    # The model expects a 192x192 RGB input
    resized = cv.resize(image, (192, 192))
    resized = cv.cvtColor(resized, cv.COLOR_BGR2RGB)

    # Run inference and upscale the label map back to the original size
    result = model.infer(resized)
    result = cv.resize(result[0, :, :], dsize=(w, h), interpolation=cv.INTER_NEAREST)

    # Overlay the mask and convert back to RGB for display in Gradio
    output = visualize(image, result)
    output = cv.cvtColor(output, cv.COLOR_BGR2RGB)
    return output


# Gradio interface
demo = gr.Interface(
    fn=segment_person,
    inputs=gr.Image(type="numpy", label="Upload Image"),
    outputs=gr.Image(type="numpy", label="Human Segmentation Output"),
    title="Human Segmentation PPHumanSeg (OpenCV DNN)",
    allow_flagging="never",
    description="Upload an image to segment human regions using OpenCV's ONNX-based PPHumanSeg model."
)

if __name__ == "__main__":
    demo.launch()