|
import cv2 as cv |
|
import numpy as np |
|
import gradio as gr |
|
from huggingface_hub import hf_hub_download |
|
from pphumanseg import PPHumanSeg |
|
|
|
|
|
# Download the pretrained PPHumanSeg ONNX weights from the Hugging Face Hub.
# hf_hub_download caches the file locally, so the network fetch happens only
# on the first run.
model_path = hf_hub_download(repo_id="opencv/human_segmentation_pphumanseg", filename="human_segmentation_pphumanseg_2023mar.onnx")

# Build the segmentation model on OpenCV's own DNN backend, running on CPU
# (no GPU/accelerator assumptions — works anywhere OpenCV does).
model = PPHumanSeg(
    modelPath=model_path,
    backendId=cv.dnn.DNN_BACKEND_OPENCV,
    targetId=cv.dnn.DNN_TARGET_CPU
)
|
|
|
def get_color_map_list(num_classes):
    """Return a flat ``[R, G, B, R, G, B, ...]`` pseudo-color palette.

    Colors are generated with the PASCAL-VOC-style bit-interleaving scheme:
    for each class index, successive groups of 3 bits are reversed into the
    high bits of the R, G and B channels, giving visually distinct colors.

    The palette is built for ``num_classes + 1`` entries and the first
    (background, index 0 => black) entry is dropped, so the returned list
    has ``3 * num_classes`` integers in ``0..255``.
    """
    total = num_classes + 1
    palette = [0] * (3 * total)
    for idx in range(total):
        value = idx
        shift = 7  # fill from the most significant bit downwards
        while value:
            palette[idx * 3] |= (value & 1) << shift
            palette[idx * 3 + 1] |= ((value >> 1) & 1) << shift
            palette[idx * 3 + 2] |= ((value >> 2) & 1) << shift
            shift -= 1
            value >>= 3
    # Skip the leading background triple.
    return palette[3:]
|
|
|
def visualize(image, result, weight=0.6):
    """Blend a pseudo-colored class map over the original image.

    Args:
        image: BGR uint8 image, same height/width as ``result``.
        result: 2-D array of per-pixel class indices (assumed to fit the
            0..255 LUT range — TODO confirm against PPHumanSeg output dtype).
        weight: blend factor for the original image; the color map gets
            ``1 - weight``.

    Returns:
        BGR uint8 image of the weighted overlay.
    """
    palette = np.array(get_color_map_list(256)).reshape(256, 3).astype(np.uint8)

    # Map class indices to colors one channel at a time, then stack to H x W x 3.
    channels = [cv.LUT(result, palette[:, ch]) for ch in range(3)]
    pseudo_img = np.dstack(channels)

    return cv.addWeighted(image, weight, pseudo_img, 1 - weight, 0)
|
|
|
def segment_person(input_image):
    """Segment humans in an RGB image and return the blended RGB overlay.

    Pipeline: RGB -> BGR, resize to the model's fixed 192x192 input,
    back to RGB for inference, then upscale the predicted class map to the
    original resolution (nearest-neighbor, to keep labels discrete) and
    blend it over the image via ``visualize``.
    """
    bgr = cv.cvtColor(input_image, cv.COLOR_RGB2BGR)
    orig_h, orig_w = bgr.shape[:2]

    # The ONNX model expects a 192x192 RGB frame.
    model_input = cv.cvtColor(cv.resize(bgr, (192, 192)), cv.COLOR_BGR2RGB)
    prediction = model.infer(model_input)

    # First element holds the 2-D class map; restore the original size.
    mask = cv.resize(prediction[0, :, :], dsize=(orig_w, orig_h),
                     interpolation=cv.INTER_NEAREST)

    blended = visualize(bgr, mask)
    # Gradio displays RGB, so convert back before returning.
    return cv.cvtColor(blended, cv.COLOR_BGR2RGB)
|
|
|
|
|
# ---------------------------------------------------------------------------
# Gradio UI. The inline CSS styles everything inside elements tagged with the
# "example" class (the hint line above the Examples gallery below).
# ---------------------------------------------------------------------------
with gr.Blocks(css='''.example * {

    font-style: italic;

    font-size: 18px !important;

    color: #0ea5e9 !important;

}''') as demo:

    gr.Markdown("### Human Segmentation PPHumanSeg (OpenCV DNN)")
    gr.Markdown("Upload an image to segment human regions using OpenCV's ONNX-based PPHumanSeg model.")

    # Side-by-side input/output images (numpy arrays, RGB).
    with gr.Row():
        input_image = gr.Image(type="numpy", label="Upload Image")
        output_image = gr.Image(type="numpy", label="Human Segmentation Output")

    # Clear the stale output whenever a new input image is selected.
    input_image.change(fn=lambda: (None), outputs=output_image)

    with gr.Row():
        submit_btn = gr.Button("Submit", variant="primary")
        clear_btn = gr.Button("Clear")

    # Submit runs the segmentation; Clear resets both image panes.
    submit_btn.click(fn=segment_person, inputs=input_image, outputs=output_image)
    clear_btn.click(fn=lambda:(None, None), outputs=[input_image, output_image])

    gr.Markdown("Click on any example to try it.", elem_classes=["example"])

    # NOTE(review): example paths are relative to the working directory —
    # confirm the examples/ folder ships alongside this script.
    gr.Examples(
        examples=[
            ["examples/googlenet_0.png"],
            ["examples/gray_face.png"],
            ["examples/messi5.jpg"]
        ],
        inputs=input_image
    )

if __name__ == "__main__":
    demo.launch()
|
|