import cv2 as cv
import numpy as np
import gradio as gr
from huggingface_hub import hf_hub_download
from pphumanseg import PPHumanSeg

# Download ONNX model from Hugging Face
model_path = hf_hub_download(repo_id="opencv/human_segmentation_pphumanseg", filename="human_segmentation_pphumanseg_2023mar.onnx")

# Initialize PPHumanSeg model
model = PPHumanSeg(
    modelPath=model_path,
    backendId=cv.dnn.DNN_BACKEND_OPENCV,
    targetId=cv.dnn.DNN_TARGET_CPU
)

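# Build a PaddleSeg-style pseudo-color palette: each class id is mapped to a
# color triplet by spreading the bits of its label across the three channels.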
def get_color_map_list(num_classes):
    num_classes += 1
    color_map = num_classes * [0, 0, 0]
    for i in range(num_classes):
        j = 0
        lab = i
        while lab:
            color_map[i * 3] |= (((lab >> 0) & 1) << (7 - j))
            color_map[i * 3 + 1] |= (((lab >> 1) & 1) << (7 - j))
            color_map[i * 3 + 2] |= (((lab >> 2) & 1) << (7 - j))
            j += 1
            lab >>= 3
    return color_map[3:]

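# Blend the predicted class map over the original image as a translucent
# pseudo-color overlay (weight controls the mix of image vs. mask).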
def visualize(image, result, weight=0.6):
    color_map = get_color_map_list(256)
    color_map = np.array(color_map).reshape(256, 3).astype(np.uint8)

    c1 = cv.LUT(result, color_map[:, 0])
    c2 = cv.LUT(result, color_map[:, 1])
    c3 = cv.LUT(result, color_map[:, 2])
    pseudo_img = np.dstack((c1, c2, c3))

    vis_result = cv.addWeighted(image, weight, pseudo_img, 1 - weight, 0)
    return vis_result

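# Gradio callback: run PPHumanSeg on the uploaded image and return the overlay.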
def segment_person(input_image):
    if input_image is None:
        return None
    # Gradio provides RGB; keep a BGR copy for the OpenCV-style overlay
    image = cv.cvtColor(input_image, cv.COLOR_RGB2BGR)
    h, w, _ = image.shape
    # The model expects a 192x192 RGB input
    resized = cv.resize(input_image, (192, 192))

    result = model.infer(resized)
    # Upscale the predicted class map back to the original resolution
    result = cv.resize(result[0, :, :], dsize=(w, h), interpolation=cv.INTER_NEAREST)

    output = visualize(image, result)
    return cv.cvtColor(output, cv.COLOR_BGR2RGB)

# Gradio Interface
with gr.Blocks(css='''.example * {
    font-style: italic;
    font-size: 18px !important;
    color: #0ea5e9 !important;
    }''') as demo:

    gr.Markdown("### Human Segmentation PPHumanSeg (OpenCV DNN)")
    gr.Markdown("Upload an image to segment human regions using OpenCV's ONNX-based PPHumanSeg model.")

    with gr.Row():
        input_image = gr.Image(type="numpy", label="Upload Image")
        output_image = gr.Image(type="numpy", label="Human Segmentation Output")

    # Clear output when new image is uploaded
    input_image.change(fn=lambda: None, outputs=output_image)

    with gr.Row():
        submit_btn = gr.Button("Submit", variant="primary")
        clear_btn = gr.Button("Clear")

    submit_btn.click(fn=segment_person, inputs=input_image, outputs=output_image)
    clear_btn.click(fn=lambda: (None, None), outputs=[input_image, output_image])

    gr.Markdown("Click on any example to try it.", elem_classes=["example"])

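    # Example images shipped alongside the app (paths relative to the app root)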
    gr.Examples(
        examples=[
            ["examples/googlenet_0.png"],
            ["examples/gray_face.png"],
            ["examples/messi5.jpg"]
        ],
        inputs=input_image
    )

if __name__ == "__main__":
    demo.launch()