import re
import gradio as gr
from PIL import Image

def create_detection_tab(predict_fn, example_images):
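    """Build the breed detection tab.

    Assumed contract (matching how the components are wired below):
    `predict_fn` takes a PIL image and returns (results_html, annotated_image,
    state); `example_images` is a list of example image paths for gr.Examples.

    Returns a dict of the created Gradio components keyed by name.
    """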
    with gr.TabItem("Breed Detection"):
        gr.HTML("""
            <div style='
                text-align: center;
                padding: 20px 0;
                margin: 15px 0;
                background: linear-gradient(to right, rgba(66, 153, 225, 0.1), rgba(72, 187, 120, 0.1));
                border-radius: 10px;
            '>
                <p style='
                    font-size: 1.2em;
                    margin: 0;
                    padding: 0 20px;
                    line-height: 1.5;
                    background: linear-gradient(90deg, #4299e1, #48bb78);
                    -webkit-background-clip: text;
                    -webkit-text-fill-color: transparent;
                    font-weight: 600;
                '>
                    Upload a picture of a dog or take a photo, and the model will predict its breed and provide detailed information!
                </p>
                <p style='
                    font-size: 0.9em;
                    color: #666;
                    margin-top: 8px;
                    padding: 0 20px;
                '>
                    Note: The model's predictions may not always be accurate; please treat the results as a reference only.
                </p>
            </div>
        """)
        
        # Place the input methods in sub-tabs
        with gr.Tabs():
            # Tab 1: upload an image (keeps the original functionality)
            with gr.TabItem("Upload Image"):
                input_image = gr.Image(label="Upload a dog image", type="pil")
                gr.Examples(
                    examples=example_images,
                    inputs=input_image
                )
            
            # Tab 2: take a photo (uses gr.Webcam rather than gr.Image(source="webcam"))
            with gr.TabItem("Take Photo"):
                camera_input = gr.Webcam(label="Take a photo of a dog")
        
        # Output area
        with gr.Row():
            output_image = gr.Image(label="Annotated Image")
            output = gr.HTML(label="Prediction Results")
        
        # Use State to hold the prediction results
        initial_state = gr.State()
        
        # Helper defined inside this function to avoid a circular import
        def detect_from_inputs(upload_image, camera_image):
            # Prefer the camera photo if one was taken; otherwise fall back to the uploaded image
            image_to_use = camera_image if camera_image is not None else upload_image
            
            if image_to_use is None:
                return "Please upload an image or take a photo first.", None, None
            
            # Delegate to the predict_fn passed in as a parameter
            return predict_fn(image_to_use)
        
        # Run prediction whenever the uploaded image changes
        input_image.change(
            predict_fn,
            inputs=input_image,
            outputs=[output, output_image, initial_state]
        )
        
        # Run prediction whenever a photo is captured (for gr.Webcam)
        camera_input.change(
            predict_fn,
            inputs=camera_input,
            outputs=[output, output_image, initial_state]
        )
        
        # Button so the user can trigger the analysis explicitly
        with gr.Row():
            detect_btn = gr.Button("Detect Breed", variant="primary")
        
        # Wire up the button's click handler
        detect_btn.click(
            detect_from_inputs,
            inputs=[input_image, camera_input],
            outputs=[output, output_image, initial_state]
        )

    return {
        'input_image': input_image,
        'camera_input': camera_input,
        'output_image': output_image,
        'output': output,
        'initial_state': initial_state,
        'detect_btn': detect_btn
    }
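

# ---------------------------------------------------------------------------
# Minimal usage sketch (illustration only, not part of the original module).
# `stub_predict` is a hypothetical placeholder; a real predict_fn would run the
# breed classifier and return (results_html, annotated_image, state) to match
# the outputs wired above.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    def stub_predict(image):
        # Placeholder prediction; no actual breed classification happens here.
        return "<p>Demo prediction: breed unknown (stub)</p>", image, None

    with gr.Blocks() as demo:
        create_detection_tab(stub_predict, example_images=[])

    demo.launch()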