Update app.py
app.py
CHANGED
@@ -1,3 +1,4 @@
+
 import cv2 as cv
 import numpy as np
 import gradio as gr
@@ -6,27 +7,60 @@ from huggingface_hub import hf_hub_download

 # Download ONNX model from Hugging Face
 model_path = hf_hub_download(repo_id="opencv/image_classification_mobilenet", filename="image_classification_mobilenetv1_2022apr.onnx")
-top_k =
+top_k = 10  # Increased to support variable top_n
 backend_id = cv.dnn.DNN_BACKEND_OPENCV
 target_id = cv.dnn.DNN_TARGET_CPU

 # Load MobileNet model
 model = MobileNet(modelPath=model_path, topK=top_k, backendId=backend_id, targetId=target_id)

-def
-
+def add_hsv_noise(image, hue_noise=0, saturation_noise=0, value_noise=0):
+    """Add HSV noise to an image"""
+    if image is None:
+        return None
+
+    # Convert BGR to HSV (OpenCV uses BGR by default)
+    hsv = cv.cvtColor(image, cv.COLOR_BGR2HSV).astype(np.float32)
+
+    # Add noise to each channel
+    hsv[:, :, 0] = np.clip(hsv[:, :, 0] + hue_noise, 0, 179)  # Hue: 0-179
+    hsv[:, :, 1] = np.clip(hsv[:, :, 1] + saturation_noise, 0, 255)  # Saturation: 0-255
+    hsv[:, :, 2] = np.clip(hsv[:, :, 2] + value_noise, 0, 255)  # Value: 0-255
+
+    # Convert back to BGR
+    bgr = cv.cvtColor(hsv.astype(np.uint8), cv.COLOR_HSV2BGR)
+
+    return bgr
+
+def classify_image_with_noise(input_image, top_n, hue_noise, saturation_noise, value_noise):
+    """Classify image with HSV noise applied"""
+    if input_image is None:
+        return None, "Please upload an image first."
+
+    # Apply HSV noise
+    noisy_image = add_hsv_noise(input_image, hue_noise, saturation_noise, value_noise)
+
+    # Resize and crop as in original code
+    image = cv.resize(noisy_image, (256, 256))
     image = image[16:240, 16:240, :]

+    # Update model's topK for this inference
+    model.topK = top_n
     result = model.infer(image)

-
-
+    # Format results with probabilities if available
+    result_str = "\n".join(f"{i+1}. {label}" for i, label in enumerate(result[:top_n]))
+
+    # Convert BGR to RGB for display in Gradio
+    display_image = cv.cvtColor(noisy_image, cv.COLOR_BGR2RGB)
+
+    return display_image, result_str

 def clear_output_on_change(img):
-    return gr.update(value="")
+    return gr.update(value=""), None

 def clear_all():
-    return None, None
+    return None, None, ""

 with gr.Blocks(css='''.example * {
 font-style: italic;
@@ -34,22 +68,49 @@ with gr.Blocks(css='''.example * {
 color: #0ea5e9 !important;
 }''') as demo:

-    gr.Markdown("### Image Classification with MobileNet
-    gr.Markdown("Upload an image to
+    gr.Markdown("### Image Classification with MobileNet + HSV Noise Analysis")
+    gr.Markdown("Upload an image and adjust HSV noise sliders to see how it affects MobileNet predictions in real-time.")

     with gr.Row():
-
-
+        with gr.Column():
+            # Input controls
+            image_input = gr.Image(type="numpy", label="Upload Image")
+
+            gr.Markdown("### Classification Settings")
+            top_n = gr.Slider(minimum=1, maximum=10, value=5, step=1, label="Top N Classes")
+
+            gr.Markdown("### HSV Noise Controls")
+            hue_noise = gr.Slider(minimum=-50, maximum=50, value=0, step=1, label="Hue Noise (-50 to 50)")
+            saturation_noise = gr.Slider(minimum=-100, maximum=100, value=0, step=5, label="Saturation Noise (-100 to 100)")
+            value_noise = gr.Slider(minimum=-100, maximum=100, value=0, step=5, label="Value/Brightness Noise (-100 to 100)")
+
+        with gr.Column():
+            # Output displays
+            noisy_image_output = gr.Image(label="Image with Noise Applied")
+            output_box = gr.Textbox(label="Top Predictions", lines=10, max_lines=15)

-    # Clear
-    image_input.change(fn=clear_output_on_change, inputs=image_input, outputs=output_box)
+    # Clear outputs when new image is uploaded
+    image_input.change(fn=clear_output_on_change, inputs=image_input, outputs=[output_box, noisy_image_output])

     with gr.Row():
         submit_btn = gr.Button("Submit", variant="primary")
         clear_btn = gr.Button("Clear")

-
-
+    # Set up real-time updates for sliders
+    inputs = [image_input, top_n, hue_noise, saturation_noise, value_noise]
+    outputs = [noisy_image_output, output_box]
+
+    # Update predictions when sliders change (real-time)
+    for slider in [top_n, hue_noise, saturation_noise, value_noise]:
+        slider.change(
+            fn=classify_image_with_noise,
+            inputs=inputs,
+            outputs=outputs
+        )
+
+    # Manual submit button
+    submit_btn.click(fn=classify_image_with_noise, inputs=inputs, outputs=outputs)
+    clear_btn.click(fn=clear_all, outputs=[image_input, noisy_image_output, output_box])

     gr.Markdown("Click on any example to try it.", elem_classes=["example"])
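As a side note (not part of the commit), the clip-and-shift behaviour of the new add_hsv_noise helper can be checked outside the Gradio app. The sketch below is a minimal standalone reproduction of that logic, assuming only opencv-python and numpy are installed; the function name hsv_shift, the synthetic grey test image, and the expected output are illustrative and do not appear in app.py.

import cv2 as cv
import numpy as np

def hsv_shift(image, hue_noise=0, saturation_noise=0, value_noise=0):
    # Same idea as the add_hsv_noise added in this commit: shift each HSV
    # channel by a fixed offset and clip to OpenCV's ranges (H 0-179, S/V 0-255).
    hsv = cv.cvtColor(image, cv.COLOR_BGR2HSV).astype(np.float32)
    hsv[:, :, 0] = np.clip(hsv[:, :, 0] + hue_noise, 0, 179)
    hsv[:, :, 1] = np.clip(hsv[:, :, 1] + saturation_noise, 0, 255)
    hsv[:, :, 2] = np.clip(hsv[:, :, 2] + value_noise, 0, 255)
    return cv.cvtColor(hsv.astype(np.uint8), cv.COLOR_HSV2BGR)

# Mid-grey BGR image; a +50 value shift should brighten every pixel by 50.
img = np.full((8, 8, 3), 128, dtype=np.uint8)
out = hsv_shift(img, value_noise=50)
print(img[0, 0], "->", out[0, 0])  # [128 128 128] -> [178 178 178]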