Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -5,81 +5,119 @@ import numpy as np
|
|
5 |
# Load Haar Cascade for face detection
|
6 |
face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
|
7 |
|
8 |
-
def
|
9 |
-
# Convert Gradio image (PIL) to OpenCV format (BGR)
|
10 |
image = np.array(image)
|
11 |
image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
|
12 |
-
|
13 |
-
|
14 |
-
|
15 |
-
|
16 |
-
|
17 |
-
|
18 |
-
|
19 |
-
|
20 |
-
|
21 |
-
|
22 |
-
|
23 |
-
|
24 |
-
|
25 |
-
|
26 |
-
|
27 |
-
|
28 |
-
blur_kernel += 1 # Kernel size must be odd
|
29 |
-
blurred = cv2.GaussianBlur(image, (blur_kernel, blur_kernel), 0)
|
30 |
-
outputs["Blurred Image"] = blurred
|
31 |
-
|
32 |
-
elif operation == "Face Detection":
|
33 |
-
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
|
34 |
-
faces = face_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30))
|
35 |
-
output_image = image.copy()
|
36 |
-
for (x, y, w, h) in faces:
|
37 |
-
cv2.rectangle(output_image, (x, y), (x+w, y+h), (0, 255, 0), 2)
|
38 |
-
outputs["Faces Detected"] = output_image
|
39 |
-
|
40 |
-
# Convert back to RGB for Gradio display
|
41 |
-
for key in outputs:
|
42 |
-
outputs[key] = cv2.cvtColor(outputs[key], cv2.COLOR_BGR2RGB)
|
43 |
-
|
44 |
-
return outputs
|
45 |
|
46 |
-
|
47 |
-
|
48 |
-
|
49 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
50 |
|
51 |
-
|
52 |
-
with gr.Column():
|
53 |
-
image_input = gr.Image(label="Upload Image", type="pil")
|
54 |
-
operation = gr.Dropdown(
|
55 |
-
choices=["Grayscale", "Canny Edge Detection", "Gaussian Blur", "Face Detection"],
|
56 |
-
label="Select Operation",
|
57 |
-
value="Grayscale"
|
58 |
-
)
|
59 |
-
canny_threshold1 = gr.Slider(0, 500, value=100, step=10, label="Canny Threshold 1", visible=False)
|
60 |
-
canny_threshold2 = gr.Slider(0, 500, value=200, step=10, label="Canny Threshold 2", visible=False)
|
61 |
-
blur_kernel = gr.Slider(3, 21, value=5, step=2, label="Blur Kernel Size", visible=False)
|
62 |
-
|
63 |
-
# Show/hide sliders based on operation
|
64 |
-
def update_sliders(op):
|
65 |
-
if op == "Canny Edge Detection":
|
66 |
-
return gr.update(visible=True), gr.update(visible=True), gr.update(visible=False)
|
67 |
-
elif op == "Gaussian Blur":
|
68 |
-
return gr.update(visible=False), gr.update(visible=False), gr.update(visible=True)
|
69 |
-
else:
|
70 |
-
return gr.update(visible=False), gr.update(visible=False), gr.update(visible=False)
|
71 |
-
|
72 |
-
operation.change(update_sliders, inputs=operation, outputs=[canny_threshold1, canny_threshold2, blur_kernel])
|
73 |
-
|
74 |
-
with gr.Column():
|
75 |
-
output = gr.Gallery(label="Processed Image")
|
76 |
|
77 |
-
|
78 |
-
|
79 |
-
|
80 |
-
|
81 |
-
|
82 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
83 |
|
84 |
if __name__ == "__main__":
|
85 |
demo.launch()
|
|
|
5 |
# Load Haar Cascade for face detection
|
6 |
face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
|
7 |
|
8 |
+
def process_grayscale(image):
    """Convert an uploaded image to grayscale for display.

    Args:
        image: PIL image from the Gradio input component (assumed RGB —
            TODO confirm; the rest of the file treats it as RGB).

    Returns:
        3-channel RGB numpy array with equal channel values, so Gradio
        renders it as a grayscale picture.
    """
    rgb = np.array(image)
    # Convert straight to gray. The original RGB->BGR->gray round trip is
    # redundant: BGR2GRAY on swapped channels equals RGB2GRAY, since both
    # apply the BT.601 luma weights 0.299*R + 0.587*G + 0.114*B.
    gray = cv2.cvtColor(rgb, cv2.COLOR_RGB2GRAY)
    return cv2.cvtColor(gray, cv2.COLOR_GRAY2RGB)
13 |
+
|
14 |
+
def process_canny(image, threshold1, threshold2):
    """Run Canny edge detection on an uploaded image.

    Args:
        image: PIL image from the Gradio input component (assumed RGB).
        threshold1: lower hysteresis threshold passed to cv2.Canny.
        threshold2: upper hysteresis threshold passed to cv2.Canny.

    Returns:
        3-channel RGB numpy array: white edges on a black background.
    """
    rgb = np.array(image)
    # Direct RGB->gray conversion; the original RGB->BGR->gray detour
    # produces the same values (identical BT.601 weights) with an extra pass.
    gray = cv2.cvtColor(rgb, cv2.COLOR_RGB2GRAY)
    edges = cv2.Canny(gray, threshold1, threshold2)
    return cv2.cvtColor(edges, cv2.COLOR_GRAY2RGB)
20 |
+
|
21 |
+
def process_blur(image, kernel_size):
    """Apply a Gaussian blur with a square kernel.

    Args:
        image: PIL image from the Gradio input component (assumed RGB).
        kernel_size: requested kernel side length; Gradio sliders deliver
            floats, and even values are bumped to the next odd number.

    Returns:
        Blurred RGB numpy array for display.
    """
    rgb = np.array(image)
    # GaussianBlur requires an odd kernel size; OR-ing with 1 rounds an even
    # value up to the next odd number without branching.
    k = int(kernel_size) | 1
    # Gaussian blur operates on each channel independently, so blurring the
    # RGB array directly gives the same result as the original
    # RGB->BGR->blur->RGB round trip, minus two conversions.
    return cv2.GaussianBlur(rgb, (k, k), 0)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
27 |
|
28 |
+
def process_face_detection(image):
    """Detect faces with the module-level Haar cascade and box them in green.

    Args:
        image: PIL image from the Gradio input component (assumed RGB).

    Returns:
        RGB numpy array with a 2px green rectangle around each detection.
    """
    frame = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    detections = face_cascade.detectMultiScale(
        gray, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30)
    )
    annotated = frame.copy()
    for (x, y, w, h) in detections:
        # (0, 255, 0) is green in BGR order.
        cv2.rectangle(annotated, (x, y), (x + w, y + h), (0, 255, 0), 2)
    return cv2.cvtColor(annotated, cv2.COLOR_BGR2RGB)
37 |
+
|
38 |
+
def process_color_space(image, color_space):
    """Reinterpret the image in another color space for visualization.

    Args:
        image: PIL image from the Gradio input component (assumed RGB).
        color_space: "RGB", "HSV", or "LAB".

    Returns:
        numpy array for display. For HSV/LAB the raw channel planes are
        shown as pseudo-color (H/S/V or L/a/b mapped onto R/G/B); for "RGB"
        or any unrecognized value the original image is returned unchanged.
    """
    bgr = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
    if color_space == "HSV":
        # Return the HSV planes directly. The original code additionally ran
        # COLOR_BGR2RGB over this array, but the data is no longer BGR at
        # that point, so the "conversion" merely swapped the H and V planes.
        return cv2.cvtColor(bgr, cv2.COLOR_BGR2HSV)
    if color_space == "LAB":
        # Same fix as HSV: no bogus BGR2RGB pass over LAB data.
        return cv2.cvtColor(bgr, cv2.COLOR_BGR2LAB)
    # "RGB" and any unexpected choice fall back to the untouched input.
    return cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)
48 |
+
|
49 |
+
# Custom CSS with Tailwind via CDN
# NOTE(review): gr.Blocks(css=...) expects plain CSS, but this string embeds
# <link>/<style> HTML tags; and the @apply directives below only work when a
# Tailwind build step compiles them — the runtime CDN stylesheet linked here
# does not process @apply. Confirm this styling actually takes effect.
# NOTE(review): the package spec in the CDN URL looks garbled by scraping
# ("[email protected]" is an email-protection artifact) — restore the real
# tailwindcss@<version> path before deploying.
custom_css = """
<link href="https://cdn.jsdelivr.net/npm/[email protected]/dist/tailwind.min.css" rel="stylesheet">
<style>
body { @apply bg-gray-100 font-sans; }
.gradio-container { @apply max-w-7xl mx-auto p-4; }
.tab-button { @apply px-4 py-2 text-sm font-medium text-gray-700 bg-white rounded-t-lg border-b-2 border-transparent hover:border-blue-500 focus:outline-none focus:border-blue-500; }
.tab-button-active { @apply border-blue-500 text-blue-600; }
.tab-content { @apply bg-white p-6 rounded-b-lg shadow-lg; }
.gallery img { @apply rounded-lg shadow-md; }
.btn-primary { @apply bg-blue-500 text-white px-4 py-2 rounded-lg hover:bg-blue-600 transition; }
h1 { @apply text-3xl font-bold text-gray-800 mb-4; }
.input-label { @apply text-sm font-medium text-gray-600 mb-2; }
</style>
"""
64 |
+
|
65 |
+
# Gradio interface: one tab per OpenCV feature, each wiring a button to its
# process_* handler.
with gr.Blocks(css=custom_css) as demo:
    gr.HTML("<h1 class='text-center'>OpenCV Multi-Feature Demo</h1>")
    # BUG FIX: gr.Markdown has no `_js` constructor argument (that keyword
    # belonged to Gradio event listeners), so the original calls raised
    # TypeError at startup. elem_classes attaches the same CSS classes
    # declaratively.
    gr.Markdown(
        "Upload an image and explore various OpenCV features using the tabs below.",
        elem_classes=["text-center", "text-gray-600", "mb-4"],
    )

    image_input = gr.Image(label="Upload Image", type="pil")

    with gr.Tabs():
        with gr.TabItem("Grayscale", elem_classes="tab-button"):
            with gr.Row():
                with gr.Column():
                    gr.Markdown("Convert the image to grayscale.",
                                elem_classes=["input-label"])
                    grayscale_button = gr.Button("Apply Grayscale", elem_classes="btn-primary")
                with gr.Column():
                    grayscale_output = gr.Image(label="Grayscale Result")
            grayscale_button.click(fn=process_grayscale, inputs=image_input,
                                   outputs=grayscale_output)

        with gr.TabItem("Canny Edge Detection", elem_classes="tab-button"):
            with gr.Row():
                with gr.Column():
                    gr.Markdown("Detect edges with adjustable thresholds.",
                                elem_classes=["input-label"])
                    canny_t1 = gr.Slider(0, 500, value=100, step=10, label="Threshold 1")
                    canny_t2 = gr.Slider(0, 500, value=200, step=10, label="Threshold 2")
                    canny_button = gr.Button("Apply Canny", elem_classes="btn-primary")
                with gr.Column():
                    canny_output = gr.Image(label="Edges")
            canny_button.click(fn=process_canny,
                               inputs=[image_input, canny_t1, canny_t2],
                               outputs=canny_output)

        with gr.TabItem("Gaussian Blur", elem_classes="tab-button"):
            with gr.Row():
                with gr.Column():
                    gr.Markdown("Apply Gaussian blur with adjustable kernel size.",
                                elem_classes=["input-label"])
                    # step=2 from 3 keeps the slider on odd values; the
                    # handler still coerces even values defensively.
                    blur_kernel = gr.Slider(3, 21, value=5, step=2, label="Kernel Size")
                    blur_button = gr.Button("Apply Blur", elem_classes="btn-primary")
                with gr.Column():
                    blur_output = gr.Image(label="Blurred Image")
            blur_button.click(fn=process_blur,
                              inputs=[image_input, blur_kernel],
                              outputs=blur_output)

        with gr.TabItem("Face Detection", elem_classes="tab-button"):
            with gr.Row():
                with gr.Column():
                    gr.Markdown("Detect faces using Haar Cascade.",
                                elem_classes=["input-label"])
                    face_button = gr.Button("Detect Faces", elem_classes="btn-primary")
                with gr.Column():
                    face_output = gr.Image(label="Faces Detected")
            face_button.click(fn=process_face_detection, inputs=image_input,
                              outputs=face_output)

        with gr.TabItem("Color Space Conversion", elem_classes="tab-button"):
            with gr.Row():
                with gr.Column():
                    gr.Markdown("Convert between RGB, HSV, and LAB color spaces.",
                                elem_classes=["input-label"])
                    color_space = gr.Dropdown(choices=["RGB", "HSV", "LAB"],
                                              label="Color Space", value="RGB")
                    color_button = gr.Button("Apply Conversion", elem_classes="btn-primary")
                with gr.Column():
                    color_output = gr.Image(label="Converted Image")
            color_button.click(fn=process_color_space,
                               inputs=[image_input, color_space],
                               outputs=color_output)

if __name__ == "__main__":
    demo.launch()
|