import gradio as gr
import cv2
import numpy as np
from skimage.metrics import structural_similarity as ssim
def preprocess_image(image, blur_value):
    # Convert to grayscale
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # Apply Gaussian blur to reduce noise (kernel size must be an odd integer)
    k = int(blur_value)
    if k % 2 == 0:
        k += 1
    blurred = cv2.GaussianBlur(gray, (k, k), 0)
    return blurred
def compare_images(image1, image2, blur_value, technique, threshold_value):
    # Guard against missing inputs (live mode fires before both images are uploaded)
    if image1 is None or image2 is None:
        return None, None
    # Preprocess images
    gray1 = preprocess_image(image1, blur_value)
    gray2 = preprocess_image(image2, blur_value)
    # Compute SSIM between the two images
    score, diff = ssim(gray1, gray2, full=True)
    diff = (diff * 255).astype("uint8")
    # diff is bright where the images agree, so use inverted thresholds to mark differences
    if technique == "Adaptive Threshold":
        # Fixed low cutoff on the SSIM difference map
        _, thresh = cv2.threshold(diff, 30, 255, cv2.THRESH_BINARY_INV)
    elif technique == "Otsu's Threshold":
        _, thresh = cv2.threshold(diff, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)
    else:  # Simple Binary
        _, thresh = cv2.threshold(diff, threshold_value, 255, cv2.THRESH_BINARY_INV)
    # Find contours of differences
    contours, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    # Filter out small noise using a contour area threshold
    filtered_contours = [cnt for cnt in contours if cv2.contourArea(cnt) > 500]
    # Create a mask to isolate only the significant added object
    mask = np.zeros_like(image1)
    cv2.drawContours(mask, filtered_contours, -1, (255, 255, 255), thickness=cv2.FILLED)
    # Apply the mask to highlight the object added in the second image
    highlighted = cv2.bitwise_and(image2, mask)
    # Overlay the differences in magenta over the first image: invert diff so changed
    # regions are bright, then zero the green channel (red + blue = magenta)
    diff_colored = cv2.cvtColor(255 - diff, cv2.COLOR_GRAY2BGR)
    diff_colored[:, :, 1] = 0
    overlayed = cv2.addWeighted(image1, 0.7, diff_colored, 0.3, 0)
    return highlighted, overlayed
def update_threshold_visibility(technique):
    return gr.update(visible=(technique == "Simple Binary"))
# Named input components so the visibility toggle below can reference them directly
technique_dropdown = gr.Dropdown(
    ["Adaptive Threshold", "Otsu's Threshold", "Simple Binary"],
    label="Thresholding Technique", value="Adaptive Threshold", interactive=True
)
threshold_slider = gr.Slider(minimum=0, maximum=255, step=1, value=50,
                             label="Threshold Value", visible=False)

demo = gr.Interface(
    fn=compare_images,
    inputs=[
        gr.Image(type="numpy", label="Image Without Object"),
        gr.Image(type="numpy", label="Image With Object"),
        gr.Slider(minimum=1, maximum=15, step=2, value=5, label="Gaussian Blur"),
        technique_dropdown,
        threshold_slider,
    ],
    outputs=[
        gr.Image(type="numpy", label="Highlighted Differences"),
        gr.Image(type="numpy", label="Raw Difference Overlay (Magenta)"),
    ],
    title="Object Difference Highlighter",
    description="Upload two images: one without an object and one with it. The app highlights only the newly added object and shows the raw differences in magenta overlaid on the original image.",
    live=True,
)

# Show the manual threshold slider only when "Simple Binary" is selected:
# re-enter the Blocks context to attach a change event to the dropdown
with demo:
    technique_dropdown.change(
        update_threshold_visibility,
        inputs=technique_dropdown,
        outputs=threshold_slider,
    )
demo.launch()
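# Hypothetical quick check of compare_images outside the UI (a sketch, not part of the
# app): draw a rectangle onto a blank frame as the "added object" and inspect the two
# returned images with any viewer.
#
#   base = np.zeros((200, 200, 3), dtype=np.uint8)
#   modified = base.copy()
#   cv2.rectangle(modified, (50, 50), (120, 120), (0, 255, 0), thickness=-1)
#   highlighted, overlay = compare_images(base, modified, blur_value=5,
#                                         technique="Otsu's Threshold", threshold_value=50)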