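"""Object Difference Highlighter.

Gradio app that compares two images of the same scene, one without an object and
one with it, and highlights what changed using SSIM, background subtraction,
optical flow, or ORB feature matching.
"""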
import gradio as gr
import cv2
import numpy as np
from skimage.metrics import structural_similarity as ssim
def preprocess_image(image, blur_value):
    # Convert to grayscale
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # Apply Gaussian blur to reduce noise (the kernel size must be an odd integer)
    k = int(blur_value)
    if k % 2 == 0:
        k += 1
    blurred = cv2.GaussianBlur(gray, (k, k), 0)
    return blurred
def background_subtraction(image1, image2):
    # MOG2 builds its background model frame by frame, so feed both images in order
    subtractor = cv2.createBackgroundSubtractorMOG2()
    fgmask1 = subtractor.apply(image1)
    fgmask2 = subtractor.apply(image2)
    diff = cv2.absdiff(fgmask1, fgmask2)
    # Keep the pixels that did not change between the two foreground masks
    unchanged = cv2.bitwise_and(image1, image1, mask=255 - diff)
    return cv2.cvtColor(diff, cv2.COLOR_GRAY2BGR), unchanged
def optical_flow(image1, image2):
    gray1 = cv2.cvtColor(image1, cv2.COLOR_BGR2GRAY)
    gray2 = cv2.cvtColor(image2, cv2.COLOR_BGR2GRAY)
    # Dense Farneback optical flow between the two frames
    flow = cv2.calcOpticalFlowFarneback(gray1, gray2, None, 0.5, 3, 15, 3, 5, 1.2, 0)
    mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
    # Encode flow direction as hue and flow magnitude as value
    hsv = np.zeros_like(image1)
    hsv[..., 1] = 255
    hsv[..., 0] = ang * 180 / np.pi / 2
    hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
    # Pixels with little motion are treated as unchanged
    unchanged = cv2.bitwise_and(image1, image1, mask=255 - hsv[..., 2].astype(np.uint8))
    return cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR), unchanged
def feature_matching(image1, image2):
    # ORB keypoints and binary descriptors for both images
    orb = cv2.ORB_create()
    kp1, des1 = orb.detectAndCompute(image1, None)
    kp2, des2 = orb.detectAndCompute(image2, None)
    # Blend the two inputs as the "unchanged" view
    unchanged = cv2.addWeighted(image1, 0.7, image2, 0.3, 0)
    if des1 is None or des2 is None:
        # No features detected in one of the images; nothing to match
        return unchanged, unchanged
    bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
    matches = sorted(bf.match(des1, des2), key=lambda x: x.distance)
    # Draw the 20 best matches side by side
    result = cv2.drawMatches(image1, kp1, image2, kp2, matches[:20], None, flags=cv2.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)
    return result, unchanged
def compare_images(image1, image2, blur_value, technique, threshold_value, method):
    # The alternative methods produce two images; pad with None so all four Gradio outputs are filled
    if method == "Background Subtraction":
        diff_img, unchanged = background_subtraction(image1, image2)
        return diff_img, unchanged, None, None
    elif method == "Optical Flow":
        flow_img, unchanged = optical_flow(image1, image2)
        return flow_img, unchanged, None, None
    elif method == "Feature Matching":
        matches_img, unchanged = feature_matching(image1, image2)
        return matches_img, unchanged, None, None
    # Default: SSIM on the preprocessed (grayscale, blurred) images
    gray1 = preprocess_image(image1, blur_value)
    gray2 = preprocess_image(image2, blur_value)
    score, diff = ssim(gray1, gray2, full=True)
    diff = (diff * 255).astype("uint8")
    # In the SSIM map, dissimilar regions have low values, so threshold with inversion
    if technique == "Adaptive Threshold":
        thresh = cv2.adaptiveThreshold(diff, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, 11, 2)
    elif technique == "Otsu's Threshold":
        _, thresh = cv2.threshold(diff, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)
    else:
        _, thresh = cv2.threshold(diff, threshold_value, 255, cv2.THRESH_BINARY_INV)
    # Keep only sizeable difference regions to suppress noise
    contours, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    filtered_contours = [cnt for cnt in contours if cv2.contourArea(cnt) > 500]
    mask = np.zeros_like(image1, dtype=np.uint8)
    cv2.drawContours(mask, filtered_contours, -1, (255, 255, 255), thickness=cv2.FILLED)
    highlighted = cv2.bitwise_and(image2, mask)
    # Magenta overlay of the raw thresholded differences on the original image
    diff_colored = np.zeros_like(image1, dtype=np.uint8)
    diff_colored[:, :, 0] = thresh
    diff_colored[:, :, 2] = thresh
    overlayed = cv2.addWeighted(image1, 0.7, diff_colored, 0.6, 0)
    blended_with_object = cv2.addWeighted(image1, 0.4, image2, 0.6, 0)
    blended_without_object = cv2.addWeighted(image1, 0.6, image2, 0.4, 0)
    return highlighted, overlayed, blended_with_object, blended_without_object
def update_threshold_visibility(technique):
    return gr.update(visible=(technique == "Simple Binary"))
with gr.Blocks() as demo:
    gr.Markdown("# Object Difference Highlighter\nUpload two images: one without an object and one with an object. The app will highlight only the newly added object and show the real differences in magenta overlaid on the original image.")
    with gr.Row():
        img1 = gr.Image(type="numpy", label="Image Without Object")
        img2 = gr.Image(type="numpy", label="Image With Object")
    blur_slider = gr.Slider(minimum=1, maximum=15, step=2, value=5, label="Gaussian Blur")
    technique_dropdown = gr.Dropdown(["Adaptive Threshold", "Otsu's Threshold", "Simple Binary"], label="Thresholding Technique", value="Adaptive Threshold", interactive=True)
    threshold_slider = gr.Slider(minimum=0, maximum=255, step=1, value=50, label="Threshold Value", visible=False)
    method_dropdown = gr.Dropdown(["SSIM", "Background Subtraction", "Optical Flow", "Feature Matching"], label="Comparison Method", value="SSIM", interactive=True)
    technique_dropdown.change(update_threshold_visibility, inputs=[technique_dropdown], outputs=[threshold_slider])
    with gr.Row():
        output1 = gr.Image(type="numpy", label="Highlighted Differences")
        output2 = gr.Image(type="numpy", label="Raw Difference Overlay (Magenta)")
    with gr.Row():
        output3 = gr.Image(type="numpy", label="Blended with Object")
        output4 = gr.Image(type="numpy", label="Blended without Object")
    btn = gr.Button("Process")
    btn.click(compare_images, inputs=[img1, img2, blur_slider, technique_dropdown, threshold_slider, method_dropdown], outputs=[output1, output2, output3, output4])

demo.launch()
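# Run locally with `python app.py`; Gradio prints a local URL (http://127.0.0.1:7860 by default).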