File size: 5,292 Bytes
0e27905
d6888a9
 
 
0e27905
176f783
9e601d1
 
 
176f783
9e601d1
 
0b7c7f6
 
 
 
 
df8b5d4
 
0b7c7f6
 
 
 
 
 
 
 
 
 
df8b5d4
 
0b7c7f6
 
 
 
 
 
 
 
 
df8b5d4
 
0b7c7f6
 
 
df8b5d4
0b7c7f6
df8b5d4
0b7c7f6
df8b5d4
0b7c7f6
176f783
 
d6888a9
 
 
176f783
 
 
 
0b7c7f6
24647ae
d6888a9
 
9e601d1
ae2249b
9e601d1
d6888a9
 
ae2249b
0b7c7f6
 
 
 
176f783
ef88f4b
 
 
 
24647ae
 
 
d6888a9
b883a76
 
 
 
 
 
 
ef88f4b
b883a76
 
0b7c7f6
b883a76
 
 
0b7c7f6
 
 
b883a76
df8b5d4
ef88f4b
 
df8b5d4
b883a76
ef88f4b
24647ae
0b7c7f6
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
import gradio as gr
import cv2
import numpy as np
from skimage.metrics import structural_similarity as ssim

def preprocess_image(image, blur_value):
    # Convert to grayscale
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # Apply Gaussian blur to reduce noise
    blurred = cv2.GaussianBlur(gray, (blur_value, blur_value), 0)
    return blurred

def background_subtraction(image1, image2):
    subtractor = cv2.createBackgroundSubtractorMOG2()
    fgmask1 = subtractor.apply(image1)
    fgmask2 = subtractor.apply(image2)
    diff = cv2.absdiff(fgmask1, fgmask2)
    unchanged = cv2.bitwise_and(image1, image1, mask=255 - diff)
    return cv2.cvtColor(diff, cv2.COLOR_GRAY2BGR), unchanged

def optical_flow(image1, image2):
    gray1 = cv2.cvtColor(image1, cv2.COLOR_BGR2GRAY)
    gray2 = cv2.cvtColor(image2, cv2.COLOR_BGR2GRAY)
    flow = cv2.calcOpticalFlowFarneback(gray1, gray2, None, 0.5, 3, 15, 3, 5, 1.2, 0)
    mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
    hsv = np.zeros_like(image1)
    hsv[..., 1] = 255
    hsv[..., 0] = ang * 180 / np.pi / 2
    hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
    unchanged = cv2.bitwise_and(image1, image1, mask=255 - hsv[..., 2].astype(np.uint8))
    return cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR), unchanged

def feature_matching(image1, image2):
    orb = cv2.ORB_create()
    kp1, des1 = orb.detectAndCompute(image1, None)
    kp2, des2 = orb.detectAndCompute(image2, None)
    bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
    matches = bf.match(des1, des2)
    matches = sorted(matches, key=lambda x: x.distance)
    result = cv2.drawMatches(image1, kp1, image2, kp2, matches[:20], None, flags=cv2.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)
    unchanged = cv2.addWeighted(image1, 0.7, image2, 0.3, 0)
    return result, unchanged

def compare_images(image1, image2, blur_value, technique, threshold_value, method):
    if method == "Background Subtraction":
        return background_subtraction(image1, image2)
    elif method == "Optical Flow":
        return optical_flow(image1, image2)
    elif method == "Feature Matching":
        return feature_matching(image1, image2)
    
    gray1 = preprocess_image(image1, blur_value)
    gray2 = preprocess_image(image2, blur_value)
    score, diff = ssim(gray1, gray2, full=True)
    diff = (diff * 255).astype("uint8")
    
    if technique == "Adaptive Threshold":
        _, thresh = cv2.threshold(diff, 30, 255, cv2.THRESH_BINARY_INV)
    elif technique == "Otsu's Threshold":
        _, thresh = cv2.threshold(diff, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)
    else:
        _, thresh = cv2.threshold(diff, threshold_value, 255, cv2.THRESH_BINARY)
    
    contours, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    filtered_contours = [cnt for cnt in contours if cv2.contourArea(cnt) > 500]
    mask = np.zeros_like(image1, dtype=np.uint8)
    cv2.drawContours(mask, filtered_contours, -1, (255, 255, 255), thickness=cv2.FILLED)
    highlighted = cv2.bitwise_and(image2, mask)
    
    diff_colored = np.zeros_like(image1, dtype=np.uint8)
    diff_colored[:, :, 0] = 0
    diff_colored[:, :, 1] = 0
    diff_colored[:, :, 2] = thresh
    overlayed = cv2.addWeighted(image1, 0.7, diff_colored, 0.6, 0)
    
    blended_with_object = cv2.addWeighted(image1, 0.4, image2, 0.6, 0)
    blended_without_object = cv2.addWeighted(image1, 0.6, image2, 0.4, 0)
    
    return highlighted, overlayed, blended_with_object, blended_without_object

def update_threshold_visibility(technique):
    return gr.update(visible=(technique == "Simple Binary"))

with gr.Blocks() as demo:
    gr.Markdown("# Object Difference Highlighter\nUpload two images: one without an object and one with an object. The app will highlight only the newly added object and show the real differences in magenta overlayed on the original image.")
    
    with gr.Row():
        img1 = gr.Image(type="numpy", label="Image Without Object")
        img2 = gr.Image(type="numpy", label="Image With Object")
    
    blur_slider = gr.Slider(minimum=1, maximum=15, step=2, value=5, label="Gaussian Blur")
    technique_dropdown = gr.Dropdown(["Adaptive Threshold", "Otsu's Threshold", "Simple Binary"], label="Thresholding Technique", value="Adaptive Threshold", interactive=True)
    threshold_slider = gr.Slider(minimum=0, maximum=255, step=1, value=50, label="Threshold Value", visible=False)
    method_dropdown = gr.Dropdown(["SSIM", "Background Subtraction", "Optical Flow", "Feature Matching"], label="Comparison Method", value="SSIM", interactive=True)
    
    technique_dropdown.change(update_threshold_visibility, inputs=[technique_dropdown], outputs=[threshold_slider])
    
    with gr.Row():
        output1 = gr.Image(type="numpy", label="Highlighted Differences")
        output2 = gr.Image(type="numpy", label="Raw Difference Overlay (Magenta)")
    
    with gr.Row():
        output3 = gr.Image(type="numpy", label="Blended with Object")
        output4 = gr.Image(type="numpy", label="Blended without Object")
    
    btn = gr.Button("Process")
    btn.click(compare_images, inputs=[img1, img2, blur_slider, technique_dropdown, threshold_slider, method_dropdown], outputs=[output1, output2, output3, output4])

demo.launch()