Upload app.py
app.py CHANGED
@@ -10,50 +10,92 @@ from transformers import pipeline
# Load the depth estimation pipeline
pipe = pipeline(task="depth-estimation", model="depth-anything/Depth-Anything-V2-Small-hf")

-
+
+def apply_depth_aware_blur(
+    image,
+    foreground_blur,
+    midground_blur,
+    background_blur,
+    foreground_threshold,
+    midground_lower,
+    midground_upper,
+    background_threshold,
+):
    original_image = Image.fromarray(image).convert("RGB")
    original_image = original_image.resize((512, 512))
    image_np = np.array(original_image)

    # Inference
    depth = pipe(original_image)["depth"]
-    depth = np.array(depth)
-    depth = cv2.resize(depth, (512, 512), interpolation=cv2.INTER_CUBIC)
-
+    depth = np.array(depth)  # Convert to numpy array
+    depth = cv2.resize(depth, (512, 512), interpolation=cv2.INTER_CUBIC)  # Resize depth map
+
    # Normalize the depth map
    normalized_depth_map = (depth - np.min(depth)) / (np.max(depth) - np.min(depth))

    # Create masks
    foreground_mask = (normalized_depth_map < foreground_threshold).astype(np.uint8) * 255
-    midground_mask = (
-
+    midground_mask = (
+        (normalized_depth_map >= midground_lower)
+        & (normalized_depth_map < midground_upper)
+    ).astype(np.uint8) * 255
+    background_mask = (normalized_depth_map >= background_threshold).astype(
+        np.uint8
+    ) * 255

    blurred_image = np.copy(np.array(original_image))

    if foreground_blur > 0:
-        blurred_image = np.where(
+        blurred_image = np.where(
+            (foreground_mask[..., None] == 255),
+            cv2.GaussianBlur(blurred_image, (foreground_blur, foreground_blur), 10),
+            blurred_image,
+        )
    if midground_blur > 0:
-        blurred_image = np.where(
+        blurred_image = np.where(
+            (midground_mask[..., None] == 255),
+            cv2.GaussianBlur(blurred_image, (midground_blur, midground_blur), 8),
+            blurred_image,
+        )
    if background_blur > 0:
-        blurred_image = np.where(
+        blurred_image = np.where(
+            (background_mask[..., None] == 255),
+            cv2.GaussianBlur(blurred_image, (background_blur, background_blur), 20),
+            blurred_image,
+        )

    return Image.fromarray(blurred_image.astype(np.uint8))

+
+# Example input values (including defaults)
+example_image = np.zeros((512, 512, 3), dtype=np.uint8)  # Placeholder for an image
+example_inputs = [
+    example_image,
+    0,  # foreground_blur
+    0,  # midground_blur
+    35,  # background_blur (default)
+    0.2,  # foreground_threshold (default)
+    0.2,  # midground_lower (default)
+    0.6,  # midground_upper (default)
+    0.6,  # background_threshold (default)
+]
+
iface = gr.Interface(
    fn=apply_depth_aware_blur,
    inputs=[
        gr.Image(label="Input Image"),
-        gr.Slider(0, 51, step=2, label="Foreground Blur Kernel Size"
-        gr.Slider(0, 51, step=2, label="Midground Blur Kernel Size"
-        gr.Slider(0, 51, step=2, label="Background Blur Kernel Size"
-        gr.Slider(0, 1, label="Foreground Threshold"
-        gr.Slider(0, 1, label="Midground Lower Threshold"
-        gr.Slider(0, 1, label="Midground Upper Threshold"
-        gr.Slider(0, 1, label="Background Threshold",
+        gr.Slider(0, 51, step=2, label="Foreground Blur Kernel Size"),
+        gr.Slider(0, 51, step=2, label="Midground Blur Kernel Size"),
+        gr.Slider(0, 51, step=2, label="Background Blur Kernel Size"),
+        gr.Slider(0, 1, label="Foreground Threshold"),
+        gr.Slider(0, 1, label="Midground Lower Threshold"),
+        gr.Slider(0, 1, label="Midground Upper Threshold"),
+        gr.Slider(0, 1, label="Background Threshold"),
    ],
    outputs=gr.Image(label="Blurred Image"),
    title="Depth-Aware Lens Blur App",
    description="Apply depth-based blur to uploaded images using Depth Anything V2. Adjust blur intensity for foreground, midground, and background.",
+    examples=[example_inputs],  # Provide example inputs
)

if __name__ == "__main__":
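
For quick local testing outside the Gradio UI, the function in the diff can be driven directly. The sketch below is illustrative and not part of the commit: the import path, file names, and parameter values are assumptions, with the numeric values taken from the example_inputs defaults above.

# Illustrative driver for apply_depth_aware_blur (not part of the commit).
# Assumes app.py is importable; importing it also loads the depth pipeline.
import numpy as np
from PIL import Image

from app import apply_depth_aware_blur  # function defined in the diff above

# "photo.jpg" is a placeholder path; any RGB image works.
image = np.array(Image.open("photo.jpg").convert("RGB"))

blurred = apply_depth_aware_blur(
    image,
    foreground_blur=0,        # no blur applied to the foreground band
    midground_blur=0,
    background_blur=35,       # cv2.GaussianBlur needs an odd kernel size
    foreground_threshold=0.2,
    midground_lower=0.2,
    midground_upper=0.6,
    background_threshold=0.6,
)
blurred.save("blurred.jpg")   # result is a 512x512 PIL Image

With these thresholds the masks partition the normalized depth map exactly as the code does: values below 0.2 fall in the foreground mask, values from 0.2 up to (but not including) 0.6 in the midground mask, and values of 0.6 and above in the background mask.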