Update app.py
app.py CHANGED
```diff
@@ -14,13 +14,12 @@ depth_model.eval()
 
 def estimate_depth(image):
     """Estimate depth map from image."""
-    image = image.convert("RGB")
-    image = image.resize((384, 384))  # Resize for model input
+    image = image.convert("RGB").resize((384, 384))  # Resize for model input
     inputs = feature_extractor(images=image, return_tensors="pt")
     with torch.no_grad():
         outputs = depth_model(**inputs)
     depth = outputs.predicted_depth.squeeze().cpu().numpy()
-    depth = cv2.resize(depth, (image.width, image.height))  # Resize back
+    depth = cv2.resize(depth, (image.width, image.height))  # Resize back
     depth = (depth - depth.min()) / (depth.max() - depth.min()) * 255
     return depth.astype(np.uint8)
 
@@ -30,21 +29,21 @@ def warp_design(cloth_img, design_img):
     design_img = design_img.convert("RGB")
     cloth_np = np.array(cloth_img)
     design_np = np.array(design_img)
-
-    # Ensure design fits within the center of the clothing
     h, w, _ = cloth_np.shape
     dh, dw, _ = design_np.shape
-
+
+    # Resize design to fit within 70% of the clothing area
+    scale_factor = min(w / dw, h / dh) * 0.7
     new_w, new_h = int(dw * scale_factor), int(dh * scale_factor)
     design_np = cv2.resize(design_np, (new_w, new_h))
 
-    # Create
-    design_canvas = np.zeros_like(cloth_np)
+    # Create blank canvas with transparent background
+    design_canvas = np.zeros_like(cloth_np, dtype=np.uint8)
     x_offset = (w - new_w) // 2
     y_offset = (h - new_h) // 2
     design_canvas[y_offset:y_offset+new_h, x_offset:x_offset+new_w] = design_np
 
-    # Estimate depth
+    # Estimate depth map
     depth_map = estimate_depth(cloth_img)
     depth_map = cv2.resize(depth_map, (w, h))
 
@@ -52,18 +51,19 @@ def warp_design(cloth_img, design_img):
     displacement_x = cv2.Sobel(depth_map, cv2.CV_32F, 1, 0, ksize=5)
     displacement_y = cv2.Sobel(depth_map, cv2.CV_32F, 0, 1, ksize=5)
 
-
-
-    displacement_y = cv2.normalize(displacement_y, None, -5, 5, cv2.NORM_MINMAX)
+    displacement_x = cv2.normalize(displacement_x, None, -3, 3, cv2.NORM_MINMAX)
+    displacement_y = cv2.normalize(displacement_y, None, -3, 3, cv2.NORM_MINMAX)
 
-    # Warp design using displacement map
     map_x, map_y = np.meshgrid(np.arange(w), np.arange(h))
     map_x = np.clip(np.float32(map_x + displacement_x), 0, w - 1)
     map_y = np.clip(np.float32(map_y + displacement_y), 0, h - 1)
     warped_design = cv2.remap(design_canvas, map_x, map_y, interpolation=cv2.INTER_LINEAR, borderMode=cv2.BORDER_REFLECT)
 
     # Blend images without excessive transparency
-
+    mask = np.any(warped_design > 0, axis=-1).astype(np.uint8) * 255
+    blended = cloth_np.copy()
+    np.copyto(blended, warped_design, where=(mask[..., None] > 0))
+
     return Image.fromarray(blended)
 
 def main(cloth, design):
@@ -78,4 +78,4 @@ iface = gr.Interface(
 )
 
 if __name__ == "__main__":
-    iface.launch(share=True)
+    iface.launch(share=True)
```
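As a quick sanity check of the new blending step: wherever the warped design has any non-black pixel, it now overwrites the cloth pixel outright instead of being mixed in. A minimal sketch of that masking logic on toy arrays (shapes and values are made up for illustration; only NumPy is needed):

```python
import numpy as np

# Toy 2x2 "cloth" and "warped design" patches (RGB, uint8).
cloth = np.full((2, 2, 3), 200, dtype=np.uint8)
design = np.zeros((2, 2, 3), dtype=np.uint8)
design[0, 0] = (255, 0, 0)  # one non-black design pixel

# Same masking logic as the new blending code above:
mask = np.any(design > 0, axis=-1).astype(np.uint8) * 255
blended = cloth.copy()
np.copyto(blended, design, where=(mask[..., None] > 0))

print(blended[0, 0])  # [255   0   0] -> design pixel pasted fully opaque
print(blended[1, 1])  # [200 200 200] -> cloth left untouched
```

Note that a pure-black design pixel ((0, 0, 0)) is treated as background by this mask, so fully black regions of the design are not pasted onto the cloth.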