Update app.py
app.py CHANGED
@@ -5,7 +5,6 @@ import numpy as np
 from torchvision import transforms
 from PIL import Image
 from transformers import DPTForDepthEstimation, DPTFeatureExtractor
-import torchvision.transforms.functional as F
 
 # Load depth estimation model
 model_name = "Intel/dpt-large"
@@ -15,39 +14,53 @@ depth_model.eval()
 
 def estimate_depth(image):
     """Estimate depth map from image."""
-    image = image.convert("RGB")
+    image = image.convert("RGB")
+    image = image.resize((384, 384))  # Resize for model input
     inputs = feature_extractor(images=image, return_tensors="pt")
     with torch.no_grad():
         outputs = depth_model(**inputs)
     depth = outputs.predicted_depth.squeeze().cpu().numpy()
-    depth = cv2.resize(depth, (image.width, image.height))  # Resize back
+    depth = cv2.resize(depth, (image.width, image.height))  # Resize back to original
     depth = (depth - depth.min()) / (depth.max() - depth.min()) * 255
     return depth.astype(np.uint8)
 
-def
-    """
+def blend_design(cloth_img, design_img):
+    """Blend design onto clothing naturally."""
     cloth_img = cloth_img.convert("RGB")
     design_img = design_img.convert("RGB")
     cloth_np = np.array(cloth_img)
     design_np = np.array(design_img)
+
+    # Resize design to fit within clothing
     h, w, _ = cloth_np.shape
+    dh, dw, _ = design_np.shape
+    scale_factor = min(w / dw, h / dh) * 0.6  # Scale to 60% of clothing area
+    new_w, new_h = int(dw * scale_factor), int(dh * scale_factor)
+    design_np = cv2.resize(design_np, (new_w, new_h))
+
+    # Convert design to grayscale and darken
+    design_gray = cv2.cvtColor(design_np, cv2.COLOR_RGB2GRAY)
+    design_np = cv2.cvtColor(design_gray, cv2.COLOR_GRAY2RGB)
+    design_np = cv2.convertScaleAbs(design_np, alpha=1.2, beta=-30)  # Increase contrast
 
-    #
+    # Create a blank canvas and paste the resized design at the center
+    design_canvas = np.zeros_like(cloth_np)
+    x_offset = (w - new_w) // 2
+    y_offset = (h - new_h) // 2
+    design_canvas[y_offset:y_offset+new_h, x_offset:x_offset+new_w] = design_np
+
+    # Estimate depth for fold detection
     depth_map = estimate_depth(cloth_img)
     depth_map = cv2.resize(depth_map, (w, h))
 
-    #
-
-
-    warped_design = cv2.remap(design_np, flow_map, None, cv2.INTER_LINEAR, borderMode=cv2.BORDER_REFLECT)
-
-    # Blending
-    blended = cv2.addWeighted(cloth_np, 0.7, warped_design, 0.3, 0)
+    # Use Poisson blending for seamless integration
+    mask = (design_canvas > 0).astype(np.uint8) * 255
+    blended = cv2.seamlessClone(design_canvas, cloth_np, mask, (w//2, h//2), cv2.NORMAL_CLONE)
 
     return Image.fromarray(blended)
 
 def main(cloth, design):
-    return
+    return blend_design(cloth, design)
 
 iface = gr.Interface(
     fn=main,
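The scale-and-center placement added in `blend_design` is easiest to check with concrete numbers. Here is the same arithmetic rerun on made-up sizes (a 900×1200 cloth photo and a 300×400 design; both hypothetical):

```python
import numpy as np
import cv2

# Hypothetical sizes: a 900x1200 cloth photo and a 300x400 design (height x width).
cloth_np = np.zeros((900, 1200, 3), dtype=np.uint8)
design_np = np.full((300, 400, 3), 200, dtype=np.uint8)

h, w, _ = cloth_np.shape     # 900, 1200
dh, dw, _ = design_np.shape  # 300, 400

# Same rule as the diff: fit the design inside the cloth, then take 60% of that.
scale_factor = min(w / dw, h / dh) * 0.6                       # min(3.0, 3.0) * 0.6 = 1.8
new_w, new_h = int(dw * scale_factor), int(dh * scale_factor)  # 720, 540
design_np = cv2.resize(design_np, (new_w, new_h))

# Center the design on a cloth-sized black canvas.
x_offset = (w - new_w) // 2  # 240
y_offset = (h - new_h) // 2  # 180
design_canvas = np.zeros_like(cloth_np)
design_canvas[y_offset:y_offset + new_h, x_offset:x_offset + new_w] = design_np
```

The `(w, h)` versus `(h, w)` asymmetry is the main trap here: NumPy shapes are `(height, width, channels)` while `cv2.resize` takes `(width, height)`, which is why the diff reads `h, w, _` from `.shape` but passes `(new_w, new_h)` to `cv2.resize`.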
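The removed code warped the design with `cv2.remap` and a `flow_map` (the lines that built it did not survive in this view), and the new version still computes `depth_map` "for fold detection" without ever using it. Purely as an illustration of how depth could drive such a warp, and not a reconstruction of the original implementation, a displacement field from depth gradients might look like this (`strength` is an illustrative knob, not a value from the app):

```python
import numpy as np
import cv2

def warp_to_folds(design_canvas, depth_map, strength=6.0):
    """Hypothetical sketch: displace the design along depth gradients so it
    appears to follow the folds of the garment."""
    h, w = depth_map.shape
    gx = cv2.Sobel(depth_map, cv2.CV_32F, 1, 0, ksize=5)
    gy = cv2.Sobel(depth_map, cv2.CV_32F, 0, 1, ksize=5)
    norm = max(np.abs(gx).max(), np.abs(gy).max(), 1.0)  # avoid division by zero
    xs, ys = np.meshgrid(np.arange(w, dtype=np.float32),
                         np.arange(h, dtype=np.float32))
    map_x = xs + strength * gx / norm
    map_y = ys + strength * gy / norm
    return cv2.remap(design_canvas, map_x, map_y, cv2.INTER_LINEAR,
                     borderMode=cv2.BORDER_REFLECT)
```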
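The diff's visible window stops inside the `gr.Interface(...)` call; the remaining arguments are unchanged context and simply not shown. For orientation only, a typical wiring for a two-image Gradio app of this shape could look like the following. The input/output components, labels, and the `launch()` line are assumptions, not part of the commit:

```python
import gradio as gr

iface = gr.Interface(
    fn=main,
    # Hypothetical components: two PIL image inputs, one PIL image output.
    inputs=[gr.Image(type="pil", label="Clothing photo"),
            gr.Image(type="pil", label="Design")],
    outputs=gr.Image(type="pil", label="Blended result"),
)

if __name__ == "__main__":
    iface.launch()
```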