import gradio as gr
import torch
import cv2
import numpy as np
from PIL import Image
from transformers import DPTForDepthEstimation, DPTFeatureExtractor

# Load depth estimation model
model_name = "Intel/dpt-large"
feature_extractor = DPTFeatureExtractor.from_pretrained(model_name)
depth_model = DPTForDepthEstimation.from_pretrained(model_name)
depth_model.eval()
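
# Optional (an assumption, not part of the original app): run inference on GPU
# when one is available. estimate_depth would then also need to move its
# tensors to the same device, e.g. inputs = {k: v.to(device) for k, v in inputs.items()}.
# device = "cuda" if torch.cuda.is_available() else "cpu"
# depth_model.to(device)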

def estimate_depth(image):
    """Estimate a depth map from a PIL image, returned as uint8 at the input size."""
    image = image.convert("RGB")
    orig_w, orig_h = image.size
    resized = image.resize((384, 384))  # DPT-Large expects 384x384 input
    inputs = feature_extractor(images=resized, return_tensors="pt")
    with torch.no_grad():
        outputs = depth_model(**inputs)
        depth = outputs.predicted_depth.squeeze().cpu().numpy()
    depth = cv2.resize(depth, (orig_w, orig_h))  # Resize back to the original size
    # Normalize to 0-255, guarding against a flat depth map (division by zero)
    depth_range = depth.max() - depth.min()
    if depth_range == 0:
        return np.zeros_like(depth, dtype=np.uint8)
    depth = (depth - depth.min()) / depth_range * 255
    return depth.astype(np.uint8)
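
# Quick standalone check of the depth estimator (hypothetical file names,
# for illustration only):
#   depth = estimate_depth(Image.open("tshirt.jpg"))
#   Image.fromarray(depth).save("tshirt_depth.png")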

def blend_design(cloth_img, design_img):
    """Blend design onto clothing naturally with fold adaptation."""
    cloth_img = cloth_img.convert("RGB")
    design_img = design_img.convert("RGBA")
    cloth_np = np.array(cloth_img)
    design_np = np.array(design_img)
    
    # Resize the design so it spans about 40% of the clothing's limiting dimension
    h, w, _ = cloth_np.shape
    dh, dw, _ = design_np.shape
    scale_factor = min(w / dw, h / dh) * 0.4
    new_w, new_h = int(dw * scale_factor), int(dh * scale_factor)
    design_np = cv2.resize(design_np, (new_w, new_h), interpolation=cv2.INTER_AREA)

    # Split the design into color and alpha (transparency) channels
    alpha_channel = design_np[:, :, 3:4] / 255.0
    design_rgb = design_np[:, :, :3].astype(np.float32)

    # Composite the design onto the clothing: horizontally centered, and slightly
    # above the vertical midpoint for a natural chest placement
    x_offset = (w - new_w) // 2
    y_offset = int(h * 0.35)

    region = cloth_np[y_offset:y_offset+new_h, x_offset:x_offset+new_w].astype(np.float32)
    blended = region * (1 - alpha_channel) + design_rgb * alpha_channel
    cloth_np[y_offset:y_offset+new_h, x_offset:x_offset+new_w] = blended.astype(np.uint8)
    
    # Estimate depth to locate fabric folds (returned at the clothing image's size)
    depth_map = estimate_depth(cloth_img)
    
    # Generate displacement map based on depth
    displacement_x = cv2.Sobel(depth_map, cv2.CV_32F, 1, 0, ksize=5)
    displacement_y = cv2.Sobel(depth_map, cv2.CV_32F, 0, 1, ksize=5)
    displacement_x = cv2.normalize(displacement_x, None, -5, 5, cv2.NORM_MINMAX)
    displacement_y = cv2.normalize(displacement_y, None, -5, 5, cv2.NORM_MINMAX)
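    # Note: the Sobel responses are strongest across fold edges; rescaling them
    # to roughly +/-5 px keeps the warp subtle, so flat regions barely move
    # while creased regions shift the most.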
    
    # Warp the composited image with the displacement field so the pasted design
    # follows the fabric folds (the whole frame is remapped to keep alignment)
    map_x, map_y = np.meshgrid(np.arange(w), np.arange(h))
    map_x = np.clip(np.float32(map_x + displacement_x), 0, w - 1)
    map_y = np.clip(np.float32(map_y + displacement_y), 0, h - 1)
    warped_cloth = cv2.remap(cloth_np, map_x, map_y, interpolation=cv2.INTER_LINEAR, borderMode=cv2.BORDER_REFLECT)
    
    return Image.fromarray(warped_cloth)
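
# Scripted usage without the UI (hypothetical file names, for illustration):
#   result = blend_design(Image.open("tshirt.jpg"), Image.open("logo.png"))
#   result.save("tshirt_with_logo.jpg")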

def main(cloth, design):
    return blend_design(cloth, design)

iface = gr.Interface(
    fn=main,
    inputs=[gr.Image(type="pil", label="Clothing image"), gr.Image(type="pil", label="Design (PNG with transparency works best)")],
    outputs=gr.Image(type="pil"),
    title="AI Cloth Design Warping",
    description="Upload a clothing image and a design. The design is centered on the garment and warped with a depth-based displacement map so it follows the fabric folds."
)

if __name__ == "__main__":
    iface.launch(share=True)
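
# Note: share=True additionally exposes the app through a temporary public
# Gradio link; remove the flag to serve locally only.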