import gradio as gr
import torch
import cv2
import numpy as np
from torchvision import transforms
from PIL import Image
from transformers import DPTForDepthEstimation, DPTFeatureExtractor
# Load depth estimation model
model_name = "Intel/dpt-large"
feature_extractor = DPTFeatureExtractor.from_pretrained(model_name)
depth_model = DPTForDepthEstimation.from_pretrained(model_name)
depth_model.eval()
def estimate_depth(image):
    """Estimate a normalized depth map from a PIL image."""
    image = image.convert("RGB")
    orig_w, orig_h = image.size
    # The feature extractor resizes the image to the model's expected input size
    inputs = feature_extractor(images=image, return_tensors="pt")
    with torch.no_grad():
        outputs = depth_model(**inputs)
    depth = outputs.predicted_depth.squeeze().cpu().numpy()
    depth = cv2.resize(depth, (orig_w, orig_h))  # Resize back to the original image size
    depth = (depth - depth.min()) / (depth.max() - depth.min() + 1e-8) * 255
    return depth.astype(np.uint8)
def blend_design(cloth_img, design_img):
    """Blend design onto clothing naturally."""
    cloth_img = cloth_img.convert("RGB")
    design_img = design_img.convert("RGB")
    cloth_np = np.array(cloth_img)
    design_np = np.array(design_img)
    # Resize design to fit within the clothing image
    h, w, _ = cloth_np.shape
    dh, dw, _ = design_np.shape
    scale_factor = min(w / dw, h / dh) * 0.4  # Scale design to ~40% of the clothing's limiting dimension
    new_w, new_h = int(dw * scale_factor), int(dh * scale_factor)
    design_np = cv2.resize(design_np, (new_w, new_h))
    # Convert design to grayscale and boost contrast for a print-like effect
    design_gray = cv2.cvtColor(design_np, cv2.COLOR_RGB2GRAY)
    design_np = cv2.cvtColor(design_gray, cv2.COLOR_GRAY2RGB)
    design_np = cv2.convertScaleAbs(design_np, alpha=1.5, beta=-40)
    # Create a blank canvas and paste the resized design at the horizontal center
    design_canvas = np.zeros_like(cloth_np)
    x_offset = (w - new_w) // 2
    y_offset = int(h * 0.35)  # Slightly above center for a natural placement
    design_canvas[y_offset:y_offset + new_h, x_offset:x_offset + new_w] = design_np
    # Estimate depth for fold detection
    depth_map = estimate_depth(cloth_img)
    depth_map = cv2.resize(depth_map, (w, h))
    # Generate displacement map based on depth
    displacement_x = cv2.Sobel(depth_map, cv2.CV_32F, 1, 0, ksize=5)
    displacement_y = cv2.Sobel(depth_map, cv2.CV_32F, 0, 1, ksize=5)
    displacement_x = cv2.normalize(displacement_x, None, -3, 3, cv2.NORM_MINMAX)
    displacement_y = cv2.normalize(displacement_y, None, -3, 3, cv2.NORM_MINMAX)
    # Warp design using displacement map
    map_x, map_y = np.meshgrid(np.arange(w), np.arange(h))
    map_x = np.clip(np.float32(map_x + displacement_x), 0, w - 1)
    map_y = np.clip(np.float32(map_y + displacement_y), 0, h - 1)
    warped_design = cv2.remap(design_canvas, map_x, map_y,
                              interpolation=cv2.INTER_LINEAR, borderMode=cv2.BORDER_REFLECT)
    # Use Poisson blending for seamless integration
    mask = (warped_design > 0).astype(np.uint8) * 255
    center = (w // 2, int(h * 0.35 + new_h // 2))
    blended = cv2.seamlessClone(warped_design, cloth_np, mask, center, cv2.NORMAL_CLONE)
    return Image.fromarray(blended)
def main(cloth, design):
    return blend_design(cloth, design)

iface = gr.Interface(
    fn=main,
    inputs=[gr.Image(type="pil"), gr.Image(type="pil")],
    outputs=gr.Image(type="pil"),
    title="AI Cloth Design Warping",
    description="Upload a clothing image and a design to blend it naturally, ensuring it stays centered and follows fabric folds.",
)

if __name__ == "__main__":
    iface.launch(share=True)