import gradio as gr
import torch
import cv2
import numpy as np
from torchvision import transforms
from PIL import Image
from transformers import DPTForDepthEstimation, DPTFeatureExtractor
# Load depth estimation model
model_name = "Intel/dpt-large"
feature_extractor = DPTFeatureExtractor.from_pretrained(model_name)
depth_model = DPTForDepthEstimation.from_pretrained(model_name)
depth_model.eval()
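
# Note: recent transformers releases deprecate DPTFeatureExtractor in favor of
# DPTImageProcessor; the call above still works but may emit a deprecation warning.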


def estimate_depth(image):
    """Estimate a normalized (0-255) depth map from a PIL image."""
    image = image.convert("RGB")
    orig_w, orig_h = image.size
    resized = image.resize((384, 384))  # Resize to the DPT input resolution
    inputs = feature_extractor(images=resized, return_tensors="pt")
    with torch.no_grad():
        outputs = depth_model(**inputs)
    depth = outputs.predicted_depth.squeeze().cpu().numpy()
    depth = cv2.resize(depth, (orig_w, orig_h))  # Resize back to the original image size
    depth = (depth - depth.min()) / (depth.max() - depth.min() + 1e-8) * 255
    return depth.astype(np.uint8)
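
# Example (hypothetical file name, assuming the image exists locally):
#   depth = estimate_depth(Image.open("tshirt.jpg"))
#   Image.fromarray(depth).save("tshirt_depth.png")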


def blend_design(cloth_img, design_img):
    """Blend design onto clothing naturally."""
    cloth_img = cloth_img.convert("RGB")
    design_img = design_img.convert("RGB")
    cloth_np = np.array(cloth_img)
    design_np = np.array(design_img)

    # Resize design to fit within clothing
    h, w, _ = cloth_np.shape
    dh, dw, _ = design_np.shape
    scale_factor = min(w / dw, h / dh) * 0.4  # Scale to 40% of clothing area
    new_w, new_h = int(dw * scale_factor), int(dh * scale_factor)
    design_np = cv2.resize(design_np, (new_w, new_h))

    # Convert design to grayscale and darken for print effect
    design_gray = cv2.cvtColor(design_np, cv2.COLOR_RGB2GRAY)
    design_np = cv2.cvtColor(design_gray, cv2.COLOR_GRAY2RGB)
    design_np = cv2.convertScaleAbs(design_np, alpha=1.5, beta=-40)  # Increase contrast
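    # cv2.convertScaleAbs computes |alpha * src + beta| and saturates to uint8, so
    # alpha=1.5 stretches contrast while beta=-40 darkens the print slightly.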

    # Create a blank canvas and paste the resized design at the center
    design_canvas = np.zeros_like(cloth_np)
    x_offset = (w - new_w) // 2
    y_offset = int(h * 0.35)  # Move slightly upward for a natural position
    design_canvas[y_offset:y_offset + new_h, x_offset:x_offset + new_w] = design_np

    # Estimate depth for fold detection
    depth_map = estimate_depth(cloth_img)
    depth_map = cv2.resize(depth_map, (w, h))

    # Generate displacement map based on depth
    displacement_x = cv2.Sobel(depth_map, cv2.CV_32F, 1, 0, ksize=5)
    displacement_y = cv2.Sobel(depth_map, cv2.CV_32F, 0, 1, ksize=5)
    displacement_x = cv2.normalize(displacement_x, None, -3, 3, cv2.NORM_MINMAX)
    displacement_y = cv2.normalize(displacement_y, None, -3, 3, cv2.NORM_MINMAX)
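    # The Sobel gradients of the depth map point across folds: where the fabric creases,
    # depth changes quickly and the gradient is large. Rescaling to [-3, 3] keeps the
    # per-pixel shift to a few pixels, so the design bends with wrinkles without tearing.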

    # Warp design using displacement map
    map_x, map_y = np.meshgrid(np.arange(w), np.arange(h))
    map_x = np.clip(np.float32(map_x + displacement_x), 0, w - 1)
    map_y = np.clip(np.float32(map_y + displacement_y), 0, h - 1)
    warped_design = cv2.remap(design_canvas, map_x, map_y,
                              interpolation=cv2.INTER_LINEAR, borderMode=cv2.BORDER_REFLECT)
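    # cv2.remap builds each output pixel (x, y) by sampling the source at
    # (map_x[y, x], map_y[y, x]); adding the depth gradients to the identity grid
    # therefore shifts the flat design along the estimated folds.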

    # Use Poisson blending (cv2.seamlessClone) for seamless integration: NORMAL_CLONE
    # keeps the gradients of the warped design inside the mask while matching the
    # surrounding cloth colors at the mask boundary, which hides the seam.
    mask = (cv2.cvtColor(warped_design, cv2.COLOR_RGB2GRAY) > 0).astype(np.uint8) * 255
    center = (w // 2, y_offset + new_h // 2)  # Center of the pasted design region
    blended = cv2.seamlessClone(warped_design, cloth_np, mask, center, cv2.NORMAL_CLONE)
    return Image.fromarray(blended)


def main(cloth, design):
    return blend_design(cloth, design)


iface = gr.Interface(
    fn=main,
    inputs=[gr.Image(type="pil"), gr.Image(type="pil")],
    outputs=gr.Image(type="pil"),
    title="AI Cloth Design Warping",
    description="Upload a clothing image and a design to blend it naturally, ensuring it stays centered and follows fabric folds."
)

if __name__ == "__main__":
    iface.launch(share=True)