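"""Gradio app: blend a user-supplied design onto a clothing image.

Pipeline: resize and grayscale the design, paste it centered on a blank
canvas, estimate the garment's depth with Intel's DPT model, shade the
design by depth so it follows fabric folds, then Poisson-blend it into
the clothing photo with cv2.seamlessClone.
"""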
import gradio as gr
import torch
import cv2
import numpy as np
from torchvision import transforms
from PIL import Image
from transformers import DPTForDepthEstimation, DPTFeatureExtractor
# Load depth estimation model
model_name = "Intel/dpt-large"
feature_extractor = DPTFeatureExtractor.from_pretrained(model_name)
depth_model = DPTForDepthEstimation.from_pretrained(model_name)
depth_model.eval()
def estimate_depth(image):
    """Estimate a depth map from a PIL image, normalized to 0-255 uint8."""
    image = image.convert("RGB")
    orig_w, orig_h = image.size
    resized = image.resize((384, 384))  # DPT-Large expects 384x384 input
    inputs = feature_extractor(images=resized, return_tensors="pt")
    with torch.no_grad():
        outputs = depth_model(**inputs)
    depth = outputs.predicted_depth.squeeze().cpu().numpy()
    depth = cv2.resize(depth, (orig_w, orig_h))  # Resize back to the original size
    depth = (depth - depth.min()) / (depth.max() - depth.min() + 1e-8) * 255
    return depth.astype(np.uint8)
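# Quick sanity check for the depth stage (hypothetical file path,
# not part of the app flow):
#   depth = estimate_depth(Image.open("shirt.jpg"))
#   Image.fromarray(depth).save("shirt_depth.png")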
def blend_design(cloth_img, design_img):
    """Blend a design onto a clothing image: centered, contrast-adjusted, and depth-shaded."""
    cloth_img = cloth_img.convert("RGB")
    design_img = design_img.convert("RGB")
    cloth_np = np.array(cloth_img)
    design_np = np.array(design_img)

    # Resize the design to fit within the clothing image
    h, w, _ = cloth_np.shape
    dh, dw, _ = design_np.shape
    scale_factor = min(w / dw, h / dh) * 0.6  # Scale design to 60% of the clothing area
    new_w, new_h = int(dw * scale_factor), int(dh * scale_factor)
    design_np = cv2.resize(design_np, (new_w, new_h))

    # Convert the design to grayscale, then raise contrast and darken it slightly
    design_gray = cv2.cvtColor(design_np, cv2.COLOR_RGB2GRAY)
    design_np = cv2.cvtColor(design_gray, cv2.COLOR_GRAY2RGB)
    design_np = cv2.convertScaleAbs(design_np, alpha=1.2, beta=-30)

    # Paste the resized design at the center of a blank canvas
    design_canvas = np.zeros_like(cloth_np)
    x_offset = (w - new_w) // 2
    y_offset = (h - new_h) // 2
    design_canvas[y_offset:y_offset + new_h, x_offset:x_offset + new_w] = design_np

    # Estimate depth and shade the design so it dims inside folds
    # (a lightweight stand-in for full fold-aware warping)
    depth_map = estimate_depth(cloth_img)
    depth_map = cv2.resize(depth_map, (w, h))  # Match the clothing image size
    shading = depth_map.astype(np.float32) / 255.0 * 0.5 + 0.5  # Map depth to [0.5, 1.0]
    design_canvas = (design_canvas.astype(np.float32) * shading[..., None]).astype(np.uint8)

    # Poisson-blend the design region into the clothing for seamless integration.
    # Use a solid rectangular mask: masking on pixel value would punch holes
    # wherever the darkened design hits pure black.
    mask = np.zeros((h, w), dtype=np.uint8)
    mask[y_offset:y_offset + new_h, x_offset:x_offset + new_w] = 255
    blended = cv2.seamlessClone(design_canvas, cloth_np, mask, (w // 2, h // 2), cv2.NORMAL_CLONE)
    return Image.fromarray(blended)
def main(cloth, design):
    return blend_design(cloth, design)
iface = gr.Interface(
    fn=main,
    inputs=[
        gr.Image(type="pil", label="Clothing Image"),
        gr.Image(type="pil", label="Design Image"),
    ],
    outputs=gr.Image(type="pil"),
    title="AI Cloth Design Warping",
    description=(
        "Upload a clothing image and a design to blend them naturally; the design "
        "stays centered and is shaded to follow fabric folds."
    ),
)
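# Note: share=True asks Gradio to create a temporary public link in addition
# to the local server; drop it (or set share=False) to serve locally only.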
if __name__ == "__main__":
    iface.launch(share=True)