gaur3009 committed
Commit cc5f70a · verified · 1 Parent(s): 7d41451

Update app.py

Files changed (1)
  1. app.py +36 -36
app.py CHANGED
@@ -4,71 +4,71 @@ import cv2
 import numpy as np
 from torchvision import transforms
 from PIL import Image
-from transformers import DPTForDepthEstimation, DPTFeatureExtractor
+from transformers import DPTForDepthEstimation, DPTImageProcessor

-# Load depth estimation model
-model_name = "Intel/dpt-large"
-feature_extractor = DPTFeatureExtractor.from_pretrained(model_name)
+# Load depth estimation model (MiDaS 3.0 hybrid checkpoint for better fold detail)
+model_name = "Intel/dpt-hybrid-midas"  # Upgraded model
+processor = DPTImageProcessor.from_pretrained(model_name)
 depth_model = DPTForDepthEstimation.from_pretrained(model_name)
 depth_model.eval()

 def estimate_depth(image):
-    """Estimate depth map from image."""
+    """Estimate depth map from image using MiDaS 3.0."""
     image = image.convert("RGB")
-    image = image.resize((384, 384))  # Resize for model input
-    inputs = feature_extractor(images=image, return_tensors="pt")
+    inputs = processor(images=image, return_tensors="pt")
     with torch.no_grad():
         outputs = depth_model(**inputs)
         depth = outputs.predicted_depth.squeeze().cpu().numpy()
-    depth = cv2.resize(depth, (image.width, image.height))  # Resize back to original
+    depth = cv2.resize(depth, (image.width, image.height))  # Back to original size
     depth = (depth - depth.min()) / (depth.max() - depth.min()) * 255
     return depth.astype(np.uint8)

+def apply_tps_warping(design, depth):
+    """Warp the design along depth gradients (approximate fold-following, not a true thin-plate spline)."""
+    h, w = depth.shape
+    grid_x, grid_y = np.meshgrid(np.arange(w), np.arange(h))
+    displacement_x = cv2.Sobel(depth, cv2.CV_32F, 1, 0, ksize=5)
+    displacement_y = cv2.Sobel(depth, cv2.CV_32F, 0, 1, ksize=5)
+    displacement_x = cv2.normalize(displacement_x, None, -10, 10, cv2.NORM_MINMAX)
+    displacement_y = cv2.normalize(displacement_y, None, -10, 10, cv2.NORM_MINMAX)
+    map_x = np.clip(grid_x + displacement_x, 0, w - 1).astype(np.float32)
+    map_y = np.clip(grid_y + displacement_y, 0, h - 1).astype(np.float32)
+    warped_design = cv2.remap(design, map_x, map_y, interpolation=cv2.INTER_CUBIC, borderMode=cv2.BORDER_REFLECT)
+    return warped_design
+
 def blend_design(cloth_img, design_img):
-    """Blend design onto clothing naturally with fold adaptation."""
+    """Blend design onto clothing naturally, adapting it to folds via depth-based warping."""
     cloth_img = cloth_img.convert("RGB")
     design_img = design_img.convert("RGBA")
     cloth_np = np.array(cloth_img)
     design_np = np.array(design_img)

     # Resize design to fit within clothing
     h, w, _ = cloth_np.shape
     dh, dw, _ = design_np.shape
     scale_factor = min(w / dw, h / dh) * 0.4  # Scale to 40% of clothing area
     new_w, new_h = int(dw * scale_factor), int(dh * scale_factor)
     design_np = cv2.resize(design_np, (new_w, new_h), interpolation=cv2.INTER_AREA)

     # Extract alpha channel for transparency
     alpha_channel = design_np[:, :, 3] / 255.0
     design_np = design_np[:, :, :3]

-    # Create a blank canvas and paste the resized design at the center
+    # Place design and alpha mask on full-size canvases
     x_offset = (w - new_w) // 2
     y_offset = int(h * 0.35)  # Move slightly upward for a natural position
-
-    for c in range(3):
-        cloth_np[y_offset:y_offset+new_h, x_offset:x_offset+new_w, c] = (
-            cloth_np[y_offset:y_offset+new_h, x_offset:x_offset+new_w, c] * (1 - alpha_channel) +
-            design_np[:, :, c] * alpha_channel
-        )
+    design_canvas = np.zeros_like(cloth_np)
+    design_canvas[y_offset:y_offset+new_h, x_offset:x_offset+new_w] = design_np
+    alpha_canvas = np.zeros((h, w), dtype=np.float32)
+    alpha_canvas[y_offset:y_offset+new_h, x_offset:x_offset+new_w] = alpha_channel

-    # Estimate depth for fold detection
+    # Estimate depth, then warp design and mask together so they stay aligned
     depth_map = estimate_depth(cloth_img)
-    depth_map = cv2.resize(depth_map, (w, h))
+    warped_design = apply_tps_warping(design_canvas, depth_map)
+    warped_alpha = np.clip(apply_tps_warping(alpha_canvas, depth_map), 0.0, 1.0)

-    # Generate displacement map based on depth
-    displacement_x = cv2.Sobel(depth_map, cv2.CV_32F, 1, 0, ksize=5)
-    displacement_y = cv2.Sobel(depth_map, cv2.CV_32F, 0, 1, ksize=5)
-    displacement_x = cv2.normalize(displacement_x, None, -5, 5, cv2.NORM_MINMAX)
-    displacement_y = cv2.normalize(displacement_y, None, -5, 5, cv2.NORM_MINMAX)
-
-    # Warp design using displacement map
-    map_x, map_y = np.meshgrid(np.arange(w), np.arange(h))
-    map_x = np.clip(np.float32(map_x + displacement_x), 0, w - 1)
-    map_y = np.clip(np.float32(map_y + displacement_y), 0, h - 1)
-    warped_cloth = cv2.remap(cloth_np, map_x, map_y, interpolation=cv2.INTER_LINEAR, borderMode=cv2.BORDER_REFLECT)
+    # Blend the warped design onto the cloth using the warped alpha mask
+    for c in range(3):
+        cloth_np[:, :, c] = (cloth_np[:, :, c] * (1 - warped_alpha) +
+                             warped_design[:, :, c] * warped_alpha)

-    return Image.fromarray(warped_cloth)
+    return Image.fromarray(cloth_np)

 def main(cloth, design):
     return blend_design(cloth, design)
@@ -82,4 +82,4 @@ iface = gr.Interface(
 )

 if __name__ == "__main__":
-    iface.launch(share=True)
+    iface.launch(share=True)
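The fold adaptation in apply_tps_warping is just cv2.remap driven by Sobel gradients of the depth map: each pixel is displaced a few pixels along depth changes, which makes a flat print appear to follow wrinkles. A minimal self-contained sketch of that displacement idea; the ramp depth map and checkerboard design here are synthetic stand-ins, not values from app.py:

import cv2
import numpy as np

# Synthetic depth map: a horizontal ramp with a circular "fold" bump.
h, w = 256, 256
depth = np.tile(np.linspace(0, 255, w, dtype=np.float32), (h, 1))
cv2.circle(depth, (128, 128), 40, 255, -1)

# Depth gradients become per-pixel displacements, capped to +/-10 px.
dx = cv2.normalize(cv2.Sobel(depth, cv2.CV_32F, 1, 0, ksize=5), None, -10, 10, cv2.NORM_MINMAX)
dy = cv2.normalize(cv2.Sobel(depth, cv2.CV_32F, 0, 1, ksize=5), None, -10, 10, cv2.NORM_MINMAX)

grid_x, grid_y = np.meshgrid(np.arange(w), np.arange(h))
map_x = np.clip(grid_x + dx, 0, w - 1).astype(np.float32)
map_y = np.clip(grid_y + dy, 0, h - 1).astype(np.float32)

# A checkerboard "design" makes the warp easy to see.
design = ((grid_x // 16 + grid_y // 16) % 2 * 255).astype(np.uint8)
warped = cv2.remap(design, map_x, map_y, interpolation=cv2.INTER_CUBIC,
                   borderMode=cv2.BORDER_REFLECT)
cv2.imwrite("warped_checker.png", warped)

Because the gradients are renormalized to a fixed +/-10 px range, the warp strength is independent of the depth map's absolute scale; stronger folds only change where the displacement concentrates.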
 
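To sanity-check the depth stage in isolation before the Gradio wiring, a minimal sketch along these lines works; Intel/dpt-hybrid-midas is one published MiDaS 3.0 checkpoint on the Hub, and cloth.jpg is a placeholder path:

import torch
from PIL import Image
from transformers import DPTForDepthEstimation, DPTImageProcessor

model_name = "Intel/dpt-hybrid-midas"  # MiDaS 3.0 hybrid checkpoint on the HF Hub
processor = DPTImageProcessor.from_pretrained(model_name)
model = DPTForDepthEstimation.from_pretrained(model_name)
model.eval()

image = Image.open("cloth.jpg").convert("RGB")  # placeholder test image
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    depth = model(**inputs).predicted_depth.squeeze().cpu().numpy()

print(depth.shape, depth.min(), depth.max())

Note that predicted_depth comes back at the model's internal resolution, which is why estimate_depth resizes it to the input image size before normalizing to 8-bit.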