gaur3009 committed
Commit 3327a73 · verified · 1 Parent(s): 60946de

Update app.py

Files changed (1): app.py +10 -5
app.py CHANGED
@@ -14,12 +14,17 @@ midas_transform = torch.hub.load("intel-isl/MiDaS", "transforms").default_transform
 
 def estimate_depth(image):
     """Estimate depth map using MiDaS v3."""
-    image = image.convert("RGB")
-    image = np.array(image) / 255.0  # Normalize and convert to NumPy array
-    input_tensor = midas_transform(image).unsqueeze(0).to(device)
+    image = image.convert("RGB")  # Ensure it's in RGB format
+    img_tensor = midas_transform(image).unsqueeze(0).to(device)
+
+    # Ensure tensor shape is [1, 3, H, W]
+    if img_tensor.dim() == 5:  # If an extra batch dimension is present
+        img_tensor = img_tensor.squeeze(1)
+
     with torch.no_grad():
-        depth = midas_model(input_tensor).squeeze().cpu().numpy()
-        depth = cv2.resize(depth, (image.shape[1], image.shape[0]))
+        depth = midas_model(img_tensor).squeeze().cpu().numpy()
+
+    depth = cv2.resize(depth, (image.size[0], image.size[1]))
     depth = (depth - depth.min()) / (depth.max() - depth.min()) * 255
     return depth.astype(np.uint8)
 
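
For context, the MiDaS hub transforms normally take an RGB NumPy array and already prepend the batch dimension, which is presumably why the new code guards against a 5-D tensor after the extra unsqueeze(0). Below is a minimal standalone sketch of that conventional pipeline, not the app's own code: the checkpoint name ("MiDaS_small"), the helper name depth_from_pil, and the input filename are assumptions, since only the transform line is visible in the hunk header.

# Minimal standalone sketch (assumptions noted above), feeding the transform
# a NumPy RGB array instead of a PIL image.
import cv2
import numpy as np
import torch
from PIL import Image

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Assumption: a small MiDaS checkpoint; the hunk does not show which model app.py loads.
midas_model = torch.hub.load("intel-isl/MiDaS", "MiDaS_small").to(device).eval()
midas_transform = torch.hub.load("intel-isl/MiDaS", "transforms").default_transform

def depth_from_pil(image):
    """Hypothetical helper mirroring estimate_depth, using a NumPy RGB input."""
    rgb = np.array(image.convert("RGB"))             # H x W x 3, uint8
    input_tensor = midas_transform(rgb).to(device)   # transform already returns [1, 3, H', W']

    with torch.no_grad():
        depth = midas_model(input_tensor).squeeze().cpu().numpy()

    depth = cv2.resize(depth, (image.size[0], image.size[1]))  # back to the input's (W, H)
    depth = (depth - depth.min()) / (depth.max() - depth.min()) * 255
    return depth.astype(np.uint8)

depth_map = depth_from_pil(Image.open("example.jpg"))  # placeholder input path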