gaur3009 committed
Commit 1b8657c · verified · 1 parent: 3327a73

Update app.py

Files changed (1)
app.py  +7 -2
app.py CHANGED
@@ -15,7 +15,12 @@ midas_transform = torch.hub.load("intel-isl/MiDaS", "transforms").default_transform
 def estimate_depth(image):
     """Estimate depth map using MiDaS v3."""
     image = image.convert("RGB")  # Ensure it's in RGB format
-    img_tensor = midas_transform(image).unsqueeze(0).to(device)
+
+    # Convert PIL image to a NumPy array and normalize it
+    img_np = np.array(image, dtype=np.float32) / 255.0  # Normalize to [0, 1]
+
+    # Convert NumPy array to a Torch tensor
+    img_tensor = torch.tensor(img_np).permute(2, 0, 1).unsqueeze(0).to(device)
 
     # Ensure tensor shape is [1, 3, H, W]
     if img_tensor.dim() == 5:  # If an extra batch dimension is present
@@ -23,7 +28,7 @@ def estimate_depth(image):
 
     with torch.no_grad():
         depth = midas_model(img_tensor).squeeze().cpu().numpy()
-
+
     depth = cv2.resize(depth, (image.size[0], image.size[1]))
     depth = (depth - depth.min()) / (depth.max() - depth.min()) * 255
     return depth.astype(np.uint8)
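
For reference, below is a consolidated sketch of estimate_depth as it reads after this commit. The midas_model and device setup and the "MiDaS" hub variant are assumptions (only the transforms line is visible in the hunk header), and the dim() == 5 shape guard is left out because its body falls outside the diff.

# Consolidated sketch of estimate_depth after this commit (not the full app.py).
# The midas_model/device setup below is an assumption; only the transforms
# line appears in the hunk header above.
import cv2
import numpy as np
import torch
from PIL import Image

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
midas_model = torch.hub.load("intel-isl/MiDaS", "MiDaS").to(device).eval()  # assumed variant

def estimate_depth(image):
    """Estimate depth map using MiDaS v3."""
    image = image.convert("RGB")  # Ensure it's in RGB format

    # Convert PIL image to a NumPy array and normalize it to [0, 1]
    img_np = np.array(image, dtype=np.float32) / 255.0

    # Convert NumPy array to a Torch tensor shaped [1, 3, H, W]
    img_tensor = torch.tensor(img_np).permute(2, 0, 1).unsqueeze(0).to(device)

    with torch.no_grad():
        depth = midas_model(img_tensor).squeeze().cpu().numpy()

    # Resize back to the input resolution and rescale to the 8-bit range
    depth = cv2.resize(depth, (image.size[0], image.size[1]))
    depth = (depth - depth.min()) / (depth.max() - depth.min()) * 255
    return depth.astype(np.uint8)

# Example usage (hypothetical input file):
# depth_map = estimate_depth(Image.open("photo.jpg"))

Design note: the commit swaps the hub-provided midas_transform (which, in the intel-isl/MiDaS repo, also handles resizing and normalization) for a plain divide-by-255 scaling, so the tensor is now fed to the model at the original image resolution.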