lakshmi082024 committed · verified
Commit 581a214 · 1 Parent(s): d39ab15

Update app.py

Files changed (1): app.py (+127 -126)
app.py CHANGED
@@ -1,126 +1,127 @@
 import streamlit as st
 import cv2
 import numpy as np
 import pandas as pd
 from PIL import Image
 import torch
 from torchvision.transforms import Compose, Resize, ToTensor, Normalize
-from segment_anything import SamPredictor, sam_model_registry
+#from segment_anything import SamPredictor, sam_model_registry
+from segment_anything import sam_model_registry, SamPredictor
 
 # Set Streamlit configuration
 st.set_page_config(page_title="Volume Estimator", layout="wide")
 st.title("📦 Volume Estimation using SAM Segmentation + MiDaS Depth")
 
 # Load SAM and MiDaS models
 @st.cache_resource
 def load_models():
     sam_checkpoint = "C:/Users/Administrator/Desktop/streamlit_tl/models/sam_vit_h_4b8939.pth"
     sam = sam_model_registry["vit_h"](checkpoint=sam_checkpoint).to("cuda" if torch.cuda.is_available() else "cpu")
     predictor = SamPredictor(sam)
 
     midas = torch.hub.load("intel-isl/MiDaS", "DPT_Large")
     midas.eval()
     midas_transform = Compose([
         Resize(384),
         ToTensor(),
         Normalize(mean=[0.5]*3, std=[0.5]*3)
     ])
     return predictor, midas, midas_transform
 
 predictor, midas_model, midas_transform = load_models()
 
 # Input source selection
 source_option = st.radio("Select input source", ("Upload Image", "Use Webcam"))
 
 uploaded_file = None
 image_pil = None
 
 if source_option == "Upload Image":
     uploaded_file = st.file_uploader("Upload an image", type=["jpg", "jpeg", "png"])
     if uploaded_file:
         image_pil = Image.open(uploaded_file).convert("RGB")
 
 elif source_option == "Use Webcam":
     run_camera = st.checkbox("Start Camera")
 
     if run_camera:
         cap = cv2.VideoCapture(0)
         stframe = st.empty()
         capture = False
 
         while run_camera and cap.isOpened():
             ret, frame = cap.read()
             if not ret:
                 break
             frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
             stframe.image(frame_rgb, caption="Live Camera Feed", channels="RGB")
 
             if st.button("📸 Capture Frame"):
                 image_pil = Image.fromarray(frame_rgb)
                 run_camera = False
                 cap.release()
                 break
 
 # Continue processing if we have an image
 if image_pil:
     image_np = np.array(image_pil)
     img_h, img_w = image_np.shape[:2]
     st.image(image_pil, caption="Selected Image", use_container_width=True)
 
     # Real-world reference dimensions
     real_image_width_cm = 100
     real_image_height_cm = 75
     assumed_max_depth_cm = 100
 
     pixel_to_cm_x = real_image_width_cm / img_w
     pixel_to_cm_y = real_image_height_cm / img_h
 
     # SAM Segmentation
     predictor.set_image(image_np)
     masks, _, _ = predictor.predict(multimask_output=False)
 
     # MiDaS Depth Estimation
     input_tensor = midas_transform(image_pil).unsqueeze(0)
     with torch.no_grad():
         depth_prediction = midas_model(input_tensor).squeeze().cpu().numpy()
     depth_resized = cv2.resize(depth_prediction, (img_w, img_h))
 
     # Object volume computation
     volume_data = []
     for i, mask in enumerate(masks):
         mask_np = mask
         x, y, w, h = cv2.boundingRect(mask_np.astype(np.uint8))
         width_px = w
         height_px = h
 
         width_cm = width_px * pixel_to_cm_x
         height_cm = height_px * pixel_to_cm_y
 
         depth_masked = depth_resized[mask_np > 0.5]
 
         if depth_masked.size == 0:
             continue
 
         normalized_depth = (depth_masked - np.min(depth_resized)) / (np.max(depth_resized) - np.min(depth_resized) + 1e-6)
         depth_cm = np.mean(normalized_depth) * assumed_max_depth_cm
 
         volume_cm3 = round(depth_cm * width_cm * height_cm, 2)
 
         volume_data.append({
             "Object": f"Object #{i+1}",
             "Length (Depth)": f"{round(depth_cm, 2)} cm",
             "Breadth (Width)": f"{round(width_cm, 2)} cm",
             "Height": f"{round(height_cm, 2)} cm",
             "Volume": f"{volume_cm3} cm³"
         })
 
     # Display volume table
     if volume_data:
         df = pd.DataFrame(volume_data)
         st.markdown("### 📊 Object Dimensions and Volume")
         st.dataframe(df)
 
         csv = df.to_csv(index=False).encode('utf-8')
         st.download_button("📂 Download Volume Table as CSV", csv, "object_volumes_with_units.csv", "text/csv")
     else:
         st.warning("🚫 No objects were segmented.")
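The only functional content of this commit is the import line: the old import is kept as a comment and re-added with the two names swapped, which Python treats identically, so the app's behavior is unchanged. A few spots in the unchanged code are worth flagging, with hedged sketches below.

First, predictor.predict(multimask_output=False) is called with no point, box, or mask prompts, so SAM falls back to an empty prompt embedding and returns a single mask that is not anchored to any particular object. For whole-image, prompt-free segmentation, segment_anything ships SamAutomaticMaskGenerator. A minimal sketch, assuming the same sam model object built inside load_models() is returned alongside the predictor (the integration is hypothetical, not part of this commit):

    # Sketch: prompt-free segmentation with SAM's automatic mask generator.
    from segment_anything import SamAutomaticMaskGenerator

    mask_generator = SamAutomaticMaskGenerator(sam)
    results = mask_generator.generate(image_np)       # one dict per detected object
    masks = [r["segmentation"] for r in results]      # boolean HxW arrays, the shape
                                                      # the volume loop already expects

This would typically yield one mask per object rather than a single unprompted mask, which also makes the per-object table more meaningful.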
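Second, the hand-rolled torchvision Compose may not match what DPT_Large was trained with: MiDaS publishes its own transforms on torch.hub, and its dpt_transform resizes to dimensions the DPT backbone accepts (multiples of 32) and returns an already-batched tensor from a NumPy RGB array. A sketch under those assumptions, following the pattern from the MiDaS examples:

    # Sketch: MiDaS's own transform for DPT_Large, in place of the hand-rolled
    # Compose. `image_np` is the HxWx3 RGB NumPy array already built above.
    midas_transforms = torch.hub.load("intel-isl/MiDaS", "transforms")
    input_batch = midas_transforms.dpt_transform(image_np)   # shape (1, 3, H', W')

    with torch.no_grad():
        prediction = midas_model(input_batch)
        # Resize the raw prediction back to the input resolution.
        prediction = torch.nn.functional.interpolate(
            prediction.unsqueeze(1),
            size=image_np.shape[:2],
            mode="bicubic",
            align_corners=False,
        ).squeeze()
    depth_resized = prediction.cpu().numpy()

Either this interpolate call or the existing cv2.resize works for restoring resolution; the official transform mainly removes the guesswork about normalization. Separately, note that MiDaS depth is relative, not metric, so the cm figures in the table are only as trustworthy as the assumed calibration constants (real_image_width_cm, real_image_height_cm, assumed_max_depth_cm).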
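Third, the webcam branch runs a while loop with an st.button inside it, but Streamlit reruns the whole script on every interaction, so the capture button will rarely register from inside the loop and cap.release() may never run. Streamlit's built-in camera widget sidesteps the loop entirely; a minimal sketch of a drop-in replacement for the elif branch (hypothetical, not part of this commit):

    # Sketch: idiomatic Streamlit capture via the built-in camera widget,
    # replacing the OpenCV capture loop. st.camera_input renders a live
    # preview and returns the captured frame once the user clicks the shutter.
    elif source_option == "Use Webcam":
        camera_file = st.camera_input("📸 Capture Frame")
        if camera_file is not None:
            image_pil = Image.open(camera_file).convert("RGB")

This keeps the rest of the pipeline untouched, since downstream code only needs image_pil to be set.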