Update streamlit_app.py
streamlit_app.py (CHANGED: +118 -36)
@@ -4,11 +4,15 @@ import torch
 from torchvision import models, transforms
 import json
 import os
-import
+import io
+import numpy as np
+from streamlit_webrtc import webrtc_streamer, WebRtcMode, RTCConfiguration
+import av
+import cv2

 # Configure Streamlit
 st.set_page_config(
-    page_title="Butterfly Identifier/
+    page_title="Butterfly Identifier/Liblikamaja ID",
     page_icon="🦋",
     layout="wide"
 )
@@ -48,45 +52,123 @@ transform = transforms.Compose([
     transforms.ToTensor(),
 ])

-
-
-#
-
+def predict_butterfly(image):
+    """Predict butterfly species from image"""
+    if image is None:
+        return None, None
+
+    # Convert to PIL Image if needed
+    if isinstance(image, np.ndarray):
+        image = Image.fromarray(image)
+
+    # Preprocess
+    input_tensor = transform(image).unsqueeze(0)
+
+    # Predict
+    with torch.no_grad():
+        output = model(input_tensor)
+        probabilities = torch.nn.functional.softmax(output[0], dim=0)
+        confidence, pred = torch.max(probabilities, 0)
+        predicted_class = class_names[pred.item()]
+
+    return predicted_class, confidence.item()
+
+# Video frame callback for live camera
+class VideoProcessor:
+    def __init__(self):
+        self.prediction_text = ""
+        self.frame_count = 0
+
+    def recv(self, frame):
+        img = frame.to_ndarray(format="bgr24")
+
+        # Only process every 30th frame for performance
+        self.frame_count += 1
+        if self.frame_count % 30 == 0:
+            # Convert BGR to RGB
+            rgb_img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
+            predicted_class, confidence = predict_butterfly(rgb_img)
+
+            if predicted_class and confidence > 0.7:  # Only show if confident
+                self.prediction_text = f"{predicted_class} ({confidence:.2f})"
+
+        # Draw prediction on frame
+        if self.prediction_text:
+            cv2.putText(img, self.prediction_text, (10, 30),
+                        cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
+
+        return av.VideoFrame.from_ndarray(img, format="bgr24")
+
+# Streamlit UI
+st.title("🦋 Butterfly Identifier / Liblikamaja ID")
+st.write("Identify butterflies using your camera or by uploading an image!")
+
+# Create tabs for different input methods
+tab1, tab2 = st.tabs(["📷 Live Camera", "📁 Upload Image"])
+
+with tab1:
+    st.header("Live Camera Identification")
+    st.write("Point your camera at a butterfly and get real-time identification!")
+
+    # WebRTC configuration
+    RTC_CONFIGURATION = RTCConfiguration({
+        "iceServers": [{"urls": ["stun:stun.l.google.com:19302"]}]
+    })
+
+    # Camera streamer
+    webrtc_ctx = webrtc_streamer(
+        key="butterfly-camera",
+        mode=WebRtcMode.SENDRECV,
+        rtc_configuration=RTC_CONFIGURATION,
+        video_processor_factory=VideoProcessor,
+        media_stream_constraints={"video": True, "audio": False},
+        async_processing=True,
+    )
+
+    st.info("💡 Tips for best results:")
+    st.write("- Hold the camera steady")
+    st.write("- Ensure good lighting")
+    st.write("- Get close to the butterfly")
+    st.write("- Wait for the green prediction text to appear")
+
+with tab2:
+    st.header("Upload Image")
+    st.write("Upload a clear photo of a butterfly for identification")
+
 uploaded_file = st.file_uploader(
     "Choose an image...",
     type=["jpg", "jpeg", "png"],
-    help="Upload a clear photo of a butterfly"
-    key="butterfly_image"
+    help="Upload a clear photo of a butterfly"
 )

 if uploaded_file is not None:
-
-
-
-
-
-    # Load image from temporary file
-    image = Image.open(tmp_file_path).convert("RGB")
-    st.image(image, caption="Uploaded Image", use_column_width=True)
-
-    # Preprocess
-    input_tensor = transform(image).unsqueeze(0)
-
-    # Predict
-    with torch.no_grad():
-        output = model(input_tensor)
-        _, pred = torch.max(output, 1)
-        predicted_class = class_names[pred.item()]
-
-    st.success(f"**Prediction: {predicted_class}**")
-
-    if predicted_class in butterfly_info:
-        st.info(butterfly_info[predicted_class]["description"])
-
-    # Clean up temporary file
-    os.unlink(tmp_file_path)
+    try:
+        # Read file directly into memory
+        image_bytes = uploaded_file.read()
+        image = Image.open(io.BytesIO(image_bytes)).convert("RGB")
+
+        col1, col2 = st.columns(2)
+
+        with col1:
+            st.image(image, caption="Uploaded Image", use_column_width=True)
+
+        with col2:
+            predicted_class, confidence = predict_butterfly(image)
+
+            if predicted_class:
+                st.success(f"**Prediction: {predicted_class}**")
+                st.info(f"Confidence: {confidence:.2%}")
+
+                if predicted_class in butterfly_info:
+                    st.write("**Species Information:**")
+                    st.write(butterfly_info[predicted_class]["description"])
+
+    except Exception as e:
+        st.error(f"Error processing image: {str(e)}")

-
-
-
+
+# Add footer with instructions
+st.markdown("---")
+st.markdown("### How to use:")
+st.markdown("1. **Live Camera**: Click 'START' to begin live identification")
+st.markdown("2. **Upload Image**: Choose a butterfly photo from your device")
+st.markdown("3. **Best Results**: Use clear, well-lit photos with the butterfly clearly visible")