adil9858 committed on
Commit 5d47b99 · verified · 1 Parent(s): 10055af

Update app.py

Files changed (1)
  1. app.py +157 -69
app.py CHANGED
@@ -14,97 +14,179 @@ st.set_page_config(
     initial_sidebar_state="expanded"
 )
 
-# Custom CSS (keep your existing CSS here)
+# Custom CSS for futuristic design
 st.markdown("""
 <style>
-/* Your existing CSS styles */
+/* Main colors */
+:root {
+    --primary: #6366f1;
+    --secondary: #10b981;
+    --dark: #1e293b;
+    --light: #f8fafc;
+}
+
+/* Main container */
+.stApp {
+    background: linear-gradient(135deg, #0f172a 0%, #1e293b 100%);
+    color: var(--light);
+}
+
+/* Headers */
+h1, h2, h3, h4, h5, h6 {
+    color: var(--light) !important;
+    font-family: 'Inter', sans-serif;
+}
+
+/* Sidebar */
+[data-testid="stSidebar"] {
+    background: linear-gradient(195deg, #0f172a 0%, #1e40af 100%) !important;
+}
+
+/* Buttons */
+.stButton>button {
+    background: var(--primary) !important;
+    color: white !important;
+    border: none;
+    border-radius: 8px;
+    padding: 10px 24px;
+    font-weight: 500;
+    transition: all 0.3s;
+}
+
+.stButton>button:hover {
+    transform: translateY(-2px);
+    box-shadow: 0 4px 12px rgba(99, 102, 241, 0.3);
+}
+
+/* File uploader */
+[data-testid="stFileUploader"] {
+    border: 2px dashed var(--primary) !important;
+    border-radius: 12px !important;
+    padding: 20px !important;
+}
+
+/* Markdown output */
+.markdown-text {
+    background: rgba(30, 41, 59, 0.7) !important;
+    border-radius: 12px;
+    padding: 20px;
+    border-left: 4px solid var(--secondary);
+    animation: fadeIn 0.5s ease-in-out;
+}
+
+@keyframes fadeIn {
+    from { opacity: 0; transform: translateY(10px); }
+    to { opacity: 1; transform: translateY(0); }
+}
+
+/* Streamlit text input */
+.stTextInput>div>div>input {
+    background: rgba(15, 23, 42, 0.7) !important;
+    color: white !important;
+    border: 1px solid #334155 !important;
+}
 </style>
 """, unsafe_allow_html=True)
 
 # App title and description
 st.title("🔍 Optimus Alpha | Live Vision Assistant")
 
-# Initialize OpenAI client (keep your existing cached function)
+# Initialize OpenAI client
 @st.cache_resource
 def get_client():
     return OpenAI(
         base_url="https://openrouter.ai/api/v1",
-        api_key='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'
+        api_key='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' # Replace with your actual key
     )
 
-# ===== New Live Camera Section =====
-st.subheader("Live Camera Feed")
-run_camera = st.checkbox("Enable Camera", value=False)
+# ===== Camera/Upload Selection =====
+input_method = st.radio(
+    "Select input method:",
+    ["Live Camera", "Upload Image"],
+    horizontal=True
+)
 
-FRAME_WINDOW = st.empty()
+# ===== Camera Section =====
 captured_image = None
 
-if run_camera:
-    cap = cv2.VideoCapture(0)
+if input_method == "Live Camera":
+    st.subheader("Live Camera Feed")
+    run_camera = st.checkbox("Start Camera", value=False)
 
-    capture_button = st.button("Capture Image")
-    stop_button = st.button("Stop Camera")
+    FRAME_WINDOW = st.empty()
 
-    if stop_button:
-        run_camera = False
-        cap.release()
-        st.experimental_rerun()
-
-    while run_camera:
-        ret, frame = cap.read()
-        if not ret:
-            st.error("Failed to access camera")
-            break
-
-        # Display the live feed
-        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
-        FRAME_WINDOW.image(frame)
-
-        if capture_button:
-            captured_image = frame
+    if run_camera:
+        try:
+            cap = cv2.VideoCapture(0)
+            if not cap.isOpened():
+                st.error("Could not access camera. Please:")
+                st.markdown("""
+                - Check camera permissions
+                - Ensure no other app is using the camera
+                - Try reconnecting the camera
+                """)
+                run_camera = False
+            else:
+                capture_col, stop_col = st.columns(2)
+                with capture_col:
+                    capture_button = st.button("📸 Capture Image")
+                with stop_col:
+                    stop_button = st.button("🛑 Stop Camera")
+
+                if stop_button:
+                    cap.release()
+                    st.rerun()
+
+                while run_camera:
+                    ret, frame = cap.read()
+                    if not ret:
+                        st.error("Failed to capture frame")
+                        break
+
+                    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
+                    FRAME_WINDOW.image(frame)
+
+                    if capture_button:
+                        captured_image = frame
+                        cap.release()
+                        st.rerun()
+                        break
+        except Exception as e:
+            st.error(f"Camera error: {str(e)}")
             run_camera = False
-            cap.release()
-            break
-else:
-    FRAME_WINDOW.info("Camera is currently off")
-
-# ===== Image Processing Section =====
-col1, col2 = st.columns([1, 2])
 
-with col1:
-    st.subheader("Image Source")
-
-    # Option to use captured image or upload
-    if captured_image is not None:
-        st.image(captured_image, caption="Captured Image", width=300)
-        use_captured = True
-    else:
-        use_captured = False
-
+# ===== Upload Section =====
+else:
+    st.subheader("Upload Image")
     uploaded_file = st.file_uploader(
-        "Or upload an image",
+        "Choose an image file",
         type=["jpg", "jpeg", "png"],
-        disabled=use_captured
+        label_visibility="collapsed"
     )
+    if uploaded_file:
+        try:
+            captured_image = Image.open(uploaded_file)
+            st.image(captured_image, caption="Uploaded Image", width=300)
+        except Exception as e:
+            st.error(f"Error loading image: {str(e)}")
+
+# ===== Image Analysis Section =====
+if captured_image is not None:
+    st.subheader("AI Analysis")
 
-    # Determine which image to use
-    if use_captured:
+    # Convert to PIL Image if from OpenCV
+    if isinstance(captured_image, np.ndarray):
         image = Image.fromarray(captured_image)
-    elif uploaded_file:
-        image = Image.open(uploaded_file)
     else:
-        image = None
-
-with col2:
-    st.subheader("AI Analysis")
+        image = captured_image
 
     user_prompt = st.text_input(
-        "Your question about the image:",
-        placeholder="e.g. 'What objects do you see?' or 'Explain this diagram'",
+        "Ask about the image:",
+        placeholder="e.g. 'What is in this image?' or 'Explain this diagram'",
         key="user_prompt"
     )
 
-    if st.button("Analyze", type="primary") and image:
+    if st.button("Analyze Image", type="primary"):
         try:
             # Convert image to base64
             buffered = io.BytesIO()
@@ -115,18 +197,18 @@ with col2:
             messages = [
                 {
                     "role": "system",
-                    "content": """You are a real-time vision assistant. Analyze the current camera feed or uploaded image and:
-                    1. Identify objects, people, text clearly
-                    2. Answer follow-up questions precisely
-                    3. Format responses with bullet points
-                    4. Highlight urgent/important findings"""
+                    "content": """You are an expert vision assistant. Analyze images with:
+                    - Clear, structured responses
+                    - Bullet points for multiple objects
+                    - Concise explanations
+                    - Highlight important findings in bold"""
                 },
                 {
                     "role": "user",
                     "content": [
                         {
                             "type": "text",
-                            "text": user_prompt if user_prompt else "Describe what you see in detail"
+                            "text": user_prompt if user_prompt else "Describe this image in detail"
                         },
                         {
                             "type": "image_url",
@@ -159,13 +241,19 @@ with col2:
             """, unsafe_allow_html=True)
 
         except Exception as e:
-            st.error(f"Error: {str(e)}")
+            st.error(f"Analysis error: {str(e)}")
 
-# Sidebar (keep your existing sidebar)
+# Sidebar
 with st.sidebar:
-    st.image("blob.png", width=200)
+    st.image("https://via.placeholder.com/200", width=200) # Replace with your logo
     st.markdown("""
     *Powered by OpenRouter*
     """)
     st.markdown("---")
-    st.markdown("Made with ❤️ by Koshur AI")
+    st.markdown("""
+    **Tips:**
+    - For best results, use clear, well-lit images
+    - Ask specific questions for detailed answers
+    """)
+    st.markdown("Made with ❤️ by Koshur AI")
+
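
A note on get_client(): both the old and new versions of the diff keep the OpenRouter key inline, and the new line only adds a "# Replace with your actual key" comment. A minimal sketch of one way to supply the key without hard-coding it, assuming an environment variable named OPENROUTER_API_KEY (a name this commit does not define; Streamlit's st.secrets would work the same way):

import os

import streamlit as st
from openai import OpenAI

@st.cache_resource
def get_client():
    # Read the key from the environment instead of hard-coding it.
    # "OPENROUTER_API_KEY" is an assumed variable name, not part of this commit.
    return OpenAI(
        base_url="https://openrouter.ai/api/v1",
        api_key=os.environ.get("OPENROUTER_API_KEY", ""),
    )

With the key supplied this way, the rest of the diff (the st.radio input selection, the cv2 capture loop, and the base64 analysis request) can stay exactly as committed.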