uumerrr684 committed
Commit 963d0d8 · verified · 1 Parent(s): 7c18a0a

Update app.py

Files changed (1)
  1. app.py +114 -293
app.py CHANGED
@@ -3,380 +3,201 @@ import requests
  import os
  import json
  import time
- from datetime import datetime

- # Page configuration - MUST be first Streamlit command
  st.set_page_config(
      page_title="AI Assistant 2025",
      page_icon="🤖",
      layout="wide",
-     initial_sidebar_state="expanded",
-     menu_items={
-         'Get Help': 'https://docs.streamlit.io/develop/api-reference/chat',
-         'Report a bug': "https://github.com/streamlit/streamlit/issues",
-         'About': "AI Assistant 2025 - Built with Streamlit's native chat components"
-     }
  )

- # Streamlit's built-in theme with minimal custom CSS
  st.markdown("""
  <style>
- /* Minimal custom styling to enhance built-in components */
- .stApp {
-     font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', sans-serif;
  }

- /* Enhanced chat input */
- .stChatInputContainer {
-     border-top: 1px solid #e6e6e6;
-     padding-top: 1rem;
  }

- /* Status indicators */
- .status-indicator {
-     display: inline-flex;
-     align-items: center;
-     gap: 0.5rem;
-     padding: 0.25rem 0.75rem;
-     border-radius: 20px;
-     font-size: 0.875rem;
-     font-weight: 500;
  }

- .status-connected {
-     background-color: #d4edda;
-     color: #155724;
-     border: 1px solid #c3e6cb;
  }

- .status-error {
-     background-color: #f8d7da;
-     color: #721c24;
-     border: 1px solid #f5c6cb;
  }

- .status-warning {
-     background-color: #fff3cd;
-     color: #856404;
-     border: 1px solid #ffeaa7;
  }
  </style>
  """, unsafe_allow_html=True)

- # Initialize session state using Streamlit patterns
  if "messages" not in st.session_state:
      st.session_state.messages = []
-     # Add welcome message
-     st.session_state.messages.append({
-         "role": "assistant",
-         "content": "Hello! I'm your AI Assistant. How can I help you today? 🤖",
-         "timestamp": datetime.now()
-     })

- if "conversation_id" not in st.session_state:
-     st.session_state.conversation_id = str(int(time.time()))
-
- # API Configuration
  OPENROUTER_API_KEY = os.environ.get("OPENROUTER_API_KEY")
- DEFAULT_MODEL = "openai/gpt-3.5-turbo"
-
- @st.cache_data(ttl=300, show_spinner=False)
- def get_available_models():
-     """Fetch available models from OpenRouter"""
-     if not OPENROUTER_API_KEY:
-         return [DEFAULT_MODEL]
-
-     try:
-         url = "https://openrouter.ai/api/v1/models"
-         headers = {"Authorization": f"Bearer {OPENROUTER_API_KEY}"}
-         response = requests.get(url, headers=headers, timeout=10)
-
-         if response.status_code == 200:
-             models = response.json()
-             return [model["id"] for model in models.get("data", [])][:20]  # Limit to 20 models
-     except:
-         pass
-
-     return [DEFAULT_MODEL]

- @st.cache_data(ttl=600, show_spinner=False)
  def check_api_status():
-     """Check API connection status"""
      if not OPENROUTER_API_KEY:
-         return "no_key"
-
      try:
          url = "https://openrouter.ai/api/v1/models"
          headers = {"Authorization": f"Bearer {OPENROUTER_API_KEY}"}
          response = requests.get(url, headers=headers, timeout=5)
-         return "connected" if response.status_code == 200 else "error"
      except:
-         return "error"

- def stream_ai_response(messages, model=DEFAULT_MODEL, temperature=0.7):
-     """Stream response from OpenRouter API"""
      if not OPENROUTER_API_KEY:
-         yield "❌ **Error:** OPENROUTER_API_KEY not found in environment variables."
-         return

      url = "https://openrouter.ai/api/v1/chat/completions"
      headers = {
          "Authorization": f"Bearer {OPENROUTER_API_KEY}",
-         "Content-Type": "application/json",
-         "HTTP-Referer": "https://streamlit.io",
-         "X-Title": "Streamlit AI Assistant"
      }

-     # Prepare API messages (exclude timestamp)
-     api_messages = [
-         {
-             "role": "system",
-             "content": "You are a helpful, knowledgeable AI assistant. Provide clear, accurate responses. Use markdown formatting when appropriate."
-         }
-     ]
-
-     for msg in messages:
-         if msg["role"] in ["user", "assistant"]:
-             api_messages.append({
-                 "role": msg["role"],
-                 "content": msg["content"]
-             })

-     payload = {
          "model": model,
          "messages": api_messages,
          "stream": True,
-         "temperature": temperature,
-         "max_tokens": 2000,
      }

      try:
-         with requests.post(url, headers=headers, json=payload, stream=True, timeout=60) as response:
-             if response.status_code != 200:
-                 yield f"❌ **API Error {response.status_code}:** {response.text[:200]}"
-                 return
-
-             full_response = ""
-             for line in response.iter_lines():
-                 if line:
-                     line = line.decode('utf-8')
-                     if line.startswith('data: '):
-                         data_str = line[6:].strip()
-                         if data_str == '[DONE]':
-                             break
-                         try:
-                             data = json.loads(data_str)
-                             if 'choices' in data and len(data['choices']) > 0:
-                                 delta = data['choices'][0].get('delta', {})
-                                 if 'content' in delta:
-                                     full_response += delta['content']
-                                     yield full_response
-                         except json.JSONDecodeError:
-                             continue
-
-     except requests.exceptions.Timeout:
-         yield "⏰ **Request timed out.** Please try a shorter message or try again."
-     except requests.exceptions.RequestException as e:
-         yield f"🔌 **Connection error:** Unable to reach AI service. Please try again."
      except Exception as e:
-         yield f"⚠️ **Unexpected error occurred.** Please refresh and try again."

  # Header
- st.title("🤖 AI Assistant 2025")
- st.caption("Built with Streamlit's native chat components")

- # Sidebar with enhanced controls
  with st.sidebar:
-     st.header("🛠️ Chat Settings")

      # API Status
      status = check_api_status()
-     if status == "connected":
-         st.markdown('<div class="status-indicator status-connected">🟢 API Connected</div>', unsafe_allow_html=True)
-     elif status == "no_key":
-         st.markdown('<div class="status-indicator status-error">🔴 No API Key</div>', unsafe_allow_html=True)
-         st.error("Set OPENROUTER_API_KEY in environment variables")
      else:
-         st.markdown('<div class="status-indicator status-warning">🟡 Connection Issues</div>', unsafe_allow_html=True)

      st.divider()

-     # Model Selection
-     available_models = get_available_models()
-     selected_model = st.selectbox(
-         "🤖 Select Model",
-         available_models,
-         index=0,
-         help="Choose the AI model for responses"
-     )

-     # Temperature control
-     temperature = st.slider(
-         "🌡️ Creativity",
-         min_value=0.0,
-         max_value=2.0,
-         value=0.7,
-         step=0.1,
-         help="Higher values make responses more creative but less predictable"
-     )

      st.divider()

-     # Chat Controls
-     st.subheader("💬 Chat Controls")
-
      col1, col2 = st.columns(2)
      with col1:
-         if st.button("🗑️ Clear Chat", use_container_width=True):
              st.session_state.messages = []
-             # Add welcome message back
-             st.session_state.messages.append({
-                 "role": "assistant",
-                 "content": "Chat cleared! How can I help you? 🤖",
-                 "timestamp": datetime.now()
-             })
              st.rerun()

      with col2:
-         if st.button("📝 New Session", use_container_width=True):
-             st.session_state.messages = []
-             st.session_state.conversation_id = str(int(time.time()))
-             st.session_state.messages.append({
-                 "role": "assistant",
-                 "content": "New session started! What would you like to discuss? 🌟",
-                 "timestamp": datetime.now()
-             })
              st.rerun()

-     # Export chat
-     if len(st.session_state.messages) > 1:
-         chat_export = "\n\n".join([
-             f"**{msg['role'].title()}:** {msg['content']}"
-             for msg in st.session_state.messages
-         ])
-         st.download_button(
-             "📥 Export Chat",
-             chat_export,
-             file_name=f"chat_export_{st.session_state.conversation_id}.txt",
-             mime="text/plain",
-             use_container_width=True
-         )
-
-     st.divider()
-
-     # Statistics
-     st.subheader("📊 Statistics")
-     message_count = len([msg for msg in st.session_state.messages if msg["role"] == "user"])
-     total_chars = sum(len(msg["content"]) for msg in st.session_state.messages)
-
-     col1, col2 = st.columns(2)
-     with col1:
-         st.metric("Messages", message_count)
-     with col2:
-         st.metric("Characters", f"{total_chars:,}")
-
-     # Quick actions
-     st.subheader("⚡ Quick Actions")
-     quick_prompts = [
-         "Explain a complex topic simply",
-         "Help me write an email",
-         "Generate creative ideas",
-         "Analyze some data",
-         "Write Python code"
-     ]
-
-     for prompt in quick_prompts:
-         if st.button(f"💡 {prompt}", key=f"quick_{prompt}", use_container_width=True):
-             # Add the quick prompt as a user message
-             st.session_state.messages.append({
-                 "role": "user",
-                 "content": prompt,
-                 "timestamp": datetime.now()
-             })
-             st.rerun()
-
- # Main chat interface using Streamlit's built-in chat components
- chat_container = st.container()

- with chat_container:
-     # Display chat messages using st.chat_message
-     for i, message in enumerate(st.session_state.messages):
-         with st.chat_message(message["role"]):
-             st.markdown(message["content"])
-
-             # Show timestamp for recent messages
-             if hasattr(message, 'timestamp') or 'timestamp' in message:
-                 timestamp = message.get('timestamp', datetime.now())
-                 if isinstance(timestamp, datetime):
-                     st.caption(f"*{timestamp.strftime('%H:%M:%S')}*")

- # Chat input using Streamlit's built-in st.chat_input
- if prompt := st.chat_input("💭 What can I help you with?"):
-     # Add user message with timestamp
-     user_message = {
-         "role": "user",
-         "content": prompt,
-         "timestamp": datetime.now()
-     }
-     st.session_state.messages.append(user_message)

-     # Display user message immediately
      with st.chat_message("user"):
          st.markdown(prompt)
-         st.caption(f"*{user_message['timestamp'].strftime('%H:%M:%S')}*")

-     # Generate and display assistant response
      with st.chat_message("assistant"):
-         message_placeholder = st.empty()
-
-         # Show typing indicator
-         with st.spinner("🤔 Thinking..."):
-             time.sleep(0.1)  # Brief pause for UX

-         # Stream the response
          full_response = ""
-         try:
-             for response_chunk in stream_ai_response(
-                 st.session_state.messages,
-                 model=selected_model,
-                 temperature=temperature
-             ):
-                 full_response = response_chunk
-                 message_placeholder.markdown(full_response + "▌")
-
-             # Remove cursor and show final response
-             message_placeholder.markdown(full_response)
-
-             # Add timestamp
-             response_time = datetime.now()
-             st.caption(f"*{response_time.strftime('%H:%M:%S')}*")
-
-             # Add assistant response to session state
-             assistant_message = {
-                 "role": "assistant",
-                 "content": full_response,
-                 "timestamp": response_time
-             }
-             st.session_state.messages.append(assistant_message)
-
-         except Exception as e:
-             error_message = "❌ **Error:** Unable to generate response. Please try again."
-             message_placeholder.markdown(error_message)
-             st.session_state.messages.append({
-                 "role": "assistant",
-                 "content": error_message,
-                 "timestamp": datetime.now()
-             })

- # Footer
- st.divider()
- st.markdown(
-     """
-     <div style='text-align: center; color: gray; font-size: 0.875rem;'>
-         🤖 <strong>AI Assistant 2025</strong> |
-         Built with <a href='https://streamlit.io' target='_blank'>Streamlit</a> |
-         Powered by <a href='https://openrouter.ai' target='_blank'>OpenRouter</a>
-     </div>
-     """,
-     unsafe_allow_html=True
- )

  import os
  import json
  import time

+ # Page configuration
  st.set_page_config(
      page_title="AI Assistant 2025",
      page_icon="🤖",
      layout="wide",
+     initial_sidebar_state="expanded"
  )

+ # Simple CSS for chat layout - User right, AI left
  st.markdown("""
  <style>
+ /* User messages - Right side */
+ .stChatMessage[data-testid*="user"] {
+     flex-direction: row-reverse !important;
+     margin-left: 20% !important;
+     margin-right: 0% !important;
  }

+ .stChatMessage[data-testid*="user"] .stMarkdown {
+     background-color: #007bff !important;
+     color: white !important;
+     border-radius: 18px 18px 4px 18px !important;
+     padding: 12px 16px !important;
+     margin-left: 8px !important;
+     margin-right: 0px !important;
  }

+ /* AI messages - Left side (default) */
+ .stChatMessage[data-testid*="assistant"] {
+     margin-right: 20% !important;
+     margin-left: 0% !important;
  }

+ .stChatMessage[data-testid*="assistant"] .stMarkdown {
+     background-color: #f1f3f4 !important;
+     color: #333 !important;
+     border-radius: 18px 18px 18px 4px !important;
+     padding: 12px 16px !important;
+     margin-right: 8px !important;
+     margin-left: 0px !important;
  }

+ /* Hide avatars for cleaner look */
+ .stChatMessage img {
+     display: none !important;
  }

+ /* Clean chat input */
+ .stChatInputContainer {
+     border-top: 1px solid #e0e0e0;
+     padding-top: 1rem;
  }
  </style>
  """, unsafe_allow_html=True)

+ # Initialize session state
  if "messages" not in st.session_state:
      st.session_state.messages = []

+ # Get API key
  OPENROUTER_API_KEY = os.environ.get("OPENROUTER_API_KEY")

+ @st.cache_data(ttl=300)
  def check_api_status():
+     """Simple API check"""
      if not OPENROUTER_API_KEY:
+         return "No API Key"
      try:
          url = "https://openrouter.ai/api/v1/models"
          headers = {"Authorization": f"Bearer {OPENROUTER_API_KEY}"}
          response = requests.get(url, headers=headers, timeout=5)
+         return "Connected" if response.status_code == 200 else "Error"
      except:
+         return "Error"

+ def get_ai_response(messages, model="openai/gpt-3.5-turbo"):
+     """Get AI response"""
      if not OPENROUTER_API_KEY:
+         # This function is a generator, so the error has to be yielded, not returned
+         yield "❌ No API key found. Please add OPENROUTER_API_KEY to environment variables."
+         return

      url = "https://openrouter.ai/api/v1/chat/completions"
      headers = {
          "Authorization": f"Bearer {OPENROUTER_API_KEY}",
+         "Content-Type": "application/json"
      }

+     api_messages = [{"role": "system", "content": "You are a helpful AI assistant."}]
+     api_messages.extend(messages)

+     data = {
          "model": model,
          "messages": api_messages,
          "stream": True,
+         "temperature": 0.7,
+         "max_tokens": 1500
      }

      try:
+         response = requests.post(url, headers=headers, json=data, stream=True, timeout=30)
+         response.raise_for_status()
+
+         full_response = ""
+         for line in response.iter_lines():
+             if line:
+                 line = line.decode('utf-8')
+                 if line.startswith('data: '):
+                     data_str = line[6:].strip()
+                     if data_str == '[DONE]':
+                         break
+                     try:
+                         data = json.loads(data_str)
+                         if 'choices' in data and data['choices']:
+                             delta = data['choices'][0].get('delta', {})
+                             if 'content' in delta:
+                                 full_response += delta['content']
+                                 yield full_response
+                     except json.JSONDecodeError:
+                         continue
      except Exception as e:
+         yield f"❌ Error: {str(e)[:100]}..."

  # Header
+ st.title("🤖 AI Assistant")
+ st.caption("Simple chat interface")

+ # Simple sidebar
  with st.sidebar:
+     st.header("Settings")

      # API Status
      status = check_api_status()
+     if status == "Connected":
+         st.success("✅ API Connected")
+     elif status == "No API Key":
+         st.error("❌ No API Key")
      else:
+         st.warning("⚠️ Connection Issue")

      st.divider()

+     # Model selection
+     models = [
+         "openai/gpt-3.5-turbo",
+         "openai/gpt-4",
+         "anthropic/claude-3-haiku",
+         "google/gemini-pro"
+     ]

+     selected_model = st.selectbox("Model", models)

      st.divider()

+     # Controls
      col1, col2 = st.columns(2)
      with col1:
+         if st.button("🗑️ Clear", use_container_width=True):
              st.session_state.messages = []
              st.rerun()

      with col2:
+         if st.button("🔄 Refresh", use_container_width=True):
              st.rerun()

+     # Stats
+     st.metric("Messages", len(st.session_state.messages))

+ # Display chat messages
+ for message in st.session_state.messages:
+     with st.chat_message(message["role"]):
+         st.markdown(message["content"])

+ # Chat input
+ if prompt := st.chat_input("Type your message..."):
+     # Add user message
+     st.session_state.messages.append({"role": "user", "content": prompt})

+     # Display user message
      with st.chat_message("user"):
          st.markdown(prompt)

+     # Get AI response
      with st.chat_message("assistant"):
+         placeholder = st.empty()

          full_response = ""
+         for response in get_ai_response(st.session_state.messages, selected_model):
+             full_response = response
+             placeholder.markdown(full_response + "▌")
+
+         placeholder.markdown(full_response)
+
+         # Add AI response to messages
+         st.session_state.messages.append({"role": "assistant", "content": full_response})

+ # Simple footer
+ st.markdown("---")
+ st.markdown("🤖 **AI Assistant 2025** | Built with Streamlit")
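
For reference, the streaming loop in get_ai_response() (like stream_ai_response() before it) parses OpenAI-style server-sent events from the OpenRouter /chat/completions endpoint. Below is a minimal, self-contained sketch of that parsing run against a canned payload; the sample lines are illustrative only, not captured from the API.

import json

# Illustrative SSE lines in the shape the parser expects (hypothetical content)
sample_stream = [
    b'data: {"choices": [{"delta": {"content": "Hel"}}]}',
    b'data: {"choices": [{"delta": {"content": "lo!"}}]}',
    b'data: [DONE]',
]

full_response = ""
for raw in sample_stream:
    line = raw.decode("utf-8")
    if not line.startswith("data: "):
        continue
    data_str = line[6:].strip()        # drop the "data: " SSE prefix
    if data_str == "[DONE]":           # end-of-stream sentinel
        break
    chunk = json.loads(data_str)
    delta = chunk["choices"][0].get("delta", {})
    full_response += delta.get("content", "")   # accumulate streamed tokens

print(full_response)  # Hello!

Run locally, the app itself reads OPENROUTER_API_KEY from the environment and is started with streamlit run app.py.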