uumerrr684 committed on
Commit
92fb852
·
verified ·
1 Parent(s): 61b0aa0

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +138 -56
app.py CHANGED
@@ -1,8 +1,9 @@
1
  import requests
2
- import json
3
  import os
 
4
  import streamlit as st
5
  from datetime import datetime
 
6
 
7
  # Page configuration
8
  st.set_page_config(
@@ -11,18 +12,27 @@ st.set_page_config(
11
  initial_sidebar_state="collapsed"
12
  )
13
 
14
- # Minimal CSS for styling
15
  st.markdown("""
16
  <style>
17
  .stApp {
18
  background: white;
 
 
 
19
  max-width: 800px;
20
- margin: 0 auto;
21
  }
 
 
 
 
 
 
22
  .model-id {
23
  color: #28a745;
24
  font-family: monospace;
25
  }
 
26
  .model-attribution {
27
  color: #28a745;
28
  font-size: 0.8em;
@@ -37,10 +47,12 @@ HISTORY_FILE = "chat_history.json"
37
  def load_chat_history():
38
  """Load chat history from file"""
39
  try:
40
- with open(HISTORY_FILE, 'r', encoding='utf-8') as f:
41
- return json.load(f)
42
- except (FileNotFoundError, json.JSONDecodeError):
43
- return []
 
 
44
 
45
  def save_chat_history(messages):
46
  """Save chat history to file"""
@@ -53,24 +65,21 @@ def save_chat_history(messages):
53
  def clear_chat_history():
54
  """Clear chat history file"""
55
  try:
56
- open(HISTORY_FILE, 'w').close()
 
57
  st.session_state.messages = []
58
  except Exception as e:
59
  st.error(f"Error clearing chat history: {e}")
60
 
61
- # Initialize session state
62
  if "messages" not in st.session_state:
63
  st.session_state.messages = load_chat_history()
64
 
65
  # Get API key
66
  OPENROUTER_API_KEY = os.environ.get("OPENROUTER_API_KEY")
67
- if not OPENROUTER_API_KEY:
68
- st.error("API key not found. Please set OPENROUTER_API_KEY.")
69
- st.stop()
70
 
71
  @st.cache_data(ttl=300)
72
  def check_api_status():
73
- """Check OpenRouter API status"""
74
  if not OPENROUTER_API_KEY:
75
  return "No API Key"
76
  try:
@@ -78,21 +87,22 @@ def check_api_status():
78
  headers = {"Authorization": f"Bearer {OPENROUTER_API_KEY}"}
79
  response = requests.get(url, headers=headers, timeout=10)
80
  return "Connected" if response.status_code == 200 else "Error"
81
- except requests.RequestException:
82
  return "Error"
83
 
84
  def get_ai_response(messages, model="openai/gpt-3.5-turbo"):
85
- """Get streaming AI response from OpenRouter"""
86
  if not OPENROUTER_API_KEY:
87
- yield "No API key found. Please add OPENROUTER_API_KEY to environment variables."
88
- return
89
 
90
  url = "https://openrouter.ai/api/v1/chat/completions"
91
  headers = {
92
  "Content-Type": "application/json",
93
- "Authorization": f"Bearer {OPENROUTER_API_KEY}"
 
 
94
  }
95
 
 
96
  api_messages = [{"role": "system", "content": "You are a helpful AI assistant. Provide clear and helpful responses."}]
97
  api_messages.extend(messages)
98
 
@@ -109,31 +119,49 @@ def get_ai_response(messages, model="openai/gpt-3.5-turbo"):
109
 
110
  try:
111
  response = requests.post(url, headers=headers, json=data, stream=True, timeout=60)
 
 
112
  if response.status_code != 200:
113
- error_detail = response.json().get('error', {}).get('message', f"HTTP {response.status_code}")
 
 
 
 
 
 
114
  yield f"API Error: {error_detail}. Please try a different model or check your API key."
115
  return
116
 
117
  full_response = ""
 
 
 
118
  for line in response.iter_lines():
119
- if line and line.startswith(b"data: "):
120
- data_str = line[len(b"data: "):].decode("utf-8")
121
- if data_str.strip() == "[DONE]":
122
- break
123
- try:
124
- data = json.loads(data_str)
125
- delta = data["choices"][0]["delta"].get("content", "")
126
- if delta:
127
- full_response += delta
128
- yield full_response
129
- except json.JSONDecodeError:
130
- continue
131
- except requests.Timeout:
132
- yield "Request timed out. Please try again."
133
- except requests.ConnectionError:
134
- yield "Connection error. Please check your internet."
135
- except requests.RequestException as e:
136
- yield f"Request error: {str(e)}."
 
 
 
 
 
 
 
137
 
138
  # Header
139
  st.title("AI Assistant")
@@ -154,7 +182,7 @@ with st.sidebar:
154
 
155
  st.divider()
156
 
157
- # Model list
158
  models = [
159
  ("GPT-3.5 Turbo", "openai/gpt-3.5-turbo"),
160
  ("LLaMA 3.1 8B", "meta-llama/llama-3.1-8b-instruct"),
@@ -172,20 +200,26 @@ with st.sidebar:
172
  model_ids = [model_id for _, model_id in models]
173
 
174
  selected_index = st.selectbox("Model", range(len(model_names)),
175
- format_func=lambda x: model_names[x], index=0)
 
176
  selected_model = model_ids[selected_index]
177
 
 
178
  st.markdown(f"**Model ID:** <span class='model-id'>{selected_model}</span>", unsafe_allow_html=True)
179
 
180
  st.divider()
181
 
182
  # Chat History Controls
183
  st.header("Chat History")
 
 
184
  if st.session_state.messages:
185
  st.info(f"Messages stored: {len(st.session_state.messages)}")
186
 
 
187
  auto_save = st.checkbox("Auto-save messages", value=True)
188
 
 
189
  col1, col2 = st.columns(2)
190
  with col1:
191
  if st.button("Save History", use_container_width=True):
@@ -193,18 +227,51 @@ with st.sidebar:
193
  st.success("History saved!")
194
 
195
  with col2:
196
- if st.button("Clear History", use_container_width=True):
197
- clear_chat_history()
198
- st.success("History cleared!")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
199
 
200
- # Show welcome message
201
  if not st.session_state.messages:
202
  st.info("How can I help you today?")
203
 
204
  # Display chat messages
205
  for message in st.session_state.messages:
206
  with st.chat_message(message["role"]):
 
207
  if message["role"] == "assistant" and "Response created by:" in message["content"]:
 
208
  parts = message["content"].split("\n\n---\n*Response created by:")
209
  main_content = parts[0]
210
  if len(parts) > 1:
@@ -218,29 +285,44 @@ for message in st.session_state.messages:
218
 
219
  # Chat input
220
  if prompt := st.chat_input("Ask anything..."):
 
221
  user_message = {"role": "user", "content": prompt}
222
  st.session_state.messages.append(user_message)
223
 
 
224
  if auto_save:
225
  save_chat_history(st.session_state.messages)
226
 
 
227
  with st.chat_message("user"):
228
  st.markdown(prompt)
229
 
 
230
  with st.chat_message("assistant"):
231
  placeholder = st.empty()
232
- full_response = ""
233
- for response in get_ai_response(st.session_state.messages, selected_model):
234
- full_response = response
235
- placeholder.markdown(full_response + "▌")
236
- placeholder.markdown(full_response)
237
 
238
- full_response_with_attribution = full_response + f"\n\n---\n*Response created by: **{model_names[selected_index]}***"
239
- assistant_message = {"role": "assistant", "content": full_response_with_attribution}
240
- st.session_state.messages.append(assistant_message)
241
-
242
- if auto_save:
243
- save_chat_history(st.session_state.messages)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
244
 
245
- # Show current model
246
- st.caption(f"Currently using: {model_names[selected_index]}")
 
1
  import requests
 
2
  import os
3
+ import json
4
  import streamlit as st
5
  from datetime import datetime
6
+ import time
7
 
8
  # Page configuration
9
  st.set_page_config(
 
12
  initial_sidebar_state="collapsed"
13
  )
14
 
15
+ # White background
16
  st.markdown("""
17
  <style>
18
  .stApp {
19
  background: white;
20
+ }
21
+
22
+ .main .block-container {
23
  max-width: 800px;
 
24
  }
25
+
26
+ #MainMenu {visibility: hidden;}
27
+ footer {visibility: hidden;}
28
+ header {visibility: hidden;}
29
+ .stDeployButton {display: none;}
30
+
31
  .model-id {
32
  color: #28a745;
33
  font-family: monospace;
34
  }
35
+
36
  .model-attribution {
37
  color: #28a745;
38
  font-size: 0.8em;
 
47
  def load_chat_history():
48
  """Load chat history from file"""
49
  try:
50
+ if os.path.exists(HISTORY_FILE):
51
+ with open(HISTORY_FILE, 'r', encoding='utf-8') as f:
52
+ return json.load(f)
53
+ except Exception as e:
54
+ st.error(f"Error loading chat history: {e}")
55
+ return []
56
 
57
  def save_chat_history(messages):
58
  """Save chat history to file"""
 
65
  def clear_chat_history():
66
  """Clear chat history file"""
67
  try:
68
+ if os.path.exists(HISTORY_FILE):
69
+ os.remove(HISTORY_FILE)
70
  st.session_state.messages = []
71
  except Exception as e:
72
  st.error(f"Error clearing chat history: {e}")
73
 
74
+ # Initialize session state with saved history
75
  if "messages" not in st.session_state:
76
  st.session_state.messages = load_chat_history()
77
 
78
  # Get API key
79
  OPENROUTER_API_KEY = os.environ.get("OPENROUTER_API_KEY")
 
 
 
80
 
81
  @st.cache_data(ttl=300)
82
  def check_api_status():
 
83
  if not OPENROUTER_API_KEY:
84
  return "No API Key"
85
  try:
 
87
  headers = {"Authorization": f"Bearer {OPENROUTER_API_KEY}"}
88
  response = requests.get(url, headers=headers, timeout=10)
89
  return "Connected" if response.status_code == 200 else "Error"
90
+ except:
91
  return "Error"
92
 
93
  def get_ai_response(messages, model="openai/gpt-3.5-turbo"):
 
94
  if not OPENROUTER_API_KEY:
95
+ return "No API key found. Please add OPENROUTER_API_KEY to environment variables."
 
96
 
97
  url = "https://openrouter.ai/api/v1/chat/completions"
98
  headers = {
99
  "Content-Type": "application/json",
100
+ "Authorization": f"Bearer {OPENROUTER_API_KEY}",
101
+ "HTTP-Referer": "http://localhost:8501", # Optional: Your site URL
102
+ "X-Title": "Streamlit AI Assistant" # Optional: Your app name
103
  }
104
 
105
+ # Create system message and user messages
106
  api_messages = [{"role": "system", "content": "You are a helpful AI assistant. Provide clear and helpful responses."}]
107
  api_messages.extend(messages)
108
 
 
119
 
120
  try:
121
  response = requests.post(url, headers=headers, json=data, stream=True, timeout=60)
122
+
123
+ # Better error handling
124
  if response.status_code != 200:
125
+ error_detail = ""
126
+ try:
127
+ error_data = response.json()
128
+ error_detail = error_data.get('error', {}).get('message', f"HTTP {response.status_code}")
129
+ except:
130
+ error_detail = f"HTTP {response.status_code}: {response.reason}"
131
+
132
  yield f"API Error: {error_detail}. Please try a different model or check your API key."
133
  return
134
 
135
  full_response = ""
136
+ buffer = ""
137
+
138
+ # Using your working streaming logic
139
  for line in response.iter_lines():
140
+ if line:
141
+ # The server sends lines starting with "data: ..."
142
+ if line.startswith(b"data: "):
143
+ data_str = line[len(b"data: "):].decode("utf-8")
144
+ if data_str.strip() == "[DONE]":
145
+ break
146
+ try:
147
+ data = json.loads(data_str)
148
+ delta = data["choices"][0]["delta"].get("content", "")
149
+ if delta:
150
+ full_response += delta
151
+ yield full_response
152
+ except json.JSONDecodeError:
153
+ continue
154
+ except (KeyError, IndexError):
155
+ continue
156
+
157
+ except requests.exceptions.Timeout:
158
+ yield "Request timed out. Please try again with a shorter message or different model."
159
+ except requests.exceptions.ConnectionError:
160
+ yield "Connection error. Please check your internet connection and try again."
161
+ except requests.exceptions.RequestException as e:
162
+ yield f"Request error: {str(e)}. Please try again."
163
+ except Exception as e:
164
+ yield f"Unexpected error: {str(e)}. Please try again or contact support."
165
 
166
  # Header
167
  st.title("AI Assistant")
 
182
 
183
  st.divider()
184
 
185
+ # All models including new ones
186
  models = [
187
  ("GPT-3.5 Turbo", "openai/gpt-3.5-turbo"),
188
  ("LLaMA 3.1 8B", "meta-llama/llama-3.1-8b-instruct"),
 
200
  model_ids = [model_id for _, model_id in models]
201
 
202
  selected_index = st.selectbox("Model", range(len(model_names)),
203
+ format_func=lambda x: model_names[x],
204
+ index=0)
205
  selected_model = model_ids[selected_index]
206
 
207
+ # Show selected model ID in green
208
  st.markdown(f"**Model ID:** <span class='model-id'>{selected_model}</span>", unsafe_allow_html=True)
209
 
210
  st.divider()
211
 
212
  # Chat History Controls
213
  st.header("Chat History")
214
+
215
+ # Show number of messages
216
  if st.session_state.messages:
217
  st.info(f"Messages stored: {len(st.session_state.messages)}")
218
 
219
+ # Auto-save toggle
220
  auto_save = st.checkbox("Auto-save messages", value=True)
221
 
222
+ # Manual save/load buttons
223
  col1, col2 = st.columns(2)
224
  with col1:
225
  if st.button("Save History", use_container_width=True):
 
227
  st.success("History saved!")
228
 
229
  with col2:
230
+ if st.button("Load History", use_container_width=True):
231
+ st.session_state.messages = load_chat_history()
232
+ st.success("History loaded!")
233
+ st.rerun()
234
+
235
+ st.divider()
236
+
237
+ # View History
238
+ if st.button("View History File", use_container_width=True):
239
+ if os.path.exists(HISTORY_FILE):
240
+ with open(HISTORY_FILE, 'r', encoding='utf-8') as f:
241
+ history_content = f.read()
242
+ st.text_area("Chat History (JSON)", history_content, height=200)
243
+ else:
244
+ st.warning("No history file found")
245
+
246
+ # Download History
247
+ if os.path.exists(HISTORY_FILE):
248
+ with open(HISTORY_FILE, 'rb') as f:
249
+ st.download_button(
250
+ label="Download History",
251
+ data=f.read(),
252
+ file_name=f"chat_history_{datetime.now().strftime('%Y%m%d_%H%M%S')}.json",
253
+ mime="application/json",
254
+ use_container_width=True
255
+ )
256
+
257
+ st.divider()
258
+
259
+ # Clear controls
260
+ if st.button("Clear Chat", use_container_width=True, type="secondary"):
261
+ clear_chat_history()
262
+ st.success("Chat cleared!")
263
+ st.rerun()
264
 
265
+ # Show welcome message when no messages
266
  if not st.session_state.messages:
267
  st.info("How can I help you today?")
268
 
269
  # Display chat messages
270
  for message in st.session_state.messages:
271
  with st.chat_message(message["role"]):
272
+ # Check if this is an assistant message with attribution
273
  if message["role"] == "assistant" and "Response created by:" in message["content"]:
274
+ # Split content and attribution
275
  parts = message["content"].split("\n\n---\n*Response created by:")
276
  main_content = parts[0]
277
  if len(parts) > 1:
 
285
 
286
  # Chat input
287
  if prompt := st.chat_input("Ask anything..."):
288
+ # Add user message
289
  user_message = {"role": "user", "content": prompt}
290
  st.session_state.messages.append(user_message)
291
 
292
+ # Auto-save if enabled
293
  if auto_save:
294
  save_chat_history(st.session_state.messages)
295
 
296
+ # Display user message
297
  with st.chat_message("user"):
298
  st.markdown(prompt)
299
 
300
+ # Get AI response
301
  with st.chat_message("assistant"):
302
  placeholder = st.empty()
 
 
 
 
 
303
 
304
+ full_response = ""
305
+ try:
306
+ for response in get_ai_response(st.session_state.messages, selected_model):
307
+ full_response = response
308
+ placeholder.markdown(full_response + "▌")
309
+
310
+ # Remove cursor and show final response
311
+ placeholder.markdown(full_response)
312
+
313
+ except Exception as e:
314
+ error_msg = f"An error occurred: {str(e)}"
315
+ placeholder.markdown(error_msg)
316
+ full_response = error_msg
317
+
318
+ # Add AI response to messages with attribution
319
+ full_response_with_attribution = full_response + f"\n\n---\n*Response created by: **{model_names[selected_index]}***"
320
+ assistant_message = {"role": "assistant", "content": full_response_with_attribution}
321
+ st.session_state.messages.append(assistant_message)
322
+
323
+ # Auto-save if enabled
324
+ if auto_save:
325
+ save_chat_history(st.session_state.messages)
326
 
327
+ # Show currently using model
328
+ st.caption(f"Currently using: **{model_names[selected_index]}**")