Update app.py
app.py CHANGED
@@ -1,9 +1,7 @@
 import requests
-import os
 import json
 import streamlit as st
 from datetime import datetime
-import time
 
 # Page configuration
 st.set_page_config(
@@ -12,27 +10,18 @@ st.set_page_config(
     initial_sidebar_state="collapsed"
 )
 
-#
+# Minimal CSS for styling
 st.markdown("""
 <style>
     .stApp {
         background: white;
-    }
-
-    .main .block-container {
         max-width: 800px;
+        margin: 0 auto;
     }
-
-    #MainMenu {visibility: hidden;}
-    footer {visibility: hidden;}
-    header {visibility: hidden;}
-    .stDeployButton {display: none;}
-
     .model-id {
        color: #28a745;
        font-family: monospace;
    }
-
    .model-attribution {
        color: #28a745;
        font-size: 0.8em;
@@ -47,12 +36,10 @@ HISTORY_FILE = "chat_history.json"
 def load_chat_history():
     """Load chat history from file"""
     try:
-        …
-    except Exception as e:
-        st.error(f"Error loading chat history: {e}")
-        return []
+        with open(HISTORY_FILE, 'r', encoding='utf-8') as f:
+            return json.load(f)
+    except (FileNotFoundError, json.JSONDecodeError):
+        return []
 
 def save_chat_history(messages):
     """Save chat history to file"""
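The rewritten loader folds the two failure modes together: a missing file and unparseable JSON both mean "start with an empty history." The persistence layer is a plain JSON round-trip; a minimal standalone sketch of the pair (the body of save_chat_history is outside this diff, so the json.dump call below is an assumption):

import json

HISTORY_FILE = "chat_history.json"

def save_chat_history(messages):
    # Assumed implementation; the diff does not show this function's body.
    with open(HISTORY_FILE, 'w', encoding='utf-8') as f:
        json.dump(messages, f, ensure_ascii=False, indent=2)

def load_chat_history():
    try:
        with open(HISTORY_FILE, 'r', encoding='utf-8') as f:
            return json.load(f)
    except (FileNotFoundError, json.JSONDecodeError):
        return []  # missing or corrupt file -> empty history

save_chat_history([{"role": "user", "content": "hi"}])
assert load_chat_history() == [{"role": "user", "content": "hi"}]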
@@ -65,13 +52,12 @@ def save_chat_history(messages):
 def clear_chat_history():
     """Clear chat history file"""
     try:
-        if os.path.exists(HISTORY_FILE):
-            os.remove(HISTORY_FILE)
+        open(HISTORY_FILE, 'w').close()
         st.session_state.messages = []
     except Exception as e:
         st.error(f"Error clearing chat history: {e}")
 
-# Initialize session state
+# Initialize session state
 if "messages" not in st.session_state:
     st.session_state.messages = load_chat_history()
 
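Note the behavioral change in clear_chat_history: os.remove deleted the file, while open(HISTORY_FILE, 'w').close() truncates it to zero bytes. An empty file is not valid JSON, so the new load_chat_history still returns an empty history, via its JSONDecodeError branch rather than FileNotFoundError. A quick check of that interaction:

import json

open("chat_history.json", 'w').close()  # truncate to zero bytes

try:
    with open("chat_history.json", 'r', encoding='utf-8') as f:
        json.load(f)
except json.JSONDecodeError:
    print("empty file is treated as an empty history")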
@@ -80,6 +66,7 @@ OPENROUTER_API_KEY = os.environ.get("OPENROUTER_API_KEY")
 
 @st.cache_data(ttl=300)
 def check_api_status():
+    """Check OpenRouter API status"""
     if not OPENROUTER_API_KEY:
         return "No API Key"
     try:
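st.cache_data(ttl=300) memoizes check_api_status, so the status ping hits the API at most once per five-minute window instead of on every Streamlit rerun. A minimal illustration of the TTL cache (slow_lookup is hypothetical, not part of the app):

import time
import streamlit as st

@st.cache_data(ttl=300)
def slow_lookup(x):
    time.sleep(2)  # stand-in for a network call
    return x * 2

st.write(slow_lookup(21))  # first call: ~2 s
st.write(slow_lookup(21))  # within the TTL window: served from cache

One caveat: the hunk context above still reads OPENROUTER_API_KEY from os.environ, while the first hunk removes import os, so the module still needs that import to run.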
@@ -87,22 +74,21 @@ def check_api_status():
         headers = {"Authorization": f"Bearer {OPENROUTER_API_KEY}"}
         response = requests.get(url, headers=headers, timeout=10)
         return "Connected" if response.status_code == 200 else "Error"
-    except:
+    except requests.RequestException:
         return "Error"
 
 def get_ai_response(messages, model="openai/gpt-3.5-turbo"):
+    """Get streaming AI response from OpenRouter"""
     if not OPENROUTER_API_KEY:
-        …
+        yield "No API key found. Please add OPENROUTER_API_KEY to environment variables."
+        return
 
     url = "https://openrouter.ai/api/v1/chat/completions"
     headers = {
         "Content-Type": "application/json",
-        "Authorization": f"Bearer {OPENROUTER_API_KEY}",
-        "HTTP-Referer": "http://localhost:8501",  # Optional: Your site URL
-        "X-Title": "Streamlit AI Assistant"  # Optional: Your app name
+        "Authorization": f"Bearer {OPENROUTER_API_KEY}"
     }
 
-    # Create system message and user messages
     api_messages = [{"role": "system", "content": "You are a helpful AI assistant. Provide clear and helpful responses."}]
     api_messages.extend(messages)
 
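Because get_ai_response contains yield, it is a generator function, so the missing-key guard has to yield the error text and then bare return: a return "message" inside a generator becomes the StopIteration value and never reaches the consuming for loop. A small demonstration (names are illustrative):

def guard_with_return():
    return "no key"  # becomes StopIteration.value, invisible to a for loop
    yield  # unreachable, but makes this a generator function

def guard_with_yield():
    yield "no key"  # the consuming for loop receives this item
    return

print(list(guard_with_return()))  # []
print(list(guard_with_yield()))   # ['no key']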
@@ -119,49 +105,31 @@ def get_ai_response(messages, model="openai/gpt-3.5-turbo"):
 
     try:
         response = requests.post(url, headers=headers, json=data, stream=True, timeout=60)
-
-        # Better error handling
         if response.status_code != 200:
-            error_detail = ""
-            try:
-                error_data = response.json()
-                error_detail = error_data.get('error', {}).get('message', f"HTTP {response.status_code}")
-            except:
-                error_detail = f"HTTP {response.status_code}: {response.reason}"
-
+            error_detail = response.json().get('error', {}).get('message', f"HTTP {response.status_code}")
             yield f"API Error: {error_detail}. Please try a different model or check your API key."
             return
 
         full_response = ""
-        buffer = ""
-
-        # Using your working streaming logic
         for line in response.iter_lines():
-            if line:
-                if …
-            …
-            yield "Request timed out. Please try again with a shorter message or different model."
-    except requests.exceptions.ConnectionError:
-        yield "Connection error. Please check your internet connection and try again."
-    except requests.exceptions.RequestException as e:
-        yield f"Request error: {str(e)}. Please try again."
-    except Exception as e:
-        yield f"Unexpected error: {str(e)}. Please try again or contact support."
+            if line and line.startswith(b"data: "):
+                data_str = line[len(b"data: "):].decode("utf-8")
+                if data_str.strip() == "[DONE]":
+                    break
+                try:
+                    data = json.loads(data_str)
+                    delta = data["choices"][0]["delta"].get("content", "")
+                    if delta:
+                        full_response += delta
+                        yield full_response
+                except json.JSONDecodeError:
+                    continue
+    except requests.Timeout:
+        yield "Request timed out. Please try again."
+    except requests.ConnectionError:
+        yield "Connection error. Please check your internet."
+    except requests.RequestException as e:
+        yield f"Request error: {str(e)}."
 
 # Header
 st.title("AI Assistant")
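The replacement streaming loop assumes OpenAI-style server-sent events from OpenRouter: each payload line is prefixed with data: and the stream terminates with data: [DONE]. Since it yields the accumulated full_response rather than individual deltas, the caller always holds the complete text so far. The parsing logic can be exercised offline against canned bytes (the sample lines are illustrative, not captured output):

import json

canned = [
    b'data: {"choices":[{"delta":{"content":"Hel"}}]}',
    b'',  # keep-alive blank line, skipped by the `if line` check
    b'data: {"choices":[{"delta":{"content":"lo"}}]}',
    b'data: [DONE]',
]

full_response = ""
for line in canned:
    if line and line.startswith(b"data: "):
        data_str = line[len(b"data: "):].decode("utf-8")
        if data_str.strip() == "[DONE]":
            break
        try:
            data = json.loads(data_str)
            delta = data["choices"][0]["delta"].get("content", "")
            if delta:
                full_response += delta
        except json.JSONDecodeError:
            continue

print(full_response)  # Hello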
@@ -182,7 +150,7 @@ with st.sidebar:
 
     st.divider()
 
-    #
+    # Model list
     models = [
         ("GPT-3.5 Turbo", "openai/gpt-3.5-turbo"),
         ("LLaMA 3.1 8B", "meta-llama/llama-3.1-8b-instruct"),
@@ -200,26 +168,20 @@ with st.sidebar:
     model_ids = [model_id for _, model_id in models]
 
     selected_index = st.selectbox("Model", range(len(model_names)),
-                                  format_func=lambda x: model_names[x],
-                                  index=0)
+                                  format_func=lambda x: model_names[x], index=0)
     selected_model = model_ids[selected_index]
 
-    # Show selected model ID in green
     st.markdown(f"**Model ID:** <span class='model-id'>{selected_model}</span>", unsafe_allow_html=True)
 
     st.divider()
 
     # Chat History Controls
     st.header("Chat History")
-
-    # Show number of messages
     if st.session_state.messages:
         st.info(f"Messages stored: {len(st.session_state.messages)}")
 
-    # Auto-save toggle
     auto_save = st.checkbox("Auto-save messages", value=True)
 
-    # Manual save/load buttons
     col1, col2 = st.columns(2)
     with col1:
         if st.button("Save History", use_container_width=True):
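Feeding st.selectbox the index list range(len(model_names)) with a format_func keeps the display label and the API model id paired positionally, so no reverse lookup from label to id is needed:

models = [("GPT-3.5 Turbo", "openai/gpt-3.5-turbo"),
          ("LLaMA 3.1 8B", "meta-llama/llama-3.1-8b-instruct")]
model_names = [name for name, _ in models]
model_ids = [model_id for _, model_id in models]

selected_index = 1  # st.selectbox returns one of the index options
print(model_names[selected_index], "->", model_ids[selected_index])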
@@ -227,51 +189,18 @@ with st.sidebar:
             st.success("History saved!")
 
     with col2:
-        if st.button("…
-            …
-            st.success("History …
-            st.rerun()
-
-    st.divider()
-
-    # View History
-    if st.button("View History File", use_container_width=True):
-        if os.path.exists(HISTORY_FILE):
-            with open(HISTORY_FILE, 'r', encoding='utf-8') as f:
-                history_content = f.read()
-            st.text_area("Chat History (JSON)", history_content, height=200)
-        else:
-            st.warning("No history file found")
-
-    # Download History
-    if os.path.exists(HISTORY_FILE):
-        with open(HISTORY_FILE, 'rb') as f:
-            st.download_button(
-                label="Download History",
-                data=f.read(),
-                file_name=f"chat_history_{datetime.now().strftime('%Y%m%d_%H%M%S')}.json",
-                mime="application/json",
-                use_container_width=True
-            )
-
-    st.divider()
-
-    # Clear controls
-    if st.button("Clear Chat", use_container_width=True, type="secondary"):
-        clear_chat_history()
-        st.success("Chat cleared!")
-        st.rerun()
+        if st.button("Clear History", use_container_width=True):
+            clear_chat_history()
+            st.success("History cleared!")
 
-# Show welcome message
+# Show welcome message
 if not st.session_state.messages:
     st.info("How can I help you today?")
 
 # Display chat messages
 for message in st.session_state.messages:
     with st.chat_message(message["role"]):
-        # Check if this is an assistant message with attribution
         if message["role"] == "assistant" and "Response created by:" in message["content"]:
-            # Split content and attribution
             parts = message["content"].split("\n\n---\n*Response created by:")
             main_content = parts[0]
             if len(parts) > 1:
|
@@ -285,44 +214,29 @@ for message in st.session_state.messages:
|
|
285 |
|
286 |
# Chat input
|
287 |
if prompt := st.chat_input("Ask anything..."):
|
288 |
-
# Add user message
|
289 |
user_message = {"role": "user", "content": prompt}
|
290 |
st.session_state.messages.append(user_message)
|
291 |
|
292 |
-
# Auto-save if enabled
|
293 |
if auto_save:
|
294 |
save_chat_history(st.session_state.messages)
|
295 |
|
296 |
-
# Display user message
|
297 |
with st.chat_message("user"):
|
298 |
st.markdown(prompt)
|
299 |
|
300 |
-
# Get AI response
|
301 |
with st.chat_message("assistant"):
|
302 |
placeholder = st.empty()
|
303 |
-
|
304 |
full_response = ""
|
305 |
-
|
306 |
-
|
307 |
-
|
308 |
-
|
309 |
-
|
310 |
-
|
311 |
-
|
312 |
-
|
313 |
-
|
314 |
-
|
315 |
-
|
316 |
-
full_response = error_msg
|
317 |
-
|
318 |
-
# Add AI response to messages with attribution
|
319 |
-
full_response_with_attribution = full_response + f"\n\n---\n*Response created by: **{model_names[selected_index]}***"
|
320 |
-
assistant_message = {"role": "assistant", "content": full_response_with_attribution}
|
321 |
-
st.session_state.messages.append(assistant_message)
|
322 |
-
|
323 |
-
# Auto-save if enabled
|
324 |
-
if auto_save:
|
325 |
-
save_chat_history(st.session_state.messages)
|
326 |
|
327 |
-
# Show
|
328 |
-
st.caption(f"Currently using:
|
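On the display side, each value yielded by get_ai_response is the full accumulated text, so the loop simply overwrites the placeholder with the latest value plus a cursor glyph, then renders the final text once the generator is exhausted. The same pattern with a stand-in generator (fake_stream is hypothetical):

import time
import streamlit as st

def fake_stream():
    text = ""
    for word in "streaming responses appear word by word".split():
        text += word + " "
        time.sleep(0.2)
        yield text  # accumulated text, like get_ai_response

placeholder = st.empty()
full_response = ""
for response in fake_stream():
    full_response = response
    placeholder.markdown(full_response + "▌")  # cursor while streaming
placeholder.markdown(full_response)  # final render without cursor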