Update app.py
app.py
CHANGED
@@ -1,6 +1,7 @@
 import os
 import re
 import random
+import subprocess
 import requests
 import streamlit as st
 import spacy  # for additional NLP processing
@@ -17,8 +18,6 @@ def load_spacy_model():
     try:
         return spacy.load("en_core_web_sm")
     except OSError:
-        st.warning("Downloading spaCy model en_core_web_sm... This may take a moment.")
-        import subprocess
         subprocess.run(["python", "-m", "spacy", "download", "en_core_web_sm"], check=True)
         return spacy.load("en_core_web_sm")

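This hunk hoists `import subprocess` to module level and drops the `st.warning` notice, so a missing model is now fetched silently on first use. For comparison, a minimal sketch of an equivalent loader that stays in-process via spaCy's own `spacy.cli.download` helper instead of shelling out (assuming spaCy 3.x):

import spacy
from spacy.cli import download

def load_spacy_model(name: str = "en_core_web_sm"):
    """Load a spaCy pipeline, downloading the model package on first use."""
    try:
        return spacy.load(name)
    except OSError:
        # Model package not installed yet: fetch it, then retry the load.
        download(name)
        return spacy.load(name)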
@@ -32,6 +31,38 @@ if "response_ready" not in st.session_state:
 if "follow_up" not in st.session_state:
     st.session_state.follow_up = ""

+# --- Appearance CSS ---
+st.markdown("""
+    <style>
+    .user-msg {
+        background-color: #696969;
+        color: white;
+        padding: 10px;
+        border-radius: 10px;
+        margin-bottom: 5px;
+        width: fit-content;
+        max-width: 80%;
+    }
+    .assistant-msg {
+        background-color: #333333;
+        color: white;
+        padding: 10px;
+        border-radius: 10px;
+        margin-bottom: 5px;
+        width: fit-content;
+        max-width: 80%;
+    }
+    .container {
+        display: flex;
+        flex-direction: column;
+        align-items: flex-start;
+    }
+    @media (max-width: 600px) {
+        .user-msg, .assistant-msg { font-size: 16px; max-width: 100%; }
+    }
+    </style>
+""", unsafe_allow_html=True)
+
 # --- Set Up Model & API Functions ---
 model_id = "mistralai/Mistral-7B-Instruct-v0.3"
 sentiment_analyzer = pipeline(
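The injected classes only style messages whose HTML actually references them; the rendering code falls outside this diff. As a purely hypothetical illustration of how `.user-msg` and `.assistant-msg` would typically be attached to chat bubbles:

import streamlit as st

def render_message(role: str, text: str) -> None:
    # Choose the bubble class defined in the CSS block added above.
    css_class = "user-msg" if role == "user" else "assistant-msg"
    st.markdown(f'<div class="{css_class}">{text}</div>', unsafe_allow_html=True)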
@@ -92,18 +123,17 @@ def generate_follow_up(user_text):
     cleaned = ["Would you like to explore this topic further?"]
     return random.choice(cleaned)

-def get_response(system_message, chat_history, user_text, max_new_tokens=512):
+def get_response(system_message, chat_history, user_text, max_new_tokens=1024):
     """
-    Generates HAL's detailed answer and a follow-up question.
+    Generates HAL's detailed, in-depth answer and a follow-up question.
+    Incorporates sentiment analysis, additional NLP context, and style instructions.
     """
     sentiment = analyze_sentiment(user_text)
     action = predict_action(user_text)

-    # Extract extra context
     context_info = extract_context(user_text)
     context_clause = f" The key topics here are: {context_info}." if context_info else ""

-    # Extract style instructions if present.
     style_instruction = ""
     lower_text = user_text.lower()
     if "in the voice of" in lower_text or "speaking as" in lower_text:
@@ -112,6 +142,8 @@ def get_response(system_message, chat_history, user_text, max_new_tokens=512):
         style_instruction = match.group(2).strip().capitalize()
         style_instruction = f" Please respond in the voice of {style_instruction}."

+    language_clause = " Answer exclusively in English."
+
     if action == "nasa_info":
         nasa_url, nasa_title, nasa_explanation = get_nasa_apod()
         response = f"**{nasa_title}**\n\n{nasa_explanation}"
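`match.group(2)` implies a two-group pattern whose second group captures the requested persona; the regex itself sits outside this hunk. A plausible reconstruction, offered purely as an assumption:

import re

lower_text = "explain orbits in the voice of carl sagan."
# Hypothetical pattern consistent with the triggers and the group(2) usage.
match = re.search(r"(in the voice of|speaking as)\s+(.+?)(?:[.,!?]|$)", lower_text)
if match:
    style_instruction = match.group(2).strip().capitalize()  # "Carl sagan"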
@@ -135,20 +167,13 @@ def get_response(system_message, chat_history, user_text, max_new_tokens=512):
         "[INST] {system_message}\n\nCurrent Conversation:\n{chat_history}\n\n"
         "User: {user_text}.\n [/INST]\n"
         "AI: Please provide a detailed, in-depth answer in a friendly, conversational tone that thoroughly covers the topic."
-        + style_clause + context_clause +
+        + style_clause + context_clause + language_clause +
         "\nHAL:"
         )
     )

-    # Debug: print the prompt for troubleshooting
-    st.write("DEBUG: Prompt sent to model:")
-    st.write(prompt.format(system_message=system_message, chat_history=filtered_history, user_text=user_text))
-
     chat = prompt | hf.bind(skip_prompt=True) | StrOutputParser(output_key='content')
     raw_output = chat.invoke(input=dict(system_message=system_message, user_text=user_text, chat_history=filtered_history))
-    st.write("DEBUG: Raw model output:")
-    st.write(raw_output)
-
     response = raw_output.split("HAL:")[-1].strip()
     if not response:
         response = "Certainly, here is an in-depth explanation: [Fallback explanation]."
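The `st.write` diagnostics are deleted outright here and in the next hunk. An alternative that keeps them available is gating the output behind an opt-in flag; a small sketch using a hypothetical `HAL_DEBUG` environment variable:

import os
import streamlit as st

DEBUG = os.environ.get("HAL_DEBUG") == "1"

def debug(label: str, value=None) -> None:
    # No-op unless HAL_DEBUG=1, so the production UI stays clean.
    if DEBUG:
        st.write(f"DEBUG: {label}", value)

# Usage at the call sites removed above, e.g.: debug("Raw model output", raw_output)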
@@ -163,8 +188,6 @@ def get_response(system_message, chat_history, user_text, max_new_tokens=512):
     follow_up = generate_follow_up(user_text)
     chat_history.append({'role': 'assistant', 'content': follow_up})

-    st.write("DEBUG: Generated follow-up question:", follow_up)
-
     return response, follow_up, chat_history, None

 # --- Chat UI ---