Update app.py
app.py
CHANGED
@@ -1,7 +1,7 @@
 import os
-import subprocess
 import re
 import random
+import subprocess
 import requests
 import streamlit as st
 import spacy  # For additional NLP processing
@@ -22,7 +22,6 @@ def load_spacy_model():
         subprocess.run(["python", "-m", "spacy", "download", "en_core_web_sm"], check=True)
     return spacy.load("en_core_web_sm")

-# Load the spaCy model (used for extracting context)
 nlp_spacy = load_spacy_model()

 # --- Initialize Session State Variables ---
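Only the tail of load_spacy_model() is visible in the hunk above. For orientation, here is a minimal sketch of the usual shape of such a helper; the try/except layout and the st.cache_resource decorator are assumptions, since neither appears in this diff:

    import subprocess

    import spacy
    import streamlit as st

    @st.cache_resource  # assumption: cache the loaded model across Streamlit reruns
    def load_spacy_model():
        try:
            spacy.load("en_core_web_sm")  # raises OSError if the model is absent
        except OSError:
            # Download on first run; this subprocess call is the line shown in the diff.
            subprocess.run(["python", "-m", "spacy", "download", "en_core_web_sm"], check=True)
        return spacy.load("en_core_web_sm")

    nlp_spacy = load_spacy_model()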
@@ -33,38 +32,6 @@ if "response_ready" not in st.session_state:
 if "follow_up" not in st.session_state:
     st.session_state.follow_up = ""

-# --- Appearance (Optional) ---
-st.markdown("""
-    <style>
-    .user-msg {
-        background-color: #696969;
-        color: white;
-        padding: 10px;
-        border-radius: 10px;
-        margin-bottom: 5px;
-        width: fit-content;
-        max-width: 80%;
-    }
-    .assistant-msg {
-        background-color: #333333;
-        color: white;
-        padding: 10px;
-        border-radius: 10px;
-        margin-bottom: 5px;
-        width: fit-content;
-        max-width: 80%;
-    }
-    .container {
-        display: flex;
-        flex-direction: column;
-        align-items: flex-start;
-    }
-    @media (max-width: 600px) {
-        .user-msg, .assistant-msg { font-size: 16px; max-width: 100%; }
-    }
-    </style>
-""", unsafe_allow_html=True)
-
 # --- Set Up Model & API Functions ---
 model_id = "mistralai/Mistral-7B-Instruct-v0.3"
 sentiment_analyzer = pipeline(
|
|
102 |
|
103 |
def extract_context(text):
|
104 |
"""
|
105 |
-
|
106 |
-
Returns a comma-separated string of entities, if any.
|
107 |
"""
|
108 |
doc = nlp_spacy(text)
|
109 |
entities = [ent.text for ent in doc.ents]
|
110 |
return ", ".join(entities) if entities else ""
|
111 |
|
112 |
def generate_follow_up(user_text):
|
113 |
-
"""
|
114 |
-
Generates two variant follow-up questions and randomly selects one.
|
115 |
-
Cleans up extraneous quotation marks.
|
116 |
-
"""
|
117 |
prompt_text = (
|
118 |
f"Based on the user's question: '{user_text}', generate two concise, friendly follow-up questions "
|
119 |
"that invite further discussion. For example, one might be 'Would you like to know more about the six types of quarks?' "
|
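extract_context() is small enough to exercise in isolation. What it returns depends entirely on which entities en_core_web_sm picks out, so the output in the comment is only indicative:

    import spacy

    nlp_spacy = spacy.load("en_core_web_sm")

    def extract_context(text):
        """Extract key entities using spaCy for additional context."""
        doc = nlp_spacy(text)
        entities = [ent.text for ent in doc.ents]
        return ", ".join(entities) if entities else ""

    print(extract_context("Tell me what CERN discovered in 2012."))
    # Indicative output: "CERN, 2012"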
@@ -128,18 +90,14 @@ def generate_follow_up(user_text):
     return random.choice(cleaned)

 def get_response(system_message, chat_history, user_text, max_new_tokens=256):
-    """
-    Generates HAL's detailed, in-depth response with a follow-up question.
-    Incorporates sentiment analysis and additional NLP context extraction.
-    """
     sentiment = analyze_sentiment(user_text)
     action = predict_action(user_text)

-    # Extract
+    # Extract extra context from user's text.
     context_info = extract_context(user_text)
     context_clause = f" The key topics here are: {context_info}." if context_info else ""

-    # Extract style instructions if
+    # Extract style instructions if present.
     style_instruction = ""
     lower_text = user_text.lower()
     if "in the voice of" in lower_text or "speaking as" in lower_text:
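The body of the "in the voice of" / "speaking as" branch falls outside this hunk, so how the style instruction is actually built is not visible here. As a rough sketch of the kind of extraction such a branch might perform (the helper name and the regex are hypothetical, built only on the re module app.py already imports):

    import re

    def extract_style_instruction(user_text):
        # Hypothetical reconstruction: capture whatever follows the style cue.
        match = re.search(r"(?:in the voice of|speaking as)\s+(.+)", user_text, re.IGNORECASE)
        if match:
            persona = match.group(1).strip().rstrip(".?!")
            return f" Respond in the style of {persona}."
        return ""

    print(extract_style_instruction("Explain gravity in the voice of HAL 9000."))
    # " Respond in the style of HAL 9000."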
@@ -165,22 +123,27 @@ def get_response(system_message, chat_history, user_text, max_new_tokens=256):
         filtered_history += f"{message['role']}: {message['content']}\n"

     style_clause = style_instruction if style_instruction else ""
-
+    # Construct prompt with additional context.
     prompt = PromptTemplate.from_template(
         (
             "[INST] {system_message}\n\nCurrent Conversation:\n{chat_history}\n\n"
             "User: {user_text}.\n [/INST]\n"
-            "AI: Please provide a detailed, in-depth answer in a friendly, conversational tone that thoroughly covers the topic."
+            "AI: Please provide a detailed, in-depth answer in a friendly, conversational tone that thoroughly covers the topic."
             + style_clause + context_clause +
             "\nHAL:"
         )
     )

+    # Debug: Output the prompt being sent (for troubleshooting)
+    st.write("DEBUG: Prompt sent to language model:")
+    st.write(prompt.format(system_message=system_message, chat_history=filtered_history, user_text=user_text))
+
     chat = prompt | hf.bind(skip_prompt=True) | StrOutputParser(output_key='content')
     response = chat.invoke(input=dict(system_message=system_message, user_text=user_text, chat_history=filtered_history))
     response = response.split("HAL:")[-1].strip()
     if not response:
         response = "Certainly, here is an in-depth explanation: [Fallback explanation]."
+
     chat_history.append({'role': 'user', 'content': user_text})
     chat_history.append({'role': 'assistant', 'content': response})

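Two notes on this hunk. The chain uses LangChain's pipe (LCEL) syntax: the PromptTemplate feeds the bound Hugging Face model, and StrOutputParser turns the result into a plain string. The new DEBUG lines call prompt.format(...) with the same variables the chain receives, so the page shows exactly the prompt string sent to the model. A stripped-down version of the pattern (hf stands for the Hugging Face model wrapper configured elsewhere in app.py, so only the prompt half runs standalone):

    from langchain_core.output_parsers import StrOutputParser
    from langchain_core.prompts import PromptTemplate

    prompt = PromptTemplate.from_template(
        "[INST] {system_message}\n\nUser: {user_text} [/INST]\nHAL:"
    )

    # This is the string the DEBUG st.write() renders:
    print(prompt.format(system_message="You are HAL.", user_text="What is a quark?"))

    # chat = prompt | hf | StrOutputParser()  # hf: the model wrapper from app.py
    # response = chat.invoke({"system_message": "...", "user_text": "..."})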
@@ -191,6 +154,9 @@ def get_response(system_message, chat_history, user_text, max_new_tokens=256):
     follow_up = generate_follow_up(user_text)
     chat_history.append({'role': 'assistant', 'content': follow_up})

+    # Debug: Output generated follow-up for troubleshooting.
+    st.write("DEBUG: Generated follow-up question:", follow_up)
+
     return response, follow_up, chat_history, None

 # --- Chat UI ---