Update app.py
app.py CHANGED
@@ -1,7 +1,6 @@
 import os
 import re
 import random
-import subprocess
 import requests
 import streamlit as st
 import spacy  # for additional NLP processing
@@ -18,6 +17,8 @@ def load_spacy_model():
     try:
         return spacy.load("en_core_web_sm")
     except OSError:
+        st.warning("Downloading spaCy model en_core_web_sm... This may take a moment.")
+        import subprocess
         subprocess.run(["python", "-m", "spacy", "download", "en_core_web_sm"], check=True)
         return spacy.load("en_core_web_sm")
 
@@ -31,38 +32,6 @@ if "response_ready" not in st.session_state:
 if "follow_up" not in st.session_state:
     st.session_state.follow_up = ""
 
-# --- Appearance CSS ---
-st.markdown("""
-<style>
-.user-msg {
-    background-color: #696969;
-    color: white;
-    padding: 10px;
-    border-radius: 10px;
-    margin-bottom: 5px;
-    width: fit-content;
-    max-width: 80%;
-}
-.assistant-msg {
-    background-color: #333333;
-    color: white;
-    padding: 10px;
-    border-radius: 10px;
-    margin-bottom: 5px;
-    width: fit-content;
-    max-width: 80%;
-}
-.container {
-    display: flex;
-    flex-direction: column;
-    align-items: flex-start;
-}
-@media (max-width: 600px) {
-    .user-msg, .assistant-msg { font-size: 16px; max-width: 100%; }
-}
-</style>
-""", unsafe_allow_html=True)
-
 # --- Set Up Model & API Functions ---
 model_id = "mistralai/Mistral-7B-Instruct-v0.3"
 sentiment_analyzer = pipeline(
@@ -99,7 +68,9 @@ def predict_action(user_text):
     return "general_query"
 
 def extract_context(text):
-    """
+    """
+    Uses spaCy to extract named entities for additional context.
+    """
     doc = nlp_spacy(text)
     entities = [ent.text for ent in doc.ents]
     return ", ".join(entities) if entities else ""
@@ -121,17 +92,18 @@ def generate_follow_up(user_text):
     cleaned = ["Would you like to explore this topic further?"]
     return random.choice(cleaned)
 
-def get_response(system_message, chat_history, user_text, max_new_tokens=1024):
+def get_response(system_message, chat_history, user_text, max_new_tokens=512):
     """
-    Generates HAL's detailed
-    Incorporates sentiment analysis, additional NLP context, and style instructions.
+    Generates HAL's detailed answer and a follow-up question.
     """
     sentiment = analyze_sentiment(user_text)
     action = predict_action(user_text)
 
+    # Extract extra context
     context_info = extract_context(user_text)
     context_clause = f" The key topics here are: {context_info}." if context_info else ""
 
+    # Extract style instructions if present.
     style_instruction = ""
     lower_text = user_text.lower()
     if "in the voice of" in lower_text or "speaking as" in lower_text:
@@ -140,8 +112,6 @@ def get_response(system_message, chat_history, user_text, max_new_tokens=1024):
         style_instruction = match.group(2).strip().capitalize()
         style_instruction = f" Please respond in the voice of {style_instruction}."
 
-    language_clause = " Answer exclusively in English."
-
     if action == "nasa_info":
         nasa_url, nasa_title, nasa_explanation = get_nasa_apod()
         response = f"**{nasa_title}**\n\n{nasa_explanation}"
@@ -165,13 +135,20 @@ def get_response(system_message, chat_history, user_text, max_new_tokens=1024):
         "[INST] {system_message}\n\nCurrent Conversation:\n{chat_history}\n\n"
         "User: {user_text}.\n [/INST]\n"
         "AI: Please provide a detailed, in-depth answer in a friendly, conversational tone that thoroughly covers the topic."
-        + style_clause + context_clause +
+        + style_clause + context_clause +
         "\nHAL:"
         )
     )
 
+    # Debug: print the prompt for troubleshooting
+    st.write("DEBUG: Prompt sent to model:")
+    st.write(prompt.format(system_message=system_message, chat_history=filtered_history, user_text=user_text))
+
     chat = prompt | hf.bind(skip_prompt=True) | StrOutputParser(output_key='content')
     raw_output = chat.invoke(input=dict(system_message=system_message, user_text=user_text, chat_history=filtered_history))
+    st.write("DEBUG: Raw model output:")
+    st.write(raw_output)
+
     response = raw_output.split("HAL:")[-1].strip()
     if not response:
         response = "Certainly, here is an in-depth explanation: [Fallback explanation]."
@@ -186,6 +163,8 @@ def get_response(system_message, chat_history, user_text, max_new_tokens=1024):
     follow_up = generate_follow_up(user_text)
     chat_history.append({'role': 'assistant', 'content': follow_up})
 
+    st.write("DEBUG: Generated follow-up question:", follow_up)
+
     return response, follow_up, chat_history, None
 
 # --- Chat UI ---
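A note on the import move: subprocess is now imported lazily inside the except OSError branch, so the dependency is only touched when the model actually needs downloading. spaCy also ships a CLI helper that achieves the same download without spawning a second interpreter; a minimal sketch of that variant (the @st.cache_resource decorator is assumed here and is not visible in the hunk):

import spacy
import streamlit as st
from spacy.cli import download

@st.cache_resource  # assumed; the decorator is outside the changed hunk
def load_spacy_model():
    """Loads en_core_web_sm, downloading it on first run if missing."""
    try:
        return spacy.load("en_core_web_sm")
    except OSError:
        st.warning("Downloading spaCy model en_core_web_sm... This may take a moment.")
        download("en_core_web_sm")  # in-process alternative to subprocess.run
        return spacy.load("en_core_web_sm")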
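The docstring added to extract_context() describes what the body already does: collect named-entity spans and join them into a comma-separated string. For a quick sense of what the en_core_web_sm NER feeds into context_clause, here is the stock spaCy example (labels are indicative and can shift between model versions):

import spacy

nlp_spacy = spacy.load("en_core_web_sm")
doc = nlp_spacy("Apple is looking at buying U.K. startup for $1 billion")
print([(ent.text, ent.label_) for ent in doc.ents])
# Typically: [('Apple', 'ORG'), ('U.K.', 'GPE'), ('$1 billion', 'MONEY')]
print(", ".join(ent.text for ent in doc.ents))  # the string extract_context() would return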
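One caveat on the new st.write("DEBUG: ...") calls: Streamlit renders them in the page for every visitor, formatted prompt included. If they are meant to stay past troubleshooting, a sidebar toggle is one way to gate them; the checkbox and helper below are hypothetical, not part of this commit:

import streamlit as st

# Hypothetical toggle; not in the committed app.py.
show_debug = st.sidebar.checkbox("Show debug output", value=False)

def debug_write(*args):
    """Render debug output only when the sidebar toggle is on."""
    if show_debug:
        st.write(*args)

debug_write("DEBUG: Generated follow-up question:", "Would you like to explore this topic further?")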