Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -1,9 +1,10 @@
|
|
1 |
import os
|
|
|
2 |
import re
|
3 |
import random
|
4 |
import requests
|
5 |
import streamlit as st
|
6 |
-
import spacy # For additional NLP processing
|
7 |
from langchain_huggingface import HuggingFaceEndpoint
|
8 |
from langchain_core.prompts import PromptTemplate
|
9 |
from langchain_core.output_parsers import StrOutputParser
|
@@ -12,8 +13,17 @@ from transformers import pipeline
|
|
12 |
# Must be the first Streamlit command!
|
13 |
st.set_page_config(page_title="HAL - NASA ChatBot", page_icon="🚀")
|
14 |
|
15 |
-
# ---
|
16 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
17 |
|
18 |
# --- Initialize Session State Variables ---
|
19 |
if "chat_history" not in st.session_state:
|
@@ -23,17 +33,37 @@ if "response_ready" not in st.session_state:
|
|
23 |
if "follow_up" not in st.session_state:
|
24 |
st.session_state.follow_up = ""
|
25 |
|
26 |
-
# ---
|
27 |
-
|
28 |
-
|
29 |
-
|
30 |
-
|
31 |
-
|
32 |
-
|
33 |
-
|
34 |
-
|
35 |
-
|
36 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
37 |
|
38 |
# --- Set Up Model & API Functions ---
|
39 |
model_id = "mistralai/Mistral-7B-Instruct-v0.3"
|
@@ -70,6 +100,15 @@ def predict_action(user_text):
|
|
70 |
return "nasa_info"
|
71 |
return "general_query"
|
72 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
73 |
def generate_follow_up(user_text):
|
74 |
"""
|
75 |
Generates two variant follow-up questions and randomly selects one.
|
@@ -90,20 +129,17 @@ def generate_follow_up(user_text):
|
|
90 |
|
91 |
def get_response(system_message, chat_history, user_text, max_new_tokens=256):
|
92 |
"""
|
93 |
-
Generates HAL's response with a
|
94 |
-
|
95 |
"""
|
96 |
sentiment = analyze_sentiment(user_text)
|
97 |
action = predict_action(user_text)
|
98 |
|
99 |
-
# Extract additional context using spaCy
|
100 |
context_info = extract_context(user_text)
|
101 |
-
if context_info:
|
102 |
-
context_clause = f" The key topics here are: {context_info}."
|
103 |
-
else:
|
104 |
-
context_clause = ""
|
105 |
|
106 |
-
# Extract style
|
107 |
style_instruction = ""
|
108 |
lower_text = user_text.lower()
|
109 |
if "in the voice of" in lower_text or "speaking as" in lower_text:
|
@@ -134,7 +170,7 @@ def get_response(system_message, chat_history, user_text, max_new_tokens=256):
|
|
134 |
(
|
135 |
"[INST] {system_message}\n\nCurrent Conversation:\n{chat_history}\n\n"
|
136 |
"User: {user_text}.\n [/INST]\n"
|
137 |
-
"AI: Please provide a detailed, in-depth answer in a friendly, conversational tone that covers the topic."
|
138 |
+ style_clause + context_clause +
|
139 |
"\nHAL:"
|
140 |
)
|
@@ -167,7 +203,6 @@ if st.sidebar.button("Reset Chat"):
|
|
167 |
st.session_state.follow_up = ""
|
168 |
st.experimental_rerun()
|
169 |
|
170 |
-
# Render the chat history.
|
171 |
st.markdown("<div class='container'>", unsafe_allow_html=True)
|
172 |
for message in st.session_state.chat_history:
|
173 |
if message["role"] == "user":
|
|
|
1 |
# Standard library
import os
import random
import re
import subprocess
import sys

# Third-party
import requests
import spacy  # For additional NLP processing
import streamlit as st
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import PromptTemplate
from langchain_huggingface import HuggingFaceEndpoint
|
|
|
13 |
# Must be the first Streamlit command!
|
14 |
st.set_page_config(page_title="HAL - NASA ChatBot", page_icon="🚀")
|
15 |
|
16 |
+
# --- Helper to load spaCy model with fallback ---
def load_spacy_model():
    """Load the spaCy ``en_core_web_sm`` pipeline, downloading it on first use.

    Returns:
        The loaded spaCy ``Language`` pipeline.

    Raises:
        subprocess.CalledProcessError: if the model download fails.
        OSError: if the model still cannot be loaded after download.
    """
    try:
        return spacy.load("en_core_web_sm")
    except OSError:
        # Model is not installed yet: download it, then retry the load.
        st.warning("Downloading spaCy model en_core_web_sm... This may take a moment.")
        # Use the running interpreter (sys.executable) rather than whatever
        # "python" resolves to on PATH, so the model is installed into the
        # same environment/venv this app is executing in.
        subprocess.run(
            [sys.executable, "-m", "spacy", "download", "en_core_web_sm"],
            check=True,
        )
        return spacy.load("en_core_web_sm")

# Load the spaCy model (used for extracting context)
nlp_spacy = load_spacy_model()
27 |
|
28 |
# --- Initialize Session State Variables ---
|
29 |
if "chat_history" not in st.session_state:
|
|
|
33 |
if "follow_up" not in st.session_state:
|
34 |
st.session_state.follow_up = ""
|
35 |
|
36 |
+
# --- Appearance (Optional) ---
# Inject chat-bubble CSS once at startup. .user-msg / .assistant-msg style the
# two message roles rendered later in the chat-history loop; .container is the
# flex wrapper; the media query adapts bubbles to narrow (mobile) viewports.
# unsafe_allow_html is required for Streamlit to render raw <style> markup.
st.markdown("""
<style>
.user-msg {
    background-color: #696969;
    color: white;
    padding: 10px;
    border-radius: 10px;
    margin-bottom: 5px;
    width: fit-content;
    max-width: 80%;
}
.assistant-msg {
    background-color: #333333;
    color: white;
    padding: 10px;
    border-radius: 10px;
    margin-bottom: 5px;
    width: fit-content;
    max-width: 80%;
}
.container {
    display: flex;
    flex-direction: column;
    align-items: flex-start;
}
@media (max-width: 600px) {
    .user-msg, .assistant-msg { font-size: 16px; max-width: 100%; }
}
</style>
""", unsafe_allow_html=True)
|
67 |
|
68 |
# --- Set Up Model & API Functions ---
|
69 |
model_id = "mistralai/Mistral-7B-Instruct-v0.3"
|
|
|
100 |
return "nasa_info"
|
101 |
return "general_query"
|
102 |
|
103 |
+
def extract_context(text):
    """Return a comma-separated string of named entities found in *text*.

    Runs the module-level spaCy pipeline over the input and collects the
    surface text of each recognized entity, in document order. Returns the
    empty string when no entities are recognized.
    """
    parsed = nlp_spacy(text)
    entity_texts = []
    for entity in parsed.ents:
        entity_texts.append(entity.text)
    if not entity_texts:
        return ""
    return ", ".join(entity_texts)
|
111 |
+
|
112 |
def generate_follow_up(user_text):
|
113 |
"""
|
114 |
Generates two variant follow-up questions and randomly selects one.
|
|
|
129 |
|
130 |
def get_response(system_message, chat_history, user_text, max_new_tokens=256):
|
131 |
"""
|
132 |
+
Generates HAL's detailed, in-depth response with a follow-up question.
|
133 |
+
Incorporates sentiment analysis and additional NLP context extraction.
|
134 |
"""
|
135 |
sentiment = analyze_sentiment(user_text)
|
136 |
action = predict_action(user_text)
|
137 |
|
138 |
+
# Extract additional context (e.g., named entities) using spaCy.
|
139 |
context_info = extract_context(user_text)
|
140 |
+
context_clause = f" The key topics here are: {context_info}." if context_info else ""
|
|
|
|
|
|
|
141 |
|
142 |
+
# Extract style instructions if provided.
|
143 |
style_instruction = ""
|
144 |
lower_text = user_text.lower()
|
145 |
if "in the voice of" in lower_text or "speaking as" in lower_text:
|
|
|
170 |
(
|
171 |
"[INST] {system_message}\n\nCurrent Conversation:\n{chat_history}\n\n"
|
172 |
"User: {user_text}.\n [/INST]\n"
|
173 |
+
"AI: Please provide a detailed, in-depth answer in a friendly, conversational tone that thoroughly covers the topic."
|
174 |
+ style_clause + context_clause +
|
175 |
"\nHAL:"
|
176 |
)
|
|
|
203 |
st.session_state.follow_up = ""
|
204 |
st.experimental_rerun()
|
205 |
|
|
|
206 |
st.markdown("<div class='container'>", unsafe_allow_html=True)
|
207 |
for message in st.session_state.chat_history:
|
208 |
if message["role"] == "user":
|