Update app.py
app.py
CHANGED
@@ -70,13 +70,13 @@ def predict_action(user_text):
 def generate_follow_up(user_text):
     """
     Generates a concise and conversational follow-up question related to the user's input.
-    This version is designed to prompt a follow-up question that is relevant to the topic.
+    This version is designed to prompt a friendly follow-up question that is relevant to the topic.
     """
     prompt_text = (
-        f"Given the user's question: '{user_text}', generate a friendly
+        f"Given the user's question: '{user_text}', generate a friendly and varied follow-up question. "
         "For example, if the question is about quarks, you might ask, "
         "'Would you like to know more about the six types of quarks, or is there another topic you'd like to explore?' "
-        "
+        "Keep it concise and conversational."
     )
     hf = get_llm_hf_inference(max_new_tokens=64, temperature=0.7)
     return hf.invoke(input=prompt_text).strip()
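Note: get_llm_hf_inference is defined earlier in app.py and is not part of this diff. A minimal sketch of what it plausibly looks like, assuming the LangChain HuggingFaceEndpoint wrapper; the model id and defaults below are guesses (the [INST] tags in the prompt templates suggest a Mistral-style instruct model).

# Not part of this diff: a sketch of get_llm_hf_inference under the
# assumptions stated above.
from langchain_huggingface import HuggingFaceEndpoint

def get_llm_hf_inference(model_id="mistralai/Mistral-7B-Instruct-v0.2",
                         max_new_tokens=128, temperature=0.7):
    # The endpoint reads HUGGINGFACEHUB_API_TOKEN from the environment for
    # authentication and returns generated text as a plain string on invoke().
    return HuggingFaceEndpoint(
        repo_id=model_id,
        max_new_tokens=max_new_tokens,
        temperature=temperature,
    )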
@@ -84,13 +84,13 @@ def generate_follow_up(user_text):
 def get_response(system_message, chat_history, user_text, max_new_tokens=256):
     """
     Generates HAL's response in a friendly, conversational manner.
-
-
+    Uses sentiment analysis to adjust tone when appropriate, but only overrides for negative sentiment if
+    the user's input is not a direct question.
     """
     sentiment = analyze_sentiment(user_text)
     action = predict_action(user_text)

-    # Handle NASA-related queries separately
+    # Handle NASA-related queries separately.
     if action == "nasa_info":
         nasa_url, nasa_title, nasa_explanation = get_nasa_apod()
         response = f"**{nasa_title}**\n\n{nasa_explanation}"
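get_nasa_apod is also defined outside this hunk. A sketch, assuming it calls NASA's public Astronomy Picture of the Day API, whose JSON payload includes url, title, and explanation fields; the real app may manage the API key differently.

# Not part of this diff: a sketch of get_nasa_apod using the public APOD endpoint.
import os
import requests

def get_nasa_apod():
    params = {"api_key": os.getenv("NASA_API_KEY", "DEMO_KEY")}
    data = requests.get("https://api.nasa.gov/planetary/apod",
                        params=params, timeout=10).json()
    # Return the media URL, title, and prose explanation for rendering.
    return data.get("url", ""), data.get("title", ""), data.get("explanation", "")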
@@ -102,7 +102,7 @@ def get_response(system_message, chat_history, user_text, max_new_tokens=256):

     hf = get_llm_hf_inference(max_new_tokens=max_new_tokens, temperature=0.9)

-    # Build a filtered conversation history excluding the initial greeting
+    # Build a filtered conversation history excluding the initial greeting.
     filtered_history = ""
     for message in chat_history:
         if message["role"] == "assistant" and message["content"].strip() == "Hello! How can I assist you today?":
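The hunk boundary cuts the loop off mid-filter; one plausible continuation, with the transcript format guessed rather than taken from the file:

# Hypothetical continuation of the loop above (not shown in the diff): skip the
# canned greeting and flatten the rest of the history into a plain transcript.
filtered_history = ""
for message in chat_history:
    if message["role"] == "assistant" and message["content"].strip() == "Hello! How can I assist you today?":
        continue  # drop the greeting so the model does not echo it back
    filtered_history += f"{message['role'].capitalize()}: {message['content']}\n"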
@@ -113,8 +113,8 @@ def get_response(system_message, chat_history, user_text, max_new_tokens=256):
         (
             "[INST] {system_message}\n\nCurrent Conversation:\n{chat_history}\n\n"
             "User: {user_text}.\n [/INST]\n"
-            "AI: Please answer the user's question without repeating previous greetings. "
-            "Keep your response friendly and conversational,
+            "AI: Please answer the user's question without repeating any previous greetings. "
+            "Keep your response friendly and conversational, starting with a phrase like "
             "'Certainly!', 'Of course!', or 'Great question!'.\nHAL:"
         )
     )
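The edited string is a prompt template with system_message, chat_history, and user_text placeholders; given the chat.invoke(...) call in the next hunk, it is presumably wrapped in a LangChain chain roughly like the sketch below. The chain wiring here is an assumption, not the file's actual code.

# A sketch of how the template above is plausibly wired up with LangChain runnables.
from langchain_core.prompts import PromptTemplate
from langchain_core.output_parsers import StrOutputParser

prompt = PromptTemplate.from_template(
    "[INST] {system_message}\n\nCurrent Conversation:\n{chat_history}\n\n"
    "User: {user_text}.\n [/INST]\n"
    "AI: Please answer the user's question without repeating any previous greetings. "
    "Keep your response friendly and conversational, starting with a phrase like "
    "'Certainly!', 'Of course!', or 'Great question!'.\nHAL:"
)
hf = get_llm_hf_inference(max_new_tokens=256, temperature=0.9)
chat = prompt | hf | StrOutputParser()  # chat.invoke(...) then fills the placeholders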
@@ -123,14 +123,13 @@ def get_response(system_message, chat_history, user_text, max_new_tokens=256):
     response = chat.invoke(input=dict(system_message=system_message, user_text=user_text, chat_history=filtered_history))
     response = response.split("HAL:")[-1].strip()

-    # Append user and assistant messages to the chat history
     chat_history.append({'role': 'user', 'content': user_text})
     chat_history.append({'role': 'assistant', 'content': response})

-    #
-    if sentiment == "NEGATIVE":
+    # Only override for negative sentiment if the user input is not a direct question.
+    if sentiment == "NEGATIVE" and not user_text.strip().endswith("?"):
         response = "I'm sorry you're feeling this way. I'm here to help. What can I do to assist you further?"
-        # Update the last assistant message in chat_history with the empathetic response
+        # Update the last assistant message in chat_history with the empathetic response.
         chat_history[-1]['content'] = response

     follow_up = generate_follow_up(user_text)
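The new endswith("?") guard is a cheap heuristic: a negatively phrased question ("Why doesn't this work?") still gets a real answer instead of the canned empathy line. analyze_sentiment itself is defined earlier in the file; a sketch, assuming the default transformers sentiment pipeline, whose labels are exactly the "POSITIVE"/"NEGATIVE" strings checked above:

# Not part of this diff: a sketch of analyze_sentiment using the default
# transformers pipeline (distilbert-base-uncased-finetuned-sst-2-english).
from transformers import pipeline

_sentiment_pipeline = pipeline("sentiment-analysis")

def analyze_sentiment(user_text):
    # Returns "POSITIVE" or "NEGATIVE" for the given text.
    return _sentiment_pipeline(user_text)[0]["label"]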
@@ -197,7 +196,7 @@ if user_input:
     st.session_state.follow_up = follow_up
     st.session_state.response_ready = True

-# Render the entire chat history
+# Render the entire chat history.
 st.markdown("<div class='container'>", unsafe_allow_html=True)
 for message in st.session_state.chat_history:
     if message["role"] == "user":
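The render loop is likewise cut off by the hunk boundary; a hypothetical continuation (the CSS class names are guesses that would need to match the styles defined elsewhere in app.py):

# Hypothetical continuation of the render loop (not shown in the diff).
import streamlit as st

for message in st.session_state.chat_history:
    if message["role"] == "user":
        st.markdown(f"<div class='user-message'>{message['content']}</div>",
                    unsafe_allow_html=True)
    else:
        st.markdown(f"<div class='assistant-message'>{message['content']}</div>",
                    unsafe_allow_html=True)
st.markdown("</div>", unsafe_allow_html=True)  # close the container div opened above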