Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -70,22 +70,25 @@ def predict_action(user_text):
|
|
70 |
def generate_follow_up(user_text):
|
71 |
"""
|
72 |
Generates a concise and conversational follow-up question related to the user's input.
|
73 |
-
This version
|
74 |
"""
|
75 |
prompt_text = (
|
76 |
-
f"
|
77 |
-
"For example, if the
|
78 |
-
"'Would you like to
|
79 |
-
"
|
80 |
)
|
81 |
-
hf = get_llm_hf_inference(max_new_tokens=64, temperature=0.
|
82 |
-
|
|
|
|
|
|
|
83 |
|
84 |
def get_response(system_message, chat_history, user_text, max_new_tokens=256):
|
85 |
"""
|
86 |
Generates HAL's response in a friendly, conversational manner.
|
87 |
-
Uses sentiment analysis to adjust tone when appropriate
|
88 |
-
|
89 |
"""
|
90 |
sentiment = analyze_sentiment(user_text)
|
91 |
action = predict_action(user_text)
|
@@ -113,7 +116,7 @@ def get_response(system_message, chat_history, user_text, max_new_tokens=256):
|
|
113 |
(
|
114 |
"[INST] {system_message}\n\nCurrent Conversation:\n{chat_history}\n\n"
|
115 |
"User: {user_text}.\n [/INST]\n"
|
116 |
-
"AI: Please answer the user's question without repeating
|
117 |
"Keep your response friendly and conversational, starting with a phrase like "
|
118 |
"'Certainly!', 'Of course!', or 'Great question!'.\nHAL:"
|
119 |
)
|
@@ -126,10 +129,9 @@ def get_response(system_message, chat_history, user_text, max_new_tokens=256):
|
|
126 |
chat_history.append({'role': 'user', 'content': user_text})
|
127 |
chat_history.append({'role': 'assistant', 'content': response})
|
128 |
|
129 |
-
# Only override for negative sentiment if
|
130 |
if sentiment == "NEGATIVE" and not user_text.strip().endswith("?"):
|
131 |
response = "I'm sorry you're feeling this way. I'm here to help. What can I do to assist you further?"
|
132 |
-
# Update the last assistant message in chat_history with the empathetic response.
|
133 |
chat_history[-1]['content'] = response
|
134 |
|
135 |
follow_up = generate_follow_up(user_text)
|
|
|
70 |
def generate_follow_up(user_text):
    """
    Produce a brief, friendly follow-up question related to the user's input.

    Builds a short instruction prompt around *user_text*, runs it through the
    HF inference endpoint, and returns the stripped model output. If the model
    comes back with an empty string, a generic fallback question is returned
    so the caller always receives a usable follow-up.
    """
    # NOTE(review): assumes get_llm_hf_inference returns an object with a
    # LangChain-style .invoke(input=...) API — confirm against its definition.
    prompt_text = (
        f"Generate a friendly, concise follow-up question based on the user's question: '{user_text}'. "
        "The follow-up should invite further discussion. For example, if the user asked about quarks, you might ask, "
        "'Would you like to learn more about the six types of quarks, or is there another topic you're curious about?' "
        "Always return a follow-up question."
    )
    llm = get_llm_hf_inference(max_new_tokens=64, temperature=0.8)
    question = llm.invoke(input=prompt_text).strip()
    # Empty string is falsy, so `or` supplies the fallback exactly when the
    # model produced no usable text.
    return question or "Would you like to explore this topic further?"
|
86 |
|
87 |
def get_response(system_message, chat_history, user_text, max_new_tokens=256):
|
88 |
"""
|
89 |
Generates HAL's response in a friendly, conversational manner.
|
90 |
+
Uses sentiment analysis to adjust tone when appropriate.
|
91 |
+
Always generates a follow-up question that is appended to the chat history.
|
92 |
"""
|
93 |
sentiment = analyze_sentiment(user_text)
|
94 |
action = predict_action(user_text)
|
|
|
116 |
(
|
117 |
"[INST] {system_message}\n\nCurrent Conversation:\n{chat_history}\n\n"
|
118 |
"User: {user_text}.\n [/INST]\n"
|
119 |
+
"AI: Please answer the user's question without repeating previous greetings. "
|
120 |
"Keep your response friendly and conversational, starting with a phrase like "
|
121 |
"'Certainly!', 'Of course!', or 'Great question!'.\nHAL:"
|
122 |
)
|
|
|
129 |
chat_history.append({'role': 'user', 'content': user_text})
|
130 |
chat_history.append({'role': 'assistant', 'content': response})
|
131 |
|
132 |
+
# Only override with an empathetic response for negative sentiment if appropriate.
|
133 |
if sentiment == "NEGATIVE" and not user_text.strip().endswith("?"):
|
134 |
response = "I'm sorry you're feeling this way. I'm here to help. What can I do to assist you further?"
|
|
|
135 |
chat_history[-1]['content'] = response
|
136 |
|
137 |
follow_up = generate_follow_up(user_text)
|