Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -1,4 +1,5 @@
|
|
1 |
import os
|
|
|
2 |
import requests
|
3 |
import streamlit as st
|
4 |
from langchain_huggingface import HuggingFaceEndpoint
|
@@ -70,16 +71,21 @@ def predict_action(user_text):
|
|
70 |
def generate_follow_up(user_text):
|
71 |
"""
|
72 |
Generates a concise and conversational follow-up question related to the user's input.
|
73 |
-
|
74 |
"""
|
75 |
prompt_text = (
|
76 |
-
f"Generate a
|
77 |
-
"
|
78 |
-
"
|
79 |
-
"
|
80 |
)
|
81 |
hf = get_llm_hf_inference(max_new_tokens=64, temperature=0.8)
|
82 |
follow_up = hf.invoke(input=prompt_text).strip()
|
|
|
|
|
|
|
|
|
|
|
83 |
if not follow_up:
|
84 |
follow_up = "Would you like to explore this topic further?"
|
85 |
return follow_up
|
@@ -87,11 +93,22 @@ def generate_follow_up(user_text):
|
|
87 |
def get_response(system_message, chat_history, user_text, max_new_tokens=256):
|
88 |
"""
|
89 |
Generates HAL's response in a friendly, conversational manner.
|
90 |
-
Uses sentiment analysis to adjust tone when appropriate.
|
91 |
-
|
|
|
92 |
"""
|
93 |
sentiment = analyze_sentiment(user_text)
|
94 |
action = predict_action(user_text)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
95 |
|
96 |
# Handle NASA-related queries separately.
|
97 |
if action == "nasa_info":
|
@@ -112,13 +129,19 @@ def get_response(system_message, chat_history, user_text, max_new_tokens=256):
|
|
112 |
continue
|
113 |
filtered_history += f"{message['role']}: {message['content']}\n"
|
114 |
|
|
|
|
|
|
|
|
|
|
|
115 |
prompt = PromptTemplate.from_template(
|
116 |
(
|
117 |
"[INST] {system_message}\n\nCurrent Conversation:\n{chat_history}\n\n"
|
118 |
"User: {user_text}.\n [/INST]\n"
|
119 |
-
"AI: Please answer the user's question without repeating previous greetings.
|
120 |
-
"Keep your response friendly and conversational, starting with a phrase like "
|
121 |
-
|
|
|
122 |
)
|
123 |
)
|
124 |
|
@@ -129,7 +152,7 @@ def get_response(system_message, chat_history, user_text, max_new_tokens=256):
|
|
129 |
chat_history.append({'role': 'user', 'content': user_text})
|
130 |
chat_history.append({'role': 'assistant', 'content': response})
|
131 |
|
132 |
-
# Only override with an empathetic response for negative sentiment if
|
133 |
if sentiment == "NEGATIVE" and not user_text.strip().endswith("?"):
|
134 |
response = "I'm sorry you're feeling this way. I'm here to help. What can I do to assist you further?"
|
135 |
chat_history[-1]['content'] = response
|
|
|
1 |
import os
|
2 |
+
import re
|
3 |
import requests
|
4 |
import streamlit as st
|
5 |
from langchain_huggingface import HuggingFaceEndpoint
|
|
|
def generate_follow_up(user_text):
    """
    Produce a short, conversational follow-up question tied to the user's input.

    The prompt explicitly tells the model to avoid meta commentary, and the
    raw completion is post-processed: surrounding quotes are trimmed, any
    leaked meta phrase is scrubbed out, and a generic question is substituted
    if nothing usable remains.

    Parameters
    ----------
    user_text : str
        The user's most recent message; interpolated into the prompt.

    Returns
    -------
    str
        A non-empty follow-up question.
    """
    question_prompt = (
        f"Generate a concise, friendly follow-up question based on the user's question: '{user_text}'. "
        "Do not include meta instructions or commentary such as 'Never return just a statement.' "
        "For example, if the user asked about quarks, you might ask: "
        "'Would you like to know more about the six types of quarks, or is there another aspect of quantum physics you're curious about?'"
    )

    # Short, slightly creative completion for a one-line question.
    llm = get_llm_hf_inference(max_new_tokens=64, temperature=0.8)
    raw_reply = llm.invoke(input=question_prompt)

    # Clean up the completion: trim whitespace, drop wrapping quotes, and
    # remove the known leaked meta phrase (case-insensitively).
    cleaned = raw_reply.strip().strip('\'"')
    cleaned = re.sub(r"Never return just a statement\.?", "", cleaned, flags=re.IGNORECASE).strip()

    # Guarantee a non-empty return value for callers.
    return cleaned if cleaned else "Would you like to explore this topic further?"
|
|
|
93 |
def get_response(system_message, chat_history, user_text, max_new_tokens=256):
|
94 |
"""
|
95 |
Generates HAL's response in a friendly, conversational manner.
|
96 |
+
Uses sentiment analysis to adjust tone when appropriate and always generates a follow-up question.
|
97 |
+
If the user's input includes style instructions (e.g., 'in the voice of an astrophysicist'),
|
98 |
+
the prompt instructs HAL to adapt accordingly.
|
99 |
"""
|
100 |
sentiment = analyze_sentiment(user_text)
|
101 |
action = predict_action(user_text)
|
102 |
+
|
103 |
+
# Check for style instructions in the user message.
|
104 |
+
style_instruction = ""
|
105 |
+
lower_text = user_text.lower()
|
106 |
+
if "in the voice of" in lower_text or "speaking as" in lower_text:
|
107 |
+
# Extract the style instruction (a simple heuristic: take the part after "in the voice of")
|
108 |
+
match = re.search(r"(in the voice of|speaking as)(.*)", lower_text)
|
109 |
+
if match:
|
110 |
+
style_instruction = match.group(2).strip().capitalize()
|
111 |
+
style_instruction = f" Please respond in the voice of {style_instruction}."
|
112 |
|
113 |
# Handle NASA-related queries separately.
|
114 |
if action == "nasa_info":
|
|
|
129 |
continue
|
130 |
filtered_history += f"{message['role']}: {message['content']}\n"
|
131 |
|
132 |
+
# Add style instruction to the prompt if applicable.
|
133 |
+
style_clause = ""
|
134 |
+
if style_instruction:
|
135 |
+
style_clause = style_instruction
|
136 |
+
|
137 |
prompt = PromptTemplate.from_template(
|
138 |
(
|
139 |
"[INST] {system_message}\n\nCurrent Conversation:\n{chat_history}\n\n"
|
140 |
"User: {user_text}.\n [/INST]\n"
|
141 |
+
"AI: Please answer the user's question without repeating any previous greetings."
|
142 |
+
" Keep your response friendly and conversational, starting with a phrase like 'Certainly!', 'Of course!', or 'Great question!'." +
|
143 |
+
style_clause +
|
144 |
+
"\nHAL:"
|
145 |
)
|
146 |
)
|
147 |
|
|
|
152 |
chat_history.append({'role': 'user', 'content': user_text})
|
153 |
chat_history.append({'role': 'assistant', 'content': response})
|
154 |
|
155 |
+
# Only override with an empathetic response for negative sentiment if the input is not a direct question.
|
156 |
if sentiment == "NEGATIVE" and not user_text.strip().endswith("?"):
|
157 |
response = "I'm sorry you're feeling this way. I'm here to help. What can I do to assist you further?"
|
158 |
chat_history[-1]['content'] = response
|