Update app.py
app.py
CHANGED
@@ -116,7 +116,10 @@ def get_response(system_message, chat_history, user_text, max_new_tokens=512):
     The prompt instructs the model to provide a detailed explanation and then generate a follow-up.
     If the answer comes back empty, a fallback answer is used.
     """
-
+
+    # 🔍 Determine the user's intent (NASA Info or General Query)
+    action = predict_action(user_text)  # 🔥 Define 'action' here
+
     # Extract style instruction if present
     style_instruction = ""
     lower_text = user_text.lower()
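For context, predict_action is defined elsewhere in app.py and does not appear in this diff. A minimal sketch of the kind of keyword router it could plausibly be (the body, keyword list, and "general_query" label below are assumptions, not the Space's actual code):

# Hypothetical sketch only; the real predict_action in app.py is not shown in this diff.
def predict_action(user_text: str) -> str:
    # Treat NASA-related wording as an APOD request; everything else is a general query.
    nasa_keywords = ("nasa", "apod", "astronomy picture", "space picture")
    lowered = user_text.lower()
    if any(keyword in lowered for keyword in nasa_keywords):
        return "nasa_info"
    return "general_query"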
@@ -125,7 +128,8 @@ def get_response(system_message, chat_history, user_text, max_new_tokens=512):
     if match:
         style_instruction = match.group(2).strip().capitalize()
         style_instruction = f" Please respond in the voice of {style_instruction}."
-
+
+    # 🚀 Handle NASA-specific queries
     if action == "nasa_info":
        nasa_url, nasa_title, nasa_explanation = get_nasa_apod()
        response = f"**{nasa_title}**\n\n{nasa_explanation}"
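get_nasa_apod is likewise defined outside this hunk. One plausible sketch against NASA's public Astronomy Picture of the Day API, returning (url, title, explanation) in the order the diff unpacks; the DEMO_KEY and error handling are assumptions:

import requests

# Hypothetical sketch only; the real get_nasa_apod in app.py is not shown in this diff.
def get_nasa_apod():
    # NASA's public APOD endpoint; DEMO_KEY is heavily rate-limited, so a real
    # deployment would read an API key from configuration instead.
    resp = requests.get(
        "https://api.nasa.gov/planetary/apod",
        params={"api_key": "DEMO_KEY"},
        timeout=10,
    )
    resp.raise_for_status()
    data = resp.json()
    return data.get("url"), data.get("title"), data.get("explanation")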
@@ -134,29 +138,29 @@ def get_response(system_message, chat_history, user_text, max_new_tokens=512):
         follow_up = generate_follow_up(user_text)
         chat_history.append({'role': 'assistant', 'content': follow_up})
         return response, follow_up, chat_history, nasa_url
-
+
     hf = get_llm_hf_inference(max_new_tokens=max_new_tokens, temperature=0.9)
     filtered_history = ""
     for message in chat_history:
         if message["role"] == "assistant" and message["content"].strip() == "Hello! How can I assist you today?":
             continue
         filtered_history += f"{message['role']}: {message['content']}\n"
-
+
     style_clause = style_instruction if style_instruction else ""
-
+
     # Instruct the model to generate a detailed, in-depth answer.
     prompt = PromptTemplate.from_template(
-
-
-
-
-
-
-
-
+        (
+            "[INST] {system_message}\n\nCurrent Conversation:\n{chat_history}\n\n"
+            "User: {user_text}.\n [/INST]\n"
+            "AI: Please provide a detailed explanation in depth. "
+            "Ensure your response covers the topic thoroughly and is written in a friendly, conversational style, "
+            "starting with a phrase like 'Certainly!', 'Of course!', or 'Great question!'."
+            "🚨 IMPORTANT: Answer exclusively in **English only**. Do not generate responses in any other language."
+            "\nHAL:"
+        )
     )
-
-
+
     chat = prompt | hf.bind(skip_prompt=True) | StrOutputParser(output_key='content')
     response = chat.invoke(input=dict(system_message=system_message, user_text=user_text, chat_history=filtered_history))
     response = response.split("HAL:")[-1].strip()
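The rewritten prompt is composed into a LangChain (LCEL) pipeline: PromptTemplate fills in {system_message}, {chat_history}, and {user_text}, the Hugging Face model generates, and StrOutputParser returns plain text, which is then split on the "HAL:" marker. A self-contained sketch of the same pattern, assuming a HuggingFaceEndpoint model; the repo_id is a placeholder, since the Space builds its model via get_llm_hf_inference, which is not shown here:

from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import PromptTemplate
from langchain_huggingface import HuggingFaceEndpoint

# Placeholder model; the Space's get_llm_hf_inference wraps its own choice of endpoint.
# Requires HUGGINGFACEHUB_API_TOKEN in the environment.
llm = HuggingFaceEndpoint(
    repo_id="mistralai/Mistral-7B-Instruct-v0.2",
    max_new_tokens=512,
    temperature=0.9,
)
prompt = PromptTemplate.from_template(
    "[INST] {system_message}\n\nUser: {user_text} [/INST]\nHAL:"
)
# Same template -> model -> string-parser chain as in the diff.
chat = prompt | llm | StrOutputParser()
response = chat.invoke({"system_message": "You are HAL, a helpful assistant.", "user_text": "Hello!"})
print(response.split("HAL:")[-1].strip())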
@@ -164,24 +168,10 @@ def get_response(system_message, chat_history, user_text, max_new_tokens=512):
     # 🚨 Ensure the response is in English
     response = ensure_english(response)
 
-
     # Fallback in case the generated answer is empty
     if not response:
-
-
-    chat_history.append({'role': 'user', 'content': user_text})
-    chat_history.append({'role': 'assistant', 'content': response})
-
-    # 🔧 FIX: Only override if strongly negative and NOT a question
-    if sentiment == "NEGATIVE" and not user_text.strip().endswith("?"):
-        response = "I'm sorry you're feeling this way. I'm here to help. What can I do to assist you further?"
-        chat_history[-1]['content'] = response
-        follow_up = None  # 🚨 Don't generate follow-up if negative sentiment triggers a different message
-    else:
-        follow_up = generate_follow_up(user_text)
-        chat_history.append({'role': 'assistant', 'content': follow_up})
+
 
-    return response, follow_up, chat_history, None
 
 
 # --- Chat UI ---
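ensure_english is referenced here but defined elsewhere in app.py. Given the docstring's note that an empty answer triggers a fallback, one common way to write such a guard is with the langdetect package; the sketch below is an assumption about the helper, not the Space's actual code:

from langdetect import detect, LangDetectException

# Hypothetical sketch only; the real ensure_english in app.py is not shown in this diff.
def ensure_english(text: str) -> str:
    # Keep the text only when it is detected as English; returning "" lets the
    # empty-response fallback path above take over.
    try:
        if text and detect(text) == "en":
            return text
    except LangDetectException:
        pass
    return ""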
|