Spaces:
Running
Running
Chandima Prabhath
committed on
Commit
·
c629376
1
Parent(s):
7b1490a
Refactor route_intent function to enhance prompt clarity and user interaction; streamline function invocation instructions and JSON response format
Browse files
app.py
CHANGED
|
@@ -383,34 +383,30 @@ ACTION_HANDLERS = {
|
|
| 383 |
def route_intent(user_input: str, chat_id: str, sender: str):
|
| 384 |
history_text = get_history_text(chat_id, sender)
|
| 385 |
sys_prompt = (
|
| 386 |
-
"You are Eve, a sweet, innocent, and helpful assistant. "
|
| 387 |
"You never perform work yourself—you only invoke one of the available functions. "
|
| 388 |
"When the user asks for something that matches a function signature, you must return exactly one JSON object matching that function’s parameters—and nothing else. "
|
| 389 |
"Do not wrap it in markdown, do not add extra text, and do not show the JSON to the user. "
|
| 390 |
"If the user’s request does not match any function, reply in plain text, and never mention JSON or internal logic.\n\n"
|
| 391 |
-
"
|
| 392 |
-
"
|
| 393 |
-
"
|
| 394 |
-
"
|
| 395 |
-
"
|
| 396 |
-
"
|
| 397 |
-
"
|
| 398 |
-
"
|
| 399 |
-
"
|
| 400 |
-
"
|
| 401 |
-
"
|
| 402 |
-
"
|
| 403 |
-
"
|
| 404 |
-
"
|
| 405 |
-
|
| 406 |
-
"Current user message:\n"
|
| 407 |
-
f"User: {user_input}"
|
| 408 |
)
|
| 409 |
-
|
| 410 |
-
#prompt = f"{sys_prompt}\nConversation so far:\n{history_text}\n\n current message: User: {user_input}"
|
| 411 |
|
| 412 |
try:
|
| 413 |
-
raw = generate_llm(
|
| 414 |
except LLMBadRequestError:
|
| 415 |
# Clear history on HTTP 400 from the LLM
|
| 416 |
clear_history(chat_id, sender)
|
|
|
|
| 383 |
def route_intent(user_input: str, chat_id: str, sender: str):
|
| 384 |
history_text = get_history_text(chat_id, sender)
|
| 385 |
sys_prompt = (
|
|
|
|
| 386 |
"You never perform work yourself—you only invoke one of the available functions. "
|
| 387 |
"When the user asks for something that matches a function signature, you must return exactly one JSON object matching that function’s parameters—and nothing else. "
|
| 388 |
"Do not wrap it in markdown, do not add extra text, and do not show the JSON to the user. "
|
| 389 |
"If the user’s request does not match any function, reply in plain text, and never mention JSON or internal logic.\n\n"
|
| 390 |
+
"- summarize(text)\n"
|
| 391 |
+
"- translate(lang, text)\n"
|
| 392 |
+
"- joke()\n"
|
| 393 |
+
"- weather(location)\n"
|
| 394 |
+
"- inspire()\n"
|
| 395 |
+
"- meme(text)\n"
|
| 396 |
+
"- poll_create(question, options)\n"
|
| 397 |
+
"- poll_vote(voter, choice)\n"
|
| 398 |
+
"- poll_results()\n"
|
| 399 |
+
"- poll_end()\n"
|
| 400 |
+
"- generate_image(prompt, count, width, height)\n"
|
| 401 |
+
"- send_text(message)\n\n"
|
| 402 |
+
"Return only raw JSON matching one of these shapes. For example:\n"
|
| 403 |
+
" {\"action\":\"generate_image\",\"prompt\":\"a red fox\",\"count\":3,\"width\":512,\"height\":512}\n"
|
| 404 |
+
"Otherwise, use send_text to reply with plain chat and you should only return one json for the current message not for previous conversations.\n"
|
|
|
|
|
|
|
| 405 |
)
|
| 406 |
+
prompt = f"{sys_prompt}\nConversation so far:\n{history_text}\n\n current message: User: {user_input}"
|
|
|
|
| 407 |
|
| 408 |
try:
|
| 409 |
+
raw = generate_llm(prompt)
|
| 410 |
except LLMBadRequestError:
|
| 411 |
# Clear history on HTTP 400 from the LLM
|
| 412 |
clear_history(chat_id, sender)
|