"""
LLM Response Processor for AgentPress.
This module handles processing of LLM responses including:
- Parsing of content for both streaming and non-streaming responses
- Detection and extraction of tool calls (both XML-based and native function calling)
- Tool execution with different strategies
- Adding tool results back to the conversation thread
"""
import json
import asyncio
import re
import uuid
from typing import List, Dict, Any, Optional, Tuple, AsyncGenerator, Callable, Union, Literal
from dataclasses import dataclass
from datetime import datetime, timezone
from litellm import completion_cost, token_counter
from agentpress.tool import Tool, ToolResult
from agentpress.tool_registry import ToolRegistry
from utils.logger import logger
# Type alias for XML result adding strategy
XmlAddingStrategy = Literal["user_message", "assistant_message", "inline_edit"]
# Type alias for tool execution strategy
ToolExecutionStrategy = Literal["sequential", "parallel"]
@dataclass
class ToolExecutionContext:
"""Context for a tool execution including call details, result, and display info."""
tool_call: Dict[str, Any]
tool_index: int
result: Optional[ToolResult] = None
function_name: Optional[str] = None
xml_tag_name: Optional[str] = None
error: Optional[Exception] = None
assistant_message_id: Optional[str] = None
parsing_details: Optional[Dict[str, Any]] = None
@dataclass
class ProcessorConfig:
"""
Configuration for response processing and tool execution.
This class controls how the LLM's responses are processed, including how tool calls
are detected, executed, and their results handled.
Attributes:
xml_tool_calling: Enable XML-based tool call detection (<tool>...</tool>)
native_tool_calling: Enable OpenAI-style function calling format
execute_tools: Whether to automatically execute detected tool calls
execute_on_stream: For streaming, execute tools as they appear vs. at the end
tool_execution_strategy: How to execute multiple tools ("sequential" or "parallel")
xml_adding_strategy: How to add XML tool results to the conversation
max_xml_tool_calls: Maximum number of XML tool calls to process (0 = no limit)
"""
xml_tool_calling: bool = True
native_tool_calling: bool = False
execute_tools: bool = True
execute_on_stream: bool = False
tool_execution_strategy: ToolExecutionStrategy = "sequential"
xml_adding_strategy: XmlAddingStrategy = "assistant_message"
max_xml_tool_calls: int = 0 # 0 means no limit
def __post_init__(self):
"""Validate configuration after initialization."""
if self.xml_tool_calling is False and self.native_tool_calling is False and self.execute_tools:
raise ValueError("At least one tool calling format (XML or native) must be enabled if execute_tools is True")
if self.xml_adding_strategy not in ["user_message", "assistant_message", "inline_edit"]:
raise ValueError("xml_adding_strategy must be 'user_message', 'assistant_message', or 'inline_edit'")
if self.max_xml_tool_calls < 0:
raise ValueError("max_xml_tool_calls must be a non-negative integer (0 = no limit)")
class ResponseProcessor:
"""Processes LLM responses, extracting and executing tool calls."""
def __init__(self, tool_registry: ToolRegistry, add_message_callback: Callable):
"""Initialize the ResponseProcessor.
Args:
tool_registry: Registry of available tools
add_message_callback: Callback function to add messages to the thread.
MUST return the full saved message object (dict) or None.
"""
self.tool_registry = tool_registry
self.add_message = add_message_callback
async def process_streaming_response(
self,
llm_response: AsyncGenerator,
thread_id: str,
prompt_messages: List[Dict[str, Any]],
llm_model: str,
config: ProcessorConfig = ProcessorConfig(),
) -> AsyncGenerator[Dict[str, Any], None]:
"""Process a streaming LLM response, handling tool calls and execution.
Args:
llm_response: Streaming response from the LLM
thread_id: ID of the conversation thread
prompt_messages: List of messages sent to the LLM (the prompt)
llm_model: The name of the LLM model used
config: Configuration for parsing and execution
Yields:
Complete message objects matching the DB schema, except for content chunks.
"""
accumulated_content = ""
tool_calls_buffer = {}
current_xml_content = ""
xml_chunks_buffer = []
pending_tool_executions = []
yielded_tool_indices = set() # Stores indices of tools whose *status* has been yielded
tool_index = 0
xml_tool_call_count = 0
finish_reason = None
last_assistant_message_object = None # Store the final saved assistant message object
tool_result_message_objects = {} # tool_index -> full saved message object
has_printed_thinking_prefix = False # Flag for printing thinking prefix only once
logger.info(f"Streaming Config: XML={config.xml_tool_calling}, Native={config.native_tool_calling}, "
f"Execute on stream={config.execute_on_stream}, Strategy={config.tool_execution_strategy}")
thread_run_id = str(uuid.uuid4())
try:
# --- Save and Yield Start Events ---
start_content = {"status_type": "thread_run_start", "thread_run_id": thread_run_id}
start_msg_obj = await self.add_message(
thread_id=thread_id, type="status", content=start_content,
is_llm_message=False, metadata={"thread_run_id": thread_run_id}
)
if start_msg_obj: yield start_msg_obj
assist_start_content = {"status_type": "assistant_response_start"}
assist_start_msg_obj = await self.add_message(
thread_id=thread_id, type="status", content=assist_start_content,
is_llm_message=False, metadata={"thread_run_id": thread_run_id}
)
if assist_start_msg_obj: yield assist_start_msg_obj
# --- End Start Events ---
async for chunk in llm_response:
if hasattr(chunk, 'choices') and chunk.choices and hasattr(chunk.choices[0], 'finish_reason') and chunk.choices[0].finish_reason:
finish_reason = chunk.choices[0].finish_reason
logger.debug(f"Detected finish_reason: {finish_reason}")
if hasattr(chunk, 'choices') and chunk.choices:
delta = chunk.choices[0].delta if hasattr(chunk.choices[0], 'delta') else None
# Check for and log Anthropic thinking content
if delta and hasattr(delta, 'reasoning_content') and delta.reasoning_content:
if not has_printed_thinking_prefix:
# print("[THINKING]: ", end='', flush=True)
has_printed_thinking_prefix = True
# print(delta.reasoning_content, end='', flush=True)
# Append reasoning to main content to be saved in the final message
accumulated_content += delta.reasoning_content
# Process content chunk
if delta and hasattr(delta, 'content') and delta.content:
chunk_content = delta.content
# print(chunk_content, end='', flush=True)
accumulated_content += chunk_content
current_xml_content += chunk_content
if not (config.max_xml_tool_calls > 0 and xml_tool_call_count >= config.max_xml_tool_calls):
# Yield ONLY content chunk (don't save)
now_chunk = datetime.now(timezone.utc).isoformat()
yield {
"message_id": None, "thread_id": thread_id, "type": "assistant",
"is_llm_message": True,
"content": json.dumps({"role": "assistant", "content": chunk_content}),
"metadata": json.dumps({"stream_status": "chunk", "thread_run_id": thread_run_id}),
"created_at": now_chunk, "updated_at": now_chunk
}
else:
logger.info("XML tool call limit reached - not yielding more content chunks")
# --- Process XML Tool Calls (if enabled and limit not reached) ---
if config.xml_tool_calling and not (config.max_xml_tool_calls > 0 and xml_tool_call_count >= config.max_xml_tool_calls):
xml_chunks = self._extract_xml_chunks(current_xml_content)
for xml_chunk in xml_chunks:
current_xml_content = current_xml_content.replace(xml_chunk, "", 1)
xml_chunks_buffer.append(xml_chunk)
result = self._parse_xml_tool_call(xml_chunk)
if result:
tool_call, parsing_details = result
xml_tool_call_count += 1
current_assistant_id = last_assistant_message_object['message_id'] if last_assistant_message_object else None
context = self._create_tool_context(
tool_call, tool_index, current_assistant_id, parsing_details
)
if config.execute_tools and config.execute_on_stream:
# Save and Yield tool_started status
started_msg_obj = await self._yield_and_save_tool_started(context, thread_id, thread_run_id)
if started_msg_obj: yield started_msg_obj
yielded_tool_indices.add(tool_index) # Mark status as yielded
execution_task = asyncio.create_task(self._execute_tool(tool_call))
pending_tool_executions.append({
"task": execution_task, "tool_call": tool_call,
"tool_index": tool_index, "context": context
})
tool_index += 1
if config.max_xml_tool_calls > 0 and xml_tool_call_count >= config.max_xml_tool_calls:
logger.debug(f"Reached XML tool call limit ({config.max_xml_tool_calls})")
finish_reason = "xml_tool_limit_reached"
break # Stop processing more XML chunks in this delta
# --- Process Native Tool Call Chunks ---
if config.native_tool_calling and delta and hasattr(delta, 'tool_calls') and delta.tool_calls:
for tool_call_chunk in delta.tool_calls:
# Yield Native Tool Call Chunk (transient status, not saved)
                            # Extract tool call chunk data safely: prefer model_dump, fall back to copying fields manually
                            tool_call_data_chunk = {}
if hasattr(tool_call_chunk, 'model_dump'): tool_call_data_chunk = tool_call_chunk.model_dump()
else: # Manual extraction...
if hasattr(tool_call_chunk, 'id'): tool_call_data_chunk['id'] = tool_call_chunk.id
if hasattr(tool_call_chunk, 'index'): tool_call_data_chunk['index'] = tool_call_chunk.index
if hasattr(tool_call_chunk, 'type'): tool_call_data_chunk['type'] = tool_call_chunk.type
if hasattr(tool_call_chunk, 'function'):
tool_call_data_chunk['function'] = {}
if hasattr(tool_call_chunk.function, 'name'): tool_call_data_chunk['function']['name'] = tool_call_chunk.function.name
if hasattr(tool_call_chunk.function, 'arguments'): tool_call_data_chunk['function']['arguments'] = tool_call_chunk.function.arguments
now_tool_chunk = datetime.now(timezone.utc).isoformat()
yield {
"message_id": None, "thread_id": thread_id, "type": "status", "is_llm_message": True,
"content": json.dumps({"role": "assistant", "status_type": "tool_call_chunk", "tool_call_chunk": tool_call_data_chunk}),
"metadata": json.dumps({"thread_run_id": thread_run_id}),
"created_at": now_tool_chunk, "updated_at": now_tool_chunk
}
# --- Buffer and Execute Complete Native Tool Calls ---
if not hasattr(tool_call_chunk, 'function'): continue
idx = tool_call_chunk.index if hasattr(tool_call_chunk, 'index') else 0
                            # Accumulate tool call deltas into the buffer, keyed by chunk index
                            buf = tool_calls_buffer.setdefault(idx, {'id': None, 'type': 'function', 'function': {'name': None, 'arguments': ''}})
                            if hasattr(tool_call_chunk, 'id') and tool_call_chunk.id: buf['id'] = tool_call_chunk.id
                            if hasattr(tool_call_chunk.function, 'name') and tool_call_chunk.function.name: buf['function']['name'] = tool_call_chunk.function.name
                            if hasattr(tool_call_chunk.function, 'arguments') and tool_call_chunk.function.arguments: buf['function']['arguments'] += tool_call_chunk.function.arguments
                            # A tool call is complete once its id, name, and parseable JSON arguments are all present
                            has_complete_tool_call = False
if (tool_calls_buffer.get(idx) and
tool_calls_buffer[idx]['id'] and
tool_calls_buffer[idx]['function']['name'] and
tool_calls_buffer[idx]['function']['arguments']):
try:
json.loads(tool_calls_buffer[idx]['function']['arguments'])
has_complete_tool_call = True
except json.JSONDecodeError: pass
if has_complete_tool_call and config.execute_tools and config.execute_on_stream:
current_tool = tool_calls_buffer[idx]
tool_call_data = {
"function_name": current_tool['function']['name'],
"arguments": json.loads(current_tool['function']['arguments']),
"id": current_tool['id']
}
current_assistant_id = last_assistant_message_object['message_id'] if last_assistant_message_object else None
context = self._create_tool_context(
tool_call_data, tool_index, current_assistant_id
)
# Save and Yield tool_started status
started_msg_obj = await self._yield_and_save_tool_started(context, thread_id, thread_run_id)
if started_msg_obj: yield started_msg_obj
yielded_tool_indices.add(tool_index) # Mark status as yielded
execution_task = asyncio.create_task(self._execute_tool(tool_call_data))
pending_tool_executions.append({
"task": execution_task, "tool_call": tool_call_data,
"tool_index": tool_index, "context": context
})
tool_index += 1
if finish_reason == "xml_tool_limit_reached":
logger.info("Stopping stream processing after loop due to XML tool call limit")
break
# print() # Add a final newline after the streaming loop finishes
# --- After Streaming Loop ---
# Wait for pending tool executions from streaming phase
tool_results_buffer = [] # Stores (tool_call, result, tool_index, context)
if pending_tool_executions:
logger.info(f"Waiting for {len(pending_tool_executions)} pending streamed tool executions")
                # Wait for all pending tool execution tasks to finish before processing their results
pending_tasks = [execution["task"] for execution in pending_tool_executions]
done, _ = await asyncio.wait(pending_tasks)
for execution in pending_tool_executions:
tool_idx = execution.get("tool_index", -1)
context = execution["context"]
# Check if status was already yielded during stream run
if tool_idx in yielded_tool_indices:
logger.debug(f"Status for tool index {tool_idx} already yielded.")
# Still need to process the result for the buffer
try:
if execution["task"].done():
result = execution["task"].result()
context.result = result
tool_results_buffer.append((execution["tool_call"], result, tool_idx, context))
else: # Should not happen with asyncio.wait
logger.warning(f"Task for tool index {tool_idx} not done after wait.")
except Exception as e:
logger.error(f"Error getting result for pending tool execution {tool_idx}: {str(e)}")
context.error = e
# Save and Yield tool error status message (even if started was yielded)
error_msg_obj = await self._yield_and_save_tool_error(context, thread_id, thread_run_id)
if error_msg_obj: yield error_msg_obj
continue # Skip further status yielding for this tool index
# If status wasn't yielded before (shouldn't happen with current logic), yield it now
try:
if execution["task"].done():
result = execution["task"].result()
context.result = result
tool_results_buffer.append((execution["tool_call"], result, tool_idx, context))
# Save and Yield tool completed/failed status
completed_msg_obj = await self._yield_and_save_tool_completed(
context, None, thread_id, thread_run_id
)
if completed_msg_obj: yield completed_msg_obj
yielded_tool_indices.add(tool_idx)
except Exception as e:
logger.error(f"Error getting result/yielding status for pending tool execution {tool_idx}: {str(e)}")
context.error = e
# Save and Yield tool error status
error_msg_obj = await self._yield_and_save_tool_error(context, thread_id, thread_run_id)
if error_msg_obj: yield error_msg_obj
yielded_tool_indices.add(tool_idx)
# Save and yield finish status if limit was reached
if finish_reason == "xml_tool_limit_reached":
finish_content = {"status_type": "finish", "finish_reason": "xml_tool_limit_reached"}
finish_msg_obj = await self.add_message(
thread_id=thread_id, type="status", content=finish_content,
is_llm_message=False, metadata={"thread_run_id": thread_run_id}
)
if finish_msg_obj: yield finish_msg_obj
logger.info(f"Stream finished with reason: xml_tool_limit_reached after {xml_tool_call_count} XML tool calls")
# --- SAVE and YIELD Final Assistant Message ---
if accumulated_content:
                # If the XML tool call limit was reached, truncate content at the end of the last processed XML chunk
if config.max_xml_tool_calls > 0 and xml_tool_call_count >= config.max_xml_tool_calls and xml_chunks_buffer:
last_xml_chunk = xml_chunks_buffer[-1]
last_chunk_end_pos = accumulated_content.find(last_xml_chunk) + len(last_xml_chunk)
if last_chunk_end_pos > 0:
accumulated_content = accumulated_content[:last_chunk_end_pos]
                # Collect fully-formed native tool calls from the streaming buffer
complete_native_tool_calls = []
if config.native_tool_calling:
for idx, tc_buf in tool_calls_buffer.items():
if tc_buf['id'] and tc_buf['function']['name'] and tc_buf['function']['arguments']:
try:
args = json.loads(tc_buf['function']['arguments'])
complete_native_tool_calls.append({
"id": tc_buf['id'], "type": "function",
"function": {"name": tc_buf['function']['name'],"arguments": args}
})
except json.JSONDecodeError: continue
message_data = { # Dict to be saved in 'content'
"role": "assistant", "content": accumulated_content,
"tool_calls": complete_native_tool_calls or None
}
last_assistant_message_object = await self.add_message(
thread_id=thread_id, type="assistant", content=message_data,
is_llm_message=True, metadata={"thread_run_id": thread_run_id}
)
if last_assistant_message_object:
# Yield the complete saved object, adding stream_status metadata just for yield
yield_metadata = json.loads(last_assistant_message_object.get('metadata', '{}'))
yield_metadata['stream_status'] = 'complete'
yield {**last_assistant_message_object, 'metadata': json.dumps(yield_metadata)}
else:
logger.error(f"Failed to save final assistant message for thread {thread_id}")
# Save and yield an error status
err_content = {"role": "system", "status_type": "error", "message": "Failed to save final assistant message"}
err_msg_obj = await self.add_message(
thread_id=thread_id, type="status", content=err_content,
is_llm_message=False, metadata={"thread_run_id": thread_run_id}
)
if err_msg_obj: yield err_msg_obj
# --- Process All Tool Results Now ---
if config.execute_tools:
final_tool_calls_to_process = []
                # Gather the final set of tool calls to process from both the native and XML buffers
# Gather native tool calls from buffer
if config.native_tool_calling and complete_native_tool_calls:
for tc in complete_native_tool_calls:
final_tool_calls_to_process.append({
"function_name": tc["function"]["name"],
"arguments": tc["function"]["arguments"], # Already parsed object
"id": tc["id"]
})
# Gather XML tool calls from buffer (up to limit)
parsed_xml_data = []
if config.xml_tool_calling:
# Reparse remaining content just in case (should be empty if processed correctly)
xml_chunks = self._extract_xml_chunks(current_xml_content)
xml_chunks_buffer.extend(xml_chunks)
# Process only chunks not already handled in the stream loop
remaining_limit = config.max_xml_tool_calls - xml_tool_call_count if config.max_xml_tool_calls > 0 else len(xml_chunks_buffer)
xml_chunks_to_process = xml_chunks_buffer[:remaining_limit] # Ensure limit is respected
for chunk in xml_chunks_to_process:
parsed_result = self._parse_xml_tool_call(chunk)
if parsed_result:
tool_call, parsing_details = parsed_result
# Avoid adding if already processed during streaming
if not any(exec['tool_call'] == tool_call for exec in pending_tool_executions):
final_tool_calls_to_process.append(tool_call)
parsed_xml_data.append({'tool_call': tool_call, 'parsing_details': parsing_details})
all_tool_data_map = {} # tool_index -> {'tool_call': ..., 'parsing_details': ...}
# Add native tool data
native_tool_index = 0
if config.native_tool_calling and complete_native_tool_calls:
for tc in complete_native_tool_calls:
# Find the corresponding entry in final_tool_calls_to_process if needed
# For now, assume order matches if only native used
exec_tool_call = {
"function_name": tc["function"]["name"],
"arguments": tc["function"]["arguments"],
"id": tc["id"]
}
all_tool_data_map[native_tool_index] = {"tool_call": exec_tool_call, "parsing_details": None}
native_tool_index += 1
# Add XML tool data
xml_tool_index_start = native_tool_index
for idx, item in enumerate(parsed_xml_data):
all_tool_data_map[xml_tool_index_start + idx] = item
tool_results_map = {} # tool_index -> (tool_call, result, context)
# Populate from buffer if executed on stream
if config.execute_on_stream and tool_results_buffer:
logger.info(f"Processing {len(tool_results_buffer)} buffered tool results")
for tool_call, result, tool_idx, context in tool_results_buffer:
if last_assistant_message_object: context.assistant_message_id = last_assistant_message_object['message_id']
tool_results_map[tool_idx] = (tool_call, result, context)
# Or execute now if not streamed
elif final_tool_calls_to_process and not config.execute_on_stream:
logger.info(f"Executing {len(final_tool_calls_to_process)} tools ({config.tool_execution_strategy}) after stream")
results_list = await self._execute_tools(final_tool_calls_to_process, config.tool_execution_strategy)
current_tool_idx = 0
for tc, res in results_list:
# Map back using all_tool_data_map which has correct indices
if current_tool_idx in all_tool_data_map:
tool_data = all_tool_data_map[current_tool_idx]
context = self._create_tool_context(
tc, current_tool_idx,
last_assistant_message_object['message_id'] if last_assistant_message_object else None,
tool_data.get('parsing_details')
)
context.result = res
tool_results_map[current_tool_idx] = (tc, res, context)
else: logger.warning(f"Could not map result for tool index {current_tool_idx}")
current_tool_idx += 1
# Save and Yield each result message
if tool_results_map:
logger.info(f"Saving and yielding {len(tool_results_map)} final tool result messages")
for tool_idx in sorted(tool_results_map.keys()):
tool_call, result, context = tool_results_map[tool_idx]
context.result = result
if not context.assistant_message_id and last_assistant_message_object:
context.assistant_message_id = last_assistant_message_object['message_id']
# Yield start status ONLY IF executing non-streamed (already yielded if streamed)
if not config.execute_on_stream and tool_idx not in yielded_tool_indices:
started_msg_obj = await self._yield_and_save_tool_started(context, thread_id, thread_run_id)
if started_msg_obj: yield started_msg_obj
yielded_tool_indices.add(tool_idx) # Mark status yielded
# Save the tool result message to DB
saved_tool_result_object = await self._add_tool_result( # Returns full object or None
thread_id, tool_call, result, config.xml_adding_strategy,
context.assistant_message_id, context.parsing_details
)
# Yield completed/failed status (linked to saved result ID if available)
completed_msg_obj = await self._yield_and_save_tool_completed(
context,
saved_tool_result_object['message_id'] if saved_tool_result_object else None,
thread_id, thread_run_id
)
if completed_msg_obj: yield completed_msg_obj
# Don't add to yielded_tool_indices here, completion status is separate yield
# Yield the saved tool result object
if saved_tool_result_object:
tool_result_message_objects[tool_idx] = saved_tool_result_object
yield saved_tool_result_object
else:
logger.error(f"Failed to save tool result for index {tool_idx}, not yielding result message.")
# Optionally yield error status for saving failure?
# --- Calculate and Store Cost ---
if last_assistant_message_object: # Only calculate if assistant message was saved
try:
# Use accumulated_content for streaming cost calculation
final_cost = completion_cost(
model=llm_model,
messages=prompt_messages, # Use the prompt messages provided
completion=accumulated_content
)
if final_cost is not None and final_cost > 0:
logger.info(f"Calculated final cost for stream: {final_cost}")
await self.add_message(
thread_id=thread_id,
type="cost",
content={"cost": final_cost},
is_llm_message=False, # Cost is metadata
metadata={"thread_run_id": thread_run_id} # Keep track of the run
)
logger.info(f"Cost message saved for stream: {final_cost}")
else:
logger.info("Stream cost calculation resulted in zero or None, not storing cost message.")
except Exception as e:
logger.error(f"Error calculating final cost for stream: {str(e)}")
# --- Final Finish Status ---
if finish_reason and finish_reason != "xml_tool_limit_reached":
finish_content = {"status_type": "finish", "finish_reason": finish_reason}
finish_msg_obj = await self.add_message(
thread_id=thread_id, type="status", content=finish_content,
is_llm_message=False, metadata={"thread_run_id": thread_run_id}
)
if finish_msg_obj: yield finish_msg_obj
except Exception as e:
logger.error(f"Error processing stream: {str(e)}", exc_info=True)
# Save and yield error status message
err_content = {"role": "system", "status_type": "error", "message": str(e)}
err_msg_obj = await self.add_message(
thread_id=thread_id, type="status", content=err_content,
is_llm_message=False, metadata={"thread_run_id": thread_run_id if 'thread_run_id' in locals() else None}
)
if err_msg_obj: yield err_msg_obj # Yield the saved error message
finally:
# Save and Yield the final thread_run_end status
end_content = {"status_type": "thread_run_end"}
end_msg_obj = await self.add_message(
thread_id=thread_id, type="status", content=end_content,
is_llm_message=False, metadata={"thread_run_id": thread_run_id if 'thread_run_id' in locals() else None}
)
if end_msg_obj: yield end_msg_obj
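    # Rough event ordering for a streamed run, derived from the logic above (message
    # shapes abbreviated; which tool/status entries appear depends on the config):
    #
    #   {"type": "status",    "content": {"status_type": "thread_run_start", ...}}
    #   {"type": "status",    "content": {"status_type": "assistant_response_start"}}
    #   {"type": "assistant", "metadata": {"stream_status": "chunk"}, ...}        # one per content delta, not saved
    #   {"type": "status",    "content": {"status_type": "tool_started", ...}}    # when execute_on_stream=True
    #   {"type": "assistant", "metadata": {"stream_status": "complete"}, ...}     # the saved assistant message
    #   {"type": "tool",      ...}                                                # saved tool result message
    #   {"type": "status",    "content": {"status_type": "tool_completed", ...}}
    #   {"type": "status",    "content": {"status_type": "finish", "finish_reason": ...}}
    #   {"type": "status",    "content": {"status_type": "thread_run_end"}}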
async def process_non_streaming_response(
self,
llm_response: Any,
thread_id: str,
prompt_messages: List[Dict[str, Any]],
llm_model: str,
config: ProcessorConfig = ProcessorConfig()
) -> AsyncGenerator[Dict[str, Any], None]:
"""Process a non-streaming LLM response, handling tool calls and execution.
Args:
llm_response: Response from the LLM
thread_id: ID of the conversation thread
prompt_messages: List of messages sent to the LLM (the prompt)
llm_model: The name of the LLM model used
config: Configuration for parsing and execution
Yields:
Complete message objects matching the DB schema.
"""
content = ""
thread_run_id = str(uuid.uuid4())
all_tool_data = [] # Stores {'tool_call': ..., 'parsing_details': ...}
tool_index = 0
assistant_message_object = None
tool_result_message_objects = {}
finish_reason = None
native_tool_calls_for_message = []
try:
# Save and Yield thread_run_start status message
start_content = {"status_type": "thread_run_start", "thread_run_id": thread_run_id}
start_msg_obj = await self.add_message(
thread_id=thread_id, type="status", content=start_content,
is_llm_message=False, metadata={"thread_run_id": thread_run_id}
)
if start_msg_obj: yield start_msg_obj
# Extract finish_reason, content, tool calls
if hasattr(llm_response, 'choices') and llm_response.choices:
if hasattr(llm_response.choices[0], 'finish_reason'):
finish_reason = llm_response.choices[0].finish_reason
logger.info(f"Non-streaming finish_reason: {finish_reason}")
response_message = llm_response.choices[0].message if hasattr(llm_response.choices[0], 'message') else None
if response_message:
if hasattr(response_message, 'content') and response_message.content:
content = response_message.content
if config.xml_tool_calling:
parsed_xml_data = self._parse_xml_tool_calls(content)
if config.max_xml_tool_calls > 0 and len(parsed_xml_data) > config.max_xml_tool_calls:
                                # Truncate content and tool data when the limit is exceeded (mirrors the streaming path)
if parsed_xml_data:
xml_chunks = self._extract_xml_chunks(content)[:config.max_xml_tool_calls]
if xml_chunks:
last_chunk = xml_chunks[-1]
last_chunk_pos = content.find(last_chunk)
if last_chunk_pos >= 0: content = content[:last_chunk_pos + len(last_chunk)]
parsed_xml_data = parsed_xml_data[:config.max_xml_tool_calls]
finish_reason = "xml_tool_limit_reached"
all_tool_data.extend(parsed_xml_data)
if config.native_tool_calling and hasattr(response_message, 'tool_calls') and response_message.tool_calls:
for tool_call in response_message.tool_calls:
if hasattr(tool_call, 'function'):
exec_tool_call = {
"function_name": tool_call.function.name,
"arguments": json.loads(tool_call.function.arguments) if isinstance(tool_call.function.arguments, str) else tool_call.function.arguments,
"id": tool_call.id if hasattr(tool_call, 'id') else str(uuid.uuid4())
}
all_tool_data.append({"tool_call": exec_tool_call, "parsing_details": None})
native_tool_calls_for_message.append({
"id": exec_tool_call["id"], "type": "function",
"function": {
"name": tool_call.function.name,
"arguments": tool_call.function.arguments if isinstance(tool_call.function.arguments, str) else json.dumps(tool_call.function.arguments)
}
})
# --- SAVE and YIELD Final Assistant Message ---
message_data = {"role": "assistant", "content": content, "tool_calls": native_tool_calls_for_message or None}
assistant_message_object = await self.add_message(
thread_id=thread_id, type="assistant", content=message_data,
is_llm_message=True, metadata={"thread_run_id": thread_run_id}
)
if assistant_message_object:
yield assistant_message_object
else:
logger.error(f"Failed to save non-streaming assistant message for thread {thread_id}")
err_content = {"role": "system", "status_type": "error", "message": "Failed to save assistant message"}
err_msg_obj = await self.add_message(
thread_id=thread_id, type="status", content=err_content,
is_llm_message=False, metadata={"thread_run_id": thread_run_id}
)
if err_msg_obj: yield err_msg_obj
# --- Calculate and Store Cost ---
if assistant_message_object: # Only calculate if assistant message was saved
try:
# Use the full llm_response object for potentially more accurate cost calculation
final_cost = None
if hasattr(llm_response, '_hidden_params') and 'response_cost' in llm_response._hidden_params and llm_response._hidden_params['response_cost'] is not None and llm_response._hidden_params['response_cost'] != 0.0:
final_cost = llm_response._hidden_params['response_cost']
logger.info(f"Using response_cost from _hidden_params: {final_cost}")
if final_cost is None: # Fall back to calculating cost if direct cost not available or zero
logger.info("Calculating cost using completion_cost function.")
# Note: litellm might need 'messages' kwarg depending on model/provider
final_cost = completion_cost(
completion_response=llm_response,
model=llm_model, # Explicitly pass the model name
# messages=prompt_messages # Pass prompt messages if needed by litellm for this model
)
if final_cost is not None and final_cost > 0:
logger.info(f"Calculated final cost for non-stream: {final_cost}")
await self.add_message(
thread_id=thread_id,
type="cost",
content={"cost": final_cost},
is_llm_message=False, # Cost is metadata
metadata={"thread_run_id": thread_run_id} # Keep track of the run
)
logger.info(f"Cost message saved for non-stream: {final_cost}")
else:
logger.info("Non-stream cost calculation resulted in zero or None, not storing cost message.")
except Exception as e:
logger.error(f"Error calculating final cost for non-stream: {str(e)}")
# --- Execute Tools and Yield Results ---
tool_calls_to_execute = [item['tool_call'] for item in all_tool_data]
if config.execute_tools and tool_calls_to_execute:
logger.info(f"Executing {len(tool_calls_to_execute)} tools with strategy: {config.tool_execution_strategy}")
tool_results = await self._execute_tools(tool_calls_to_execute, config.tool_execution_strategy)
for i, (returned_tool_call, result) in enumerate(tool_results):
original_data = all_tool_data[i]
tool_call_from_data = original_data['tool_call']
parsing_details = original_data['parsing_details']
current_assistant_id = assistant_message_object['message_id'] if assistant_message_object else None
context = self._create_tool_context(
tool_call_from_data, tool_index, current_assistant_id, parsing_details
)
context.result = result
# Save and Yield start status
started_msg_obj = await self._yield_and_save_tool_started(context, thread_id, thread_run_id)
if started_msg_obj: yield started_msg_obj
# Save tool result
saved_tool_result_object = await self._add_tool_result(
thread_id, tool_call_from_data, result, config.xml_adding_strategy,
current_assistant_id, parsing_details
)
# Save and Yield completed/failed status
completed_msg_obj = await self._yield_and_save_tool_completed(
context,
saved_tool_result_object['message_id'] if saved_tool_result_object else None,
thread_id, thread_run_id
)
if completed_msg_obj: yield completed_msg_obj
# Yield the saved tool result object
if saved_tool_result_object:
tool_result_message_objects[tool_index] = saved_tool_result_object
yield saved_tool_result_object
else:
logger.error(f"Failed to save tool result for index {tool_index}")
tool_index += 1
# --- Save and Yield Final Status ---
if finish_reason:
finish_content = {"status_type": "finish", "finish_reason": finish_reason}
finish_msg_obj = await self.add_message(
thread_id=thread_id, type="status", content=finish_content,
is_llm_message=False, metadata={"thread_run_id": thread_run_id}
)
if finish_msg_obj: yield finish_msg_obj
except Exception as e:
logger.error(f"Error processing non-streaming response: {str(e)}", exc_info=True)
# Save and yield error status
err_content = {"role": "system", "status_type": "error", "message": str(e)}
err_msg_obj = await self.add_message(
thread_id=thread_id, type="status", content=err_content,
is_llm_message=False, metadata={"thread_run_id": thread_run_id if 'thread_run_id' in locals() else None}
)
if err_msg_obj: yield err_msg_obj
finally:
# Save and Yield the final thread_run_end status
end_content = {"status_type": "thread_run_end"}
end_msg_obj = await self.add_message(
thread_id=thread_id, type="status", content=end_content,
is_llm_message=False, metadata={"thread_run_id": thread_run_id if 'thread_run_id' in locals() else None}
)
if end_msg_obj: yield end_msg_obj
# XML parsing methods
def _extract_tag_content(self, xml_chunk: str, tag_name: str) -> Tuple[Optional[str], Optional[str]]:
"""Extract content between opening and closing tags, handling nested tags."""
start_tag = f'<{tag_name}'
end_tag = f'</{tag_name}>'
try:
# Find start tag position
start_pos = xml_chunk.find(start_tag)
if start_pos == -1:
return None, xml_chunk
# Find end of opening tag
tag_end = xml_chunk.find('>', start_pos)
if tag_end == -1:
return None, xml_chunk
# Find matching closing tag
content_start = tag_end + 1
nesting_level = 1
pos = content_start
while nesting_level > 0 and pos < len(xml_chunk):
next_start = xml_chunk.find(start_tag, pos)
next_end = xml_chunk.find(end_tag, pos)
if next_end == -1:
return None, xml_chunk
if next_start != -1 and next_start < next_end:
nesting_level += 1
pos = next_start + len(start_tag)
else:
nesting_level -= 1
if nesting_level == 0:
content = xml_chunk[content_start:next_end]
remaining = xml_chunk[next_end + len(end_tag):]
return content, remaining
else:
pos = next_end + len(end_tag)
return None, xml_chunk
except Exception as e:
logger.error(f"Error extracting tag content: {e}")
return None, xml_chunk
def _extract_attribute(self, opening_tag: str, attr_name: str) -> Optional[str]:
"""Extract attribute value from opening tag."""
try:
# Handle both single and double quotes with raw strings
patterns = [
fr'{attr_name}="([^"]*)"', # Double quotes
fr"{attr_name}='([^']*)'", # Single quotes
fr'{attr_name}=([^\s/>;]+)' # No quotes - fixed escape sequence
]
for pattern in patterns:
match = re.search(pattern, opening_tag)
if match:
value = match.group(1)
# Unescape common XML entities
                    value = value.replace('&quot;', '"').replace('&apos;', "'")
                    value = value.replace('&lt;', '<').replace('&gt;', '>')
                    value = value.replace('&amp;', '&')
return value
return None
except Exception as e:
logger.error(f"Error extracting attribute: {e}")
return None
def _extract_xml_chunks(self, content: str) -> List[str]:
"""Extract complete XML chunks using start and end pattern matching."""
chunks = []
pos = 0
try:
while pos < len(content):
# Find the next tool tag
next_tag_start = -1
current_tag = None
# Find the earliest occurrence of any registered tag
for tag_name in self.tool_registry.xml_tools.keys():
start_pattern = f'<{tag_name}'
tag_pos = content.find(start_pattern, pos)
if tag_pos != -1 and (next_tag_start == -1 or tag_pos < next_tag_start):
next_tag_start = tag_pos
current_tag = tag_name
if next_tag_start == -1 or not current_tag:
break
# Find the matching end tag
end_pattern = f'</{current_tag}>'
tag_stack = []
chunk_start = next_tag_start
current_pos = next_tag_start
while current_pos < len(content):
# Look for next start or end tag of the same type
next_start = content.find(f'<{current_tag}', current_pos + 1)
next_end = content.find(end_pattern, current_pos)
if next_end == -1: # No closing tag found
break
if next_start != -1 and next_start < next_end:
# Found nested start tag
tag_stack.append(next_start)
current_pos = next_start + 1
else:
# Found end tag
if not tag_stack: # This is our matching end tag
chunk_end = next_end + len(end_pattern)
chunk = content[chunk_start:chunk_end]
chunks.append(chunk)
pos = chunk_end
break
else:
# Pop nested tag
tag_stack.pop()
current_pos = next_end + 1
if current_pos >= len(content): # Reached end without finding closing tag
break
pos = max(pos + 1, current_pos)
except Exception as e:
logger.error(f"Error extracting XML chunks: {e}")
logger.error(f"Content was: {content}")
return chunks
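    # Illustrative extraction, assuming a hypothetical registered "create-file" tag:
    # given 'Some narration <create-file file_path="a.txt">x</create-file> more text',
    # _extract_xml_chunks returns ['<create-file file_path="a.txt">x</create-file>']
    # and leaves the surrounding narration untouched; nested same-name tags stay inside
    # a single chunk thanks to the tag_stack bookkeeping above.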
def _parse_xml_tool_call(self, xml_chunk: str) -> Optional[Tuple[Dict[str, Any], Dict[str, Any]]]:
"""Parse XML chunk into tool call format and return parsing details.
Returns:
Tuple of (tool_call, parsing_details) or None if parsing fails.
- tool_call: Dict with 'function_name', 'xml_tag_name', 'arguments'
- parsing_details: Dict with 'attributes', 'elements', 'text_content', 'root_content'
"""
try:
# Extract tag name and validate
tag_match = re.match(r'<([^\s>]+)', xml_chunk)
if not tag_match:
logger.error(f"No tag found in XML chunk: {xml_chunk}")
return None
# This is the XML tag as it appears in the text (e.g., "create-file")
xml_tag_name = tag_match.group(1)
logger.info(f"Found XML tag: {xml_tag_name}")
# Get tool info and schema from registry
tool_info = self.tool_registry.get_xml_tool(xml_tag_name)
if not tool_info or not tool_info['schema'].xml_schema:
logger.error(f"No tool or schema found for tag: {xml_tag_name}")
return None
# This is the actual function name to call (e.g., "create_file")
function_name = tool_info['method']
schema = tool_info['schema'].xml_schema
params = {}
remaining_chunk = xml_chunk
# --- Store detailed parsing info ---
parsing_details = {
"attributes": {},
"elements": {},
"text_content": None,
"root_content": None,
"raw_chunk": xml_chunk # Store the original chunk for reference
}
# ---
# Process each mapping
for mapping in schema.mappings:
try:
if mapping.node_type == "attribute":
# Extract attribute from opening tag
opening_tag = remaining_chunk.split('>', 1)[0]
value = self._extract_attribute(opening_tag, mapping.param_name)
if value is not None:
params[mapping.param_name] = value
parsing_details["attributes"][mapping.param_name] = value # Store raw attribute
logger.info(f"Found attribute {mapping.param_name}: {value}")
elif mapping.node_type == "element":
# Extract element content
content, remaining_chunk = self._extract_tag_content(remaining_chunk, mapping.path)
if content is not None:
params[mapping.param_name] = content.strip()
parsing_details["elements"][mapping.param_name] = content.strip() # Store raw element content
logger.info(f"Found element {mapping.param_name}: {content.strip()}")
elif mapping.node_type == "text":
# Extract text content
content, _ = self._extract_tag_content(remaining_chunk, xml_tag_name)
if content is not None:
params[mapping.param_name] = content.strip()
parsing_details["text_content"] = content.strip() # Store raw text content
logger.info(f"Found text content for {mapping.param_name}: {content.strip()}")
elif mapping.node_type == "content":
# Extract root content
content, _ = self._extract_tag_content(remaining_chunk, xml_tag_name)
if content is not None:
params[mapping.param_name] = content.strip()
parsing_details["root_content"] = content.strip() # Store raw root content
logger.info(f"Found root content for {mapping.param_name}")
except Exception as e:
logger.error(f"Error processing mapping {mapping}: {e}")
continue
# Validate required parameters
missing = [mapping.param_name for mapping in schema.mappings if mapping.required and mapping.param_name not in params]
if missing:
logger.error(f"Missing required parameters: {missing}")
logger.error(f"Current params: {params}")
logger.error(f"XML chunk: {xml_chunk}")
return None
# Create tool call with clear separation between function_name and xml_tag_name
tool_call = {
"function_name": function_name, # The actual method to call (e.g., create_file)
"xml_tag_name": xml_tag_name, # The original XML tag (e.g., create-file)
"arguments": params # The extracted parameters
}
logger.debug(f"Created tool call: {tool_call}")
return tool_call, parsing_details # Return both dicts
except Exception as e:
logger.error(f"Error parsing XML chunk: {e}")
logger.error(f"XML chunk was: {xml_chunk}")
return None
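    # Illustrative parse, assuming a hypothetical "create-file" tag registered to a
    # create_file method whose schema maps the file_path attribute and the root content:
    #
    #   chunk = '<create-file file_path="notes.txt">hello world</create-file>'
    #   tool_call, details = self._parse_xml_tool_call(chunk)
    #   # tool_call -> {"function_name": "create_file", "xml_tag_name": "create-file",
    #   #               "arguments": {"file_path": "notes.txt", "content": "hello world"}}
    #   # details   -> {"attributes": {"file_path": "notes.txt"}, "elements": {},
    #   #               "text_content": None, "root_content": "hello world", "raw_chunk": chunk}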
def _parse_xml_tool_calls(self, content: str) -> List[Dict[str, Any]]:
"""Parse XML tool calls from content string.
Returns:
List of dictionaries, each containing {'tool_call': ..., 'parsing_details': ...}
"""
parsed_data = []
try:
xml_chunks = self._extract_xml_chunks(content)
for xml_chunk in xml_chunks:
result = self._parse_xml_tool_call(xml_chunk)
if result:
tool_call, parsing_details = result
parsed_data.append({
"tool_call": tool_call,
"parsing_details": parsing_details
})
except Exception as e:
logger.error(f"Error parsing XML tool calls: {e}", exc_info=True)
return parsed_data
# Tool execution methods
async def _execute_tool(self, tool_call: Dict[str, Any]) -> ToolResult:
"""Execute a single tool call and return the result."""
try:
function_name = tool_call["function_name"]
arguments = tool_call["arguments"]
logger.info(f"Executing tool: {function_name} with arguments: {arguments}")
if isinstance(arguments, str):
try:
arguments = json.loads(arguments)
except json.JSONDecodeError:
arguments = {"text": arguments}
# Get available functions from tool registry
available_functions = self.tool_registry.get_available_functions()
# Look up the function by name
tool_fn = available_functions.get(function_name)
if not tool_fn:
logger.error(f"Tool function '{function_name}' not found in registry")
return ToolResult(success=False, output=f"Tool function '{function_name}' not found")
logger.debug(f"Found tool function for '{function_name}', executing...")
result = await tool_fn(**arguments)
logger.info(f"Tool execution complete: {function_name} -> {result}")
return result
except Exception as e:
logger.error(f"Error executing tool {tool_call['function_name']}: {str(e)}", exc_info=True)
return ToolResult(success=False, output=f"Error executing tool: {str(e)}")
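    # Illustrative input accepted by _execute_tool (hypothetical tool; native calls
    # carry an "id", XML calls carry an "xml_tag_name" instead):
    #
    #   result = await self._execute_tool({
    #       "function_name": "create_file",
    #       "arguments": {"file_path": "notes.txt", "content": "hello"},
    #       "id": "call_abc123",
    #   })
    #   # result is a ToolResult with .success and .output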
async def _execute_tools(
self,
tool_calls: List[Dict[str, Any]],
execution_strategy: ToolExecutionStrategy = "sequential"
) -> List[Tuple[Dict[str, Any], ToolResult]]:
"""Execute tool calls with the specified strategy.
This is the main entry point for tool execution. It dispatches to the appropriate
execution method based on the provided strategy.
Args:
tool_calls: List of tool calls to execute
execution_strategy: Strategy for executing tools:
- "sequential": Execute tools one after another, waiting for each to complete
- "parallel": Execute all tools simultaneously for better performance
Returns:
List of tuples containing the original tool call and its result
"""
logger.info(f"Executing {len(tool_calls)} tools with strategy: {execution_strategy}")
if execution_strategy == "sequential":
return await self._execute_tools_sequentially(tool_calls)
elif execution_strategy == "parallel":
return await self._execute_tools_in_parallel(tool_calls)
else:
logger.warning(f"Unknown execution strategy: {execution_strategy}, falling back to sequential")
return await self._execute_tools_sequentially(tool_calls)
async def _execute_tools_sequentially(self, tool_calls: List[Dict[str, Any]]) -> List[Tuple[Dict[str, Any], ToolResult]]:
"""Execute tool calls sequentially and return results.
This method executes tool calls one after another, waiting for each tool to complete
before starting the next one. This is useful when tools have dependencies on each other.
Args:
tool_calls: List of tool calls to execute
Returns:
List of tuples containing the original tool call and its result
"""
if not tool_calls:
return []
try:
tool_names = [t.get('function_name', 'unknown') for t in tool_calls]
logger.info(f"Executing {len(tool_calls)} tools sequentially: {tool_names}")
results = []
for index, tool_call in enumerate(tool_calls):
tool_name = tool_call.get('function_name', 'unknown')
logger.debug(f"Executing tool {index+1}/{len(tool_calls)}: {tool_name}")
try:
result = await self._execute_tool(tool_call)
results.append((tool_call, result))
logger.debug(f"Completed tool {tool_name} with success={result.success}")
except Exception as e:
logger.error(f"Error executing tool {tool_name}: {str(e)}")
error_result = ToolResult(success=False, output=f"Error executing tool: {str(e)}")
results.append((tool_call, error_result))
logger.info(f"Sequential execution completed for {len(tool_calls)} tools")
return results
except Exception as e:
logger.error(f"Error in sequential tool execution: {str(e)}", exc_info=True)
# Return partial results plus error results for remaining tools
completed_tool_names = [r[0].get('function_name', 'unknown') for r in results] if 'results' in locals() else []
remaining_tools = [t for t in tool_calls if t.get('function_name', 'unknown') not in completed_tool_names]
# Add error results for remaining tools
error_results = [(tool, ToolResult(success=False, output=f"Execution error: {str(e)}"))
for tool in remaining_tools]
return (results if 'results' in locals() else []) + error_results
async def _execute_tools_in_parallel(self, tool_calls: List[Dict[str, Any]]) -> List[Tuple[Dict[str, Any], ToolResult]]:
"""Execute tool calls in parallel and return results.
This method executes all tool calls simultaneously using asyncio.gather, which
can significantly improve performance when executing multiple independent tools.
Args:
tool_calls: List of tool calls to execute
Returns:
List of tuples containing the original tool call and its result
"""
if not tool_calls:
return []
try:
tool_names = [t.get('function_name', 'unknown') for t in tool_calls]
logger.info(f"Executing {len(tool_calls)} tools in parallel: {tool_names}")
# Create tasks for all tool calls
tasks = [self._execute_tool(tool_call) for tool_call in tool_calls]
# Execute all tasks concurrently with error handling
results = await asyncio.gather(*tasks, return_exceptions=True)
# Process results and handle any exceptions
processed_results = []
for i, (tool_call, result) in enumerate(zip(tool_calls, results)):
if isinstance(result, Exception):
logger.error(f"Error executing tool {tool_call.get('function_name', 'unknown')}: {str(result)}")
# Create error result
error_result = ToolResult(success=False, output=f"Error executing tool: {str(result)}")
processed_results.append((tool_call, error_result))
else:
processed_results.append((tool_call, result))
logger.info(f"Parallel execution completed for {len(tool_calls)} tools")
return processed_results
except Exception as e:
logger.error(f"Error in parallel tool execution: {str(e)}", exc_info=True)
# Return error results for all tools if the gather itself fails
return [(tool_call, ToolResult(success=False, output=f"Execution error: {str(e)}"))
for tool_call in tool_calls]
async def _add_tool_result(
self,
thread_id: str,
tool_call: Dict[str, Any],
result: ToolResult,
strategy: Union[XmlAddingStrategy, str] = "assistant_message",
assistant_message_id: Optional[str] = None,
parsing_details: Optional[Dict[str, Any]] = None
    ) -> Optional[Dict[str, Any]]: # Return the full saved message object (or None on failure)
"""Add a tool result to the conversation thread based on the specified format.
This method formats tool results and adds them to the conversation history,
making them visible to the LLM in subsequent interactions. Results can be
added either as native tool messages (OpenAI format) or as XML-wrapped content
with a specified role (user or assistant).
Args:
thread_id: ID of the conversation thread
tool_call: The original tool call that produced this result
result: The result from the tool execution
strategy: How to add XML tool results to the conversation
("user_message", "assistant_message", or "inline_edit")
assistant_message_id: ID of the assistant message that generated this tool call
            parsing_details: Detailed parsing info for XML calls (attributes, elements, etc.)
        Returns:
            The saved tool result message object (dict), as returned by add_message, or None on failure.
        """
try:
message_id = None # Initialize message_id
# Create metadata with assistant_message_id if provided
metadata = {}
if assistant_message_id:
metadata["assistant_message_id"] = assistant_message_id
logger.info(f"Linking tool result to assistant message: {assistant_message_id}")
# --- Add parsing details to metadata if available ---
if parsing_details:
metadata["parsing_details"] = parsing_details
logger.info("Adding parsing_details to tool result metadata")
# ---
# Check if this is a native function call (has id field)
if "id" in tool_call:
# Format as a proper tool message according to OpenAI spec
function_name = tool_call.get("function_name", "")
# Format the tool result content - tool role needs string content
if isinstance(result, str):
content = result
elif hasattr(result, 'output'):
# If it's a ToolResult object
if isinstance(result.output, dict) or isinstance(result.output, list):
# If output is already a dict or list, convert to JSON string
content = json.dumps(result.output)
else:
# Otherwise just use the string representation
content = str(result.output)
else:
# Fallback to string representation of the whole result
content = str(result)
logger.info(f"Formatted tool result content: {content[:100]}...")
# Create the tool response message with proper format
tool_message = {
"role": "tool",
"tool_call_id": tool_call["id"],
"name": function_name,
"content": content
}
logger.info(f"Adding native tool result for tool_call_id={tool_call['id']} with role=tool")
# Add as a tool message to the conversation history
# This makes the result visible to the LLM in the next turn
message_id = await self.add_message(
thread_id=thread_id,
type="tool", # Special type for tool responses
content=tool_message,
is_llm_message=True,
metadata=metadata
)
return message_id # Return the message ID
# For XML and other non-native tools, continue with the original logic
# Determine message role based on strategy
result_role = "user" if strategy == "user_message" else "assistant"
# Create a context for consistent formatting
context = self._create_tool_context(tool_call, 0, assistant_message_id, parsing_details)
context.result = result
# Format the content using the formatting helper
content = self._format_xml_tool_result(tool_call, result)
# Add the message with the appropriate role to the conversation history
# This allows the LLM to see the tool result in subsequent interactions
result_message = {
"role": result_role,
"content": content
}
message_id = await self.add_message(
thread_id=thread_id,
type="tool",
content=result_message,
is_llm_message=True,
metadata=metadata
)
return message_id # Return the message ID
except Exception as e:
logger.error(f"Error adding tool result: {str(e)}", exc_info=True)
# Fallback to a simple message
try:
fallback_message = {
"role": "user",
"content": str(result)
}
message_id = await self.add_message(
thread_id=thread_id,
type="tool",
content=fallback_message,
is_llm_message=True,
metadata={"assistant_message_id": assistant_message_id} if assistant_message_id else {}
)
return message_id # Return the message ID
except Exception as e2:
logger.error(f"Failed even with fallback message: {str(e2)}", exc_info=True)
return None # Return None on error
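    # Illustrative messages written by _add_tool_result (field values are examples only):
    #
    #   Native call (tool_call carries an "id") -> saved with role "tool":
    #     {"role": "tool", "tool_call_id": "call_abc123", "name": "create_file", "content": "..."}
    #
    #   XML call with strategy="user_message" -> saved with role "user":
    #     {"role": "user", "content": "<tool_result> <create-file> ... </create-file> </tool_result>"}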
def _format_xml_tool_result(self, tool_call: Dict[str, Any], result: ToolResult) -> str:
"""Format a tool result wrapped in a <tool_result> tag.
Args:
tool_call: The tool call that was executed
result: The result of the tool execution
Returns:
String containing the formatted result wrapped in <tool_result> tag
"""
# Always use xml_tag_name if it exists
if "xml_tag_name" in tool_call:
xml_tag_name = tool_call["xml_tag_name"]
return f"<tool_result> <{xml_tag_name}> {str(result)} </{xml_tag_name}> </tool_result>"
# Non-XML tool, just return the function result
function_name = tool_call["function_name"]
return f"Result for {function_name}: {str(result)}"
def _create_tool_context(self, tool_call: Dict[str, Any], tool_index: int, assistant_message_id: Optional[str] = None, parsing_details: Optional[Dict[str, Any]] = None) -> ToolExecutionContext:
"""Create a tool execution context with display name and parsing details populated."""
context = ToolExecutionContext(
tool_call=tool_call,
tool_index=tool_index,
assistant_message_id=assistant_message_id,
parsing_details=parsing_details
)
# Set function_name and xml_tag_name fields
if "xml_tag_name" in tool_call:
context.xml_tag_name = tool_call["xml_tag_name"]
context.function_name = tool_call.get("function_name", tool_call["xml_tag_name"])
else:
# For non-XML tools, use function name directly
context.function_name = tool_call.get("function_name", "unknown")
context.xml_tag_name = None
return context
async def _yield_and_save_tool_started(self, context: ToolExecutionContext, thread_id: str, thread_run_id: str) -> Optional[Dict[str, Any]]:
"""Formats, saves, and returns a tool started status message."""
tool_name = context.xml_tag_name or context.function_name
content = {
"role": "assistant", "status_type": "tool_started",
"function_name": context.function_name, "xml_tag_name": context.xml_tag_name,
"message": f"Starting execution of {tool_name}", "tool_index": context.tool_index,
"tool_call_id": context.tool_call.get("id") # Include tool_call ID if native
}
metadata = {"thread_run_id": thread_run_id}
saved_message_obj = await self.add_message(
thread_id=thread_id, type="status", content=content, is_llm_message=False, metadata=metadata
)
return saved_message_obj # Return the full object (or None if saving failed)
async def _yield_and_save_tool_completed(self, context: ToolExecutionContext, tool_message_id: Optional[str], thread_id: str, thread_run_id: str) -> Optional[Dict[str, Any]]:
"""Formats, saves, and returns a tool completed/failed status message."""
if not context.result:
# Delegate to error saving if result is missing (e.g., execution failed)
return await self._yield_and_save_tool_error(context, thread_id, thread_run_id)
tool_name = context.xml_tag_name or context.function_name
status_type = "tool_completed" if context.result.success else "tool_failed"
message_text = f"Tool {tool_name} {'completed successfully' if context.result.success else 'failed'}"
content = {
"role": "assistant", "status_type": status_type,
"function_name": context.function_name, "xml_tag_name": context.xml_tag_name,
"message": message_text, "tool_index": context.tool_index,
"tool_call_id": context.tool_call.get("id")
}
metadata = {"thread_run_id": thread_run_id}
# Add the *actual* tool result message ID to the metadata if available and successful
if context.result.success and tool_message_id:
metadata["linked_tool_result_message_id"] = tool_message_id
        # Signal to the caller that a terminating tool (ask/complete) has finished
        if context.function_name in ['ask', 'complete']:
            metadata["agent_should_terminate"] = True
            logger.info(f"Marking tool status for '{context.function_name}' with termination signal.")
saved_message_obj = await self.add_message(
thread_id=thread_id, type="status", content=content, is_llm_message=False, metadata=metadata
)
return saved_message_obj
async def _yield_and_save_tool_error(self, context: ToolExecutionContext, thread_id: str, thread_run_id: str) -> Optional[Dict[str, Any]]:
"""Formats, saves, and returns a tool error status message."""
error_msg = str(context.error) if context.error else "Unknown error during tool execution"
tool_name = context.xml_tag_name or context.function_name
content = {
"role": "assistant", "status_type": "tool_error",
"function_name": context.function_name, "xml_tag_name": context.xml_tag_name,
"message": f"Error executing tool {tool_name}: {error_msg}",
"tool_index": context.tool_index,
"tool_call_id": context.tool_call.get("id")
}
metadata = {"thread_run_id": thread_run_id}
# Save the status message with is_llm_message=False
saved_message_obj = await self.add_message(
thread_id=thread_id, type="status", content=content, is_llm_message=False, metadata=metadata
)
        return saved_message_obj
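# ---------------------------------------------------------------------------
# Usage sketch (illustrative only): wiring the processor into a thread manager.
# The registry setup, save_message callback, and model name below are assumptions
# about the surrounding application, not part of this module.
#
#   registry = ToolRegistry()
#   # ... register tools on the registry ...
#
#   async def save_message(thread_id, type, content, is_llm_message, metadata):
#       """Persist the message and return the full saved row as a dict (or None)."""
#       ...
#
#   processor = ResponseProcessor(tool_registry=registry, add_message_callback=save_message)
#   config = ProcessorConfig(xml_tool_calling=True, execute_on_stream=True,
#                            tool_execution_strategy="parallel")
#
#   async for message in processor.process_streaming_response(
#           llm_response=llm_stream, thread_id=thread_id,
#           prompt_messages=prompt_messages, llm_model=model_name, config=config):
#       ...  # forward each saved message / chunk to the client
# ---------------------------------------------------------------------------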