import logging
import os
import uuid

from langgraph.types import Command

from graph import agent_graph

# Configure logging
logging.basicConfig(level=logging.INFO)  # Default to INFO level
logger = logging.getLogger(__name__)

# Enable LiteLLM debug logging only if the environment variable is set
import litellm

if os.getenv("LITELLM_DEBUG", "false").lower() == "true":
    litellm.set_verbose = True
    logger.setLevel(logging.DEBUG)
else:
    litellm.set_verbose = False
    logger.setLevel(logging.INFO)
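
# Example invocation (the entry-point filename is hypothetical; this file's
# actual name is not shown in the source):
#   LITELLM_DEBUG=true python runner.py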


class AgentRunner:
    """Runner class for the code agent."""

    def __init__(self):
        """Initialize the agent runner with graph and tools."""
        logger.info("Initializing AgentRunner")
        self.graph = agent_graph
        self.last_state = None  # Store the last state for testing/debugging
        self.thread_id = str(uuid.uuid4())  # Generate a unique thread_id for this runner
    def __call__(self, input_data) -> str:
        """Process a question through the agent graph and return the answer.

        Args:
            input_data: Either a question string or a Command object for resuming

        Returns:
            str: The agent's response
        """
        try:
            config = {"configurable": {"thread_id": self.thread_id}}
            if isinstance(input_data, str):
                # Initial question
                logger.info(f"Processing question: {input_data}")
                initial_state = {
                    "question": input_data,
                    "messages": [],
                    "answer": None,
                    "step_logs": [],
                    "is_complete": False,
                    "step_count": 0,
                    # Initialize new memory fields
                    "context": {},
                    "memory_buffer": [],
                    "last_action": None,
                    "action_history": [],
                    "error_count": 0,
                    "success_count": 0,
                }
                # Stream full state snapshots; recent LangGraph versions surface
                # interrupts as an "__interrupt__" key in the streamed chunk
                for chunk in self.graph.stream(initial_state, config, stream_mode="values"):
                    if isinstance(chunk, dict) and "__interrupt__" in chunk:
                        # Hit an interrupt: resume with 'c' and run to completion
                        for result in self.graph.stream(
                            Command(resume="c"), config, stream_mode="values"
                        ):
                            self.last_state = result
                        break
                    self.last_state = chunk
                return (self.last_state or {}).get("answer", "No answer generated")
            else:
                # Resuming from an interrupt
                logger.info("Resuming from interrupt")
                for result in self.graph.stream(input_data, config, stream_mode="values"):
                    self.last_state = result
                return (self.last_state or {}).get("answer", "No answer generated")
        except Exception as e:
            logger.error(f"Error processing input: {str(e)}")
            raise
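

# Hypothetical usage sketch (the question and this __main__ guard are
# illustrative, not from the original file): shows the intended call pattern,
# assuming graph.py exposes a compiled agent_graph with a checkpointer
# configured so the thread_id can be resumed across interrupts.
if __name__ == "__main__":
    runner = AgentRunner()
    answer = runner("What is the capital of France?")
    print(answer)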