|
|
|
|
|
from agents.base_agent import BaseAgent, ACPMessage |
|
from transformers import AutoTokenizer, AutoModelForCausalLM |
|
import torch |
|
|
|
class DebuggerAgent(BaseAgent):
    """Code-review agent ("BugBot") that inspects Python snippets and replies
    with a debugging question or improvement suggestion generated by a small
    causal language model (distilgpt2).

    Conversation protocol (ACP performatives):
      * "inform"       -> content is a code snippet; reply is a "request"
                          carrying a generated question.
      * "acknowledge"  -> reply is an "inform" acknowledgement.
      * anything else  -> reply is a "request" asking for code.
    """

    def __init__(self):
        super().__init__(name="BugBot", role="Debugger")
        # distilgpt2 is small enough to run on CPU; loaded once at startup.
        self.tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
        self.model = AutoModelForCausalLM.from_pretrained("distilgpt2")
        self.model.eval()  # inference only; disables dropout etc.

    def generate_debug_question(self, code_snippet: str) -> str:
        """Generate a review question/suggestion for *code_snippet*.

        Returns only the newly generated continuation (prompt stripped);
        may be an empty string if the model generates nothing useful.
        """
        prompt = f"Review this Python function and ask a relevant question or suggest improvement:\n\n{code_snippet}\n\nQuestion:"
        inputs = self.tokenizer(prompt, return_tensors="pt", truncation=True)
        # no_grad: generation needs no autograd graph — saves memory and time.
        with torch.no_grad():
            outputs = self.model.generate(
                inputs["input_ids"],
                # Pass the mask explicitly; omitting it triggers a transformers
                # warning and can mis-handle padded positions.
                attention_mask=inputs["attention_mask"],
                # max_new_tokens bounds *generated* text only. The original
                # max_length=100 included the prompt, so a long (truncated)
                # prompt could leave no room for any generation at all.
                max_new_tokens=60,
                do_sample=True,
                temperature=0.8,
                pad_token_id=self.tokenizer.eos_token_id,
            )
        # Strip the prompt at the *token* level. decode(encode(prompt)) is not
        # guaranteed byte-identical to the prompt (BPE round-trip), so the old
        # reply[len(prompt):] character slice could cut at the wrong offset.
        prompt_token_count = inputs["input_ids"].shape[1]
        continuation = outputs[0][prompt_token_count:]
        return self.tokenizer.decode(continuation, skip_special_tokens=True).strip()

    def receive_message(self, message: ACPMessage) -> ACPMessage:
        """Handle an incoming ACP message and return the agent's reply.

        See the class docstring for the performative -> reply mapping.
        """
        if message.performative == "inform":
            debug_question = self.generate_debug_question(message.content)
            return self.create_message(
                receiver=message.sender,
                performative="request",
                # Fallback question when the model generates an empty string.
                content=debug_question or "Can you add error handling?"
            )
        elif message.performative == "acknowledge":
            return self.create_message(
                receiver=message.sender,
                performative="inform",
                content="Got it. Let me know when you have more code."
            )
        else:
            return self.create_message(
                receiver=message.sender,
                performative="request",
                content="Can you show me some Python code?"
            )
|
|