aymnsk committed on
Commit
f217037
·
verified ·
1 Parent(s): b5d5017

Update agents/debugger.py

Browse files
Files changed (1) hide show
  1. agents/debugger.py +21 -11
agents/debugger.py CHANGED
@@ -1,35 +1,45 @@
1
  # agents/debugger.py
2
 
3
  from agents.base_agent import BaseAgent, ACPMessage
4
- import random
 
5
 
6
class DebuggerAgent(BaseAgent):
    """Reviewer agent ("BugBot") that answers peer messages with canned feedback.

    On "inform" it requests a randomly chosen review question about the shared
    code; on "acknowledge" it reports that it is waiting; anything else prompts
    the peer for a simple Python function.
    """

    # Canned review prompts sent back when a peer shares code.
    _FEEDBACK_OPTIONS = (
        "Looks good! Does it handle empty lists?",
        "Nice. Can you add error handling?",
        "Cool. What about performance with large inputs?",
        "Thanks. Let me run a test on it.",
        "Interesting approach. Can we optimize it?",
    )

    def __init__(self):
        super().__init__(name="BugBot", role="Debugger")

    def receive_message(self, message: ACPMessage) -> ACPMessage:
        """Build the reply message for *message* based on its performative.

        Returns an ACPMessage addressed back to the original sender.
        """
        if message.performative == "inform":
            reply_performative = "request"
            reply_content = random.choice(self._FEEDBACK_OPTIONS)
        elif message.performative == "acknowledge":
            reply_performative = "inform"
            reply_content = "Alright. Waiting for next response."
        else:
            # Unknown performative: restart the exchange by asking for code.
            reply_performative = "request"
            reply_content = "Can you show me a simple function in Python?"

        return self.create_message(
            receiver=message.sender,
            performative=reply_performative,
            content=reply_content,
        )
 
1
  # agents/debugger.py
2
 
3
  from agents.base_agent import BaseAgent, ACPMessage
4
+ from transformers import AutoTokenizer, AutoModelForCausalLM
5
+ import torch
6
 
7
class DebuggerAgent(BaseAgent):
    """Code-review agent ("BugBot") backed by a small causal LM (distilgpt2).

    On "inform" it generates a review question about the received code snippet;
    on "acknowledge" it confirms and waits; anything else prompts the peer for
    Python code.
    """

    def __init__(self):
        super().__init__(name="BugBot", role="Debugger")
        # NOTE(review): weights are fetched at construction time (network I/O);
        # consider lazy-loading if agents are created frequently.
        self.tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
        self.model = AutoModelForCausalLM.from_pretrained("distilgpt2")

    def generate_debug_question(self, code_snippet: str) -> str:
        """Generate a review question or improvement suggestion for *code_snippet*.

        Returns the generated continuation only (prompt stripped); may be an
        empty string, in which case the caller supplies a fallback.
        """
        prompt = (
            "Review this Python function and ask a relevant question or "
            f"suggest improvement:\n\n{code_snippet}\n\nQuestion:"
        )
        inputs = self.tokenizer(prompt, return_tensors="pt", truncation=True)
        prompt_token_count = inputs["input_ids"].shape[1]

        # Inference only — disable gradient tracking to save memory/time.
        with torch.no_grad():
            outputs = self.model.generate(
                inputs["input_ids"],
                # Pass the mask explicitly: pad == eos for GPT-2, so generate()
                # cannot infer it reliably from the input ids alone.
                attention_mask=inputs["attention_mask"],
                # FIX: max_length counts the prompt tokens too, so a prompt of
                # >= 100 tokens produced no new text (or raised). max_new_tokens
                # bounds only the generated continuation.
                max_new_tokens=60,
                do_sample=True,
                temperature=0.8,
                pad_token_id=self.tokenizer.eos_token_id,
            )

        # FIX: slicing the decoded string by len(prompt) was fragile — the
        # tokenizer round-trip (truncation, whitespace normalization) need not
        # reproduce the prompt byte-for-byte. Strip the prompt at token level.
        generated_tokens = outputs[0][prompt_token_count:]
        reply = self.tokenizer.decode(generated_tokens, skip_special_tokens=True)
        return reply.strip()

    def receive_message(self, message: ACPMessage) -> ACPMessage:
        """Build the reply message for *message* based on its performative.

        Returns an ACPMessage addressed back to the original sender.
        """
        if message.performative == "inform":
            debug_question = self.generate_debug_question(message.content)
            return self.create_message(
                receiver=message.sender,
                performative="request",
                # Fall back to a canned question if the model produced nothing.
                content=debug_question or "Can you add error handling?"
            )
        elif message.performative == "acknowledge":
            return self.create_message(
                receiver=message.sender,
                performative="inform",
                content="Got it. Let me know when you have more code."
            )
        else:
            return self.create_message(
                receiver=message.sender,
                performative="request",
                content="Can you show me some Python code?"
            )