aymnsk committed on
Commit
eccdb11
·
verified ·
1 Parent(s): 537b586

Update agents/debugger.py

Browse files
Files changed (1) hide show
  1. agents/debugger.py +9 -42
agents/debugger.py CHANGED
@@ -1,56 +1,23 @@
1
- # agents/debugger.py
2
 
3
- from agents.base_agent import BaseAgent, ACPMessage
4
  from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
5
- import torch
6
 
7
  class DebuggerAgent(BaseAgent):
8
  def __init__(self):
9
- super().__init__(name="Zaid", role="Your sarcastic but loyal bug squasher 🐞")
10
- self.model_id = "deepseek-ai/deepseek-llm-7b-chat-int4"
11
 
12
  self.tokenizer = AutoTokenizer.from_pretrained(self.model_id)
13
-
14
  self.model = AutoModelForCausalLM.from_pretrained(
15
  self.model_id,
16
  quantization_config=BitsAndBytesConfig(load_in_4bit=True),
17
  device_map="auto"
18
  )
19
 
20
- def generate_debug_response(self, prompt: str) -> str:
21
- system_prompt = (
22
- "You are Zaid, a sarcastic but helpful friend who helps debug code. "
23
- "You're funny, brutally honest, but also kind when needed."
24
- )
25
-
26
- full_prompt = f"<|system|>\n{system_prompt}</s>\n<|user|>\n{prompt}</s>\n<|assistant|>\n"
27
- inputs = self.tokenizer(full_prompt, return_tensors="pt").to(self.model.device)
28
-
29
- with torch.no_grad():
30
- outputs = self.model.generate(
31
- **inputs,
32
- max_new_tokens=512,
33
- temperature=0.75,
34
- top_p=0.9,
35
- pad_token_id=self.tokenizer.eos_token_id
36
- )
37
-
38
- response = self.tokenizer.decode(outputs[0], skip_special_tokens=True)
39
- reply = response.split("<|assistant|>")[-1].strip()
40
- return reply or "Okay that broke my brain. Say it again?"
41
-
42
  def receive_message(self, message: ACPMessage) -> ACPMessage:
43
- if message.performative in ["request", "inform"]:
44
- reply = self.generate_debug_response(message.content)
45
- return self.create_message(
46
- receiver=message.sender,
47
- performative="inform",
48
- content=reply
49
- )
50
- else:
51
- return self.create_message(
52
- receiver=message.sender,
53
- performative="inform",
54
- content="You broke it again? Seriously? 😒"
55
- )
56
-
 
1
+ # agents/debugger.py
2
 
 
3
  from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
4
+ from agents.base_agent import BaseAgent, ACPMessage
5
 
6
class DebuggerAgent(BaseAgent):
    """LLM-backed debugging agent ("Zaid") that answers ACP messages.

    Loads a 4-bit quantized Mistral-7B-Instruct model at construction time
    and replies to every incoming ACP message with generated text.
    """

    def __init__(self):
        super().__init__(name="Zaid", role="Smart Debugger 🐞")
        self.model_id = "mistralai/Mistral-7B-Instruct-v0.1"

        # Tokenizer plus 4-bit quantized model weights; device_map="auto"
        # lets accelerate place layers on whatever devices are available.
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_id)
        self.model = AutoModelForCausalLM.from_pretrained(
            self.model_id,
            quantization_config=BitsAndBytesConfig(load_in_4bit=True),
            device_map="auto",
        )

    def receive_message(self, message: ACPMessage) -> ACPMessage:
        """Generate a reply to ``message.content`` and return it as an ACP 'inform'.

        Fixes over the previous revision:
        - decode only the newly generated tokens, so the reply no longer
          echoes the "[INST] ... [/INST]" prompt back to the sender;
        - pass ``pad_token_id`` explicitly (Mistral's tokenizer defines no
          pad token by default, which otherwise triggers a generate() warning).
        """
        # Mistral-Instruct chat format: wrap the user turn in [INST] ... [/INST].
        prompt = f"[INST] {message.content} [/INST]"
        inputs = self.tokenizer(prompt, return_tensors="pt").to(self.model.device)
        outputs = self.model.generate(
            **inputs,
            max_new_tokens=256,
            do_sample=True,
            temperature=0.7,
            pad_token_id=self.tokenizer.eos_token_id,
        )
        # generate() returns prompt + completion; slice off the prompt tokens
        # so only the model's answer is decoded.
        generated = outputs[0][inputs["input_ids"].shape[-1]:]
        reply = self.tokenizer.decode(generated, skip_special_tokens=True)
        return ACPMessage(
            sender=self.name,
            receiver=message.sender,
            performative="inform",
            content=reply.strip(),
        )