Update agents/programmer.py
agents/programmer.py  +21 -10
@@ -1,34 +1,45 @@
 # agents/programmer.py

 from agents.base_agent import BaseAgent, ACPMessage
-import
+from transformers import AutoTokenizer, AutoModelForCausalLM
+import torch

 class ProgrammerAgent(BaseAgent):
     def __init__(self):
         super().__init__(name="CodeBot", role="Programmer")
+        self.tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
+        self.model = AutoModelForCausalLM.from_pretrained("distilgpt2")
+
+    def generate_code_reply(self, prompt: str) -> str:
+        inputs = self.tokenizer(prompt, return_tensors="pt", truncation=True)
+        outputs = self.model.generate(
+            inputs["input_ids"],
+            max_length=100,
+            do_sample=True,
+            temperature=0.7,
+            pad_token_id=self.tokenizer.eos_token_id
+        )
+        reply = self.tokenizer.decode(outputs[0], skip_special_tokens=True)
+        return reply[len(prompt):].strip()

     def receive_message(self, message: ACPMessage) -> ACPMessage:
-        # Basic behavior for now — add AI model later
         if message.performative == "request":
-
-
-                "Okay! Here's a sorting example:\n```python\ndef sort_list(lst): return sorted(lst)```",
-                "Let me help with that. To calculate factorial:\n```python\ndef factorial(n): return 1 if n==0 else n*factorial(n-1)```"
-            ])
+            prompt = f"Write Python code to: {message.content.strip()}\n\n"
+            ai_reply = self.generate_code_reply(prompt)
             return self.create_message(
                 receiver=message.sender,
                 performative="inform",
-                content=
+                content=f"Sure! Here's what I came up with:\n```python\n{ai_reply}\n```"
             )
         elif message.performative == "inform":
             return self.create_message(
                 receiver=message.sender,
                 performative="acknowledge",
-                content="
+                content="Got it. Let me know if you need any code."
             )
         else:
             return self.create_message(
                 receiver=message.sender,
                 performative="refuse",
-                content="
+                content="I'm not sure how to respond to that."
             )