Upload 7 files
- examples/debate/agent/__init__.py +2 -0
- examples/debate/agent/base.py +45 -0
- examples/debate/agent/debate_agent.py +218 -0
- examples/debate/agent/main.py +210 -0
- examples/debate/agent/moderator_agent.py +146 -0
- examples/debate/agent/prompts.py +251 -0
- examples/debate/agent/stream_output_agent.py +19 -0
examples/debate/agent/__init__.py
ADDED
@@ -0,0 +1,2 @@
# coding: utf-8
# Copyright (c) 2025 inclusionAI.
examples/debate/agent/base.py
ADDED
@@ -0,0 +1,45 @@
import asyncio

from pydantic import BaseModel, Field

from aworld.output import Output
from aworld.output.base import OutputPart


class DebateSpeech(Output, BaseModel):
    name: str = Field(default="", description="name of the speaker")
    type: str = Field(default="", description="speech type")
    stance: str = Field(default="", description="stance of the speech")
    content: str = Field(default="", description="content of the speech")
    round: int = Field(default=0, description="round of the speech")
    finished: bool = Field(default=False, description="whether the speech is finished")
    metadata: dict = Field(default_factory=dict, description="metadata of the speech")

    async def wait_until_finished(self):
        """
        Wait until the speech is finished.
        """
        while not self.finished:
            await asyncio.sleep(1)

    async def convert_to_parts(self, message_output, after_call):
        async def __convert_to_parts__():
            # Stream chunks from the LLM output, accumulate them into content,
            # and invoke after_call once the full response is available.
            async for item in message_output.response_generator:
                if item:
                    self.content += item
                    yield OutputPart(content=item)
            if message_output.finished:
                await after_call(message_output.response)

        self.parts = __convert_to_parts__()

    @classmethod
    def from_dict(cls, data: dict) -> "DebateSpeech":
        return cls(
            name=data.get("name", ""),
            type=data.get("type", ""),
            stance=data.get("stance", ""),
            content=data.get("content", ""),
            round=data.get("round", 0),
            finished=data.get("finished", False),
            metadata=data.get("metadata", {})
        )
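A minimal usage sketch (not part of the uploaded files) of the DebateSpeech lifecycle: a producer fills in content and flips finished, while a consumer blocks on wait_until_finished(). The producer coroutine below is purely illustrative; in the agents this role is played by convert_to_parts() and its after_call callback.

import asyncio

from examples.debate.agent.base import DebateSpeech


async def _demo():
    speech = DebateSpeech.from_dict({"name": "alice", "type": "speech", "stance": "affirmative", "round": 1})

    async def producer():
        speech.content = "Opening statement..."
        speech.finished = True  # releases wait_until_finished()

    await asyncio.gather(producer(), speech.wait_until_finished())
    print(speech.content)

# asyncio.run(_demo())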
examples/debate/agent/debate_agent.py
ADDED
@@ -0,0 +1,218 @@
import logging
from abc import ABC
from typing import Dict, Any, Union, List, Literal, Optional
from datetime import datetime
import uuid

from aworld.models.model_response import ToolCall
from examples.debate.agent.base import DebateSpeech
from examples.debate.agent.prompts import user_assignment_prompt, user_assignment_system_prompt, affirmative_few_shots, \
    negative_few_shots, \
    user_debate_prompt
from examples.debate.agent.search.search_engine import SearchEngine
from examples.debate.agent.search.tavily_search_engine import TavilySearchEngine
from examples.debate.agent.stream_output_agent import StreamOutputAgent
from aworld.config import AgentConfig
from aworld.core.common import Observation, ActionModel
from aworld.output import SearchOutput, SearchItem, MessageOutput
from aworld.output.artifact import ArtifactType


def truncate_content(raw_content, char_limit):
    if raw_content is None:
        raw_content = ''
    if len(raw_content) > char_limit:
        raw_content = raw_content[:char_limit] + "... [truncated]"
    return raw_content


class DebateAgent(StreamOutputAgent, ABC):

    stance: Literal["affirmative", "negative"]

    def __init__(self, name: str, stance: Literal["affirmative", "negative"], conf: AgentConfig, search_engine: Optional[SearchEngine] = TavilySearchEngine()):
        conf.name = name
        super().__init__(conf)
        self.steps = 0
        self.stance = stance
        self.search_engine = search_engine

    async def speech(self, topic: str, opinion: str, oppose_opinion: str, round: int, speech_history: list[DebateSpeech]) -> DebateSpeech:
        observation = Observation(content=self.get_latest_speech(speech_history).content if self.get_latest_speech(speech_history) else "")
        info = {
            "topic": topic,
            "round": round,
            "opinion": opinion,
            "oppose_opinion": oppose_opinion,
            "history": speech_history
        }
        actions = await self.async_policy(observation, info)

        return actions[0].policy_info

    async def async_policy(self, observation: Observation, info: Dict[str, Any] = {}, **kwargs) -> Union[
        List[ActionModel], None]:
        ## step 1: read params
        opponent_claim = observation.content
        round = info["round"]
        opinion = info["opinion"]
        oppose_opinion = info["oppose_opinion"]
        topic = info["topic"]
        history: list[DebateSpeech] = info["history"]

        # Event.emit("xxx")
        ## step 2: generate search keywords
        keywords = await self.gen_keywords(topic, opinion, oppose_opinion, opponent_claim, history)
        logging.info(f"gen keywords = {keywords}")

        ## step 3: search webpages and store them as workspace artifacts
        search_results = await self.search_webpages(keywords, max_results=5)
        for search_result in search_results:
            logging.info(f"keyword#{search_result['query']} -> result size is {len(search_result['results'])}")
            search_item = {
                "query": search_result.get("query", ""),
                "results": [SearchItem(title=result["title"], url=result["url"], content=result['content'], raw_content=result['raw_content'], metadata={}) for result in search_result["results"]],
                "origin_tool_call": ToolCall.from_dict({
                    "id": "call_search",
                    "type": "function",
                    "function": {
                        "name": "search",
                        "arguments": keywords
                    }
                })
            }
            search_output = SearchOutput.from_dict(search_item)
            await self.workspace.create_artifact(
                artifact_type=ArtifactType.WEB_PAGES,
                artifact_id=str(uuid.uuid4()),
                content=search_output,
                metadata={
                    "query": search_output.query,
                    "user": self.name(),
                    "round": info["round"],
                    "opinion": info["opinion"],
                    "oppose_opinion": info["oppose_opinion"],
                    "topic": info["topic"],
                    "tags": [f"user#{self.name()}", f"Rounds#{info['round']}"]
                }
            )

        ## step 4: generate the statement (streaming LLM output)
        user_response = await self.gen_statement(topic, opinion, oppose_opinion, opponent_claim, history, search_results)

        logging.info(f"user_response is {user_response}")

        ## step 5: wrap the streaming output into a speech
        speech = DebateSpeech.from_dict({
            "round": round,
            "type": "speech",
            "stance": self.stance,
            "name": self.name(),
        })

        async def after_speech_call(message_output_response):
            logging.info(f"{self.stance}#{self.name()}: after_speech_call")
            speech.metadata = {}
            speech.content = message_output_response
            speech.finished = True

        await speech.convert_to_parts(user_response, after_speech_call)

        action = ActionModel(
            policy_info=speech
        )

        return [action]

    async def gen_keywords(self, topic, opinion, oppose_opinion, last_oppose_speech_content, history):

        current_time = datetime.now().strftime("%Y-%m-%d-%H")
        human_prompt = user_assignment_prompt.format(topic=topic,
                                                     opinion=opinion,
                                                     oppose_opinion=oppose_opinion,
                                                     last_oppose_speech_content=last_oppose_speech_content,
                                                     current_time=current_time,
                                                     limit=2
                                                     )

        messages = [{'role': 'system', 'content': user_assignment_system_prompt},
                    {'role': 'user', 'content': human_prompt}]

        output = await self.async_call_llm(messages)

        response = await output.get_finished_response()

        return response.split(",")

    async def search_webpages(self, keywords, max_results):
        return await self.search_engine.async_batch_search(queries=keywords, max_results=max_results)

    async def gen_statement(self, topic, opinion, oppose_opinion, opponent_claim, history, search_results) -> MessageOutput:
        search_results_content = ""
        for search_result in search_results:
            search_results_content += f"SearchQuery: {search_result['query']}\n"
            search_results_content += "\n\n".join([truncate_content(s['content'], 1000) for s in search_result['results']])

        unique_history = history
        # if len(history) >= 2:
        #     for i in range(len(history)):
        #         # Check if the current element is the same as the next one
        #         if i == len(history) - 1 or history[i] != history[i+1]:
        #             # Add the current element to the result list
        #             unique_history.append(history[i])

        affirmative_chat_history = ""
        negative_chat_history = ""

        if len(unique_history) >= 2:
            if self.stance == "affirmative":
                for speech in unique_history[:-1]:
                    if speech.stance == "affirmative":
                        affirmative_chat_history = affirmative_chat_history + "You: " + speech.content + "\n"
                    elif speech.stance == "negative":
                        affirmative_chat_history = affirmative_chat_history + "Your Opponent: " + speech.content + "\n"

            elif self.stance == "negative":
                for speech in unique_history[:-1]:
                    if speech.stance == "negative":
                        negative_chat_history = negative_chat_history + "You: " + speech.content + "\n"
                    elif speech.stance == "affirmative":
                        negative_chat_history = negative_chat_history + "Your Opponent: " + speech.content + "\n"

        few_shots = ""
        chat_history = ""

        if self.stance == "affirmative":
            chat_history = affirmative_chat_history
            few_shots = affirmative_few_shots

        elif self.stance == "negative":
            chat_history = negative_chat_history
            few_shots = negative_few_shots

        human_prompt = user_debate_prompt.format(topic=topic,
                                                 opinion=opinion,
                                                 oppose_opinion=oppose_opinion,
                                                 last_oppose_speech_content=opponent_claim,
                                                 search_results_content=search_results_content,
                                                 chat_history=chat_history,
                                                 few_shots=few_shots
                                                 )

        messages = [{'role': 'system', 'content': user_assignment_system_prompt},
                    {'role': 'user', 'content': human_prompt}]

        return await self.async_call_llm(messages)

    def get_latest_speech(self, history: list[DebateSpeech]):
        """
        get the latest speech from history
        """
        if len(history) == 0:
            return None
        return history[-1]

    def set_workspace(self, workspace):
        self.workspace = workspace
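For reference, speech() packs the debate context into an Observation plus an info dict before delegating to async_policy. A small illustrative sketch of those shapes; the keys match the code above, the values are made up:

from aworld.core.common import Observation

observation = Observation(content="Jordan won six championships; LeBron did not.")
info = {
    "topic": "Who is the greatest, Jordan or LeBron?",
    "round": 2,
    "opinion": "LeBron is the best",
    "oppose_opinion": "Jordan is the best",
    "history": [],  # list[DebateSpeech] accumulated by DebateArena
}
# actions = await agent.async_policy(observation, info)
# speech: DebateSpeech = actions[0].policy_info  # content streams in while the LLM responds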
examples/debate/agent/main.py
ADDED
@@ -0,0 +1,210 @@
import logging
import uuid
from typing import Optional, AsyncGenerator

from aworld.memory.main import MemoryFactory
from examples.debate.agent.base import DebateSpeech
from examples.debate.agent.debate_agent import DebateAgent
from examples.debate.agent.moderator_agent import ModeratorAgent
from aworld.core.common import Observation
from aworld.core.memory import MemoryItem, MemoryConfig
from aworld.output import Output, WorkSpace, Artifact, ArtifactType, CodeArtifact


class DebateArena:
    """
    DebateArena is a platform for debates.
    """

    affirmative_speaker: DebateAgent
    negative_speaker: DebateAgent

    moderator: Optional[ModeratorAgent]

    speeches: list[DebateSpeech]

    display_panel: str

    def __init__(self,
                 affirmative_speaker: DebateAgent,
                 negative_speaker: DebateAgent,
                 moderator: ModeratorAgent,
                 workspace: WorkSpace,
                 **kwargs
                 ):
        self.affirmative_speaker = affirmative_speaker
        self.negative_speaker = negative_speaker
        self.moderator = moderator
        self.speeches = []
        self.workspace = workspace
        self.affirmative_speaker.set_workspace(workspace)
        self.negative_speaker.set_workspace(workspace)
        self.moderator.set_workspace(workspace)
        self.moderator.memory = MemoryFactory.from_config(MemoryConfig(provider="inmemory", enable_summary=False))

        # Event.register("topic", func= );

    async def async_run(self, topic: str, rounds: int) \
            -> AsyncGenerator[Output, None]:

        """
        Start the debate.
        1. The debate starts from round 1.
        2. Each round has two speeches, one from affirmative_speaker and one from negative_speaker.
        3. After all rounds are finished, the moderator summarizes the debate.

        Args:
            topic: str -> topic of the debate
            rounds: int -> number of rounds

        Yields: DebateSpeech outputs as they are produced

        """

        ## 1. generate opinions
        moderator_speech = await self.moderator_speech(topic, rounds)
        if not moderator_speech:
            return
        yield moderator_speech
        await moderator_speech.wait_until_finished()
        self.store_speech(moderator_speech)

        affirmative_opinion = moderator_speech.metadata["affirmative_opinion"]
        negative_opinion = moderator_speech.metadata["negative_opinion"]

        logging.info(f"✈️==================================== opinions =============================================")
        logging.info(f"topic: {topic}")
        logging.info(f"affirmative_opinion: {affirmative_opinion}")
        logging.info(f"negative_opinion: {negative_opinion}")
        logging.info(f"✈️==================================== start... =============================================")

        ## 2. alternating speeches
        for i in range(1, rounds + 1):
            logging.info(
                f"✈️==================================== round#{i} start =============================================")
            loading_speech = DebateSpeech.from_dict({
                "content": f"\n\n**round#{i} start** \n\n",
                "round": i,
                "type": "loading",
                "stance": "stage",
                "name": "stage",
                "finished": True
            })
            yield loading_speech

            loading_speech = DebateSpeech.from_dict({
                "content": f"\n\n【affirmative】✅:{self.affirmative_speaker.name()}\n Searching ....\n",
                "round": i,
                "type": "loading",
                "stance": "stage",
                "name": "stage",
                "finished": True
            })
            yield loading_speech

            # affirmative speech
            speech = await self.affirmative_speech(i, topic, affirmative_opinion, negative_opinion)
            yield speech
            await speech.wait_until_finished()
            self.store_speech(speech)

            loading_speech = DebateSpeech.from_dict({
                "content": f"\n\n【negative】❌:{self.negative_speaker.name()}\n Searching ....\n",
                "round": i,
                "type": "loading",
                "stance": "stage",
                "name": "stage",
                "finished": True
            })
            yield loading_speech

            # negative speech
            speech = await self.negative_speech(i, topic, negative_opinion, affirmative_opinion)
            yield speech
            await speech.wait_until_finished()
            self.store_speech(speech)

            logging.info(
                f"🛬==================================== round#{i} end =============================================")

        ## 3. summary speech
        moderator_speech = await self.moderator.summary_speech()
        if not moderator_speech:
            return
        yield moderator_speech
        await moderator_speech.wait_until_finished()
        await self.workspace.add_artifact(
            CodeArtifact.build_artifact(
                artifact_type=ArtifactType.CODE,
                artifact_id="result",
                code_type='html',
                content=moderator_speech.content,
                metadata={
                    "topic": topic
                }
            )
        )
        logging.info(
            f"🛬==================================== total is end =============================================")

    async def moderator_speech(self, topic, rounds) -> DebateSpeech | None:
        results = await self.moderator.async_policy(Observation(content=topic, info={"rounds": rounds}))
        if not results or not results[0] or not results[0].policy_info:
            return None
        return results[0].policy_info

    async def affirmative_speech(self, round: int, topic: str, opinion: str, oppose_opinion: str) -> DebateSpeech:
        """
        The affirmative speaker starts their speech.
        """

        affirmative_speaker = self.get_affirmative_speaker()

        logging.info(affirmative_speaker.name() + ": " + "start")

        speech = await affirmative_speaker.speech(topic, opinion, oppose_opinion, round, self.speeches)

        logging.info(affirmative_speaker.name() + ": result: " + speech.content)
        return speech

    async def negative_speech(self, round: int, topic: str, opinion: str, oppose_opinion: str) -> DebateSpeech:
        """
        After the affirmative speaker finishes, the negative speaker starts their speech.
        """

        negative_speaker = self.get_negative_speaker()

        logging.info(negative_speaker.name() + ": " + "start")

        speech = await negative_speaker.speech(topic, opinion, oppose_opinion, round, self.speeches)

        logging.info(negative_speaker.name() + ": result: " + speech.content)
        return speech

    def get_affirmative_speaker(self) -> DebateAgent:
        """
        return the affirmative speaker
        """
        return self.affirmative_speaker

    def get_negative_speaker(self) -> DebateAgent:
        """
        return the negative speaker
        """
        return self.negative_speaker

    def store_speech(self, speech: DebateSpeech):
        self.moderator.memory.add(MemoryItem.from_dict({
            "content": speech.content,
            "metadata": {
                "round": speech.round,
                "speaker": speech.name,
                "type": speech.type
            }
        }))
        self.speeches.append(speech)

    def gen_closing_statement(self):
        pass
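A hedged wiring sketch (not part of the upload) of how a caller might drive DebateArena. How to populate AgentConfig and construct WorkSpace depends on the aworld deployment, so those two constructors are shown as assumptions only.

import asyncio

from aworld.config import AgentConfig
from aworld.output import WorkSpace
from examples.debate.agent.debate_agent import DebateAgent
from examples.debate.agent.main import DebateArena
from examples.debate.agent.moderator_agent import ModeratorAgent


async def run_debate():
    workspace = WorkSpace()  # assumption: construct per aworld's WorkSpace API

    arena = DebateArena(
        # assumption: AgentConfig() is filled with the model/provider settings your setup needs
        affirmative_speaker=DebateAgent(name="Jake", stance="affirmative", conf=AgentConfig()),
        negative_speaker=DebateAgent(name="Lucy", stance="negative", conf=AgentConfig()),
        moderator=ModeratorAgent(conf=AgentConfig()),
        workspace=workspace,
    )

    # async_run yields moderator, loading, and debater speeches as they are produced;
    # streaming content arrives through each speech's parts and is complete once finished is True.
    async for speech in arena.async_run(topic="Who is better, Jordan or LeBron?", rounds=2):
        print(f"[{speech.name}] round {speech.round} ({speech.type})")

# asyncio.run(run_debate())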
examples/debate/agent/moderator_agent.py
ADDED
@@ -0,0 +1,146 @@
import logging
from abc import ABC
from datetime import datetime
from typing import Dict, Any, Union, List

from pydantic import Field

from aworld.config import AgentConfig
from aworld.core.common import Observation, ActionModel
from aworld.output import MessageOutput, WorkSpace, ArtifactType, SearchOutput
from examples.debate.agent.base import DebateSpeech
from examples.debate.agent.prompts import user_assignment_system_prompt, summary_system_prompt, summary_debate_prompt
from examples.debate.agent.stream_output_agent import StreamOutputAgent


def truncate_content(raw_content, char_limit):
    if raw_content is None:
        raw_content = ''
    if len(raw_content) > char_limit:
        raw_content = raw_content[:char_limit] + "... [truncated]"
    return raw_content


class ModeratorAgent(StreamOutputAgent, ABC):
    stance: str = "moderator"
    topic: str = Field(default=None)
    affirmative_opinion: str = Field(default=None)
    negative_opinion: str = Field(default=None)

    def __init__(self, conf: AgentConfig,
                 **kwargs
                 ):
        super().__init__(conf)

    async def async_policy(self, observation: Observation, info: Dict[str, Any] = {}, **kwargs) -> Union[
        List[ActionModel], None]:
        ## step 1: read params
        topic = observation.content

        ## step 2: generate opinions
        output = await self.gen_opinions(topic)

        ## step 3: generate the speech
        moderator_speech = DebateSpeech.from_dict({
            "content": "",
            "round": 0,
            "type": "speech",
            "stance": "moderator",
            "name": self.name(),
        })

        async def after_speech_call(message_output_response):
            logging.info("moderator: after_speech_call")
            opinions = message_output_response
            self.affirmative_opinion = opinions.get("positive_opinion")
            self.negative_opinion = opinions.get("negative_opinion")
            moderator_speech.metadata = {
                "topic": topic,
                "affirmative_opinion": self.affirmative_opinion,
                "negative_opinion": self.negative_opinion,
            }
            moderator_speech.finished = True

        await moderator_speech.convert_to_parts(output, after_speech_call)

        action = ActionModel(
            policy_info=moderator_speech
        )

        return [action]

    async def gen_opinions(self, topic) -> MessageOutput:

        current_time = datetime.now().strftime("%Y-%m-%d-%H")
        human_prompt = self.agent_prompt.format(topic=topic,
                                                current_time=current_time,
                                                )

        messages = [
            {"role": "system", "content": user_assignment_system_prompt},
            {"role": "user", "content": human_prompt}
        ]

        output = await self.async_call_llm(messages, json_parse=True)

        return output

    async def summary_speech(self) -> DebateSpeech:

        chat_history = await self.get_formated_history()
        print(f"chat_history is \n {chat_history}")

        search_results_content_history = await self.get_formated_search_results_content_history()
        print(f"search_results_content_history is \n {search_results_content_history}")

        human_prompt = summary_debate_prompt.format(topic=self.topic,
                                                    opinion=self.affirmative_opinion,
                                                    oppose_opinion=self.negative_opinion,
                                                    chat_history=chat_history,
                                                    search_results_content_history=search_results_content_history
                                                    )

        messages = [
            {"role": "system", "content": summary_system_prompt},
            {"role": "user", "content": human_prompt}
        ]

        output = await self.async_call_llm(messages, json_parse=False)

        moderator_speech = DebateSpeech.from_dict({
            "content": "",
            "round": 0,
            "type": "summary",
            "stance": "moderator",
            "name": self.name(),
        })

        async def after_speech_call(message_output_response):
            moderator_speech.finished = True

        await moderator_speech.convert_to_parts(output, after_speech_call)

        return moderator_speech

    async def get_formated_history(self):
        formated = []
        for item in self.memory.get_all():
            formated.append(f"{item.metadata['speaker']} (round {item.metadata['round']}): {item.content}")
        return "\n".join(formated)

    async def get_formated_search_results_content_history(self):
        if not self.workspace:
            return
        search_results = self.workspace.list_artifacts(ArtifactType.WEB_PAGES)
        materials = []
        for search_result in search_results:
            if isinstance(search_result.content, SearchOutput):
                for item in search_result.content.results:
                    materials.append(
                        f"{search_result.metadata['user']} (round {search_result.metadata['round']}): {search_result.content.query}: url: {item.url}, title: {item.title}, description: {item.content}")

        return "\n".join(materials)

    def set_workspace(self, workspace: WorkSpace):
        self.workspace = workspace
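For reference, gen_opinions calls async_call_llm with json_parse=True, so the message_output_response handed to after_speech_call is the parsed dict that generate_opinions_prompt asks the model to produce. An illustrative sketch of the shape it consumes (values are made up):

opinions = {
    "positive_opinion": "Jordan is the best",
    "negative_opinion": "LeBron is the best",
}
affirmative_opinion = opinions.get("positive_opinion")
negative_opinion = opinions.get("negative_opinion")
# These land in moderator_speech.metadata and are read back by DebateArena.async_run.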
examples/debate/agent/prompts.py
ADDED
@@ -0,0 +1,251 @@
user_assignment_system_prompt = "You are a helpful search agent."

user_assignment_prompt = """
While facing the hot topic: {topic}, your opinion is {opinion}. You stand by your opinion and fight any other opinion (such as {oppose_opinion}) that differs from yours.

You have an assistant that can help search relevant materials online to support your opinion {opinion} on the topic: {topic}.

While facing your opponent's claim {last_oppose_speech_content}, you decide to fight back! Now you need to ask your assistant to do an online survey based on that claim, to give you more insights so you can decide how to debate your opponent.

For example, you could ask your assistant to search: A, B, C. Then you will gain more insights and can decide how to fight back!

Attention: You need to pay attention to the current time ({current_time}).
If you want to search for something that is time-sensitive, especially materials that need to be up to date, include the current time in your assignment queries so your assistant can retrieve up-to-date results.


Format Requirements (queries separated by , ), limit {limit}:
aaa, bbb, ccc...

Now, output your assignment queries (strictly following the Format Requirements: separated by ,) to your assistant.
"""

user_debate_system_prompt = "You are an impressive debater."
user_debate_prompt = """
## Role
You are an outstanding debater, with a fiery and stubborn personality, sharp language, and a penchant for irony.
Your responsibility is to respond to the content of the opposing debater's speech based on the current debate topic, your stance, your skills, and your restrictions.


## Current Debate Topic
{topic}

## Your Stance
{opinion}

## Opponent's Stance
{oppose_opinion}


## Your Skills
- You have a strong sense of self-esteem, a bit rebellious, enjoy arguing back, and speak quite aggressively.
- Sometimes you like to use habitual rhetorical questions to belittle the opponent.
- When you sense the opponent is about to leave or the conversation is hard to continue due to lack of topics, you can throw out provocative questions related to the current debate topic to challenge the opponent.
- You excel at finding loopholes in the opponent's arguments and incessantly attacking their weaknesses.
- You are adept at using reference materials to support your arguments with facts and counter the opponent's loopholes.
- Your language is concise and incisive, always able to capture the key points. When responding, you can settle the matter in just three or four sentences without the need for lengthy explanations.


## Attention
- It is strictly prohibited to use phrases like “let's switch to a lighter topic.”
- The output content must be organized according to the given format and must not deviate from the framework requirements.
- If you recognize that there is no chat history and your opponent has not said anything yet, you are in the first round; directly and briefly state your opinion to your opponent.
- You are not allowed to output your inner thinking chain; just directly speak out to your opponent.
- Please do not say too much; it will make you seem too mild, which directly undermines your character.
- Be brief and sharp, and use sarcasm frequently. Limit your response to 100-150 words.
- DO NOT output your response starting with 'You:', JUST DIRECTLY output your response without "You:".


## Dialogue Style Examples
{few_shots}


## Current Task
You are arguing with your opponent on the topic: {topic}. Your opinion is {opinion}.
Below is the content of your conversation:
{chat_history}

Now, your opponent just said to you: “{last_oppose_speech_content}”.

At this moment, you have obtained the following related materials, which may be beneficial for your rebuttal against the opponent.

Reference materials: {search_results_content}


Please use your character's identity traits, skills, restrictions, and dialogue style examples, relying on your reference materials, to counter the opposing debater in an incisive and concise way. Never output "You:". Now, please directly output your response to your opponent without "You:".
"""

affirmative_few_shots = """
Your opponent:
You: Let me make it straight, Jordan is the best, who dares to oppose?

Your opponent: James has a better character. Even when JR Smith made a blunder in the finals, LeBron was still supportive. If it were Jordan, he would have chased him around the court in anger.
You: Cut it out! Michael Jordan was a ruthless winner on the court. He wouldn't tolerate any mistakes, and it's this relentless standard that earned him six championships and a perfect record in the finals. So, no matter how much of a nice guy LeBron tries to be, it won't erase his multiple finals defeats.

Your opponent: James has a stronger body, and physique is the most important factor on the basketball court.
You: My Jesus, I can't believe someone would say that. Basketball is far more than physique. Skills, mind, and leadership all matter. In these aspects, James is no match for Jordan. If James is so proud of his physique, why doesn't he go work in the fields?
"""

negative_few_shots = """
Your opponent:
You: Let me make it straight, LeBron is the best, who dares to oppose?

Your opponent: With no doubt, Jordan's skills are more well-rounded.
You: Would you stop kidding... Since Jordan's skills are supposedly so well-rounded, then tell me why his three-point shooting percentage is so low. Jordan was just given a greater personal boost because of the unique era he played in.
"""

generate_opinions_prompt = """
Here is the debate topic: {topic}.
Please output the two sides' (positive side vs negative side) opinions on the topic.

Output format:
{{
"positive_opinion":"xxx"
"negative_opinion":"yyy"
}}


Now the topic is {topic}. Please follow the examples and the output format, and output the two sides' opinions.
You must always return JSON; do not return markdown tags or anything else, such as ```json or ```.


For example:
----------------------------------
topic: Who is better? A or B?

{{
"positive_opinion":"A"
"negative_opinion":"B"
}}

----------------------------------
topic: Is it OK to drink wine?
positive_opinion: Yes
negative_opinion: No

{{
"positive_opinion":"Yes"
"negative_opinion":"No"
}}
"""

summary_system_prompt = "You are a good assistant for making summaries."
summary_debate_prompt = """
## Your Role
You are a reliable assistant for making summaries, skilled in information architecture and visual storytelling, capable of transforming any content into stunning cards in a webpage format.
Your responsibility is: 1. read people's conversation and summarize it; 2. translate your summary into HTML code.


## Current Situation
1. You find that several people have started a debate on a particular topic "{topic}";
2. One side is holding the opinion: {opinion}, the other side is holding: {oppose_opinion}.
2.1 For the details of the conversation between these two sides, please refer to the Conversation History below.
3. Each time one side speaks, he/she cites the supportive materials searched on the web, in the form of URLs, title, and description.
3.1 For the details of the supportive materials of each side for each conversation round, please refer to the Supportive Materials History below.
4. Now you are supposed to make a concise, brief summary of each conversation round between the two sides, in terms of viewpoint and citation.
4.1 'viewpoint' is the main point of each side in each conversation round;
4.2 'citation' is the formatted structure of the URLs and titles of the supportive materials that are actually cited in each conversation round.


## Conversation History:
{chat_history}


## Supportive Materials History
{search_results_content_history}


## Summary Format
debater_name1's summary (round 1): xxxx
debater_name1's citation (round 1): url_1: xxxx, title_1: xxxx; url_2: xxxx, title_2: xxxx

debater_name2's summary (round 1): yyyy
debater_name2's citation (round 1): url_1: yyyy, title_1: yyyy; url_2: yyyy, title_2: yyyy...

debater_name1's summary (round 2): pppp
debater_name1's citation (round 2): url_1: pppp, title_1: pppp; url_2: pppp, title_2: pppp

debater_name2's summary (round 2): qqqq
debater_name2's citation (round 2): url_1: qqqq, title_1: qqqq; url_2: qqqq, title_2: qqqq...
...


## Write HTML Requirements
1. You should only present using HTML code, including basic HTML, CSS, and JavaScript. This should encompass text, visualization, and structured results.
2. Provide complete HTML code; CSS and JavaScript should also be included within the code to ensure the user can open a single file.
3. Do not arbitrarily omit the core viewpoints from the original text; core viewpoints and summaries must be preserved.


## Write HTML Technical Implementation
1. Utilize modern CSS techniques (such as flex/grid layouts, variables, gradients)
2. Ensure the code is clean and efficient, without redundant elements
3. Add a save button that does not interfere with the design
4. Implement a one-click save-as-image feature using html2canvas
5. The saved image should only contain the cover design, excluding interface elements
6. Use Google Fonts or other CDNs to load appropriate modern fonts
7. Online icon resources can be used (such as Font Awesome)


## Write HTML Professional Typography Techniques
1. Apply the designer's common "negative space" technique to create focal points
2. Maintain harmonious proportion between text and decorative elements
3. Ensure a clear visual flow to guide the reader's eye movement
4. Use subtle shadow or light effects to increase depth
5. For webpage URL addresses and their corresponding titles, use hyperlinks.


## Example
Topic: Which is more important to success? Working hard or Opportunity?

Conversation History:
Tom (round 1): Hard work can help individuals continuously improve their skills and professional knowledge, laying a solid foundation for achieving success. A programmer Kim spends time every day learning new languages and technologies, eventually becoming an expert in the field and securing an important position at a major tech company.
Jerry (round 1): Opportunity is more important. Steve Jobs saw the great potential of the personal computer, then he founded Apple.
Tom (round 2): Many famous entrepreneurs experienced numerous setbacks and failures before achieving success. They did not just happen to be lucky enough to get opportunities, but rather, through continuous effort and relentless perseverance, they eventually succeeded. For example, Thomas Edison conducted thousands of experiments in the process of inventing the light bulb, which demonstrates that his success was inseparable from his tenacious effort.
Jerry (round 2): Many historical events were driven by opportunities rather than purely by abilities. For example, during wars, some generals won battles because they made crucial decisions at key moments, even though they were not the most experienced commanders.

Supportive Materials History:
Tom (round 1): id1: url: aaaa, title: Why is Kim? description: bbbb.
Jerry (round 1): id1: url: cccc, title: Steve's story. description: dddd.
Tom (round 2): id1: url: eeee, title: The invention of light bulb. description: ffff.
Jerry (round 2): id1: url: gggg, title: Some interesting things during the war. description: hhhh.

Your Summary:
Tom's summary (round 1): Hard work improves people's skills and thus leads to personal success.
Tom's citation (round 1): url_1: aaaa, title_1: Why is Kim?

Jerry's summary (round 1): Steve's story supports that opportunity is more important.
Jerry's citation (round 1): url_1: cccc, title_1: Steve's story.

Tom's summary (round 2): Entrepreneurs, like Thomas Edison, faced repeated failures before succeeding. Their achievements resulted from persistent effort and perseverance, not mere luck.
Tom's citation (round 2): url_1: eeee, title_1: The invention of light bulb.

Jerry's summary (round 2): Historical events are often driven by opportunity, as seen when generals win battles through timely decisions rather than experience.
Jerry's citation (round 2): url_1: gggg, title_1: Some interesting things during the war.



## Attention
- Strictly follow the ## Summary Format. The output content must be organized according to it and must not deviate from the framework requirements.
- The summary of each side's conversation round should be very concise (ideally within 30 words); it cannot be too long. Just capture the key points.
- Only the supportive materials that have actually been referred to by the debater can appear in the citation, in terms of their URLs and titles. Ignore materials that have not been referred to.
- You are not allowed to output your inner thinking chain.
- DO NOT output your response starting with 'You:', JUST DIRECTLY output your response without "You:".
- Please output your summary in HTML form directly in one step.



Please deeply understand Your Role and the Current Situation. Strictly following the Summary Format and the Attention notes, with the Example as a guide, output your summary according to the Conversation History and Supportive Materials History.
Then transform your summary into HTML according to the Write HTML Requirements, Write HTML Technical Implementation, and Write HTML Professional Typography Techniques.
"""

# ## Conversation History:
# Jake (round1): Jordan is the best, he scores 35.1 points per game. No one is better than that.
# Lucy (round1): Jordan's opponent is too weak, Lebron's opponent is stronger, so James is better.
# Jake (round2): Jordan is the best, he has 6 championships. No one is better than that.
# Lucy (round2): Jordan's teammates are better, pippen, rodman... Lebron leads the whole team forward.


# ## Supportive Materials History
# Jake (round1): url_1: 123, title_1: Jordan's data;
# Lucy (round1): url_1: 456, title_1: The diff between basketball's eras; url_2: 9999, title_2: which second-order wave forces on hydrodynamcis.
# Jake (round2): url_1: 123678aa, title_1: Who's got most champions?
# Lucy (round2): url_1: xxxbbbw45, title_1: The importance of teammates.
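A small formatting sketch showing how these templates are filled in; the keyword arguments mirror the ones DebateAgent.gen_keywords passes, and the values are illustrative only.

from datetime import datetime

from examples.debate.agent.prompts import user_assignment_prompt

prompt = user_assignment_prompt.format(
    topic="Who is better, Jordan or LeBron?",
    opinion="LeBron is the best",
    oppose_opinion="Jordan is the best",
    last_oppose_speech_content="Jordan has six championships.",
    current_time=datetime.now().strftime("%Y-%m-%d-%H"),
    limit=2,
)
print(prompt)  # ends with the comma-separated query format the agent splits on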
examples/debate/agent/stream_output_agent.py
ADDED
@@ -0,0 +1,19 @@
from aworld.config import AgentConfig
from aworld.agents.llm_agent import Agent
from aworld.models.llm import acall_llm_model_stream
from aworld.output import MessageOutput


class StreamOutputAgent(Agent):
    def __init__(self, conf: AgentConfig, **kwargs):
        super().__init__(conf)

    async def async_call_llm(self, messages, json_parse=False) -> MessageOutput:
        # Async streaming via acall_llm_model_stream, wrapped in a MessageOutput
        async def async_generator():
            async for chunk in acall_llm_model_stream(self.llm, messages, stream=True):
                if chunk.content:
                    yield chunk.content

        return MessageOutput(source=async_generator(), json_parse=json_parse)
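A consumption sketch, assuming agent is an already constructed StreamOutputAgent subclass: the MessageOutput returned here exposes both a streaming path (response_generator, iterated by DebateSpeech.convert_to_parts) and a blocking path (get_finished_response, awaited by DebateAgent.gen_keywords).

async def consume(agent: StreamOutputAgent, messages: list[dict]) -> str:
    # Streaming path, as DebateSpeech.convert_to_parts does:
    output = await agent.async_call_llm(messages)
    async for chunk in output.response_generator:
        print(chunk, end="")

    # Blocking path, as DebateAgent.gen_keywords does (a fresh call, since the
    # generator above has already been consumed):
    full = await agent.async_call_llm(messages)
    return await full.get_finished_response()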