Spaces:
Sleeping
Sleeping
Upload 15 files
Browse files- aworld/config/android_agent.yaml +13 -0
- aworld/config/android_tool.yaml +8 -0
- aworld/config/browser_agent.yaml +25 -0
- aworld/config/browser_tool.yaml +24 -0
- aworld/config/cl100k_base.tiktoken +0 -0
- aworld/config/conf.py +273 -0
- aworld/config/document_analysis_tool.yaml +4 -0
- aworld/config/human_confirm_tool.yaml +4 -0
- aworld/config/mcp.json +16 -0
- aworld/config/mcp_tool.yaml +3 -0
- aworld/config/openai_gym_tool.yaml +3 -0
- aworld/config/python_execute_tool.yaml +5 -0
- aworld/config/qwen.tiktoken +0 -0
- aworld/config/search_api_tool.yaml +5 -0
- aworld/config/shell_tool.yaml +6 -0
aworld/config/android_agent.yaml
ADDED
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
name: "android_agent"
|
2 |
+
llm_config:
|
3 |
+
llm_provider: openai
|
4 |
+
llm_model_name: gpt-4o
|
5 |
+
llm_temperature: 1.
|
6 |
+
llm_base_url:
|
7 |
+
llm_api_key:
|
8 |
+
max_steps: 10
|
9 |
+
max_input_tokens: 128000
|
10 |
+
max_actions_per_step: 10
|
11 |
+
system_prompt:
|
12 |
+
working_dir:
|
13 |
+
enable_recording: False
|
aworld/config/android_tool.yaml
ADDED
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
avd_name:
|
2 |
+
adb_path:
|
3 |
+
emulator_path:
|
4 |
+
headless: False
|
5 |
+
custom_executor: True
|
6 |
+
enable_recording: False
|
7 |
+
working_dir:
|
8 |
+
max_retry: 3
|
aworld/config/browser_agent.yaml
ADDED
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
name: "browser_agent"
|
2 |
+
need_reset: False
|
3 |
+
llm_config:
|
4 |
+
llm_provider: openai
|
5 |
+
llm_model_name: gpt-4o
|
6 |
+
llm_temperature: 1.
|
7 |
+
llm_base_url:
|
8 |
+
llm_api_key:
|
9 |
+
max_steps: 10
|
10 |
+
max_input_tokens: 128000
|
11 |
+
max_actions_per_step: 10
|
12 |
+
system_prompt:
|
13 |
+
working_dir:
|
14 |
+
enable_recording: False
|
15 |
+
include_attributes:
|
16 |
+
- title
|
17 |
+
- type
|
18 |
+
- name
|
19 |
+
- role
|
20 |
+
- aria-label
|
21 |
+
- placeholder
|
22 |
+
- value
|
23 |
+
- alt
|
24 |
+
- aria-expanded
|
25 |
+
- data-date-format
|
aworld/config/browser_tool.yaml
ADDED
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
browse_name: chromium
|
2 |
+
headless: False
|
3 |
+
width: 1280
|
4 |
+
height: 720
|
5 |
+
slow_mo: 0
|
6 |
+
disable_security: False
|
7 |
+
custom_executor: False
|
8 |
+
dom_js_path:
|
9 |
+
private:
|
10 |
+
locale:
|
11 |
+
geolocation:
|
12 |
+
storage_state:
|
13 |
+
do_highlight: True
|
14 |
+
focus_highlight: -1
|
15 |
+
viewport_expansion: 0
|
16 |
+
cdp_url:
|
17 |
+
wss_url:
|
18 |
+
proxy:
|
19 |
+
cookies_file:
|
20 |
+
working_dir:
|
21 |
+
enable_recording: False
|
22 |
+
sleep_after_init: 0
|
23 |
+
max_retry: 3
|
24 |
+
reuse: True
|
aworld/config/cl100k_base.tiktoken
ADDED
The diff for this file is too large to render.
See raw diff
|
|
aworld/config/conf.py
ADDED
@@ -0,0 +1,273 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# coding: utf-8
# Copyright (c) 2025 inclusionAI.
import copy
import os
import traceback
import uuid
from collections import OrderedDict
from enum import Enum
from pathlib import Path
from typing import Optional, List, Dict, Any

import yaml
from pydantic import BaseModel, Field

from aworld.logs.util import logger
|
15 |
+
|
16 |
+
|
17 |
+
def load_config(file_name: str, dir_name: str = None) -> Dict[str, Any]:
    """Dynamically load a YAML config file.

    Args:
        file_name: Config file name.
        dir_name: Config file directory; defaults to the directory of this module.

    Returns:
        Config dict; empty if the file is missing, empty, or unreadable.
    """
    if dir_name:
        file_path = os.path.join(dir_name, file_name)
    else:
        # load conf from the directory this module lives in
        current_dir = Path(__file__).parent.absolute()
        file_path = os.path.join(current_dir, file_name)
    if not os.path.exists(file_path):
        logger.debug(f"{file_path} not exists, please check it.")

    configs = dict()
    try:
        with open(file_path, "r") as file:
            yaml_data = yaml.safe_load(file)
            # safe_load returns None for an empty file; guard before update,
            # otherwise configs.update(None) raises TypeError.
            if yaml_data:
                configs.update(yaml_data)
    except FileNotFoundError:
        logger.debug(f"Can not find the file: {file_path}")
    except Exception:
        # Fix: the original passed traceback.format_exc() as a stray
        # positional logging argument with no placeholder in the message,
        # which triggers a logging formatting error; embed it instead.
        logger.warning(f"{file_name} read fail.\n{traceback.format_exc()}")
    return configs
|
47 |
+
|
48 |
+
|
49 |
+
def wipe_secret_info(config: Dict[str, Any], keys: List[str]) -> Dict[str, Any]:
    """Return a deep copy of this config as a plain Dict as well as wipe up secret info, used to log.

    Args:
        config: Config dict that may contain secret values (API keys, tokens).
        keys: Key names whose values must be masked.

    Returns:
        A deep copy of ``config`` in which every matching key's value
        (at any nesting depth, including dicts inside lists) is replaced
        by a mask string. The caller's ``config`` is left untouched.
    """

    def _wipe_secret(conf: Dict[str, Any]) -> Dict[str, Any]:
        def _wipe_secret_plain_value(v):
            if isinstance(v, List):
                return [_wipe_secret_plain_value(e) for e in v]
            elif isinstance(v, Dict):
                return _wipe_secret(v)
            else:
                return v

        # Iterate over a snapshot of the keys since values are reassigned.
        for key in list(conf.keys()):
            # strip('"') tolerates keys that were serialized with quotes.
            if key.strip('"') in keys:
                conf[key] = '-^_^-'
            else:
                _wipe_secret_plain_value(conf[key])
        return conf

    if not config:
        return config
    # Fix: work on a deep copy so the caller's live config keeps its secrets,
    # as the docstring promises — the original mutated the input in place,
    # destroying real secret values just to produce a log line.
    return _wipe_secret(copy.deepcopy(config))
|
74 |
+
|
75 |
+
|
76 |
+
class ClientType(Enum):
    # Transport used to reach the LLM provider.
    SDK = "sdk"    # call through the provider's native SDK client
    HTTP = "http"  # call through plain HTTP requests
|
79 |
+
|
80 |
+
|
81 |
+
class ConfigDict(dict):
    """Object mode operates dict, can read non-existent attributes through `get` method."""
    # Attribute access is routed straight to the dict item protocol, so
    # cfg.key and cfg["key"] are interchangeable. Note a missing attribute
    # raises KeyError (not AttributeError); use .get() for optional keys.
    __setattr__ = dict.__setitem__
    __getattr__ = dict.__getitem__

    def __init__(self, seq: dict = None, **kwargs):
        """Build from an optional plain dict plus keyword overrides."""
        if seq is None:
            seq = OrderedDict()
        super(ConfigDict, self).__init__(seq, **kwargs)
        self.nested(self)

    def nested(self, seq: dict):
        """Nested recursive processing dict.

        Args:
            seq: Python original format dict
        """
        for k, v in seq.items():
            if isinstance(v, dict):
                # ConfigDict(v) already converts v's nested dicts recursively
                # via its own __init__. Fix: the original additionally called
                # self.nested(v) here, which redundantly re-walked the value
                # and mutated the caller's input dict as a side effect.
                seq[k] = ConfigDict(v)
|
102 |
+
|
103 |
+
|
104 |
+
class BaseConfig(BaseModel):
    """Base class for all config models; adds plain-dict export."""

    def config_dict(self) -> ConfigDict:
        """Return this config as an attribute-accessible ConfigDict snapshot."""
        return ConfigDict(self.model_dump())
|
107 |
+
|
108 |
+
|
109 |
+
class ModelConfig(BaseConfig):
    """LLM connection and generation settings."""

    llm_provider: str = None
    llm_model_name: str = None
    llm_temperature: float = 1.
    llm_base_url: str = None
    llm_api_key: str = None
    llm_client_type: ClientType = ClientType.SDK
    llm_sync_enabled: bool = True
    llm_async_enabled: bool = True
    max_retries: int = 3
    max_model_len: Optional[int] = None  # Maximum model context length
    model_type: Optional[str] = 'qwen'  # Model type determines tokenizer and maximum length

    def __init__(self, **kwargs):
        """Validate fields via pydantic, then derive max_model_len from model_type.

        Fix: the original re-applied every kwarg with setattr after pydantic's
        __init__ had already validated and set the fields — a redundant loop,
        removed here. Declared fields always exist on the instance, so the
        original hasattr() guards were likewise always true and are dropped.
        """
        super().__init__(**kwargs)

        # init max_model_len only when the caller did not set it explicitly
        if self.max_model_len is None:
            # qwen or other default model_type
            self.max_model_len = 128000
            if self.model_type == 'claude':
                self.max_model_len = 200000
|
134 |
+
|
135 |
+
class LlmCompressionConfig(BaseConfig):
    """Settings for compressing long LLM conversations."""

    # Whether conversation compression is active.
    enabled: bool = False
    compress_type: str = 'llm'  # llm, llmlingua
    trigger_compress_token_length: int = 10000  # Trigger compression when exceeding this length
    # Model used to perform the compression when compress_type is 'llm'.
    compress_model: ModelConfig = None
|
140 |
+
|
141 |
+
class OptimizationConfig(BaseConfig):
    """Context performance-optimization switches."""

    # Whether context optimization is active.
    enabled: bool = False
    max_token_budget_ratio: float = 0.5  # Maximum context length ratio
|
144 |
+
|
145 |
+
class ContextRuleConfig(BaseConfig):
    """Context interference rule configuration"""

    # ===== Performance optimization configuration =====
    # Disabled by default; see OptimizationConfig.
    optimization_config: OptimizationConfig = OptimizationConfig()

    # ===== LLM conversation compression configuration =====
    # Disabled by default; see LlmCompressionConfig.
    llm_compression_config: LlmCompressionConfig = LlmCompressionConfig()
|
153 |
+
|
154 |
+
class AgentConfig(BaseConfig):
    """Configuration of a single agent: model access, prompts and run limits.

    The flat llm_* fields duplicate ModelConfig for backward compatibility;
    __init__ keeps them and the nested llm_config synchronized.
    """

    name: str = None
    desc: str = None
    # Nested model configuration; kept in sync with the flat llm_* fields below.
    llm_config: ModelConfig = ModelConfig()
    # for compatibility
    llm_provider: str = None
    llm_model_name: str = None
    llm_temperature: float = 1.
    llm_base_url: str = None
    llm_api_key: str = None
    llm_client_type: ClientType = ClientType.SDK
    llm_sync_enabled: bool = True
    llm_async_enabled: bool = True
    max_retries: int = 3
    max_model_len: Optional[int] = None  # Maximum model context length
    model_type: Optional[str] = 'qwen'  # Model type determines tokenizer and maximum length

    # default reset init in first
    need_reset: bool = True
    # use vision model
    use_vision: bool = True
    max_steps: int = 10
    max_input_tokens: int = 128000
    max_actions_per_step: int = 10
    system_prompt: Optional[str] = None
    agent_prompt: Optional[str] = None
    working_dir: Optional[str] = None
    enable_recording: bool = False
    use_tools_in_prompt: bool = False
    exit_on_failure: bool = False
    ext: dict = {}
    human_tools: List[str] = []

    # context rule
    context_rule: ContextRuleConfig = ContextRuleConfig()

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        # Apply all provided kwargs to the instance
        # NOTE(review): pydantic's __init__ above has already validated and
        # set these fields, so this loop looks redundant — confirm before
        # removing, since setattr re-triggers field validation.
        for key, value in kwargs.items():
            if hasattr(self, key):
                setattr(self, key, value)

        # Synchronize model configuration between AgentConfig and llm_config
        self._sync_model_config()

        # Initialize max_model_len if not set
        if not hasattr(self, 'max_model_len') or self.max_model_len is None:
            # Default to qwen or other model_type
            self.max_model_len = 128000
            if hasattr(self, 'model_type') and self.model_type == 'claude':
                self.max_model_len = 200000

    def _sync_model_config(self):
        """Synchronize model configuration between AgentConfig and llm_config"""
        # Ensure llm_config is initialized
        if self.llm_config is None:
            self.llm_config = ModelConfig()

        # Dynamically get all field names from ModelConfig
        model_fields = list(ModelConfig.model_fields.keys())

        # Filter to only include fields that exist in current AgentConfig
        agent_fields = set(self.model_fields.keys())
        filtered_model_fields = [field for field in model_fields if field in agent_fields]

        # Check which configuration has llm_model_name set
        agent_has_model_name = getattr(self, 'llm_model_name', None) is not None
        llm_config_has_model_name = getattr(self.llm_config, 'llm_model_name', None) is not None

        # llm_model_name acts as the tie-breaker: whichever side has it set
        # is treated as the source of truth; non-None fields win, so defaults
        # on the target side are overwritten while explicit Nones are kept.
        if agent_has_model_name:
            # If AgentConfig has llm_model_name, sync all fields from AgentConfig to llm_config
            for field in filtered_model_fields:
                agent_value = getattr(self, field, None)
                if agent_value is not None:
                    setattr(self.llm_config, field, agent_value)
        elif llm_config_has_model_name:
            # If llm_config has llm_model_name, sync all fields from llm_config to AgentConfig
            for field in filtered_model_fields:
                llm_config_value = getattr(self.llm_config, field, None)
                if llm_config_value is not None:
                    setattr(self, field, llm_config_value)
|
237 |
+
|
238 |
+
class TaskConfig(BaseConfig):
    """Per-task runtime settings."""

    # Fix: the original wrote `task_id: str = str(uuid.uuid4())`, which is
    # evaluated once at class-definition time — every TaskConfig instance
    # that did not pass task_id shared the exact same id. default_factory
    # generates a fresh uuid per instance.
    task_id: str = Field(default_factory=lambda: str(uuid.uuid4()))
    task_name: str | None = None
    max_steps: int = 100
    max_actions_per_step: int = 10
    stream: bool = False
    exit_on_failure: bool = False
    ext: dict = {}  # free-form extension options
|
246 |
+
|
247 |
+
|
248 |
+
class ToolConfig(BaseConfig):
    """Configuration of a tool executor."""

    name: str = None
    # NOTE(review): presumably selects a tool-specific executor over the
    # default one — confirm against the tool runtime.
    custom_executor: bool = False
    enable_recording: bool = False
    working_dir: str = ""
    max_retry: int = 3
    # Optional model for tools that themselves call an LLM.
    llm_config: ModelConfig = None
    reuse: bool = False  # presumably reuse a running tool instance — confirm
    use_async: bool = False
    exit_on_failure: bool = False
    ext: dict = {}  # free-form extension options
|
259 |
+
|
260 |
+
|
261 |
+
class RunConfig(BaseConfig):
    """Runtime/executor settings for running tasks."""

    name: str = 'local'  # execution backend name
    worker_num: int = 1
    reuse_process: bool = True
    # NOTE(review): looks like a dotted class path for a custom runner — confirm.
    cls: Optional[str] = None
    event_bus: Optional[Dict[str, Any]] = None
    tracer: Optional[Dict[str, Any]] = None
    replay_buffer: Optional[Dict[str, Any]] = None
|
269 |
+
|
270 |
+
|
271 |
+
class EvaluationConfig(BaseConfig):
    """Settings for evaluation runs."""

    work_dir: Optional[str] = None
    run_times: int = 1  # number of repetitions per evaluation
|
aworld/config/document_analysis_tool.yaml
ADDED
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
1 |
+
custom_executor: False
|
2 |
+
enable_recording: False
|
3 |
+
working_dir:
|
4 |
+
max_retry: 3
|
aworld/config/human_confirm_tool.yaml
ADDED
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
1 |
+
custom_executor: False
|
2 |
+
enable_recording: False
|
3 |
+
working_dir:
|
4 |
+
max_retry: 3
|
aworld/config/mcp.json
ADDED
@@ -0,0 +1,16 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"mcpServers": {
|
3 |
+
"amap-amap-sse": {
|
4 |
+
"url": "https://mcp.amap.com/sse?key=YOUR_API_KEY",
|
5 |
+
"timeout": 5.0,
|
6 |
+
"sse_read_timeout": 300.0
|
7 |
+
},
|
8 |
+
"tavily-mcp": {
|
9 |
+
"command": "npx",
|
10 |
+
"args": ["-y", "[email protected]"],
|
11 |
+
"env": {
|
12 |
+
"TAVILY_API_KEY": "YOUR_API_KEY"
|
13 |
+
}
|
14 |
+
}
|
15 |
+
}
|
16 |
+
}
|
aworld/config/mcp_tool.yaml
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
working_dir:
|
2 |
+
enable_recording: False
|
3 |
+
max_retry: 3
|
aworld/config/openai_gym_tool.yaml
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
env_id: "CartPole-v1"
|
2 |
+
render_mode: "human"
|
3 |
+
render: True
|
aworld/config/python_execute_tool.yaml
ADDED
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
custom_executor: False
|
2 |
+
enable_recording: False
|
3 |
+
working_dir:
|
4 |
+
max_retry: 3
|
5 |
+
|
aworld/config/qwen.tiktoken
ADDED
The diff for this file is too large to render.
See raw diff
|
|
aworld/config/search_api_tool.yaml
ADDED
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
custom_executor: False
|
2 |
+
enable_recording: False
|
3 |
+
working_dir:
|
4 |
+
max_retry: 3
|
5 |
+
|
aworld/config/shell_tool.yaml
ADDED
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
custom_executor: False
|
2 |
+
env:
|
3 |
+
TEST_ENV: test
|
4 |
+
enable_recording: False
|
5 |
+
working_dir: "/tmp"
|
6 |
+
max_retry: 3
|