Fixed the display of actions to appear as strings
Used 'env.state.action_to_string' to display the action string rather than only the numeric action id.
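For context, 'action_to_string' is the standard OpenSpiel state method that maps a numeric action id to a human-readable move name, which is what the 'env.state.action_to_string(player, action)' signature suggests this env wraps. A minimal sketch of the difference, assuming pyspiel is available (the tic_tac_toe game is only an illustration, not part of this Space):

    import pyspiel

    game = pyspiel.load_game("tic_tac_toe")   # illustrative game
    state = game.new_initial_state()
    action = state.legal_actions()[0]

    # Raw action id: just a number, hard to follow in a game log.
    print(action)                                                   # e.g. 0
    # Human-readable form via the state's own mapping.
    print(state.action_to_string(state.current_player(), action))  # e.g. "x(0,0)"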
ui/gradio_config_generator.py
CHANGED
@@ -109,17 +109,17 @@ def _create_agent_config(player_type: str,
     print("🔧 AGENT CONFIG DEBUG: Creating agent config for:")
     print(f" player_type: {player_type}")
     print(f" model: {model}")
-
+
     # Handle Gradio-specific formats
     if player_type == "random_bot":
         config = {"type": "random"}
     elif player_type.startswith("hf_"):
         # Extract model from player type (e.g., "hf_gpt2" -> "gpt2")
         model_from_type = player_type[3:]  # Remove "hf_" prefix
-
+
         # Use the hf_prefixed model name for LLM registry lookup
         model_name = f"hf_{model_from_type}"
-
+
         config = {
             "type": "llm",  # Use standard LLM agent type
             "model": model_name  # This will be looked up in LLM_REGISTRY
@@ -127,13 +127,13 @@ def _create_agent_config(player_type: str,
     elif player_type.startswith("llm_"):
         # For backwards compatibility with LiteLLM models
         model_from_type = player_type[4:]  # Remove "llm_" prefix
-
+
         # Map display model names to actual model names with prefixes
         model_name = model or model_from_type
         if not model_name.startswith(("litellm_", "vllm_")):
             # Add litellm_ prefix for LiteLLM models
             model_name = f"litellm_{model_name}"
-
+
         config = {
             "type": "llm",
             "model": model_name
@@ -153,7 +153,7 @@ def _create_agent_config(player_type: str,
     else:
         # Default to random for unknown types
         config = {"type": "random"}
-
+
     print(f" ✅ Created config: {config}")
     return config
 
@@ -367,7 +367,12 @@ def _compute_actions_for_gradio(env, player_to_agent, observations, game_log):
         action, reasoning = _extract_action_and_reasoning(agent_response)
         actions[player] = action
 
-
+        # Always show both action number and action name (universal solution)
+        try:
+            action_name = env.state.action_to_string(player, action)
+        except Exception:
+            action_name = str(action)
+        game_log.append(f" Player {player} chooses action {action} ({action_name})")
         if reasoning and reasoning != "None":
             reasoning_preview = reasoning[:100] + ("..." if len(reasoning) > 100 else "")
             game_log.append(f" Reasoning: {reasoning_preview}")
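The try/except around 'action_to_string' means the log degrades gracefully for environments or states that cannot render an action name. A small self-contained sketch of that fallback behaviour; the helper name and the dummy state class below are illustrative only, not part of the actual module:

    def describe_action(state, player, action):
        # Prefer the engine's human-readable name; fall back to the raw id.
        try:
            name = state.action_to_string(player, action)
        except Exception:
            name = str(action)
        return f" Player {player} chooses action {action} ({name})"


    class _NoActionStrings:
        """Dummy state without action_to_string, to exercise the fallback path."""


    print(describe_action(_NoActionStrings(), 0, 7))
    # -> " Player 0 chooses action 7 (7)"

Catching the broad Exception keeps the Gradio game loop running even if a backend raises for a particular player or node type; the trade-off is that a genuine bug in the string mapping is silently reduced to the numeric id in the log.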