# HF_Agents_Final_Project/tests/test_prompt_formatting.py
# Author: Yago Bolivar
# feat: add tests for chess position analysis and prompt formatting scenarios
# (commit 9bdf620)
import yaml
import os
import sys
from smolagents import CodeAgent, DummyModel

# Load the updated prompts.yaml at import time; the resulting mapping is
# consumed by test_scenario() below.  An explicit encoding avoids
# platform-dependent default-codec surprises when the file contains
# non-ASCII prompt text.
with open("prompts.yaml", "r", encoding="utf-8") as stream:
    prompt_templates = yaml.safe_load(stream)
# Create a simple dummy model that will help us test the formatting
class TestFormattingModel(DummyModel):
    """Dummy model used to inspect prompt formatting.

    It prints every prompt it receives (so the rendered template can be
    eyeballed) and returns a canned chat-completion-shaped dict chosen by
    the ``scenario`` keyword argument.
    """

    # Canned assistant replies keyed by scenario name.  Any unknown
    # scenario falls through to the "Test failed" response in __call__.
    _SCENARIO_RESPONSES = {
        "normal": """Thought: I'll solve this task step by step.
```py
print("Starting to solve the task")
result = 2 + 2
print(f"The result is {result}")
```<end_code>""",
        "error": """Thought: I encountered an error.
```py
print("An error occurred: file not found")
```<end_code>""",
        "chess": """Thought: I need more information about the chess position.
```py
print("I need to see the chess image to analyze the position. Please provide the image.")
```<end_code>""",
    }

    # NOTE: no __init__ override needed — the original one only called
    # super().__init__(), which inheritance already provides.

    def __call__(self, prompt, **kwargs):
        """Print *prompt* for inspection and return a canned response.

        kwargs["scenario"] (default "normal") selects which reply is
        returned, mimicking the dict shape of a chat-completion API.
        """
        # Print the prompt for inspection
        print("=" * 50)
        print("PROMPT:")
        print("=" * 50)
        print(prompt)
        print("=" * 50)

        # Return a response that simulates different scenarios
        scenario = kwargs.get("scenario", "normal")
        content = self._SCENARIO_RESPONSES.get(scenario)
        if content is None:
            return {"choices": [{"message": {"content": "Test failed"}}]}
        return {"choices": [{"message": {"content": content}}]}
# Create a minimal agent to test your prompts
def test_scenario(scenario_name):
    """Build a throwaway CodeAgent with the canned model and run a single
    test task for *scenario_name*, printing and returning the result."""
    print(f"\nTesting scenario: {scenario_name}")
    model = TestFormattingModel()

    class DummyFinalAnswerTool:
        # Minimal stand-in for the project's real final_answer tool:
        # exposes the attributes smolagents expects on a tool object.
        def __init__(self):
            self.name = "final_answer"
            self.description = "Use this to provide the final answer"
            self.inputs = {"answer": "string"}
            self.output_type = "string"

        def __call__(self, answer):
            return f"Final answer submitted: {answer}"

    # Wire the updated prompt templates into a minimal agent.
    agent = CodeAgent(
        model=model,
        tools=[DummyFinalAnswerTool()],
        max_steps=2,
        verbosity_level=1,
        name="TestAgent",
        description="Testing prompt formatting",
        prompt_templates=prompt_templates,
    )

    # Drive the agent with a trivial task; the scenario kwarg is threaded
    # through to the dummy model's __call__.
    outcome = agent(
        f"This is a test task for the {scenario_name} scenario.",
        scenario=scenario_name,
    )
    print(f"Result: {outcome}")
    print("-" * 50)
    return outcome
if __name__ == "__main__":
    print("Testing prompt formatting with different scenarios\n")
    # Exercise every canned scenario in turn: normal, error, chess.
    for scenario in ("normal", "error", "chess"):
        test_scenario(scenario)