from typing import Any


def clean_answer(answer: Any) -> str:
    """
    Clean up the answer to remove common prefixes and formatting
    that models often add but that can cause exact match failures.

    Args:
        answer: The raw answer from the model

    Returns:
        The cleaned answer as a string
    """
    # Convert non-string types to strings
    if not isinstance(answer, str):
        # Handle numeric types (float, int)
        if isinstance(answer, float):
            # Format floating point numbers properly
            # Check if it's an integer value in float form (e.g., 12.0)
            if answer.is_integer():
                formatted_answer = str(int(answer))
            else:
                # For currency values that might need formatting
                if abs(answer) >= 1000:
                    formatted_answer = f"${answer:,.2f}"
                else:
                    formatted_answer = str(answer)
            return formatted_answer
        elif isinstance(answer, int):
            return str(answer)
        else:
            # For any other type
            return str(answer)

    # Now we know answer is a string, so we can safely use string methods
    # Normalize whitespace
    answer = answer.strip()

    # Remove common prefixes and formatting that models add
    prefixes_to_remove = [
        "The answer is ",
        "Answer: ",
        "Final answer: ",
        "The result is ",
        "To answer this question: ",
        "Based on the information provided, ",
        "According to the information: ",
    ]
    for prefix in prefixes_to_remove:
        if answer.startswith(prefix):
            answer = answer[len(prefix):].strip()

    # Remove quotes if they wrap the entire answer
    if (answer.startswith('"') and answer.endswith('"')) or (
        answer.startswith("'") and answer.endswith("'")
    ):
        answer = answer[1:-1].strip()

    return answer

# Clean the answer
clean_text = clean_answer(llm_response.content)
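

# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only; these inputs are not from the
# original file, and `llm_response` above is assumed to be defined elsewhere).
if __name__ == "__main__":
    assert clean_answer(12.0) == "12"              # integer-valued float
    assert clean_answer(1234.5) == "$1,234.50"     # large float formatted as currency
    assert clean_answer(42) == "42"                # plain int passes through as a string
    assert clean_answer('Answer: "Paris"') == "Paris"   # prefix and wrapping quotes stripped
    assert clean_answer("The answer is 7") == "7"        # common prefix stripped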