import os
import gradio as gr
import requests
import pandas as pd
import google.generativeai as genai
from smolagents import CodeAgent, DuckDuckGoSearchTool

# ModelOutput may not be importable on every smolagents version (assumption);
# fall back to a plain attribute container so generate() can still return an object exposing .content.
try:
    from smolagents.model.base import ModelOutput
except ImportError:
    from types import SimpleNamespace as ModelOutput

# System prompt used by the agent
SYSTEM_PROMPT = """You are a general AI assistant. I will ask you a question.
Report your thoughts, and finish your answer with just the answer (no prefixes like "FINAL ANSWER:").
Your answer should be a number OR as few words as possible OR a comma-separated list of numbers and/or strings.
If you're asked for a number, don't use commas or units like $ or %, unless specified.
If you're asked for a string, don't use articles or abbreviations (e.g. for cities), and write digits in plain text unless told otherwise."""
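
# Illustrative examples of answers that follow the formatting rules above
# (hypothetical questions, shown only as comments):
#   "How many moons does Mars have?"        -> "2"                  (bare number, no units)
#   "Which city hosted the 2012 Olympics?"  -> "London"             (no articles or abbreviations)
#   "List the primary colors of light."     -> "red, green, blue"   (comma-separated list)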
DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"

# Gemini model wrapper
class GeminiFlashModel:
    def __init__(self, model_id="gemini-1.5-flash", api_key=None):
        genai.configure(api_key=api_key or os.getenv("GEMINI_API_KEY"))
        self.model = genai.GenerativeModel(model_id)
        self.system_prompt = SYSTEM_PROMPT

    def generate(self, messages, stop_sequences=None, **kwargs):
        if not isinstance(messages, list) or not all(isinstance(m, dict) for m in messages):
            raise TypeError("Expected 'messages' to be a list of dicts")
        # Prepend the system prompt if the caller did not supply one.
        if not any(m.get("role") == "system" for m in messages):
            messages = [{"role": "system", "content": self.system_prompt}] + messages
        # Flatten the chat history into a single prompt string for Gemini.
        prompt = ""
        for m in messages:
            role = m["role"].capitalize()
            content = m["content"]
            prompt += f"{role}: {content}\n"
        try:
            response = self.model.generate_content(prompt)
            return ModelOutput(
                content=response.text.strip(),
                input_tokens=0,
                output_tokens=0,
                token_usage={}
            )
        except Exception as e:
            return ModelOutput(
                content=f"GENERATION ERROR: {e}",
                input_tokens=0,
                output_tokens=0,
                token_usage={}
            )
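
# Sketch of what GeminiFlashModel.generate() receives and produces (hypothetical
# values, shown as comments only): given
#   [{"role": "user", "content": "What is 2 + 2?"}]
# the system prompt is prepended and the history is flattened to
#   "System: You are a general AI assistant. ...\nUser: What is 2 + 2?\n"
# before being passed to genai.GenerativeModel.generate_content().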

# Agent wrapper
class MyAgent:
    def __init__(self):
        self.model = GeminiFlashModel(model_id="gemini-1.5-flash")
        self.agent = CodeAgent(tools=[DuckDuckGoSearchTool()], model=self.model)

    def __call__(self, question: str) -> str:
        result = self.agent.run(question)
        print(f"[DEBUG] Agent run result type: {type(result)}; value: {result}")
        # Return string content only
        if hasattr(result, "content"):
            return result.content
        elif isinstance(result, dict):
            return result.get("content", str(result))
        else:
            return str(result)
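
# Local smoke test (illustrative only, not executed by this app):
#   agent = MyAgent()
#   print(agent("In what year did the first Moon landing take place?"))  # expected answer: 1969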

# Main evaluation function
def run_and_submit_all(profile: gr.OAuthProfile | None):
    print("Starting run_and_submit_all...")
    space_id = os.getenv("SPACE_ID")

    if profile:
        username = profile.username
        print(f"User logged in: {username}")
    else:
        print("User not logged in.")
        return "Please log in to Hugging Face.", None

    questions_url = f"{DEFAULT_API_URL}/questions"
    submit_url = f"{DEFAULT_API_URL}/submit"

    try:
        agent = MyAgent()
    except Exception as e:
        return f"Error initializing agent: {e}", None

    # Fetch the evaluation questions from the scoring API.
    try:
        response = requests.get(questions_url, timeout=15)
        response.raise_for_status()
        questions_data = response.json()
    except Exception as e:
        return f"Error fetching questions: {e}", None

    results_log = []
    answers_payload = []

    # Run the agent on every question and collect answers for submission.
    for item in questions_data:
        task_id = item.get("task_id")
        question_text = item.get("question")
        if not task_id or question_text is None:
            continue
        print(f"Running agent on question: {question_text}")
        try:
            submitted_answer = agent(question_text)
            print(f"Agent answer: {submitted_answer} (type: {type(submitted_answer)})")
            answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
            results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
        except Exception as e:
            error_msg = f"AGENT ERROR: {e}"
            print(error_msg)
            results_log.append({
                "Task ID": task_id,
                "Question": question_text,
                "Submitted Answer": error_msg
            })

    if not answers_payload:
        return "Agent did not return any answers.", pd.DataFrame(results_log)

    submission_data = {
        "username": profile.username.strip(),
        "agent_code": f"https://huggingface.co/spaces/{space_id}/tree/main",
        "answers": answers_payload
    }

    # Submit all answers in a single POST and report the returned score.
    try:
        response = requests.post(submit_url, json=submission_data, timeout=60)
        response.raise_for_status()
        result_data = response.json()
        final_status = (
            f"Submission Successful!\n"
            f"User: {result_data.get('username')}\n"
            f"Score: {result_data.get('score', 'N/A')}% "
            f"({result_data.get('correct_count', '?')}/{result_data.get('total_attempted', '?')} correct)\n"
            f"Message: {result_data.get('message', 'No message received.')}"
        )
        return final_status, pd.DataFrame(results_log)
    except Exception as e:
        return f"Submission failed: {e}", pd.DataFrame(results_log)

# Gradio UI setup
with gr.Blocks() as demo:
    gr.Markdown("# Basic Agent Evaluation Runner")
    gr.Markdown("""
**Instructions:**
1. Clone this space and configure your Gemini API key.
2. Log in to Hugging Face.
3. Run your agent on evaluation tasks and submit answers.
""")
    gr.LoginButton()
    run_button = gr.Button("Run Evaluation & Submit All Answers")
    status_output = gr.Textbox(label="Submission Result", lines=5, interactive=False)
    results_table = gr.DataFrame(label="Results", wrap=True)
    run_button.click(fn=run_and_submit_all, outputs=[status_output, results_table])

if __name__ == "__main__":
    print("🔧 App starting...")
    demo.launch(debug=True, share=False)