import os
import gradio as gr
import requests
import pandas as pd

import google.generativeai as genai
from smolagents import CodeAgent, DuckDuckGoSearchTool
from types import SimpleNamespace

# System prompt used by the agent
SYSTEM_PROMPT = """You are a general AI assistant. I will ask you a question.
Report your thoughts, and finish your answer with just the answer, with no prefix such as "FINAL ANSWER:".
Your answer should be a number OR as few words as possible OR a comma-separated list of numbers and/or strings.
If you are asked for a number, do not use commas or units such as $ or % unless specified.
If you are asked for a string, do not use articles or abbreviations (e.g. for cities), and write digits in plain text unless told otherwise."""
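# Example of the intended answer style (hypothetical, not from the task set):
# Q: "What is the capital of France?"  ->  "Paris", not "FINAL ANSWER: Paris".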

DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"

# Gemini-compatible result wrapper that mimics smolagents' expected structure
def GenerationResult(content, token_usage=None, input_tokens=0, output_tokens=0):
    return SimpleNamespace(
        content=content,
        token_usage=token_usage or {},
        input_tokens=input_tokens,
        output_tokens=output_tokens
    )
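# Illustrative use (not executed): the agent only needs attribute access on the result.
#   msg = GenerationResult("Paris", output_tokens=1)
#   msg.content      -> "Paris"
#   msg.token_usage  -> {}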

# Thin wrapper that adapts the Gemini API to the model interface smolagents expects
class GeminiFlashModel:
    def __init__(self, model_id="gemini-1.5-flash", api_key=None):
        genai.configure(api_key=api_key or os.getenv("GEMINI_API_KEY"))
        self.model = genai.GenerativeModel(model_id)
        self.system_prompt = SYSTEM_PROMPT

    def generate(self, messages, stop_sequences=None, **kwargs):
        # stop_sequences and extra kwargs are accepted for interface compatibility
        # but are not forwarded to Gemini.
        if not isinstance(messages, list) or not all(isinstance(m, dict) for m in messages):
            raise TypeError("Expected 'messages' to be a list of dicts")

        if not any(m.get("role") == "system" for m in messages):
            messages = [{"role": "system", "content": self.system_prompt}] + messages

        prompt = ""
        for m in messages:
            role = m["role"].capitalize()
            content = m["content"]
            prompt += f"{role}: {content}\n"

        try:
            response = self.model.generate_content(prompt)
            return GenerationResult(
                content=response.text.strip(),
                input_tokens=0,
                output_tokens=0
            )
        except Exception as e:
            return GenerationResult(
                content=f"GENERATION ERROR: {e}",
                input_tokens=0,
                output_tokens=0
            )
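    # Depending on the smolagents version in use, the agent may call the model
    # object directly instead of via .generate(); aliasing keeps both paths working.
    __call__ = generate

# Illustrative stand-alone use of the wrapper (assumes GEMINI_API_KEY is set in the
# environment; not executed here):
#   model = GeminiFlashModel()
#   print(model.generate([{"role": "user", "content": "What is 2 + 2?"}]).content)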

# Agent wrapper: a smolagents CodeAgent with DuckDuckGo search, backed by the Gemini model
class MyAgent:
    def __init__(self):
        self.model = GeminiFlashModel(model_id="gemini-1.5-flash")
        self.agent = CodeAgent(tools=[DuckDuckGoSearchTool()], model=self.model)

    def __call__(self, question: str) -> str:
        return self.agent.run(question)
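# Illustrative use (not executed here):
#   agent = MyAgent()
#   agent("What is the capital of France?")  # expected: a short string such as "Paris"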

# Main evaluation function: fetch questions, run the agent on each, and submit the answers
def run_and_submit_all(profile: gr.OAuthProfile | None):
    space_id = os.getenv("SPACE_ID")

    if profile:
        username = profile.username
        print(f"User logged in: {username}")
    else:
        print("User not logged in.")
        return "Please login to Hugging Face.", None

    questions_url = f"{DEFAULT_API_URL}/questions"
    submit_url = f"{DEFAULT_API_URL}/submit"

    try:
        agent = MyAgent()
    except Exception as e:
        return f"Error initializing agent: {e}", None

    try:
        response = requests.get(questions_url, timeout=15)
        response.raise_for_status()
        questions_data = response.json()
    except Exception as e:
        return f"Error fetching questions: {e}", None

    results_log = []
    answers_payload = []

    for item in questions_data:
        task_id = item.get("task_id")
        question_text = item.get("question")
        if not task_id or question_text is None:
            continue
        try:
            submitted_answer = agent(question_text)
            answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
            results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
        except Exception as e:
            results_log.append({
                "Task ID": task_id,
                "Question": question_text,
                "Submitted Answer": f"AGENT ERROR: {e}"
            })

    if not answers_payload:
        return "Agent did not return any answers.", pd.DataFrame(results_log)

    submission_data = {
        "username": profile.username.strip(),
        "agent_code": f"https://huggingface.co/spaces/{space_id}/tree/main",
        "answers": answers_payload
    }

    try:
        response = requests.post(submit_url, json=submission_data, timeout=60)
        response.raise_for_status()
        result_data = response.json()
        final_status = (
            f"Submission Successful!\n"
            f"User: {result_data.get('username')}\n"
            f"Score: {result_data.get('score', 'N/A')}% "
            f"({result_data.get('correct_count', '?')}/{result_data.get('total_attempted', '?')} correct)\n"
            f"Message: {result_data.get('message', 'No message received.')}"
        )
        return final_status, pd.DataFrame(results_log)
    except Exception as e:
        return f"Submission failed: {e}", pd.DataFrame(results_log)

# Gradio UI setup
with gr.Blocks() as demo:
    gr.Markdown("# Basic Agent Evaluation Runner")
    gr.Markdown("""
    **Instructions:**
    1. Clone this space and configure your Gemini API key.
    2. Log in to Hugging Face.
    3. Run your agent on evaluation tasks and submit answers.
    """)

    gr.LoginButton()
    run_button = gr.Button("Run Evaluation & Submit All Answers")
    status_output = gr.Textbox(label="Submission Result", lines=5, interactive=False)
    results_table = gr.DataFrame(label="Results", wrap=True)

    # Gradio passes the gr.OAuthProfile argument automatically for logged-in users,
    # so it does not need to be listed in `inputs`.
    run_button.click(fn=run_and_submit_all, outputs=[status_output, results_table])

if __name__ == "__main__":
    print("🔧 App starting...")
    demo.launch(debug=True, share=False)