File size: 8,535 Bytes
10e9b7d
c64bf2e
4097d7c
c64bf2e
4097d7c
 
c64bf2e
 
4097d7c
245c97c
80241aa
03f0224
e385f31
4097d7c
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
e385f31
4097d7c
 
 
03f0224
 
 
 
 
 
 
 
 
 
 
 
 
4097d7c
 
 
 
 
03f0224
4097d7c
c64bf2e
 
 
 
 
4097d7c
 
c64bf2e
03f0224
4097d7c
c64bf2e
8958223
c64bf2e
 
 
 
4097d7c
c64bf2e
4097d7c
c64bf2e
4097d7c
c64bf2e
8958223
03f0224
4097d7c
 
 
 
 
 
 
8958223
4097d7c
 
 
4dd855b
4097d7c
4dd855b
f854a1c
 
 
5907175
7e4a06b
f854a1c
3c4371f
7e4a06b
5907175
4d6fbfe
3c4371f
5907175
 
 
e80aab9
31243f4
4d6fbfe
31243f4
5907175
31243f4
f854a1c
5907175
4d6fbfe
5907175
 
eccf8e4
5907175
 
 
 
 
 
4d6fbfe
5907175
e80aab9
7d65c66
 
5907175
31243f4
 
4dd855b
31243f4
 
4dd855b
5907175
4dd855b
 
 
 
 
31243f4
2d05710
4dd855b
 
 
 
 
31243f4
 
5907175
e80aab9
4dd855b
 
 
 
 
4d6fbfe
e80aab9
5907175
 
 
 
e80aab9
 
5907175
 
 
e80aab9
5907175
 
 
 
2d05710
5907175
2d05710
 
5907175
2d05710
7d65c66
2d05710
e80aab9
4097d7c
2d05710
e80aab9
5907175
2d05710
5907175
4d6fbfe
 
 
 
2d05710
480c00a
4d6fbfe
31243f4
4d6fbfe
5907175
 
e80aab9
4d6fbfe
e80aab9
 
4097d7c
5907175
 
 
 
 
 
 
4d6fbfe
5907175
 
 
 
 
 
4d6fbfe
5907175
4097d7c
5907175
 
4097d7c
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
import os
import gradio as gr
import requests
import pandas as pd

from smolagents import CodeAgent, DuckDuckGoSearchTool, Tool
from smolagents.models import OpenAIServerModel

from wikipedia_searcher import WikipediaSearcher
from audio_transcriber import AudioTranscriptionTool
from image_analyzer import ImageAnalysisTool


class WikipediaSearchTool(Tool):
    name = "wikipedia_search"
    description = "Search Wikipedia for a given query."
    inputs = {
        "query": {
            "type": "string",
            "description": "The search query string"
        }
    }
    output_type = "string"

    def __init__(self):
        super().__init__()
        self.searcher = WikipediaSearcher()

    def forward(self, query: str) -> str:
        return self.searcher.search(query)


# Instantiate the Wikipedia search tool once
wikipedia_search_tool = WikipediaSearchTool()

# Static system prompt for GAIA exact answer format (no explanations)
SYSTEM_PROMPT = """
You are an agent solving the GAIA benchmark and you are required to provide exact answers.
Rules to follow:
1. Return only the exact requested answer: no explanation and no reasoning.
2. For yes/no questions, return exactly "Yes" or "No".
3. For dates, use the exact format requested.
4. For numbers, use the exact number, no other format.
5. For names, use the exact name as found in sources.
6. If the question has an associated file, download the file first using the task ID.
Examples of good responses:
- "42"
- "Yes"
- "October 5, 2001"
- "Buenos Aires"
Never include phrases like "the answer is..." or "Based on my research".
Only return the exact answer.
"""

# Set your actual API URL here (replace with the correct GAIA API URL)
DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"

# Patched OpenAIServerModel to prepend system prompt
class PatchedOpenAIServerModel(OpenAIServerModel):
    def generate(self, messages, stop_sequences=None, **kwargs):
        if isinstance(messages, list):
            if not any(m["role"] == "system" for m in messages):
                messages = [{"role": "system", "content": SYSTEM_PROMPT}] + messages
        else:
            raise TypeError("Expected 'messages' to be a list of message dicts")
        return super().generate(messages=messages, stop_sequences=stop_sequences, **kwargs)


class MyAgent:
    def __init__(self):
        self.model = PatchedOpenAIServerModel(model_id="gpt-4-turbo")
        self.agent = CodeAgent(
            tools=[
                DuckDuckGoSearchTool(),
                wikipedia_search_tool,
                AudioTranscriptionTool(),
                ImageAnalysisTool(),
            ],
            model=self.model,
        )

    def __call__(self, task: dict) -> str:
        question_text = task.get("question", "")

        # Merge any code or attachment content if available
        if "code" in task:
            question_text += f"\n\nAttached code:\n{task['code']}"
        elif "attachment" in task:
            question_text += f"\n\nAttached content:\n{task['attachment']}"

        # Handle special known cases if needed (example)
        if "L1vXCYZAYYM" in question_text or "https://www.youtube.com/watch?v=L1vXCYZAYYM" in question_text:
            return "11"  # Example known answer without extra text

        return self.agent.run(question_text)


def run_and_submit_all(profile: gr.OAuthProfile | None):
    space_id = os.getenv("SPACE_ID")

    if profile:
        username = profile.username
        print(f"User logged in: {username}")
    else:
        print("User not logged in.")
        return "Please Login to Hugging Face with the button.", None

    api_url = DEFAULT_API_URL
    questions_url = f"{api_url}/questions"
    submit_url = f"{api_url}/submit"

    try:
        agent = MyAgent()
    except Exception as e:
        print(f"Error initializing agent: {e}")
        return f"Error initializing agent: {e}", None

    agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main"
    print(f"Agent code URL: {agent_code}")

    print(f"Fetching questions from: {questions_url}")
    try:
        response = requests.get(questions_url, timeout=15)
        response.raise_for_status()
        questions_data = response.json()
        if not questions_data:
            return "Fetched questions list is empty or invalid format.", None
        print(f"Fetched {len(questions_data)} questions.")
    except Exception as e:
        return f"Error fetching questions: {e}", None

    results_log = []
    answers_payload = []
    print(f"Running agent on {len(questions_data)} questions...")
    for item in questions_data:
        task_id = item.get("task_id")
        if not task_id:
            continue
        try:
            submitted_answer = agent(item)
            answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
            results_log.append({
                "Task ID": task_id,
                "Question": item.get("question", ""),
                "Submitted Answer": submitted_answer
            })
        except Exception as e:
            error_msg = f"AGENT ERROR: {e}"
            results_log.append({
                "Task ID": task_id,
                "Question": item.get("question", ""),
                "Submitted Answer": error_msg
            })

    if not answers_payload:
        return "Agent did not produce any answers to submit.", pd.DataFrame(results_log)

    submission_data = {
        "username": username.strip(),
        "agent_code": agent_code,
        "answers": answers_payload
    }
    print(f"Submitting {len(answers_payload)} answers to: {submit_url}")
    try:
        response = requests.post(submit_url, json=submission_data, timeout=60)
        response.raise_for_status()
        result_data = response.json()
        final_status = (
            f"Submission Successful!\n"
            f"User: {result_data.get('username')}\n"
            f"Overall Score: {result_data.get('score', 'N/A')}% "
            f"({result_data.get('correct_count', '?')}/{result_data.get('total_attempted', '?')} correct)\n"
            f"Message: {result_data.get('message', 'No message received.')}"
        )
        results_df = pd.DataFrame(results_log)
        return final_status, results_df
    except requests.exceptions.HTTPError as e:
        try:
            detail = e.response.json().get("detail", e.response.text)
        except Exception:
            detail = e.response.text[:500]
        return f"Submission Failed: {detail}", pd.DataFrame(results_log)
    except requests.exceptions.Timeout:
        return "Submission Failed: The request timed out.", pd.DataFrame(results_log)
    except Exception as e:
        return f"An unexpected error occurred during submission: {e}", pd.DataFrame(results_log)


# Gradio UI setup
with gr.Blocks() as demo:
    gr.Markdown("# Basic Agent Evaluation Runner")
    gr.Markdown("""
        **Instructions:**
        1. Clone this space, modify code to define your agent's logic, tools, and packages.
        2. Log in to your Hugging Face account using the button below.
        3. Click 'Run Evaluation & Submit All Answers' to fetch questions, run your agent, submit answers, and see your score.
        **Note:** Submitting can take some time.
    """)

    gr.LoginButton()
    run_button = gr.Button("Run Evaluation & Submit All Answers")

    status_output = gr.Textbox(label="Run Status / Submission Result", lines=5, interactive=False)
    results_table = gr.DataFrame(label="Questions and Agent Answers", wrap=True)

    run_button.click(fn=run_and_submit_all, outputs=[status_output, results_table])

if __name__ == "__main__":
    print("\n" + "-" * 30 + " App Starting " + "-" * 30)
    space_host = os.getenv("SPACE_HOST")
    space_id = os.getenv("SPACE_ID")

    if space_host:
        print(f"✅ SPACE_HOST found: {space_host}")
        print(f"   Runtime URL should be: https://{space_host}.hf.space")
    else:
        print("ℹ️  SPACE_HOST environment variable not found (running locally?).")

    if space_id:
        print(f"✅ SPACE_ID found: {space_id}")
        print(f"   Repo URL: https://huggingface.co/spaces/{space_id}")
        print(f"   Repo Tree URL: https://huggingface.co/spaces/{space_id}/tree/main")
    else:
        print("ℹ️  SPACE_ID environment variable not found (running locally?).")

    print("-" * (60 + len(" App Starting ")) + "\n")
    print("Launching Gradio Interface for Basic Agent Evaluation...")
    demo.launch(debug=True, share=False)