# app.py  -  complete, runnable version
# -------------------------------------------
import os
import gradio as gr
import requests
import pandas as pd

from agent import agent_executor                  # your LangGraph agent
from langchain_core.messages import HumanMessage  # NEW: needed to build llm_input
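# agent_executor is expected to be a compiled LangGraph graph whose state holds
# a "messages" list and a "task_id" field; both are passed to invoke() below.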


# (Keep Constants as is)
DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"


# ---------------------------------------------------------------------------
#  BasicAgent wrapper: calls the LangGraph executor
# ---------------------------------------------------------------------------
class BasicAgent:
    def __init__(self):
        print("LLM Tool-Enhanced Agent initialized.")

    # now accepts a dict (messages + task_id)
    def __call__(self, llm_input: dict) -> str:
        try:
            result = agent_executor.invoke(llm_input)   # run the LangGraph agent
            answer = result["messages"][-1].content     # final answer is the last message
            return answer.strip()
        except Exception as e:
            print(f"Agent error: {e}")
            return "I don't know."


# ---------------------------------------------------------------------------
#  GAIA runner: fetch questions → run the agent → submit the results
# ---------------------------------------------------------------------------
def run_and_submit_all(profile: gr.OAuthProfile | None):
    """Fetch GAIA questions, run agent, submit answers."""
    space_id = os.getenv("SPACE_ID")

    if profile:
        username = profile.username
        print(f"User logged in: {username}")
    else:
        print("User not logged in.")
        return "Please Login to Hugging Face with the button.", None

    api_url       = DEFAULT_API_URL
    questions_url = f"{api_url}/questions"
    submit_url    = f"{api_url}/submit"

    # Instantiate the agent
    try:
        agent = BasicAgent()
    except Exception as e:
        return f"Error initializing agent: {e}", None

    agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main"
    print(agent_code)

    # Fetch the questions
    try:
        response = requests.get(questions_url, timeout=15)
        response.raise_for_status()
        questions_data = response.json()
        if not questions_data:
            return "Fetched questions list is empty or invalid format.", None
        print(f"Fetched {len(questions_data)} questions.")
    except Exception as e:
        return f"Error fetching questions: {e}", None

    # Run the agent on every question
    results_log, answers_payload = [], []
    for item in questions_data:
        task_id        = item.get("task_id")
        question_text  = item.get("question")
        if not task_id or question_text is None:
            continue

        try:
            llm_input = {
                "messages": [HumanMessage(content=question_text)],
                "task_id": task_id,              # ←  WICHTIG!
            }
            submitted_answer = agent(llm_input)

            answers_payload.append(
                {"task_id": task_id, "submitted_answer": submitted_answer}
            )
            results_log.append(
                {"Task ID": task_id, "Question": question_text,
                 "Submitted Answer": submitted_answer}
            )
        except Exception as e:
            results_log.append(
                {"Task ID": task_id, "Question": question_text,
                 "Submitted Answer": f"AGENT ERROR: {e}"}
            )

    if not answers_payload:
        return "Agent did not produce any answers to submit.", pd.DataFrame(results_log)

    # Submission
    submission_data = {
        "username": username.strip(),
        "agent_code": agent_code,
        "answers": answers_payload,
    }
    try:
        response = requests.post(submit_url, json=submission_data, timeout=60)
        response.raise_for_status()
        result_data = response.json()
        final_status = (
            f"Submission Successful!\n"
            f"User: {result_data.get('username')}\n"
            f"Overall Score: {result_data.get('score', 'N/A')}% "
            f"({result_data.get('correct_count', '?')}/"
            f"{result_data.get('total_attempted', '?')} correct)\n"
            f"Message: {result_data.get('message', 'No message received.')}"
        )
        return final_status, pd.DataFrame(results_log)

    except Exception as e:
        status_message = f"Submission Failed: {e}"
        return status_message, pd.DataFrame(results_log)


# ---------------------------------------------------------------------------
#  Gradio UI (unchanged)
# ---------------------------------------------------------------------------
with gr.Blocks() as demo:
    gr.Markdown("# Basic Agent Evaluation Runner")

    gr.LoginButton()
    run_button = gr.Button("Run Evaluation & Submit All Answers")

    status_output = gr.Textbox(label="Run Status / Submission Result",
                               lines=5, interactive=False)
    results_table = gr.DataFrame(label="Questions and Agent Answers", wrap=True)

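    # No explicit `inputs`: Gradio injects the OAuth profile (or None) automatically
    # because run_and_submit_all declares a gr.OAuthProfile parameter.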
    run_button.click(fn=run_and_submit_all,
                     outputs=[status_output, results_table])

if __name__ == "__main__":
    demo.launch(debug=True, share=False)