import ast
import importlib.metadata
import operator
import os
from typing import TypedDict, Annotated

import gradio as gr
import pandas as pd
import requests

from langchain_openai import ChatOpenAI
from langgraph.graph import StateGraph, START, END
from tools import web_search, parse_excel, ocr_image

try:
    lg_ver = importlib.metadata.version("langgraph")
    print("▶︎ LangGraph version:", lg_ver)
except importlib.metadata.PackageNotFoundError:
    print("LangGraph is not installed.")

# (Keep Constants as is)
# --- Constants ---
DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"

# --- Basic Agent Definition ---
# ----- THIS IS WHERE YOU CAN BUILD WHAT YOU WANT ------

class AgentState(TypedDict):
    # We store the full chat history as a list of strings. The operator.add
    # reducer appends each node's returned messages to the existing list
    # (add_messages would coerce these strings into message objects instead).
    messages: Annotated[list[str], operator.add]
    # If the agent requests a tool, it will fill in:
    tool_request: dict | None
    # Whenever a tool runs, its result goes here:
    tool_result: str | None
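
# For orientation, a state snapshot mid-run might look like this
# (hypothetical values):
#   {"messages": ["USER: ...", "ASSISTANT: ..."],
#    "tool_request": {"tool": "web_search", "query": "..."},
#    "tool_result": None}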

# 2) Wrap ChatOpenAI in a node function whose signature is (state) -> new_state
#    (LangGraph nodes receive the state; the user input arrives inside it)
llm = ChatOpenAI(model="gpt-4.1-mini")

def agent_node(state: AgentState) -> AgentState:
    """
    This function replaces raw ChatOpenAI. A LangGraph node receives the
    current state and returns a state update.
    """
    # 2.a) Grab the chat history (the latest USER message is already in it;
    #      respond_to_input seeds the initial state with it)
    chat_history = state.get("messages", [])
    # 2.b) Ask the LLM for a response, passing the history as one prompt
    llm_output = llm.invoke("\n".join(chat_history)).content

    # 2.c) Check if the LLM output is a valid Python dict literal indicating a
    #      tool call. If it is, parse it and stash it in state["tool_request"].
    #      ast.literal_eval only parses literals; eval() would execute
    #      arbitrary model output, which is a code-injection risk.
    tool_req = None
    try:
        parsed = ast.literal_eval(llm_output)
        if isinstance(parsed, dict) and parsed.get("tool"):
            tool_req = parsed
    except (ValueError, SyntaxError):
        tool_req = None

    # 2.d) Return only the new assistant message; the operator.add reducer
    #      appends it to the existing history.
    return {
        "messages": [f"ASSISTANT: {llm_output}"],
        "tool_request": tool_req,
        "tool_result": None  # will be filled by the tool node if invoked
    }
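
# The tool-call convention above is informal: the agent simply looks for a bare
# Python dict literal in the model's reply. For example, a reply of
#   {"tool": "web_search", "query": "capital of Australia"}
# (hypothetical query) parses into a dict with a "tool" key and routes execution
# to the tools node; any other reply is treated as a final answer.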

# 3) Map tool names to the imported tool callables. LangGraph's prebuilt
#    ToolNode expects an AIMessage carrying tool_calls in a messages-based
#    state, which doesn't match our string-based state, so we dispatch to the
#    tool functions directly. (This assumes web_search, parse_excel, and
#    ocr_image are plain callables; if they are LangChain @tool objects,
#    call tool.invoke(tool_args) instead.)
AVAILABLE_TOOLS = {
    "web_search": web_search,
    "parse_excel": parse_excel,
    "ocr_image": ocr_image,
}

def tool_node(state: AgentState) -> AgentState:
    """
    The graph only routes here when state["tool_request"] is a dict like
      {"tool": "...", "path": "...", ...}
    Look up the requested tool, run it, and store the result.
    """
    tool_request = state["tool_request"]
    tool_name = tool_request["tool"]
    # 3.a) Every key other than "tool" is passed as a keyword argument:
    tool_args = {k: v for k, v in tool_request.items() if k != "tool"}
    result_text = AVAILABLE_TOOLS[tool_name](**tool_args)

    # 3.b) Record the tool's output in the history and clear tool_request
    #      so we don't immediately loop back into this node.
    return {
        "messages": [f"TOOL ({tool_name}): {result_text}"],
        "tool_request": None,
        "tool_result": result_text
    }
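
# Quick direct check of the dispatch logic, independent of the graph
# (hypothetical arguments; uncomment to try locally):
# print(tool_node({"messages": [], "tool_result": None,
#                  "tool_request": {"tool": "web_search", "query": "hello"}}))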

# 4) Build and register nodes exactly as in the tutorial
graph = StateGraph(AgentState)
graph.add_node("agent", agent_node)
graph.add_node("tools", tool_node)

# 5) Simple START → “agent” edge (no third argument needed)
graph.add_edge(START, "agent")

# 6) Simple “tools” → “agent” edge (again, no third argument)
graph.add_edge("tools", "agent")

# 7) Conditional branching out of “agent,” exactly like the tutorial
def route_agent(state: AgentState) -> str:
    """
    LangGraph passes the routing function the state as updated by agent_node:
    - state["tool_request"] is either a dict (if a tool was requested) or None.

    route_agent inspects that state and decides:
      • If state["tool_request"] is not None, go to "tools".
      • Otherwise, terminate (go to END).
    """
    if state.get("tool_request") is not None:
        return "tools"
    return "final"

graph.add_conditional_edges(
    "agent",       # source
    route_agent,   # routing function (signature: (state) → str key)
    {
        "tools": "tools",  # if route_agent(...) == "tools", transition to node "tools"
        "final": END       # if route_agent(...) == "final", stop execution
    }
)
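
# The resulting control flow, sketched:
#   START -> agent --(tool_request set)--> tools -> agent -> ...
#                 \--(no tool_request)---> END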

# 8) Compile the graph (now compiled_graph.invoke(...) will work)
compiled_graph = graph.compile()

# 9) Define respond_to_input so that Gradio (and the Hugging Face submission) can call it
def respond_to_input(user_input: str) -> str:
    # Seed the state with the user's question. A compiled graph is invoked
    # with a single state dict; its optional second argument is a config
    # object, not the user input.
    initial_state: AgentState = {
        "messages": [f"USER: {user_input}"],
        "tool_request": None,
        "tool_result": None
    }
    final_state = compiled_graph.invoke(initial_state)
    # Reaching END means the agent made no further tool calls and finished
    # reasoning. Return the last assistant message from state["messages"]:
    return final_state["messages"][-1].replace("ASSISTANT: ", "")
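
# Local smoke test (assumes OPENAI_API_KEY is set; hypothetical question):
#   print(respond_to_input("In what year did the first moon landing take place?"))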


class BasicAgent:
    def __init__(self):
        print("BasicAgent initialized.")

    def __call__(self, question: str) -> str:
        return respond_to_input(question)


def run_and_submit_all( profile: gr.OAuthProfile | None):
    """
    Fetches all questions, runs the BasicAgent on them, submits all answers,
    and displays the results.
    """
    # --- Determine HF Space Runtime URL and Repo URL ---
    space_id = os.getenv("SPACE_ID") # Get the SPACE_ID for sending link to the code

    if profile:
        username = f"{profile.username}"
        print(f"User logged in: {username}")
    else:
        print("User not logged in.")
        return "Please Login to Hugging Face with the button.", None

    api_url = DEFAULT_API_URL
    questions_url = f"{api_url}/questions"
    submit_url = f"{api_url}/submit"

    # 1. Instantiate Agent (modify this part to create your agent)
    try:
        agent = BasicAgent()
    except Exception as e:
        print(f"Error instantiating agent: {e}")
        return f"Error initializing agent: {e}", None
    # When the app runs as a Hugging Face Space, this link points to your codebase (useful for others, so please keep it public)
    agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main"
    print(agent_code)

    # 2. Fetch Questions
    print(f"Fetching questions from: {questions_url}")
    try:
        response = requests.get(questions_url, timeout=15)
        response.raise_for_status()
        questions_data = response.json()
        if not questions_data:
             print("Fetched questions list is empty.")
             return "Fetched questions list is empty or invalid format.", None
        print(f"Fetched {len(questions_data)} questions.")
    except requests.exceptions.RequestException as e:
        print(f"Error fetching questions: {e}")
        return f"Error fetching questions: {e}", None
    except requests.exceptions.JSONDecodeError as e:
         print(f"Error decoding JSON response from questions endpoint: {e}")
         print(f"Response text: {response.text[:500]}")
         return f"Error decoding server response for questions: {e}", None
    except Exception as e:
        print(f"An unexpected error occurred fetching questions: {e}")
        return f"An unexpected error occurred fetching questions: {e}", None

    # 3. Run your Agent
    results_log = []
    answers_payload = []
    print(f"Running agent on {len(questions_data)} questions...")
    for item in questions_data:
        task_id = item.get("task_id")
        question_text = item.get("question")
        if not task_id or question_text is None:
            print(f"Skipping item with missing task_id or question: {item}")
            continue
        try:
            submitted_answer = agent(question_text)
            answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
            results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
        except Exception as e:
             print(f"Error running agent on task {task_id}: {e}")
             results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": f"AGENT ERROR: {e}"})

    if not answers_payload:
        print("Agent did not produce any answers to submit.")
        return "Agent did not produce any answers to submit.", pd.DataFrame(results_log)

    # 4. Prepare Submission 
    submission_data = {"username": username.strip(), "agent_code": agent_code, "answers": answers_payload}
    status_update = f"Agent finished. Submitting {len(answers_payload)} answers for user '{username}'..."
    print(status_update)
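
    # The payload built above has the shape:
    #   {"username": "...",
    #    "agent_code": "https://huggingface.co/spaces/<space_id>/tree/main",
    #    "answers": [{"task_id": "...", "submitted_answer": "..."}, ...]}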

    # 5. Submit
    print(f"Submitting {len(answers_payload)} answers to: {submit_url}")
    try:
        response = requests.post(submit_url, json=submission_data, timeout=60)
        response.raise_for_status()
        result_data = response.json()
        final_status = (
            f"Submission Successful!\n"
            f"User: {result_data.get('username')}\n"
            f"Overall Score: {result_data.get('score', 'N/A')}% "
            f"({result_data.get('correct_count', '?')}/{result_data.get('total_attempted', '?')} correct)\n"
            f"Message: {result_data.get('message', 'No message received.')}"
        )
        print("Submission successful.")
        results_df = pd.DataFrame(results_log)
        return final_status, results_df
    except requests.exceptions.HTTPError as e:
        error_detail = f"Server responded with status {e.response.status_code}."
        try:
            error_json = e.response.json()
            error_detail += f" Detail: {error_json.get('detail', e.response.text)}"
        except requests.exceptions.JSONDecodeError:
            error_detail += f" Response: {e.response.text[:500]}"
        status_message = f"Submission Failed: {error_detail}"
        print(status_message)
        results_df = pd.DataFrame(results_log)
        return status_message, results_df
    except requests.exceptions.Timeout:
        status_message = "Submission Failed: The request timed out."
        print(status_message)
        results_df = pd.DataFrame(results_log)
        return status_message, results_df
    except requests.exceptions.RequestException as e:
        status_message = f"Submission Failed: Network error - {e}"
        print(status_message)
        results_df = pd.DataFrame(results_log)
        return status_message, results_df
    except Exception as e:
        status_message = f"An unexpected error occurred during submission: {e}"
        print(status_message)
        results_df = pd.DataFrame(results_log)
        return status_message, results_df


# --- Build Gradio Interface using Blocks ---
with gr.Blocks() as demo:
    gr.Markdown("# Basic Agent Evaluation Runner")
    gr.Markdown(
        """
        **Instructions:**

        1.  Please clone this space, then modify the code to define your agent's logic, the tools, the necessary packages, etc.
        2.  Log in to your Hugging Face account using the button below. This uses your HF username for submission.
        3.  Click 'Run Evaluation & Submit All Answers' to fetch questions, run your agent, submit answers, and see the score.

        ---
        **Disclaimers:**
        Once you click the submit button, it can take quite some time (this is the time the agent needs to work through all the questions).
        This space provides a basic setup and is intentionally sub-optimal to encourage you to develop your own, more robust solution. For instance, to avoid the long wait on submit, you could cache the answers and submit them in a separate action, or even answer the questions asynchronously.
        """
    )

    gr.LoginButton()

    run_button = gr.Button("Run Evaluation & Submit All Answers")

    status_output = gr.Textbox(label="Run Status / Submission Result", lines=5, interactive=False)
    # Removed max_rows=10 from DataFrame constructor
    results_table = gr.DataFrame(label="Questions and Agent Answers", wrap=True)

    run_button.click(
        fn=run_and_submit_all,
        outputs=[status_output, results_table]
    )

if __name__ == "__main__":
    # print("LangGraph version:", langgraph.__version__) 
    print("\n" + "-"*30 + " App Starting " + "-"*30)
    # Check for SPACE_HOST and SPACE_ID at startup for information
    space_host_startup = os.getenv("SPACE_HOST")
    space_id_startup = os.getenv("SPACE_ID") # Get SPACE_ID at startup
    if space_host_startup:
        print(f"✅ SPACE_HOST found: {space_host_startup}")
        print(f"   Runtime URL should be: https://{space_host_startup}.hf.space")
    else:
        print("ℹ️  SPACE_HOST environment variable not found (running locally?).")

    if space_id_startup: # Print repo URLs if SPACE_ID is found
        print(f"✅ SPACE_ID found: {space_id_startup}")
        print(f"   Repo URL: https://huggingface.co/spaces/{space_id_startup}")
        print(f"   Repo Tree URL: https://huggingface.co/spaces/{space_id_startup}/tree/main")
    else:
        print("ℹ️  SPACE_ID environment variable not found (running locally?). Repo URL cannot be determined.")

    print("-"*(60 + len(" App Starting ")) + "\n")

    print("Launching Gradio Interface for Basic Agent Evaluation...")
    demo.launch(debug=True, share=False)