ALChad committed on
Commit
12c5008
·
verified ·
1 Parent(s): 79da80d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +179 -146
app.py CHANGED
@@ -1,174 +1,207 @@
1
- # app.py (Final version)
2
-
3
  import os
 
4
  import gradio as gr
5
  import requests
6
  import pandas as pd
7
- import base64
8
- import json
9
- import operator
10
- from typing import Annotated, List, TypedDict
11
-
12
- from dotenv import load_dotenv
13
- from langchain_community.tools.tavily_search import TavilySearchResults
14
- from langchain_core.messages import BaseMessage, HumanMessage, AIMessage
15
- from langchain_core.prompts import ChatPromptTemplate
16
- from langchain_core.tools import tool
17
- from langchain_google_genai import ChatGoogleGenerativeAI
18
- from langgraph.graph import END, StateGraph
19
- from langgraph.prebuilt import ToolNode
20
-
21
- API_BASE_URL = "https://agents-course-unit4-scoring.hf.space"
22
-
23
- class GaiaLangGraphAgent:
24
- def __init__(self):
25
- print("Initializing GaiaLangGraphAgent...")
26
- load_dotenv()
27
-
28
- google_api_key = os.getenv("GEMINI_API_KEY")
29
- if not google_api_key:
30
- raise ValueError("GEMINI_API_KEY secret not found. Please set it in your Hugging Face Space settings.")
31
-
32
- class AgentState(TypedDict):
33
- question: str
34
- intermediate_steps: Annotated[List[BaseMessage], operator.add]
35
- self.AgentState = AgentState
36
-
37
- web_search_tool = TavilySearchResults(
38
- tavily_api_key=os.getenv("TAVILY_API_KEY"),
39
- search_engine="google",
40
- max_results=3,
41
- return_direct=True,
42
- )
43
 
44
- @tool
45
- def calculator(expression: str) -> str:
46
- """Evaluates a simple mathematical expression."""
47
- try:
48
- import numexpr
49
- return str(numexpr.evaluate(expression).item())
50
- except Exception as e: return f"Error: {e}"
51
-
52
- llm_vision = ChatGoogleGenerativeAI(model="gemini-2.5-pro-preview-06-05", google_api_key=google_api_key)
53
-
54
- def get_file_path(file_name: str) -> str:
55
- if not os.path.exists("task_files"): os.makedirs("task_files")
56
- return os.path.join("task_files", file_name)
57
-
58
- @tool
59
- def file_reader(file_name: str) -> str:
60
- """Reads a file, downloading if necessary. Handles text and images."""
61
- local_path = get_file_path(file_name)
62
- if not os.path.exists(local_path):
63
- download_url = f"{API_BASE_URL}/files/{file_name}"
64
- print(f"Downloading: {download_url}")
65
- try:
66
- response = requests.get(download_url); response.raise_for_status()
67
- with open(local_path, "wb") as f: f.write(response.content)
68
- except Exception as e: return f"Error downloading {file_name}: {e}"
69
- try:
70
- if any(file_name.lower().endswith(ext) for ext in ['.png', '.jpg', '.jpeg', '.webp']):
71
- with open(local_path, "rb") as image_file: b64_image = base64.b64encode(image_file.read()).decode('utf-8')
72
- vision_prompt = HumanMessage(content=[
73
- {"type": "text", "text": "Describe this image in detail, focusing on text or identifiable objects."},
74
- {"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{b64_image}"}}
75
- ])
76
- return llm_vision.invoke([vision_prompt]).content
77
- else:
78
- with open(local_path, 'r', encoding='utf-8') as f: return f.read()
79
- except Exception as e: return f"Error processing {file_name}: {e}"
80
-
81
- tools = [web_search_tool, file_reader, calculator]
82
-
83
- llm = ChatGoogleGenerativeAI(model="gemini-1.5-flash-latest", temperature=0, convert_system_message_to_human=True)
84
- llm_with_tools = llm.bind_tools(tools)
85
-
86
- planner_prompt = ChatPromptTemplate.from_messages([
87
- ("system", """You are a world-class AI assistant.
88
- **Principles:** 1. Analyze the question for nuances. 2. Create multi-step plans. 3. Use tools intelligently (search, file read, calculator) or solve logic puzzles directly. 4. Provide exact-match answers.
89
- **Execution:** Loop through plan->act cycles until you have the final answer."""),
90
- ("human", "{question}\n\n{intermediate_steps}"),
91
- ])
92
-
93
- def planner_node(state: AgentState):
94
- print("\n---PLANNER---")
95
- chain = planner_prompt | llm_with_tools
96
- response = chain.invoke(state)
97
- print(f"Planner decision: {'Tool call' if response.tool_calls else 'Final Answer'}")
98
- return {'intermediate_steps': [response]}
99
-
100
- tool_node = ToolNode(tools)
101
-
102
- def should_continue(state: AgentState):
103
- last_message = state['intermediate_steps'][-1]
104
- if isinstance(last_message, AIMessage):
105
- if len(getattr(last_message, "tool_calls", [])) > 0: return "action"
106
- return END
107
-
108
- workflow = StateGraph(AgentState)
109
- workflow.add_node("planner", planner_node)
110
- workflow.add_node("action", tool_node)
111
- workflow.set_entry_point("planner")
112
- workflow.add_conditional_edges("planner", should_continue)
113
- workflow.add_edge("action", "planner")
114
- self.app = workflow.compile()
115
- print("GaiaLangGraphAgent initialized successfully.")
116
 
117
  def __call__(self, question: str) -> str:
118
- print(f"\n>>>>>> AGENT EXECUTING FOR QUESTION: {question[:70]}...")
119
- initial_state = {"question": question, "intermediate_steps": []}
120
- final_state = self.app.invoke(initial_state, config={"recursion_limit": 15})
121
- final_answer = final_state["intermediate_steps"][-1].content
122
- print(f"<<<<<< AGENT FINISHED. FINAL ANSWER: {final_answer}")
123
- return final_answer
124
-
125
- def run_and_submit_all(profile: gr.OAuthProfile | None):
126
- if not profile: return "Please Login to Hugging Face with the button first.", None
127
- space_id = os.getenv("SPACE_ID")
128
- if not space_id: return "CRITICAL ERROR: SPACE_ID not found. Run this from a deployed Hugging Face Space.", None
129
- username = profile.username
130
- print(f"User logged in: {username}")
131
- questions_url = f"{API_BASE_URL}/questions"
132
- submit_url = f"{API_BASE_URL}/submit"
 
 
 
 
 
 
 
 
 
 
 
 
 
133
  try:
134
- agent = GaiaLangGraphAgent()
135
- except Exception as e: return f"Error initializing agent: {e}", None
 
 
 
136
  agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main"
 
 
 
137
  print(f"Fetching questions from: {questions_url}")
138
  try:
139
- response = requests.get(questions_url, timeout=20); response.raise_for_status()
 
140
  questions_data = response.json()
141
- except Exception as e: return f"Error fetching questions: {e}", None
142
- results_log, answers_payload = [], []
143
- print(f"Running agent on {len(questions_data)} questions. This may take several minutes...")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
144
  for item in questions_data:
145
- task_id, question_text = item.get("task_id"), item.get("question")
 
 
 
 
146
  try:
147
  submitted_answer = agent(question_text)
148
  answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
149
  results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
150
  except Exception as e:
 
151
  results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": f"AGENT ERROR: {e}"})
152
- submission_data = {"username": username, "agent_code": agent_code, "answers": answers_payload}
153
- print(f"Submitting {len(answers_payload)} answers...")
 
 
 
 
 
 
 
 
 
 
154
  try:
155
- response = requests.post(submit_url, json=submission_data, timeout=60); response.raise_for_status()
 
156
  result_data = response.json()
157
- final_status = (f"Submission Successful!\nUser: {result_data.get('username')}\n"
158
- f"Score: {result_data.get('score', 'N/A')}% "
159
- f"({result_data.get('correct_count', '?')}/{result_data.get('total_attempted', '?')} correct)")
160
- return final_status, pd.DataFrame(results_log)
161
- except Exception as e: return f"Submission Failed: {e}", pd.DataFrame(results_log)
162
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
163
  with gr.Blocks() as demo:
164
- gr.Markdown("# GAIA - Advanced Agent Runner")
165
- gr.Markdown("Log in and click 'Run' to evaluate the agent.")
 
 
 
 
 
 
 
 
 
 
 
 
166
  gr.LoginButton()
 
167
  run_button = gr.Button("Run Evaluation & Submit All Answers")
 
168
  status_output = gr.Textbox(label="Run Status / Submission Result", lines=5, interactive=False)
 
169
  results_table = gr.DataFrame(label="Questions and Agent Answers", wrap=True)
170
- run_button.click(fn=run_and_submit_all, outputs=[status_output, results_table])
 
 
 
 
171
 
172
  if __name__ == "__main__":
173
- print("Launching Gradio Interface...")
174
- demo.launch()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """ Basic Agent Evaluation Runner"""
 
2
  import os
3
+ import inspect
4
  import gradio as gr
5
  import requests
6
  import pandas as pd
7
+ from langchain_core.messages import HumanMessage
8
+ from agent import build_graph
9
+
10
+
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
11
 
12
+ # (Keep Constants as is)
13
+ # --- Constants ---
14
+ DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
15
+
16
+ # --- Basic Agent Definition ---
17
+ # ----- THIS IS WHERE YOU CAN BUILD WHAT YOU WANT ------
18
+
19
+
20
class BasicAgent:
    """A langgraph agent: wraps a compiled graph built by `agent.build_graph`."""

    def __init__(self):
        print("BasicAgent initialized.")
        # Build the langgraph workflow once; it is reused for every question.
        self.graph = build_graph()

    def __call__(self, question: str) -> str:
        """Run the graph on a single question and return the final answer text.

        Args:
            question: The raw question text to answer.

        Returns:
            The content of the last message produced by the graph, with the
            model's answer prefix stripped when present.
        """
        print(f"Agent received question (first 50 chars): {question[:50]}...")
        # Wrap the question in a HumanMessage from langchain_core.
        messages = [HumanMessage(content=question)]
        result = self.graph.invoke({"messages": messages})
        answer = result["messages"][-1].content
        # The graph's prompt presumably makes the model reply as
        # "FINAL ANSWER: <answer>" (14 characters of prefix) — TODO confirm
        # against agent.build_graph. Strip the prefix only when it is actually
        # there, instead of the original `answer[14:]`, which silently
        # mangled any answer that did not carry the prefix.
        return answer.removeprefix("FINAL ANSWER: ")
33
+
34
+
35
def run_and_submit_all(profile: gr.OAuthProfile | None):
    """
    Fetch all questions, run the BasicAgent on them, submit all answers,
    and display the results.

    Args:
        profile: OAuth profile of the logged-in Hugging Face user, or None
            when nobody is logged in (Gradio injects this automatically).

    Returns:
        A (status_message, results) tuple: status_message is a human-readable
        summary string; results is a pandas DataFrame of per-question answers,
        or None when the run aborts before any question is attempted.
    """
    # --- Determine HF Space Runtime URL and Repo URL ---
    space_id = os.getenv("SPACE_ID")  # Used to build a public link to the code

    if profile:
        username = f"{profile.username}"
        print(f"User logged in: {username}")
    else:
        print("User not logged in.")
        return "Please Login to Hugging Face with the button.", None

    api_url = DEFAULT_API_URL
    questions_url = f"{api_url}/questions"
    submit_url = f"{api_url}/submit"

    # 1. Instantiate Agent (modify this part to create your agent)
    try:
        agent = BasicAgent()
    except Exception as e:
        print(f"Error instantiating agent: {e}")
        return f"Error initializing agent: {e}", None
    # When running as a Hugging Face Space, this link points at the codebase
    # (useful for others, so please keep it public).
    agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main"
    print(agent_code)

    # 2. Fetch Questions
    print(f"Fetching questions from: {questions_url}")
    try:
        response = requests.get(questions_url, timeout=15)
        response.raise_for_status()
        questions_data = response.json()
        if not questions_data:
            print("Fetched questions list is empty.")
            return "Fetched questions list is empty or invalid format.", None
        print(f"Fetched {len(questions_data)} questions.")
    # NOTE: requests' JSONDecodeError is a subclass of RequestException, so it
    # must be caught FIRST — the original ordering made this handler
    # unreachable dead code.
    except requests.exceptions.JSONDecodeError as e:
        print(f"Error decoding JSON response from questions endpoint: {e}")
        print(f"Response text: {response.text[:500]}")
        return f"Error decoding server response for questions: {e}", None
    except requests.exceptions.RequestException as e:
        print(f"Error fetching questions: {e}")
        return f"Error fetching questions: {e}", None
    except Exception as e:
        print(f"An unexpected error occurred fetching questions: {e}")
        return f"An unexpected error occurred fetching questions: {e}", None

    # 3. Run your Agent
    results_log = []
    answers_payload = []
    print(f"Running agent on {len(questions_data)} questions...")
    for item in questions_data:
        task_id = item.get("task_id")
        question_text = item.get("question")
        if not task_id or question_text is None:
            print(f"Skipping item with missing task_id or question: {item}")
            continue
        try:
            submitted_answer = agent(question_text)
            answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
            results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
        except Exception as e:
            # Keep going on per-question failures; record the error in the log.
            print(f"Error running agent on task {task_id}: {e}")
            results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": f"AGENT ERROR: {e}"})

    if not answers_payload:
        print("Agent did not produce any answers to submit.")
        return "Agent did not produce any answers to submit.", pd.DataFrame(results_log)

    # 4. Prepare Submission
    submission_data = {"username": username.strip(), "agent_code": agent_code, "answers": answers_payload}
    status_update = f"Agent finished. Submitting {len(answers_payload)} answers for user '{username}'..."
    print(status_update)

    # 5. Submit
    print(f"Submitting {len(answers_payload)} answers to: {submit_url}")
    try:
        response = requests.post(submit_url, json=submission_data, timeout=60)
        response.raise_for_status()
        result_data = response.json()
        final_status = (
            f"Submission Successful!\n"
            f"User: {result_data.get('username')}\n"
            f"Overall Score: {result_data.get('score', 'N/A')}% "
            f"({result_data.get('correct_count', '?')}/{result_data.get('total_attempted', '?')} correct)\n"
            f"Message: {result_data.get('message', 'No message received.')}"
        )
        print("Submission successful.")
        return final_status, pd.DataFrame(results_log)
    except requests.exceptions.HTTPError as e:
        # Surface whatever detail the server included in the error body.
        error_detail = f"Server responded with status {e.response.status_code}."
        try:
            error_json = e.response.json()
            error_detail += f" Detail: {error_json.get('detail', e.response.text)}"
        except requests.exceptions.JSONDecodeError:
            error_detail += f" Response: {e.response.text[:500]}"
        status_message = f"Submission Failed: {error_detail}"
        print(status_message)
        return status_message, pd.DataFrame(results_log)
    except requests.exceptions.Timeout:
        status_message = "Submission Failed: The request timed out."
        print(status_message)
        return status_message, pd.DataFrame(results_log)
    except requests.exceptions.RequestException as e:
        status_message = f"Submission Failed: Network error - {e}"
        print(status_message)
        return status_message, pd.DataFrame(results_log)
    except Exception as e:
        status_message = f"An unexpected error occurred during submission: {e}"
        print(status_message)
        return status_message, pd.DataFrame(results_log)
154
+
155
+
156
# --- Build Gradio Interface using Blocks ---
with gr.Blocks() as demo:
    gr.Markdown("# Basic Agent Evaluation Runner")
    gr.Markdown(
        """
        **Instructions:**
        1. Please clone this space, then modify the code to define your agent's logic, the tools, the necessary packages, etc ...
        2. Log in to your Hugging Face account using the button below. This uses your HF username for submission.
        3. Click 'Run Evaluation & Submit All Answers' to fetch questions, run your agent, submit answers, and see the score.
        ---
        **Disclaimers:**
        Once you click the "Run Evaluation & Submit All Answers" button, it can take quite some time (this is the time for the agent to go through all the questions).
        This space provides a basic setup and is intentionally sub-optimal to encourage you to develop your own, more robust solution. For instance, for the slow submit button, a solution could be to cache the answers and submit in a separate action, or even to answer the questions asynchronously.
        """
    )

    gr.LoginButton()

    run_button = gr.Button("Run Evaluation & Submit All Answers")

    status_output = gr.Textbox(label="Run Status / Submission Result", lines=5, interactive=False)
    # Removed max_rows=10 from DataFrame constructor
    results_table = gr.DataFrame(label="Questions and Agent Answers", wrap=True)

    # No inputs declared: Gradio injects the OAuth profile into
    # run_and_submit_all automatically because of its parameter type.
    run_button.click(
        fn=run_and_submit_all,
        outputs=[status_output, results_table]
    )
184
 
185
if __name__ == "__main__":
    # Startup banner plus an informational dump of the Space-related
    # environment variables, so the logs show where the app is running.
    print("\n" + "-" * 30 + " App Starting " + "-" * 30)

    host = os.getenv("SPACE_HOST")
    repo = os.getenv("SPACE_ID")  # Get SPACE_ID at startup

    if host:
        print(f"✅ SPACE_HOST found: {host}")
        print(f"   Runtime URL should be: https://{host}.hf.space")
    else:
        print("ℹ️  SPACE_HOST environment variable not found (running locally?).")

    if repo:
        # Print repo URLs when SPACE_ID is available.
        print(f"✅ SPACE_ID found: {repo}")
        print(f"   Repo URL: https://huggingface.co/spaces/{repo}")
        print(f"   Repo Tree URL: https://huggingface.co/spaces/{repo}/tree/main")
    else:
        print("ℹ️  SPACE_ID environment variable not found (running locally?). Repo URL cannot be determined.")

    # Closing rule matches the width of the opening banner.
    print("-" * (60 + len(" App Starting ")) + "\n")

    print("Launching Gradio Interface for Basic Agent Evaluation...")
    demo.launch(debug=True, share=False)