ALChad committed
Commit 47a60a5 · verified · 1 Parent(s): 81917a3

Update app.py

Files changed (1)
  1. app.py +138 -169
app.py CHANGED
@@ -1,196 +1,165 @@
  import os
  import gradio as gr
  import requests
- import inspect
  import pandas as pd
-
- # (Keep Constants as is)
- # --- Constants ---
- DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
-
- # --- Basic Agent Definition ---
- # ----- THIS IS WERE YOU CAN BUILD WHAT YOU WANT ------
- class BasicAgent:
      def __init__(self):
-         print("BasicAgent initialized.")
      def __call__(self, question: str) -> str:
-         print(f"Agent received question (first 50 chars): {question[:50]}...")
-         fixed_answer = "This is a default answer."
-         print(f"Agent returning fixed answer: {fixed_answer}")
-         return fixed_answer
-
- def run_and_submit_all( profile: gr.OAuthProfile | None):
-     """
-     Fetches all questions, runs the BasicAgent on them, submits all answers,
-     and displays the results.
-     """
-     # --- Determine HF Space Runtime URL and Repo URL ---
-     space_id = os.getenv("SPACE_ID") # Get the SPACE_ID for sending link to the code
-
-     if profile:
-         username= f"{profile.username}"
-         print(f"User logged in: {username}")
-     else:
-         print("User not logged in.")
-         return "Please Login to Hugging Face with the button.", None
-
-     api_url = DEFAULT_API_URL
-     questions_url = f"{api_url}/questions"
-     submit_url = f"{api_url}/submit"
-
-     # 1. Instantiate Agent ( modify this part to create your agent)
      try:
-         agent = BasicAgent()
-     except Exception as e:
-         print(f"Error instantiating agent: {e}")
-         return f"Error initializing agent: {e}", None
-     # In the case of an app running as a hugging Face space, this link points toward your codebase ( usefull for others so please keep it public)
      agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main"
-     print(agent_code)
-
-     # 2. Fetch Questions
      print(f"Fetching questions from: {questions_url}")
      try:
-         response = requests.get(questions_url, timeout=15)
-         response.raise_for_status()
          questions_data = response.json()
-         if not questions_data:
-             print("Fetched questions list is empty.")
-             return "Fetched questions list is empty or invalid format.", None
-         print(f"Fetched {len(questions_data)} questions.")
-     except requests.exceptions.RequestException as e:
-         print(f"Error fetching questions: {e}")
-         return f"Error fetching questions: {e}", None
-     except requests.exceptions.JSONDecodeError as e:
-         print(f"Error decoding JSON response from questions endpoint: {e}")
-         print(f"Response text: {response.text[:500]}")
-         return f"Error decoding server response for questions: {e}", None
-     except Exception as e:
-         print(f"An unexpected error occurred fetching questions: {e}")
-         return f"An unexpected error occurred fetching questions: {e}", None
-
-     # 3. Run your Agent
-     results_log = []
-     answers_payload = []
-     print(f"Running agent on {len(questions_data)} questions...")
      for item in questions_data:
-         task_id = item.get("task_id")
-         question_text = item.get("question")
-         if not task_id or question_text is None:
-             print(f"Skipping item with missing task_id or question: {item}")
-             continue
          try:
              submitted_answer = agent(question_text)
              answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
              results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
          except Exception as e:
-             print(f"Error running agent on task {task_id}: {e}")
              results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": f"AGENT ERROR: {e}"})
-
-     if not answers_payload:
-         print("Agent did not produce any answers to submit.")
-         return "Agent did not produce any answers to submit.", pd.DataFrame(results_log)
-
-     # 4. Prepare Submission
-     submission_data = {"username": username.strip(), "agent_code": agent_code, "answers": answers_payload}
-     status_update = f"Agent finished. Submitting {len(answers_payload)} answers for user '{username}'..."
-     print(status_update)
-
-     # 5. Submit
-     print(f"Submitting {len(answers_payload)} answers to: {submit_url}")
      try:
-         response = requests.post(submit_url, json=submission_data, timeout=60)
-         response.raise_for_status()
          result_data = response.json()
-         final_status = (
-             f"Submission Successful!\n"
-             f"User: {result_data.get('username')}\n"
-             f"Overall Score: {result_data.get('score', 'N/A')}% "
-             f"({result_data.get('correct_count', '?')}/{result_data.get('total_attempted', '?')} correct)\n"
-             f"Message: {result_data.get('message', 'No message received.')}"
-         )
-         print("Submission successful.")
-         results_df = pd.DataFrame(results_log)
-         return final_status, results_df
-     except requests.exceptions.HTTPError as e:
-         error_detail = f"Server responded with status {e.response.status_code}."
-         try:
-             error_json = e.response.json()
-             error_detail += f" Detail: {error_json.get('detail', e.response.text)}"
-         except requests.exceptions.JSONDecodeError:
-             error_detail += f" Response: {e.response.text[:500]}"
-         status_message = f"Submission Failed: {error_detail}"
-         print(status_message)
-         results_df = pd.DataFrame(results_log)
-         return status_message, results_df
-     except requests.exceptions.Timeout:
-         status_message = "Submission Failed: The request timed out."
-         print(status_message)
-         results_df = pd.DataFrame(results_log)
-         return status_message, results_df
-     except requests.exceptions.RequestException as e:
-         status_message = f"Submission Failed: Network error - {e}"
-         print(status_message)
-         results_df = pd.DataFrame(results_log)
-         return status_message, results_df
-     except Exception as e:
-         status_message = f"An unexpected error occurred during submission: {e}"
-         print(status_message)
-         results_df = pd.DataFrame(results_log)
-         return status_message, results_df
-
-
- # --- Build Gradio Interface using Blocks ---
- with gr.Blocks() as demo:
-     gr.Markdown("# Basic Agent Evaluation Runner")
-     gr.Markdown(
-         """
-         **Instructions:**
-
-         1. Please clone this space, then modify the code to define your agent's logic, the tools, the necessary packages, etc ...
-         2. Log in to your Hugging Face account using the button below. This uses your HF username for submission.
-         3. Click 'Run Evaluation & Submit All Answers' to fetch questions, run your agent, submit answers, and see the score.
-
-         ---
-         **Disclaimers:**
-         Once clicking on the "submit button, it can take quite some time ( this is the time for the agent to go through all the questions).
-         This space provides a basic setup and is intentionally sub-optimal to encourage you to develop your own, more robust solution. For instance for the delay process of the submit button, a solution could be to cache the answers and submit in a seperate action or even to answer the questions in async.
-         """
-     )

      gr.LoginButton()
-
      run_button = gr.Button("Run Evaluation & Submit All Answers")
-
      status_output = gr.Textbox(label="Run Status / Submission Result", lines=5, interactive=False)
-     # Removed max_rows=10 from DataFrame constructor
      results_table = gr.DataFrame(label="Questions and Agent Answers", wrap=True)
-
-     run_button.click(
-         fn=run_and_submit_all,
-         outputs=[status_output, results_table]
-     )

  if __name__ == "__main__":
-     print("\n" + "-"*30 + " App Starting " + "-"*30)
-     # Check for SPACE_HOST and SPACE_ID at startup for information
-     space_host_startup = os.getenv("SPACE_HOST")
-     space_id_startup = os.getenv("SPACE_ID") # Get SPACE_ID at startup
-
-     if space_host_startup:
-         print(f"✅ SPACE_HOST found: {space_host_startup}")
-         print(f"   Runtime URL should be: https://{space_host_startup}.hf.space")
-     else:
-         print("ℹ️ SPACE_HOST environment variable not found (running locally?).")
-
-     if space_id_startup: # Print repo URLs if SPACE_ID is found
-         print(f"✅ SPACE_ID found: {space_id_startup}")
-         print(f"   Repo URL: https://huggingface.co/spaces/{space_id_startup}")
-         print(f"   Repo Tree URL: https://huggingface.co/spaces/{space_id_startup}/tree/main")
-     else:
-         print("ℹ️ SPACE_ID environment variable not found (running locally?). Repo URL cannot be determined.")
-
-     print("-"*(60 + len(" App Starting ")) + "\n")
-
-     print("Launching Gradio Interface for Basic Agent Evaluation...")
-     demo.launch(debug=True, share=False)
+ # app.py (Final version)
+
  import os
  import gradio as gr
  import requests
  import pandas as pd
+ import base64
+ import json
+ import operator
+ from typing import Annotated, List, TypedDict
+
+ from dotenv import load_dotenv
+ from langchain_community.tools.tavily_search import TavilySearchResults
+ from langchain_core.messages import BaseMessage, HumanMessage, AIMessage
+ from langchain_core.prompts import ChatPromptTemplate
+ from langchain_core.tools import tool
+ from langchain_google_genai import ChatGoogleGenerativeAI
+ from langgraph.graph import END, StateGraph
+ from langgraph.prebuilt import ToolNode
+
+ API_BASE_URL = "https://agents-course-unit4-scoring.hf.space"
+
+ class GaiaLangGraphAgent:
      def __init__(self):
+         print("Initializing GaiaLangGraphAgent...")
+         load_dotenv()
+
+         class AgentState(TypedDict):
+             question: str
+             intermediate_steps: Annotated[List[BaseMessage], operator.add]
+         self.AgentState = AgentState
+
+         web_search_tool = TavilySearchResults(max_results=4)
+
+         @tool
+         def calculator(expression: str) -> str:
+             """Evaluates a simple mathematical expression."""
+             try:
+                 import numexpr
+                 return str(numexpr.evaluate(expression).item())
+             except Exception as e: return f"Error: {e}"
+
+         llm_vision = ChatGoogleGenerativeAI(model="gemini-1.5-pro-latest")
+
+         def get_file_path(file_name: str) -> str:
+             if not os.path.exists("task_files"): os.makedirs("task_files")
+             return os.path.join("task_files", file_name)
+
+         @tool
+         def file_reader(file_name: str) -> str:
+             """Reads a file, downloading if necessary. Handles text and images."""
+             local_path = get_file_path(file_name)
+             if not os.path.exists(local_path):
+                 download_url = f"{API_BASE_URL}/files/{file_name}"
+                 print(f"Downloading: {download_url}")
+                 try:
+                     response = requests.get(download_url); response.raise_for_status()
+                     with open(local_path, "wb") as f: f.write(response.content)
+                 except Exception as e: return f"Error downloading {file_name}: {e}"
+             try:
+                 if any(file_name.lower().endswith(ext) for ext in ['.png', '.jpg', '.jpeg', '.webp']):
+                     with open(local_path, "rb") as image_file: b64_image = base64.b64encode(image_file.read()).decode('utf-8')
+                     vision_prompt = HumanMessage(content=[
+                         {"type": "text", "text": "Describe this image in detail, focusing on text or identifiable objects."},
+                         {"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{b64_image}"}}
+                     ])
+                     return llm_vision.invoke([vision_prompt]).content
+                 else:
+                     with open(local_path, 'r', encoding='utf-8') as f: return f.read()
+             except Exception as e: return f"Error processing {file_name}: {e}"
+
+         tools = [web_search_tool, file_reader, calculator]
+
+         llm = ChatGoogleGenerativeAI(model="gemini-1.5-flash-latest", temperature=0, convert_system_message_to_human=True)
+         llm_with_tools = llm.bind_tools(tools)
+
+         planner_prompt = ChatPromptTemplate.from_messages([
+             ("system", """You are a world-class AI assistant.
+ **Principles:** 1. Analyze the question for nuances. 2. Create multi-step plans. 3. Use tools intelligently (search, file read, calculator) or solve logic puzzles directly. 4. Provide exact-match answers.
+ **Execution:** Loop through plan->act cycles until you have the final answer."""),
+             ("human", "{question}\n\n{intermediate_steps}"),
+         ])
+
+         def planner_node(state: AgentState):
+             print("\n---PLANNER---")
+             chain = planner_prompt | llm_with_tools
+             response = chain.invoke(state)
+             print(f"Planner decision: {'Tool call' if response.tool_calls else 'Final Answer'}")
+             return {'intermediate_steps': [response]}
+
+         tool_node = ToolNode(tools)
+
+         def should_continue(state: AgentState):
+             last_message = state['intermediate_steps'][-1]
+             if isinstance(last_message, AIMessage):
+                 if len(getattr(last_message, "tool_calls", [])) > 0: return "action"
+             return END
+
+         workflow = StateGraph(AgentState)
+         workflow.add_node("planner", planner_node)
+         workflow.add_node("action", tool_node)
+         workflow.set_entry_point("planner")
+         workflow.add_conditional_edges("planner", should_continue)
+         workflow.add_edge("action", "planner")
+         self.app = workflow.compile()
+         print("GaiaLangGraphAgent initialized successfully.")
+
      def __call__(self, question: str) -> str:
+         print(f"\n>>>>>> AGENT EXECUTING FOR QUESTION: {question[:70]}...")
+         initial_state = {"question": question, "intermediate_steps": []}
+         final_state = self.app.invoke(initial_state, config={"recursion_limit": 15})
+         final_answer = final_state["intermediate_steps"][-1].content
+         print(f"<<<<<< AGENT FINISHED. FINAL ANSWER: {final_answer}")
+         return final_answer
+
+ def run_and_submit_all(profile: gr.OAuthProfile | None):
+     if not profile: return "Please Login to Hugging Face with the button first.", None
+     space_id = os.getenv("SPACE_ID")
+     if not space_id: return "CRITICAL ERROR: SPACE_ID not found. Run this from a deployed Hugging Face Space.", None
+     username = profile.username
+     print(f"User logged in: {username}")
+     questions_url = f"{API_BASE_URL}/questions"
+     submit_url = f"{API_BASE_URL}/submit"
      try:
+         agent = GaiaLangGraphAgent()
+     except Exception as e: return f"Error initializing agent: {e}", None
      agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main"
      print(f"Fetching questions from: {questions_url}")
      try:
+         response = requests.get(questions_url, timeout=20); response.raise_for_status()
          questions_data = response.json()
+     except Exception as e: return f"Error fetching questions: {e}", None
+     results_log, answers_payload = [], []
+     print(f"Running agent on {len(questions_data)} questions. This may take several minutes...")
      for item in questions_data:
+         task_id, question_text = item.get("task_id"), item.get("question")
          try:
              submitted_answer = agent(question_text)
              answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
              results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
          except Exception as e:
              results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": f"AGENT ERROR: {e}"})
+     submission_data = {"username": username, "agent_code": agent_code, "answers": answers_payload}
+     print(f"Submitting {len(answers_payload)} answers...")
      try:
+         response = requests.post(submit_url, json=submission_data, timeout=60); response.raise_for_status()
          result_data = response.json()
+         final_status = (f"Submission Successful!\nUser: {result_data.get('username')}\n"
+                         f"Score: {result_data.get('score', 'N/A')}% "
+                         f"({result_data.get('correct_count', '?')}/{result_data.get('total_attempted', '?')} correct)")
+         return final_status, pd.DataFrame(results_log)
+     except Exception as e: return f"Submission Failed: {e}", pd.DataFrame(results_log)

+ with gr.Blocks() as demo:
+     gr.Markdown("# GAIA - Advanced Agent Runner")
+     gr.Markdown("Log in and click 'Run' to evaluate the agent.")
      gr.LoginButton()
      run_button = gr.Button("Run Evaluation & Submit All Answers")
      status_output = gr.Textbox(label="Run Status / Submission Result", lines=5, interactive=False)
      results_table = gr.DataFrame(label="Questions and Agent Answers", wrap=True)
+     run_button.click(fn=run_and_submit_all, outputs=[status_output, results_table])

  if __name__ == "__main__":
+     print("Launching Gradio Interface...")
+     demo.launch()
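
Note on running the new version: the rewritten app.py pulls in LangChain, LangGraph, Tavily search, and Gemini, so the Space (or a local checkout) needs those packages installed and credentials visible to load_dotenv() — in a typical setup that means GOOGLE_API_KEY for ChatGoogleGenerativeAI and TAVILY_API_KEY for TavilySearchResults, plus SPACE_ID when submitting from a deployed Space. A minimal local smoke test of the agent class might look like the sketch below; it is not part of the commit, and the file name and question string are made up for illustration.

# smoke_test.py — hypothetical local check for the committed GaiaLangGraphAgent.
# Assumes GOOGLE_API_KEY and TAVILY_API_KEY are exported or present in a .env file,
# and that the packages imported by app.py are installed (gradio, requests, pandas,
# python-dotenv, langchain-core, langchain-community, langchain-google-genai,
# langgraph, numexpr).
from app import GaiaLangGraphAgent  # importing app.py builds the Gradio Blocks but does not launch them

if __name__ == "__main__":
    agent = GaiaLangGraphAgent()            # compiles the planner/action LangGraph workflow
    answer = agent("What is 17 * 23?")      # illustrative question; may exercise the calculator tool
    print("Smoke-test answer:", answer)

Running something like this before pushing to the Space gives a quick signal that the keys, dependencies, and graph wiring are in place without waiting for a full evaluation run.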