APRG committed on
Commit
b0182f2
·
verified ·
1 Parent(s): 2a15979

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +194 -291
app.py CHANGED
@@ -1,315 +1,218 @@
1
- #pip install langchain_google_genai langgraph gradio
2
  import os
3
- import sys
4
- import typing
5
- from typing import Annotated, Literal, Iterable
6
- from typing_extensions import TypedDict
7
-
8
  from langchain_google_genai import ChatGoogleGenerativeAI
9
- from langgraph.graph import StateGraph, START, END
10
- from langgraph.graph.message import add_messages
11
- from langgraph.prebuilt import ToolNode
12
- from langchain_core.tools import tool
13
- from langchain_core.messages import AIMessage, ToolMessage, HumanMessage, BaseMessage, SystemMessage
14
- from random import randint
15
-
16
- import requests
17
- from bs4 import BeautifulSoup
18
- import openpyxl
19
- import wikipedia
20
- import pandas as pd
21
-
22
- import gradio as gr
23
- import logging
24
-
25
- class OrderState(TypedDict):
26
- """State representing the customer's order conversation."""
27
- messages: Annotated[list, add_messages]
28
- order: list[str]
29
- finished: bool
30
 
31
- # System instruction for the Agent
32
- SYSINT = (
33
- "system",
34
- "You are a general AI assistant. I will ask you a question."
35
- "The question requires a tool to solve. You must attempt to use at least one of the available tools before returning an answer."
36
- "Report your thoughts, and finish your answer with the following template: "
37
- "FINAL ANSWER: [YOUR FINAL ANSWER]. YOUR FINAL ANSWER should be a number OR as few words as possible OR a comma separated list of numbers and/or strings."
38
- "If you are asked for a number, don't use comma to write your number neither use units such as $ or percent sign unless specified otherwise."
39
- "If you are asked for a string, don't use articles, neither abbreviations (e.g. for cities), and write the digits in plain text unless specified otherwise."
40
- "If you are asked for a comma separated list, apply the above rules depending of whether the element to be put in the list is a number or a string."
41
- "If a tool required for task completion is not functioning, return 0."
42
- )
43
 
44
- WELCOME_MSG = "Welcome to my general-purpose AI agent. Type `q` to quit. How shall I fail to serve you today?"
 
 
 
 
 
45
 
46
- # Initialize the Google Gemini LLM
47
- llm = ChatGoogleGenerativeAI(model="gemini-1.5-flash-latest")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
48
 
49
- @tool
50
- def wikipedia_search_tool(title: str) -> str:
51
- """Provides an excerpt from a Wikipedia article with the given title."""
52
- page = wikipedia.page(title, auto_suggest=False)
53
- return page.content[:3000]
54
 
55
- @tool
56
- def media_tool(file_path: str) -> str:
57
- """Used for deciphering video and audio files."""
58
- return "This tool hasn't been implemented yet. Please return 0 if the task cannot be solved without knowing the contents of this file."
59
 
60
- @tool
61
- def internet_search_tool(search_query: str) -> str:
62
- """Does a google search with using the input as the search query. Returns a long batch of textual information related to the query."""
63
- search_tool = DuckDuckGoSearchTool()
64
- result = search_tool(search_query)
65
- return result
 
66
 
67
- @tool
68
- def webscraper_tool(url: str) -> str:
69
- """Returns the page's html content from the input url."""
70
- response = requests.get(url, stream=True)
71
- if response.status_code == 200:
72
- soup = BeautifulSoup(response.content, 'html.parser')
73
- html_text = soup.get_text()
74
- return html_text
75
  else:
76
- raise Exception(f"Failed to retrieve the webpage. Status code: {response.status_code}")
77
-
78
- @tool
79
- def read_excel_tool(file_path: str) -> str:
80
- """Returns the contents of an Excel file as a Pandas dataframe."""
81
- df = pd.read_excel(file_path, engine = "openpyxl")
82
- return df
83
 
84
- def agent_node(state: OrderState) -> OrderState:
85
- """agent with tool handling."""
86
- print(f"Messagelist sent to agent node: {[msg.content for msg in state.get('messages', [])]}")
87
- defaults = {"order": [], "finished": False}
88
-
89
- # Ensure we always have at least a system message
90
- if not state.get("messages", []):
91
- return defaults | state | {"messages": [SystemMessage(content=SYSINT), new_output]}
92
 
 
93
  try:
94
- # Prepend system instruction if not already present
95
- messages_with_system = [
96
- SystemMessage(content=SYSINT)
97
- ] + state.get("messages", [])
98
-
99
- # Process messages through the LLM
100
- new_output = llm_with_tools.invoke(messages_with_system)
101
-
102
- return defaults | state | {"messages": [new_output]}
103
  except Exception as e:
104
- # Fallback if LLM processing fails
105
- return defaults | state | {"messages": [AIMessage(content=f"I'm having trouble processing that. {str(e)}")]}
106
-
107
- def interactive_tools_node(state: OrderState) -> OrderState:
108
- """Handles interactive tool calls."""
109
- logging.info("interactive tools node")
110
- tool_msg = state.get("messages", [])[-1]
111
- order = state.get("order", [])
112
- outbound_msgs = []
113
-
114
- for tool_call in tool_msg.tool_calls:
115
- tool_name = tool_call["name"]
116
- tool_args = tool_call["args"]
117
-
118
- if tool_name == "wikipedia_search_tool":
119
- print(f"called wikipedia with {str(tool_args)}")
120
- page = wikipedia.page(tool_args.get("title"), auto_suggest=False)
121
- response = page.content[:3000]
122
- elif tool_name == "media_tool":
123
- print(f"called media with {str(tool_args)}")
124
- response = "This tool hasn't been implemented yet. Please return 0 if the task cannot be solved without knowing the contents of this file."
125
- elif tool_name == "internet_search_tool":
126
- print(f"called internet with {str(tool_args)}")
127
- question = tool_args.get("search_query")
128
- search_tool = DuckDuckGoSearchTool()
129
- response = search_tool(question)[:3000]
130
- elif tool_name == "webscraper_tool":
131
- print(f"called webscraper with {str(tool_args)}")
132
- url = tool_args.get("url")
133
- response = requests.get(url, stream=True)
134
- if response.status_code == 200:
135
- soup = BeautifulSoup(response.content, 'html.parser')
136
- html_text = soup.get_text()
137
- response = html_text
138
- else:
139
- response = Exception(f"Failed to retrieve the webpage. Status code: {response.status_code}")
140
- elif tool_name == "read_excel_tool":
141
- print(f"called excel with {str(tool_args)}")
142
- path = tool_args.get("file_path")
143
- df = pd.read_excel(path, engine = "openpyxl")
144
- response = df
145
-
146
- else:
147
- raise NotImplementedError(f'Unknown tool call: {tool_name}')
148
-
149
- outbound_msgs.append(
150
- ToolMessage(
151
- content=response,
152
- name=tool_name,
153
- tool_call_id=tool_call["id"],
154
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
155
  )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
156
 
157
- return {"messages": outbound_msgs, "order": order, "finished": False}
158
-
159
- def maybe_route_to_tools(state: OrderState) -> str:
160
- """Route between chat and tool nodes."""
161
- if not (msgs := state.get("messages", [])):
162
- raise ValueError(f"No messages found when parsing state: {state}")
163
-
164
- msg = msgs[-1]
165
-
166
- if state.get("finished", False):
167
- print("from agent GOTO End node")
168
- return END
169
-
170
- elif hasattr(msg, "tool_calls") and len(msg.tool_calls) > 0:
171
- if any(tool["name"] in tool_node.tools_by_name.keys() for tool in msg.tool_calls):
172
- print("from agent GOTO tools node")
173
- return "tools"
174
- else:
175
- logging.info("from chatbot GOTO interactive tools node")
176
- return "interactive_tools"
177
-
178
- print("tool call failed, quitting")
179
- return "human"
180
 
181
- def human_node(state: OrderState) -> OrderState:
182
- """Handle user input."""
183
- logging.info(f"Messagelist sent to human node: {[msg.content for msg in state.get('messages', [])]}")
184
- last_msg = state["messages"][-1]
185
 
186
- if last_msg.content.lower() in {"q", "quit", "exit", "goodbye"}:
187
- state["finished"] = True
 
188
 
189
- return state
 
 
 
190
 
191
- def maybe_exit_human_node(state: OrderState) -> Literal["agent", "__end__"]:
192
- """Determine if conversation should continue."""
193
- if state.get("finished", False):
194
- logging.info("from human GOTO End node")
195
- return END
196
- last_msg = state["messages"][-1]
197
- if isinstance(last_msg, AIMessage):
198
- logging.info("Chatbot response obtained, ending conversation")
199
- return END
200
  else:
201
- logging.info("from human GOTO agent node")
202
- return "agent"
203
-
204
- # Prepare tools
205
- auto_tools = []
206
- tool_node = ToolNode(auto_tools)
207
-
208
- interactive_tools = [wikipedia_search_tool, media_tool, internet_search_tool, webscraper_tool, read_excel_tool]
209
-
210
- # Bind all tools to the LLM
211
- llm_with_tools = llm.bind_tools(auto_tools + interactive_tools)
212
-
213
- # Build the graph
214
- graph_builder = StateGraph(OrderState)
215
-
216
- # Add nodes
217
- graph_builder.add_node("agent", agent_node)
218
- graph_builder.add_node("human", human_node)
219
- graph_builder.add_node("tools", tool_node)
220
- graph_builder.add_node("interactive_tools", interactive_tools_node)
221
-
222
- # Add edges and routing
223
- graph_builder.add_conditional_edges("agent", maybe_route_to_tools)
224
- graph_builder.add_conditional_edges("human", maybe_exit_human_node)
225
- graph_builder.add_edge("tools", "agent")
226
- graph_builder.add_edge("interactive_tools", "agent")
227
- graph_builder.add_edge(START, "human")
228
-
229
- # Compile the graph
230
- chat_graph = graph_builder.compile()
231
-
232
- def convert_history_to_messages(history: list) -> list[BaseMessage]:
233
- """
234
- Convert Gradio chat history to a list of Langchain messages.
235
-
236
- Args:
237
- - history: Gradio's chat history format
238
-
239
- Returns:
240
- - List of Langchain BaseMessage objects
241
- """
242
- messages = []
243
- for human, ai in history:
244
- if human:
245
- messages.append(HumanMessage(content=human))
246
- if ai:
247
- messages.append(AIMessage(content=ai))
248
- return messages
249
 
250
- def gradio_chat(message: str, history: list) -> str:
251
- """
252
- Gradio-compatible chat function that manages the conversation state.
253
-
254
- Args:
255
- - message: User's input message
256
- - history: Gradio's chat history
257
-
258
- Returns:
259
- - Bot's response as a string
260
- """
261
- logging.info(f"{len(history)} history so far: {history}")
262
- # Ensure non-empty message
263
- if not message or message.strip() == "":
264
- message = "Hello, how can I help you today?"
265
-
266
- # Convert history to Langchain messages
267
- conversation_messages = []
268
- for old_message in history:
269
- if old_message["content"].strip():
270
- if old_message["role"] == "user":
271
- conversation_messages.append(HumanMessage(content=old_message["content"]))
272
- if old_message["role"] == "assistant":
273
- conversation_messages.append(AIMessage(content=old_message["content"]))
274
-
275
- # Add current message
276
- conversation_messages.append(HumanMessage(content=message))
277
-
278
- # Create initial state with conversation history
279
- conversation_state = {
280
- "messages": conversation_messages,
281
- "order": [],
282
- "finished": False
283
- }
284
- logging.info(f"Conversation so far: {str(conversation_state)}")
285
- try:
286
- # Process the conversation through the graph
287
- conversation_state = chat_graph.invoke(conversation_state, {"recursion_limit": 10})
288
-
289
- # Extract the latest bot message
290
- latest_message = conversation_state["messages"][-1]
291
-
292
- # Return the bot's response content
293
- logging.info(f"return: {latest_message.content}")
294
- return latest_message.content
295
-
296
- except Exception as e:
297
- return f"An error occurred: {str(e)}"
298
 
299
- # Gradio interface
300
- def launch_agent():
301
- gr.ChatInterface(
302
- gradio_chat,
303
- type="messages",
304
- title="Agent",
305
- description="An AI agent (work in progress)",
306
- theme="ocean"
307
- ).launch()
308
 
309
- if __name__ == "__main__":
310
- # initiate logging tool
311
- logging.basicConfig(
312
- stream=sys.stdout,
313
- level=logging.INFO,
314
- format='%(asctime)s - %(levelname)s - %(message)s')
315
- launch_agent()
 
 
import os
import inspect

import gradio as gr
import pandas as pd
import requests
from langchain_google_genai import ChatGoogleGenerativeAI

from GenericAgent import AgenticAI
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# (Keep Constants as is)
# --- Constants ---
# Base URL of the HF Agents-course Unit 4 scoring service; the code below
# appends "/questions" (fetch) and "/submit" (score submission) to it.
DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
 
 
 
 
 
 
 
 
 
9
 
10
+ # --- Basic Agent Definition ---
11
+ # ----- THIS IS WHERE YOU CAN BUILD WHAT YOU WANT ------
12
class BasicAgent:
    """Wraps AgenticAI and formats questions/answers per the GAIA-style template.

    Long questions are first condensed by :meth:`summarize`; the final answer is
    extracted from the text following the "FINAL ANSWER:" marker.
    """

    def __init__(self):
        print("BasicAgent initialized.")
        # Underlying agent that actually answers questions (project-local class).
        self.agent = AgenticAI()

    def summarize(self, question: str) -> str:
        """Shorten a long request via Gemini, keeping any answer-format requirements.

        Args:
            question: The raw (possibly verbose) user request.

        Returns:
            The summarized request text.
        """
        prompt = """You are an AI assistant for summarizing tasks. You will receive a long request message, your task is to make it shorter by removing irrelevant details. Do no attempt to find an answer to the request or any part of the request. Make sure to always include any requirement towards the answer's format in your summary.

EXAMPLE:
'Hi, we've been learning about reptiles in biology and I'd like to know more about crocodiles. Can you tell me how many legs an average crocodile has? Please answer only with a number! Thanks in advance!'
'How many legs does an average crocodile have? Answer with only a number.'

REQUEST:
"""
        llm = ChatGoogleGenerativeAI(model="gemini-2.0-flash")
        summary = llm.invoke(prompt + question)
        # Bug fix: llm.invoke returns an AIMessage, not a str; the caller
        # concatenates the result with another prompt string, so return the
        # message's text content to honor the declared "-> str" contract.
        return summary.content

    def __call__(self, question: str) -> str:
        """Answer a question and return only the text after "FINAL ANSWER:"."""
        prompt = """You are a general AI assistant. I will ask you a question. Report your thoughts, and finish your answer with the following template: FINAL ANSWER: [YOUR FINAL ANSWER]. YOUR FINAL ANSWER should be a number OR as few words as possible OR a comma separated list of numbers and/or strings. If you are asked for a number, don't use comma to write your number neither use units such as $ or percent sign unless specified otherwise. If you are asked for a string, don't use articles, neither abbreviations (e.g. for cities), and write the digits in plain text unless specified otherwise. If you are asked for a comma separated list, apply the above rules depending of whether the element to be put in the list is a number or a string. If a tool required for task completion is unavailable after multiple tries, return 0.

QUESTION:
"""
        print(f"Agent received question (first 50 chars): {question[:50]}...")

        # Bug fix: the original called self.summary(), which does not exist
        # (the method is named `summarize`), raising AttributeError for any
        # question longer than 400 characters.
        if len(question) > 400:
            question = self.summarize(question)

        answer = self.agent.ask(prompt + question)
        print(f"Agent returning answer: {str(answer)}")
        # Keep only the text after the last "FINAL ANSWER:" marker.
        return str(answer).split("FINAL ANSWER:")[-1].strip()
 
43
 
44
def run_and_submit_all(profile: gr.OAuthProfile | None):
    """
    Fetches all questions, runs the BasicAgent on them, submits all answers,
    and displays the results.

    Args:
        profile: OAuth profile injected by gr.LoginButton; None if not logged in.

    Returns:
        Tuple of (status message string, pandas DataFrame of per-question
        results or None on early failure).
    """
    # --- Determine HF Space Runtime URL and Repo URL ---
    space_id = os.getenv("SPACE_ID")  # Get the SPACE_ID for sending link to the code

    if profile:
        username = f"{profile.username}"
        print(f"User logged in: {username}")
    else:
        print("User not logged in.")
        return "Please Login to Hugging Face with the button.", None

    api_url = DEFAULT_API_URL
    questions_url = f"{api_url}/questions"
    submit_url = f"{api_url}/submit"

    # 1. Instantiate Agent ( modify this part to create your agent)
    try:
        agent = BasicAgent()
    except Exception as e:
        print(f"Error instantiating agent: {e}")
        return f"Error initializing agent: {e}", None
    # In the case of an app running as a hugging Face space, this link points toward your codebase ( usefull for others so please keep it public)
    agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main"
    print(agent_code)

    # 2. Fetch Questions
    print(f"Fetching questions from: {questions_url}")
    try:
        response = requests.get(questions_url, timeout=15)
        response.raise_for_status()
        questions_data = response.json()
        if not questions_data:
            print("Fetched questions list is empty.")
            return "Fetched questions list is empty or invalid format.", None
        print(f"Fetched {len(questions_data)} questions.")
    # Bug fix: requests.exceptions.JSONDecodeError subclasses RequestException
    # (requests >= 2.27), so it must be caught BEFORE RequestException or this
    # handler is unreachable.
    except requests.exceptions.JSONDecodeError as e:
        print(f"Error decoding JSON response from questions endpoint: {e}")
        print(f"Response text: {response.text[:500]}")
        return f"Error decoding server response for questions: {e}", None
    except requests.exceptions.RequestException as e:
        print(f"Error fetching questions: {e}")
        return f"Error fetching questions: {e}", None
    except Exception as e:
        print(f"An unexpected error occurred fetching questions: {e}")
        return f"An unexpected error occurred fetching questions: {e}", None

    # 3. Run your Agent
    results_log = []
    answers_payload = []
    print(f"Running agent on {len(questions_data)} questions...")
    for item in questions_data:
        task_id = item.get("task_id")
        question_text = item.get("question")
        if not task_id or question_text is None:
            print(f"Skipping item with missing task_id or question: {item}")
            continue
        try:
            submitted_answer = agent(question_text)
            answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
            results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
        except Exception as e:
            # One failing question should not abort the whole run; record the error.
            print(f"Error running agent on task {task_id}: {e}")
            results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": f"AGENT ERROR: {e}"})

    if not answers_payload:
        print("Agent did not produce any answers to submit.")
        return "Agent did not produce any answers to submit.", pd.DataFrame(results_log)

    # 4. Prepare Submission
    submission_data = {"username": username.strip(), "agent_code": agent_code, "answers": answers_payload}
    status_update = f"Agent finished. Submitting {len(answers_payload)} answers for user '{username}'..."
    print(status_update)

    # 5. Submit
    print(f"Submitting {len(answers_payload)} answers to: {submit_url}")
    try:
        response = requests.post(submit_url, json=submission_data, timeout=60)
        response.raise_for_status()
        result_data = response.json()
        final_status = (
            f"Submission Successful!\n"
            f"User: {result_data.get('username')}\n"
            f"Overall Score: {result_data.get('score', 'N/A')}% "
            f"({result_data.get('correct_count', '?')}/{result_data.get('total_attempted', '?')} correct)\n"
            f"Message: {result_data.get('message', 'No message received.')}"
        )
        print("Submission successful.")
        results_df = pd.DataFrame(results_log)
        return final_status, results_df
    except requests.exceptions.HTTPError as e:
        error_detail = f"Server responded with status {e.response.status_code}."
        try:
            error_json = e.response.json()
            error_detail += f" Detail: {error_json.get('detail', e.response.text)}"
        except requests.exceptions.JSONDecodeError:
            error_detail += f" Response: {e.response.text[:500]}"
        status_message = f"Submission Failed: {error_detail}"
        print(status_message)
        results_df = pd.DataFrame(results_log)
        return status_message, results_df
    except requests.exceptions.Timeout:
        status_message = "Submission Failed: The request timed out."
        print(status_message)
        results_df = pd.DataFrame(results_log)
        return status_message, results_df
    except requests.exceptions.RequestException as e:
        status_message = f"Submission Failed: Network error - {e}"
        print(status_message)
        results_df = pd.DataFrame(results_log)
        return status_message, results_df
    except Exception as e:
        status_message = f"An unexpected error occurred during submission: {e}"
        print(status_message)
        results_df = pd.DataFrame(results_log)
        return status_message, results_df
163
+
164
+
165
# --- Build Gradio Interface using Blocks ---
# Components are declared in render order; `demo` is launched from __main__.
with gr.Blocks() as demo:
    gr.Markdown("# Basic Agent Evaluation Runner")
    gr.Markdown(
        """
        **Instructions:**

        1.  Please clone this space, then modify the code to define your agent's logic, the tools, the necessary packages, etc ...
        2.  Log in to your Hugging Face account using the button below. This uses your HF username for submission.
        3.  Click 'Run Evaluation & Submit All Answers' to fetch questions, run your agent, submit answers, and see the score.

        ---
        **Disclaimers:**
        Once clicking on the "submit button, it can take quite some time ( this is the time for the agent to go through all the questions).
        This space provides a basic setup and is intentionally sub-optimal to encourage you to develop your own, more robust solution. For instance for the delay process of the submit button, a solution could be to cache the answers and submit in a seperate action or even to answer the questions in async.
        """
    )

    # OAuth login; gr.LoginButton supplies the OAuthProfile to click handlers.
    gr.LoginButton()

    evaluate_btn = gr.Button("Run Evaluation & Submit All Answers")

    # Outputs: a status textbox plus a table of per-question answers.
    run_status_box = gr.Textbox(label="Run Status / Submission Result", lines=5, interactive=False)
    # Removed max_rows=10 from DataFrame constructor
    answers_table = gr.DataFrame(label="Questions and Agent Answers", wrap=True)

    evaluate_btn.click(fn=run_and_submit_all, outputs=[run_status_box, answers_table])
195
 
196
+ if __name__ == "__main__":
197
+ print("\n" + "-"*30 + " App Starting " + "-"*30)
198
+ # Check for SPACE_HOST and SPACE_ID at startup for information
199
+ space_host_startup = os.getenv("SPACE_HOST")
200
+ space_id_startup = os.getenv("SPACE_ID") # Get SPACE_ID at startup
201
+
202
+ if space_host_startup:
203
+ print(f" SPACE_HOST found: {space_host_startup}")
204
+ print(f" Runtime URL should be: https://{space_host_startup}.hf.space")
205
  else:
206
+ print("ℹ️ SPACE_HOST environment variable not found (running locally?).")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
207
 
208
+ if space_id_startup: # Print repo URLs if SPACE_ID is found
209
+ print(f"✅ SPACE_ID found: {space_id_startup}")
210
+ print(f" Repo URL: https://huggingface.co/spaces/{space_id_startup}")
211
+ print(f" Repo Tree URL: https://huggingface.co/spaces/{space_id_startup}/tree/main")
212
+ else:
213
+ print("ℹ️ SPACE_ID environment variable not found (running locally?). Repo URL cannot be determined.")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
214
 
215
+ print("-"*(60 + len(" App Starting ")) + "\n")
 
 
 
 
 
 
 
 
216
 
217
+ print("Launching Gradio Interface for Basic Agent Evaluation...")
218
+ demo.launch(debug=True, share=False)