samoye16 committed on
Commit 865f24e · verified · 1 Parent(s): 8be21d4

Update agents.py

Files changed (1):
  1. agents.py +195 -145
agents.py CHANGED
@@ -1,152 +1,202 @@
  import os
-
- from langgraph.graph import StateGraph, START, MessagesState
- from langgraph.prebuilt import ToolNode, tools_condition
-
- from langchain_google_genai import ChatGoogleGenerativeAI
- from langchain_groq import ChatGroq
- from langchain_huggingface import ChatHuggingFace, HuggingFaceEndpoint, HuggingFaceEmbeddings
-
- from langchain_community.tools.tavily_search import TavilySearchResults
- from langchain_community.document_loaders import WikipediaLoader, ArxivLoader
- from langchain_community.vectorstores import SupabaseVectorStore
-
- from langchain_core.messages import SystemMessage, HumanMessage
- from langchain_core.tools import tool
-
- from supabase.client import create_client, Client
-
-
- # Load environment variables
-
- # ---- Basic Arithmetic Utilities ---- #
- @tool
- def multiply(a: int, b: int) -> int:
-     """Returns the product of two integers."""
-     return a * b
-
- @tool
- def add(a: int, b: int) -> int:
-     """Returns the sum of two integers."""
-     return a + b
-
- @tool
- def subtract(a: int, b: int) -> int:
-     """Returns the difference between two integers."""
-     return a - b
-
- @tool
- def divide(a: int, b: int) -> float:
-     """Performs division and handles zero division errors."""
-     if b == 0:
-         raise ValueError("Division by zero is undefined.")
-     return a / b
-
- @tool
- def modulus(a: int, b: int) -> int:
-     """Returns the remainder after division."""
-     return a % b
-
-
- # ---- Search Tools ---- #
- @tool
- def search_wikipedia(query: str) -> str:
-     """Returns up to 2 documents related to a query from Wikipedia."""
-     docs = WikipediaLoader(query=query, load_max_docs=2).load()
-     return {"wiki_results": "\n\n---\n\n".join(
-         f'<Document source="{doc.metadata["source"]}" page="{doc.metadata.get("page", "")}"/>\n{doc.page_content}'
-         for doc in docs
-     )}
-
- @tool
- def search_web(query: str) -> str:
-     """Fetches up to 3 web results using Tavily."""
-     results = TavilySearchResults(max_results=3).invoke(query=query)
-     return {"web_results": "\n\n---\n\n".join(
-         f'<Document source="{doc.metadata["source"]}" page="{doc.metadata.get("page", "")}"/>\n{doc.page_content}'
-         for doc in results
-     )}
-
- @tool
- def search_arxiv(query: str) -> str:
-     """Retrieves up to 3 papers related to the query from ArXiv."""
-     results = ArxivLoader(query=query, load_max_docs=3).load()
-     return {"arvix_results": "\n\n---\n\n".join(
-         f'<Document source="{doc.metadata["source"]}" page="{doc.metadata.get("page", "")}"/>\n{doc.page_content[:1000]}'
-         for doc in results
-     )}
-
-
- system_message = SystemMessage(content="""You are a helpful assistant tasked with answering questions using a set of tools. Now, I will ask you a question. Report your thoughts, and finish your answer with the following template:
-
- FINAL ANSWER: [YOUR FINAL ANSWER]
-
- YOUR FINAL ANSWER should be a number OR as few words as possible OR a comma-separated list of numbers and/or strings.
- - If you are asked for a number, don't use a comma in the number and avoid units like $ or % unless specified otherwise.
- - If you are asked for a string, avoid using articles and abbreviations (e.g. for cities), and write digits in plain text unless specified otherwise.
- - If you are asked for a comma-separated list, apply the above rules depending on whether each item is a number or string.
-
- Your answer should start only with "FINAL ANSWER: ", followed by your result.""")
-
- toolset = [
-     multiply,
-     add,
-     subtract,
-     divide,
-     modulus,
-     search_wikipedia,
-     search_web,
-     search_arxiv,
- ]
-
-
- # ---- Graph Construction ---- #
- def create_agent_flow(provider: str = "groq"):
-     """Constructs the LangGraph conversational flow with tool support."""
-
-     if provider == "google":
-         llm = ChatGoogleGenerativeAI(model="gemini-2.0-flash", temperature=0)
-     elif provider == "groq":
-         llm = ChatGroq(api_key="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", model="qwen-qwq-32b", temperature=0)
-     elif provider == "huggingface":
-         llm = ChatHuggingFace(llm=HuggingFaceEndpoint(
-             url="https://api-inference.huggingface.co/models/Meta-DeepLearning/llama-2-7b-chat-hf",
-             temperature=0
-         ))
-     else:
-         raise ValueError("Unsupported provider. Choose from: 'google', 'groq', 'huggingface'.")
-
-     llm_toolchain = llm.bind_tools(toolset)
-
-     # Assistant node behavior
-     def assistant_node(state: MessagesState):
-         response = llm_toolchain.invoke(state["messages"])
-         return {"messages": [response]}
-
 
-     # Build the conversational graph
-     graph01 = StateGraph(MessagesState)
-     graph01.add_node("assistant", assistant_node)
-     graph01.add_node("tools", ToolNode(toolset))
-     graph01.add_edge(START, "assistant")
-     graph01.add_conditional_edges("assistant", tools_condition)
-     graph01.add_edge("tools", "assistant")
-
-     return graph01.compile()
-
 
  if __name__ == "__main__":
-     question = "What is the capital of France?"
-
-     # Build the graph
-     compiled_graph = create_agent_flow(provider="groq")
 
-     # Prepare input messages
-     messages = [system_message, HumanMessage(content=question)]
 
-     # Run the graph
-     output_state = compiled_graph.invoke({"messages": messages})
 
-     # Print the final output
-     for m in output_state["messages"]:
-         print(m.content)
 
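Note: in the removed module above, the `# Load environment variables` comment is not followed by any loading code, and the Groq key is passed to `ChatGroq` as a hard-coded literal. A minimal sketch of that missing step, assuming the `python-dotenv` package is available and a `GROQ_API_KEY` entry (an illustrative name, not taken from this commit) is defined in the environment or a local `.env` file:

```python
# Sketch only, not part of this commit: read the Groq key from the environment
# instead of embedding it in create_agent_flow(). GROQ_API_KEY is an assumed name.
import os

from dotenv import load_dotenv  # provided by the python-dotenv package
from langchain_groq import ChatGroq

load_dotenv()  # merge a local .env file into os.environ, if one exists

groq_key = os.environ.get("GROQ_API_KEY")
if not groq_key:
    raise RuntimeError("GROQ_API_KEY is not set; export it or add it to .env")

# Same model and temperature as the removed code, with the key injected at runtime.
llm = ChatGroq(api_key=groq_key, model="qwen-qwq-32b", temperature=0)
```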
  import os
+ import gradio as gr
+ import requests
+ import inspect
+ import pandas as pd
+ from agents import create_agent_flow
+ from langchain_core.messages import HumanMessage
+
+ # (Keep Constants as is)
+ # --- Constants ---
+ DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
+
+ # --- Basic Agent Definition ---
+ # ----- THIS IS WERE YOU CAN BUILD WHAT YOU WANT ------
+ class BasicAgent:
 
+     def __init__(self):
+         print("BasicAgent initialized.")
+         self.agent = create_agent_flow()
+
+     def __call__(self, question: str) -> str:
+         print(f"Agent received question (first 50 chars): {question[:50]}...")
+
+         question = [HumanMessage(content=question)]
+         question_ask = self.agent.invoke({"messages": question})
+         response = question_ask['messages'][-1].content
+         print(f"Agent returning fixed answer: {response}")
+         return response[8:]
+
+ def run_and_submit_all( profile: gr.OAuthProfile | None):
+     """
+     Fetches all questions, runs the BasicAgent on them, submits all answers,
+     and displays the results.
+     """
+     # --- Determine HF Space Runtime URL and Repo URL ---
+     space_id = os.getenv("SPACE_ID") # Get the SPACE_ID for sending link to the code
+
+     if profile:
+         username= f"{profile.username}"
+         print(f"User logged in: {username}")
+     else:
+         print("User not logged in.")
+         return "Please Login to Hugging Face with the button.", None
+
+     api_url = DEFAULT_API_URL
+     questions_url = f"{api_url}/questions"
+     submit_url = f"{api_url}/submit"
+
+     # 1. Instantiate Agent ( modify this part to create your agent)
+     try:
+         agent = BasicAgent()
+     except Exception as e:
+         print(f"Error instantiating agent: {e}")
+         return f"Error initializing agent: {e}", None
+     # In the case of an app running as a hugging Face space, this link points toward your codebase ( usefull for others so please keep it public)
+     agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main"
+     print(agent_code)
+
+     # 2. Fetch Questions
+     print(f"Fetching questions from: {questions_url}")
+     try:
+         response = requests.get(questions_url, timeout=15)
+         response.raise_for_status()
+         questions_data = response.json()
+         if not questions_data:
+             print("Fetched questions list is empty.")
+             return "Fetched questions list is empty or invalid format.", None
+         print(f"Fetched {len(questions_data)} questions.")
+     except requests.exceptions.RequestException as e:
+         print(f"Error fetching questions: {e}")
+         return f"Error fetching questions: {e}", None
+     except requests.exceptions.JSONDecodeError as e:
+         print(f"Error decoding JSON response from questions endpoint: {e}")
+         print(f"Response text: {response.text[:500]}")
+         return f"Error decoding server response for questions: {e}", None
+     except Exception as e:
+         print(f"An unexpected error occurred fetching questions: {e}")
+         return f"An unexpected error occurred fetching questions: {e}", None
+
+     # 3. Run your Agent
+     results_log = []
+     answers_payload = []
+     print(f"Running agent on {len(questions_data)} questions...")
+     for item in questions_data:
+         task_id = item.get("task_id")
+         question_text = item.get("question")
+         if not task_id or question_text is None:
+             print(f"Skipping item with missing task_id or question: {item}")
+             continue
+         try:
+             submitted_answer = agent(question_text)
+             answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
+             results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
+         except Exception as e:
+             print(f"Error running agent on task {task_id}: {e}")
+             results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": f"AGENT ERROR: {e}"})
+
+     if not answers_payload:
+         print("Agent did not produce any answers to submit.")
+         return "Agent did not produce any answers to submit.", pd.DataFrame(results_log)
+
+     # 4. Prepare Submission
+     submission_data = {"username": username.strip(), "agent_code": agent_code, "answers": answers_payload}
+     status_update = f"Agent finished. Submitting {len(answers_payload)} answers for user '{username}'..."
+     print(status_update)
+
+     # 5. Submit
+     print(f"Submitting {len(answers_payload)} answers to: {submit_url}")
+     try:
+         response = requests.post(submit_url, json=submission_data, timeout=60)
+         response.raise_for_status()
+         result_data = response.json()
+         final_status = (
+             f"Submission Successful!\n"
+             f"User: {result_data.get('username')}\n"
+             f"Overall Score: {result_data.get('score', 'N/A')}% "
+             f"({result_data.get('correct_count', '?')}/{result_data.get('total_attempted', '?')} correct)\n"
+             f"Message: {result_data.get('message', 'No message received.')}"
+         )
+         print("Submission successful.")
+         results_df = pd.DataFrame(results_log)
+         return final_status, results_df
+     except requests.exceptions.HTTPError as e:
+         error_detail = f"Server responded with status {e.response.status_code}."
+         try:
+             error_json = e.response.json()
+             error_detail += f" Detail: {error_json.get('detail', e.response.text)}"
+         except requests.exceptions.JSONDecodeError:
+             error_detail += f" Response: {e.response.text[:500]}"
+         status_message = f"Submission Failed: {error_detail}"
+         print(status_message)
+         results_df = pd.DataFrame(results_log)
+         return status_message, results_df
+     except requests.exceptions.Timeout:
+         status_message = "Submission Failed: The request timed out."
+         print(status_message)
+         results_df = pd.DataFrame(results_log)
+         return status_message, results_df
+     except requests.exceptions.RequestException as e:
+         status_message = f"Submission Failed: Network error - {e}"
+         print(status_message)
+         results_df = pd.DataFrame(results_log)
+         return status_message, results_df
+     except Exception as e:
+         status_message = f"An unexpected error occurred during submission: {e}"
+         print(status_message)
+         results_df = pd.DataFrame(results_log)
+         return status_message, results_df
+
+
+ # --- Build Gradio Interface using Blocks ---
+ with gr.Blocks() as demo:
+     gr.Markdown("# Basic Agent Evaluation Runner")
+     gr.Markdown(
+         """
+         **Instructions:**
+         1. Please clone this space, then modify the code to define your agent's logic, the tools, the necessary packages, etc ...
+         2. Log in to your Hugging Face account using the button below. This uses your HF username for submission.
+         3. Click 'Run Evaluation & Submit All Answers' to fetch questions, run your agent, submit answers, and see the score.
+         ---
+         **Disclaimers:**
+         Once clicking on the "submit button, it can take quite some time ( this is the time for the agent to go through all the questions).
+         This space provides a basic setup and is intentionally sub-optimal to encourage you to develop your own, more robust solution. For instance for the delay process of the submit button, a solution could be to cache the answers and submit in a seperate action or even to answer the questions in async.
+         """
+     )
+
+     gr.LoginButton()
+
+     run_button = gr.Button("Run Evaluation & Submit All Answers")
+
+     status_output = gr.Textbox(label="Run Status / Submission Result", lines=5, interactive=False)
+     # Removed max_rows=10 from DataFrame constructor
+     results_table = gr.DataFrame(label="Questions and Agent Answers", wrap=True)
+
+     run_button.click(
+         fn=run_and_submit_all,
+         outputs=[status_output, results_table]
+     )
 
  if __name__ == "__main__":
+     print("\n" + "-"*30 + " App Starting " + "-"*30)
+     # Check for SPACE_HOST and SPACE_ID at startup for information
+     space_host_startup = os.getenv("SPACE_HOST")
+     space_id_startup = os.getenv("SPACE_ID") # Get SPACE_ID at startup
+
+     if space_host_startup:
+         print(f"✅ SPACE_HOST found: {space_host_startup}")
+         print(f" Runtime URL should be: https://{space_host_startup}.hf.space")
+     else:
+         print("ℹ️ SPACE_HOST environment variable not found (running locally?).")
 
+     if space_id_startup: # Print repo URLs if SPACE_ID is found
+         print(f"✅ SPACE_ID found: {space_id_startup}")
+         print(f" Repo URL: https://huggingface.co/spaces/{space_id_startup}")
+         print(f" Repo Tree URL: https://huggingface.co/spaces/{space_id_startup}/tree/main")
+     else:
+         print("ℹ️ SPACE_ID environment variable not found (running locally?). Repo URL cannot be determined.")
 
+     print("-"*(60 + len(" App Starting ")) + "\n")
 
+     print("Launching Gradio Interface for Basic Agent Evaluation...")
+     demo.launch(debug=True, share=False)
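Note: a minimal local smoke test for the flow added above, run outside the Gradio UI and the scoring server. This is a sketch, not part of the commit: it assumes the `create_agent_flow` import at the top of the new file resolves to a compiled LangGraph graph and that the relevant provider API keys are set in the environment. The `FINAL ANSWER:` split is an illustrative alternative to the fixed `response[8:]` slice in `BasicAgent.__call__`, based on the answer template in the removed system prompt.

```python
# Local smoke test -- a sketch under the assumptions stated above, not part of the commit.
from agents import create_agent_flow
from langchain_core.messages import HumanMessage

flow = create_agent_flow(provider="groq")  # compiled LangGraph graph
state = flow.invoke({"messages": [HumanMessage(content="What is the capital of France?")]})
raw = state["messages"][-1].content

# Strip the answer-template marker defined in the system prompt, rather than
# slicing a fixed number of characters off the front of the reply.
answer = raw.split("FINAL ANSWER:")[-1].strip()
print(answer)
```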