mnab committed on
Commit
f499a2d
·
verified ·
1 Parent(s): 81917a3

Upload 2 files

Browse files
Files changed (2) hide show
  1. agent.py +161 -0
  2. app.py +227 -196
agent.py ADDED
@@ -0,0 +1,161 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from langchain.schema import HumanMessage, AIMessage, SystemMessage
2
+ from langchain_openai import ChatOpenAI
3
+ from langchain_core.messages import AnyMessage, SystemMessage
4
+ from langchain_core.tools import tool
5
+ from langchain_community.document_loaders import WikipediaLoader
6
+ from langchain_community.document_loaders import ArxivLoader
7
+
8
+ # from langchain_community.tools.tavily_search import TavilySearchResults
9
+ from langchain.tools.retriever import create_retriever_tool
10
+
11
+ from langgraph.graph.message import add_messages
12
+ from langgraph.graph import START, StateGraph, MessagesState, END
13
+ from langgraph.prebuilt import tools_condition, ToolNode
14
+
15
+ import os
16
+ from dotenv import load_dotenv
17
+ from typing import TypedDict, Annotated, Optional
18
+ from langchain_community.tools import DuckDuckGoSearchResults
19
+
20
+ from langchain_huggingface import (
21
+ ChatHuggingFace,
22
+ HuggingFaceEndpoint,
23
+ HuggingFaceEmbeddings,
24
+ )
25
+
26
+
27
+ load_dotenv()
28
+
29
+ embddings = HuggingFaceEmbeddings(
30
+ model_name="sentence-transformers/all-mpnet-base-v2",
31
+ )
32
+
33
+
34
+ # Initialize the DuckDuckGo search tool
35
+ search_tool = DuckDuckGoSearchResults()
36
+
37
+
38
+ @tool
39
+ def wiki_search(query: str) -> str:
40
+ """Search Wikipedia for a query and return maximum 2 results.
41
+
42
+ Args:
43
+ query: The search query."""
44
+ search_docs = WikipediaLoader(query=query, load_max_docs=2).load()
45
+ formatted_search_docs = "\n\n---\n\n".join(
46
+ [
47
+ f'<Document source="{doc.metadata["source"]}" page="{doc.metadata.get("page", "")}"/>\n{doc.page_content}\n</Document>'
48
+ for doc in search_docs
49
+ ]
50
+ )
51
+ return {"wiki_results": formatted_search_docs}
52
+
53
+
54
+ @tool
55
+ def web_search(query: str) -> str:
56
+ """Search Tavily for a query and return maximum 3 results.
57
+
58
+ Args:
59
+ query: The search query."""
60
+ search_docs = TavilySearchResults(max_results=3).invoke(query=query)
61
+ formatted_search_docs = "\n\n---\n\n".join(
62
+ [
63
+ f'<Document source="{doc.metadata["source"]}" page="{doc.metadata.get("page", "")}"/>\n{doc.page_content}\n</Document>'
64
+ for doc in search_docs
65
+ ]
66
+ )
67
+ return {"web_results": formatted_search_docs}
68
+
69
+
70
+ @tool
71
+ def arvix_search(query: str) -> str:
72
+ """Search Arxiv for a query and return maximum 3 result.
73
+
74
+ Args:
75
+ query: The search query."""
76
+ search_docs = ArxivLoader(query=query, load_max_docs=3).load()
77
+ formatted_search_docs = "\n\n---\n\n".join(
78
+ [
79
+ f'<Document source="{doc.metadata["source"]}" page="{doc.metadata.get("page", "")}"/>\n{doc.page_content[:1000]}\n</Document>'
80
+ for doc in search_docs
81
+ ]
82
+ )
83
+ return {"arvix_results": formatted_search_docs}
84
+
85
+
86
+ # Load LLM model
87
+ # llm = ChatOpenAI(
88
+ # model="gpt-4o",
89
+ # base_url="https://models.inference.ai.azure.com",
90
+ # api_key=os.environ["GITHUB_TOKEN"],
91
+ # temperature=0.2,
92
+ # max_tokens=4096,
93
+ # )
94
+ llm = ChatHuggingFace(
95
+ llm=HuggingFaceEndpoint(
96
+ repo_id="microsoft/Phi-3-mini-4k-instruct",
97
+ temperature=0,
98
+ # huggingfacehub_api_token=os.environ["HUGGINGFACEHUB_API_TOKEN"],
99
+ ),
100
+ verbose=True,
101
+ )
102
+
103
+ tools = [
104
+ arvix_search,
105
+ wiki_search,
106
+ # web_search,
107
+ search_tool,
108
+ ]
109
+ # Bind the tools to the LLM
110
+ model_with_tools = llm.bind_tools(tools)
111
+ tool_node = ToolNode(tools)
112
+
113
+
114
+ def build_agent_workflow():
115
+
116
+ def should_continue(state: MessagesState):
117
+ messages = state["messages"]
118
+ last_message = messages[-1]
119
+ if last_message.tool_calls:
120
+ return "tools"
121
+ return END
122
+
123
+ def call_model(state: MessagesState):
124
+ system_message = SystemMessage(
125
+ content=f"""
126
+ You are a helpful assistant tasked with answering questions using a set of tools.
127
+ Now, I will ask you a question. Report your thoughts, and finish your answer with the following template:
128
+ FINAL ANSWER: [YOUR FINAL ANSWER].
129
+ YOUR FINAL ANSWER should be a number OR as few words as possible OR a comma separated list of numbers and/or strings. If you are asked for a number, don't use comma to write your number neither use units such as $ or percent sign unless specified otherwise. If you are asked for a string, don't use articles, neither abbreviations (e.g. for cities), and write the digits in plain text unless specified otherwise. If you are asked for a comma separated list, apply the above rules depending of whether the element to be put in the list is a number or a string.
130
+ Your answer should only start with "FINAL ANSWER: ", then follows with the answer. """
131
+ )
132
+
133
+ messages = [system_message] + state["messages"]
134
+ print("Messages to LLM:", messages)
135
+
136
+ response = model_with_tools.invoke(messages)
137
+ return {"messages": [response]}
138
+
139
+ # Define the state graph
140
+ workflow = StateGraph(MessagesState)
141
+ workflow.add_node("agent", call_model)
142
+ workflow.add_node("tools", tool_node)
143
+
144
+ workflow.add_edge(START, "agent")
145
+ workflow.add_conditional_edges("agent", should_continue, ["tools", END])
146
+ workflow.add_edge("tools", "agent")
147
+
148
+ app = workflow.compile()
149
+
150
+ return app
151
+
152
+
153
+ if __name__ == "__main__":
154
+ question = "Who nominated the only Featured Article on English Wikipedia about a dinosaur that was promoted in November 2016?"
155
+ # Build the graph
156
+ graph = build_agent_workflow()
157
+ # Run the graph
158
+ messages = [HumanMessage(content=question)]
159
+ messages = graph.invoke({"messages": messages})
160
+ for m in messages["messages"]:
161
+ m.pretty_print()
app.py CHANGED
@@ -1,196 +1,227 @@
1
- import os
2
- import gradio as gr
3
- import requests
4
- import inspect
5
- import pandas as pd
6
-
7
- # (Keep Constants as is)
8
- # --- Constants ---
9
- DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
10
-
11
- # --- Basic Agent Definition ---
12
- # ----- THIS IS WERE YOU CAN BUILD WHAT YOU WANT ------
13
- class BasicAgent:
14
- def __init__(self):
15
- print("BasicAgent initialized.")
16
- def __call__(self, question: str) -> str:
17
- print(f"Agent received question (first 50 chars): {question[:50]}...")
18
- fixed_answer = "This is a default answer."
19
- print(f"Agent returning fixed answer: {fixed_answer}")
20
- return fixed_answer
21
-
22
- def run_and_submit_all( profile: gr.OAuthProfile | None):
23
- """
24
- Fetches all questions, runs the BasicAgent on them, submits all answers,
25
- and displays the results.
26
- """
27
- # --- Determine HF Space Runtime URL and Repo URL ---
28
- space_id = os.getenv("SPACE_ID") # Get the SPACE_ID for sending link to the code
29
-
30
- if profile:
31
- username= f"{profile.username}"
32
- print(f"User logged in: {username}")
33
- else:
34
- print("User not logged in.")
35
- return "Please Login to Hugging Face with the button.", None
36
-
37
- api_url = DEFAULT_API_URL
38
- questions_url = f"{api_url}/questions"
39
- submit_url = f"{api_url}/submit"
40
-
41
- # 1. Instantiate Agent ( modify this part to create your agent)
42
- try:
43
- agent = BasicAgent()
44
- except Exception as e:
45
- print(f"Error instantiating agent: {e}")
46
- return f"Error initializing agent: {e}", None
47
- # In the case of an app running as a hugging Face space, this link points toward your codebase ( usefull for others so please keep it public)
48
- agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main"
49
- print(agent_code)
50
-
51
- # 2. Fetch Questions
52
- print(f"Fetching questions from: {questions_url}")
53
- try:
54
- response = requests.get(questions_url, timeout=15)
55
- response.raise_for_status()
56
- questions_data = response.json()
57
- if not questions_data:
58
- print("Fetched questions list is empty.")
59
- return "Fetched questions list is empty or invalid format.", None
60
- print(f"Fetched {len(questions_data)} questions.")
61
- except requests.exceptions.RequestException as e:
62
- print(f"Error fetching questions: {e}")
63
- return f"Error fetching questions: {e}", None
64
- except requests.exceptions.JSONDecodeError as e:
65
- print(f"Error decoding JSON response from questions endpoint: {e}")
66
- print(f"Response text: {response.text[:500]}")
67
- return f"Error decoding server response for questions: {e}", None
68
- except Exception as e:
69
- print(f"An unexpected error occurred fetching questions: {e}")
70
- return f"An unexpected error occurred fetching questions: {e}", None
71
-
72
- # 3. Run your Agent
73
- results_log = []
74
- answers_payload = []
75
- print(f"Running agent on {len(questions_data)} questions...")
76
- for item in questions_data:
77
- task_id = item.get("task_id")
78
- question_text = item.get("question")
79
- if not task_id or question_text is None:
80
- print(f"Skipping item with missing task_id or question: {item}")
81
- continue
82
- try:
83
- submitted_answer = agent(question_text)
84
- answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
85
- results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
86
- except Exception as e:
87
- print(f"Error running agent on task {task_id}: {e}")
88
- results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": f"AGENT ERROR: {e}"})
89
-
90
- if not answers_payload:
91
- print("Agent did not produce any answers to submit.")
92
- return "Agent did not produce any answers to submit.", pd.DataFrame(results_log)
93
-
94
- # 4. Prepare Submission
95
- submission_data = {"username": username.strip(), "agent_code": agent_code, "answers": answers_payload}
96
- status_update = f"Agent finished. Submitting {len(answers_payload)} answers for user '{username}'..."
97
- print(status_update)
98
-
99
- # 5. Submit
100
- print(f"Submitting {len(answers_payload)} answers to: {submit_url}")
101
- try:
102
- response = requests.post(submit_url, json=submission_data, timeout=60)
103
- response.raise_for_status()
104
- result_data = response.json()
105
- final_status = (
106
- f"Submission Successful!\n"
107
- f"User: {result_data.get('username')}\n"
108
- f"Overall Score: {result_data.get('score', 'N/A')}% "
109
- f"({result_data.get('correct_count', '?')}/{result_data.get('total_attempted', '?')} correct)\n"
110
- f"Message: {result_data.get('message', 'No message received.')}"
111
- )
112
- print("Submission successful.")
113
- results_df = pd.DataFrame(results_log)
114
- return final_status, results_df
115
- except requests.exceptions.HTTPError as e:
116
- error_detail = f"Server responded with status {e.response.status_code}."
117
- try:
118
- error_json = e.response.json()
119
- error_detail += f" Detail: {error_json.get('detail', e.response.text)}"
120
- except requests.exceptions.JSONDecodeError:
121
- error_detail += f" Response: {e.response.text[:500]}"
122
- status_message = f"Submission Failed: {error_detail}"
123
- print(status_message)
124
- results_df = pd.DataFrame(results_log)
125
- return status_message, results_df
126
- except requests.exceptions.Timeout:
127
- status_message = "Submission Failed: The request timed out."
128
- print(status_message)
129
- results_df = pd.DataFrame(results_log)
130
- return status_message, results_df
131
- except requests.exceptions.RequestException as e:
132
- status_message = f"Submission Failed: Network error - {e}"
133
- print(status_message)
134
- results_df = pd.DataFrame(results_log)
135
- return status_message, results_df
136
- except Exception as e:
137
- status_message = f"An unexpected error occurred during submission: {e}"
138
- print(status_message)
139
- results_df = pd.DataFrame(results_log)
140
- return status_message, results_df
141
-
142
-
143
- # --- Build Gradio Interface using Blocks ---
144
- with gr.Blocks() as demo:
145
- gr.Markdown("# Basic Agent Evaluation Runner")
146
- gr.Markdown(
147
- """
148
- **Instructions:**
149
-
150
- 1. Please clone this space, then modify the code to define your agent's logic, the tools, the necessary packages, etc ...
151
- 2. Log in to your Hugging Face account using the button below. This uses your HF username for submission.
152
- 3. Click 'Run Evaluation & Submit All Answers' to fetch questions, run your agent, submit answers, and see the score.
153
-
154
- ---
155
- **Disclaimers:**
156
- Once clicking on the "submit button, it can take quite some time ( this is the time for the agent to go through all the questions).
157
- This space provides a basic setup and is intentionally sub-optimal to encourage you to develop your own, more robust solution. For instance for the delay process of the submit button, a solution could be to cache the answers and submit in a seperate action or even to answer the questions in async.
158
- """
159
- )
160
-
161
- gr.LoginButton()
162
-
163
- run_button = gr.Button("Run Evaluation & Submit All Answers")
164
-
165
- status_output = gr.Textbox(label="Run Status / Submission Result", lines=5, interactive=False)
166
- # Removed max_rows=10 from DataFrame constructor
167
- results_table = gr.DataFrame(label="Questions and Agent Answers", wrap=True)
168
-
169
- run_button.click(
170
- fn=run_and_submit_all,
171
- outputs=[status_output, results_table]
172
- )
173
-
174
- if __name__ == "__main__":
175
- print("\n" + "-"*30 + " App Starting " + "-"*30)
176
- # Check for SPACE_HOST and SPACE_ID at startup for information
177
- space_host_startup = os.getenv("SPACE_HOST")
178
- space_id_startup = os.getenv("SPACE_ID") # Get SPACE_ID at startup
179
-
180
- if space_host_startup:
181
- print(f"✅ SPACE_HOST found: {space_host_startup}")
182
- print(f" Runtime URL should be: https://{space_host_startup}.hf.space")
183
- else:
184
- print("ℹ️ SPACE_HOST environment variable not found (running locally?).")
185
-
186
- if space_id_startup: # Print repo URLs if SPACE_ID is found
187
- print(f"✅ SPACE_ID found: {space_id_startup}")
188
- print(f" Repo URL: https://huggingface.co/spaces/{space_id_startup}")
189
- print(f" Repo Tree URL: https://huggingface.co/spaces/{space_id_startup}/tree/main")
190
- else:
191
- print("ℹ️ SPACE_ID environment variable not found (running locally?). Repo URL cannot be determined.")
192
-
193
- print("-"*(60 + len(" App Starting ")) + "\n")
194
-
195
- print("Launching Gradio Interface for Basic Agent Evaluation...")
196
- demo.launch(debug=True, share=False)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import gradio as gr
3
+ import requests
4
+ import inspect
5
+ import pandas as pd
6
+ from agent import build_agent_workflow
7
+ from langchain_core.messages import HumanMessage
8
+
9
+ # (Keep Constants as is)
10
+ # --- Constants ---
11
+ DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
12
+
13
+
14
+ # --- Basic Agent Definition ---
15
+ # ----- THIS IS WERE YOU CAN BUILD WHAT YOU WANT ------
16
+ class BasicAgent:
17
+ def __init__(self):
18
+ print("BasicAgent initialized.")
19
+ self.workflow = build_agent_workflow()
20
+
21
+ def __call__(self, question: str) -> str:
22
+ print(f"Agent received question (first 50 chars): {question[:50]}...")
23
+ messages = [HumanMessage(content=question)]
24
+ messages = self.graph.invoke({"messages": messages})
25
+ answer = messages["messages"][-1].content
26
+ return answer[14:]
27
+ # fixed_answer = "This is a default answer."
28
+ # print(f"Agent returning fixed answer: {fixed_answer}")
29
+ # return fixed_answer
30
+
31
+
32
+ def run_and_submit_all(profile: gr.OAuthProfile | None):
33
+ """
34
+ Fetches all questions, runs the BasicAgent on them, submits all answers,
35
+ and displays the results.
36
+ """
37
+ # --- Determine HF Space Runtime URL and Repo URL ---
38
+ space_id = os.getenv("SPACE_ID") # Get the SPACE_ID for sending link to the code
39
+
40
+ if profile:
41
+ username = f"{profile.username}"
42
+ print(f"User logged in: {username}")
43
+ else:
44
+ print("User not logged in.")
45
+ return "Please Login to Hugging Face with the button.", None
46
+
47
+ api_url = DEFAULT_API_URL
48
+ questions_url = f"{api_url}/questions"
49
+ submit_url = f"{api_url}/submit"
50
+
51
+ # 1. Instantiate Agent ( modify this part to create your agent)
52
+ try:
53
+ agent = BasicAgent()
54
+ except Exception as e:
55
+ print(f"Error instantiating agent: {e}")
56
+ return f"Error initializing agent: {e}", None
57
+ # In the case of an app running as a hugging Face space, this link points toward your codebase ( usefull for others so please keep it public)
58
+ agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main"
59
+ print(agent_code)
60
+
61
+ # 2. Fetch Questions
62
+ print(f"Fetching questions from: {questions_url}")
63
+ try:
64
+ response = requests.get(questions_url, timeout=15)
65
+ response.raise_for_status()
66
+ questions_data = response.json()
67
+ if not questions_data:
68
+ print("Fetched questions list is empty.")
69
+ return "Fetched questions list is empty or invalid format.", None
70
+ print(f"Fetched {len(questions_data)} questions.")
71
+ except requests.exceptions.RequestException as e:
72
+ print(f"Error fetching questions: {e}")
73
+ return f"Error fetching questions: {e}", None
74
+ except requests.exceptions.JSONDecodeError as e:
75
+ print(f"Error decoding JSON response from questions endpoint: {e}")
76
+ print(f"Response text: {response.text[:500]}")
77
+ return f"Error decoding server response for questions: {e}", None
78
+ except Exception as e:
79
+ print(f"An unexpected error occurred fetching questions: {e}")
80
+ return f"An unexpected error occurred fetching questions: {e}", None
81
+
82
+ # 3. Run your Agent
83
+ results_log = []
84
+ answers_payload = []
85
+ print(f"Running agent on {len(questions_data)} questions...")
86
+ for item in questions_data:
87
+ task_id = item.get("task_id")
88
+ question_text = item.get("question")
89
+ if not task_id or question_text is None:
90
+ print(f"Skipping item with missing task_id or question: {item}")
91
+ continue
92
+ try:
93
+ submitted_answer = agent(question_text)
94
+ answers_payload.append(
95
+ {"task_id": task_id, "submitted_answer": submitted_answer}
96
+ )
97
+ results_log.append(
98
+ {
99
+ "Task ID": task_id,
100
+ "Question": question_text,
101
+ "Submitted Answer": submitted_answer,
102
+ }
103
+ )
104
+ except Exception as e:
105
+ print(f"Error running agent on task {task_id}: {e}")
106
+ results_log.append(
107
+ {
108
+ "Task ID": task_id,
109
+ "Question": question_text,
110
+ "Submitted Answer": f"AGENT ERROR: {e}",
111
+ }
112
+ )
113
+
114
+ if not answers_payload:
115
+ print("Agent did not produce any answers to submit.")
116
+ return "Agent did not produce any answers to submit.", pd.DataFrame(results_log)
117
+
118
+ # 4. Prepare Submission
119
+ submission_data = {
120
+ "username": username.strip(),
121
+ "agent_code": agent_code,
122
+ "answers": answers_payload,
123
+ }
124
+ status_update = f"Agent finished. Submitting {len(answers_payload)} answers for user '{username}'..."
125
+ print(status_update)
126
+
127
+ # 5. Submit
128
+ print(f"Submitting {len(answers_payload)} answers to: {submit_url}")
129
+ try:
130
+ response = requests.post(submit_url, json=submission_data, timeout=60)
131
+ response.raise_for_status()
132
+ result_data = response.json()
133
+ final_status = (
134
+ f"Submission Successful!\n"
135
+ f"User: {result_data.get('username')}\n"
136
+ f"Overall Score: {result_data.get('score', 'N/A')}% "
137
+ f"({result_data.get('correct_count', '?')}/{result_data.get('total_attempted', '?')} correct)\n"
138
+ f"Message: {result_data.get('message', 'No message received.')}"
139
+ )
140
+ print("Submission successful.")
141
+ results_df = pd.DataFrame(results_log)
142
+ return final_status, results_df
143
+ except requests.exceptions.HTTPError as e:
144
+ error_detail = f"Server responded with status {e.response.status_code}."
145
+ try:
146
+ error_json = e.response.json()
147
+ error_detail += f" Detail: {error_json.get('detail', e.response.text)}"
148
+ except requests.exceptions.JSONDecodeError:
149
+ error_detail += f" Response: {e.response.text[:500]}"
150
+ status_message = f"Submission Failed: {error_detail}"
151
+ print(status_message)
152
+ results_df = pd.DataFrame(results_log)
153
+ return status_message, results_df
154
+ except requests.exceptions.Timeout:
155
+ status_message = "Submission Failed: The request timed out."
156
+ print(status_message)
157
+ results_df = pd.DataFrame(results_log)
158
+ return status_message, results_df
159
+ except requests.exceptions.RequestException as e:
160
+ status_message = f"Submission Failed: Network error - {e}"
161
+ print(status_message)
162
+ results_df = pd.DataFrame(results_log)
163
+ return status_message, results_df
164
+ except Exception as e:
165
+ status_message = f"An unexpected error occurred during submission: {e}"
166
+ print(status_message)
167
+ results_df = pd.DataFrame(results_log)
168
+ return status_message, results_df
169
+
170
+
171
+ # --- Build Gradio Interface using Blocks ---
172
+ with gr.Blocks() as demo:
173
+ gr.Markdown("# Basic Agent Evaluation Runner")
174
+ gr.Markdown(
175
+ """
176
+ **Instructions:**
177
+
178
+ 1. Please clone this space, then modify the code to define your agent's logic, the tools, the necessary packages, etc ...
179
+ 2. Log in to your Hugging Face account using the button below. This uses your HF username for submission.
180
+ 3. Click 'Run Evaluation & Submit All Answers' to fetch questions, run your agent, submit answers, and see the score.
181
+
182
+ ---
183
+ **Disclaimers:**
184
+ Once clicking on the "submit button, it can take quite some time ( this is the time for the agent to go through all the questions).
185
+ This space provides a basic setup and is intentionally sub-optimal to encourage you to develop your own, more robust solution. For instance for the delay process of the submit button, a solution could be to cache the answers and submit in a seperate action or even to answer the questions in async.
186
+ """
187
+ )
188
+
189
+ gr.LoginButton()
190
+
191
+ run_button = gr.Button("Run Evaluation & Submit All Answers")
192
+
193
+ status_output = gr.Textbox(
194
+ label="Run Status / Submission Result", lines=5, interactive=False
195
+ )
196
+ # Removed max_rows=10 from DataFrame constructor
197
+ results_table = gr.DataFrame(label="Questions and Agent Answers", wrap=True)
198
+
199
+ run_button.click(fn=run_and_submit_all, outputs=[status_output, results_table])
200
+
201
+ if __name__ == "__main__":
202
+ print("\n" + "-" * 30 + " App Starting " + "-" * 30)
203
+ # Check for SPACE_HOST and SPACE_ID at startup for information
204
+ space_host_startup = os.getenv("SPACE_HOST")
205
+ space_id_startup = os.getenv("SPACE_ID") # Get SPACE_ID at startup
206
+
207
+ if space_host_startup:
208
+ print(f"✅ SPACE_HOST found: {space_host_startup}")
209
+ print(f" Runtime URL should be: https://{space_host_startup}.hf.space")
210
+ else:
211
+ print("ℹ️ SPACE_HOST environment variable not found (running locally?).")
212
+
213
+ if space_id_startup: # Print repo URLs if SPACE_ID is found
214
+ print(f"✅ SPACE_ID found: {space_id_startup}")
215
+ print(f" Repo URL: https://huggingface.co/spaces/{space_id_startup}")
216
+ print(
217
+ f" Repo Tree URL: https://huggingface.co/spaces/{space_id_startup}/tree/main"
218
+ )
219
+ else:
220
+ print(
221
+ "ℹ️ SPACE_ID environment variable not found (running locally?). Repo URL cannot be determined."
222
+ )
223
+
224
+ print("-" * (60 + len(" App Starting ")) + "\n")
225
+
226
+ print("Launching Gradio Interface for Basic Agent Evaluation...")
227
+ demo.launch(debug=True, share=False)