Attempt #3
app.py CHANGED
@@ -11,36 +11,87 @@ DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"

# --- Basic Agent Definition ---
# ----- THIS IS WHERE YOU CAN BUILD WHAT YOU WANT ------
+
+
class BasicAgent:
    def __init__(self):
        print("BasicAgent initialized.")
-
-
-
-
-        print("
-
+        # Try multiple models in order of preference with better error handling
+        self.model_name = "Qwen/Qwen1.5-7B-Chat"  # Default model
+        self.hf_token = os.getenv("HF_TOKEN")  # Get token from environment if available
+
+        print(f"HF_TOKEN set: {self.hf_token is not None}")
+
+        # List of fallback models (free models that don't require authentication)
+        self.fallback_models = [
+            "google/flan-t5-small",  # Small but doesn't require authentication
+            "facebook/opt-125m",  # Another small fallback option
+            "distilbert-base-uncased"  # Even more basic fallback
+        ]
+
+        try:
+            print(f"Attempting to initialize with model: {self.model_name}")
+            self.hf_client = InferenceClient(
+                model=self.model_name,
+                token=self.hf_token
+            )
+            # Test the client with a simple prompt to verify it works
+            test_response = self.hf_client.text_generation(
+                prompt="Hello, this is a test.",
+                max_new_tokens=10
+            )
+            print(f"Model initialized successfully: {self.model_name}")
+
+        except Exception as e:
+            print(f"Error initializing primary model ({self.model_name}): {e}")
+            self.hf_client = None
+
+            # Try fallback models
+            for fallback_model in self.fallback_models:
+                try:
+                    print(f"Attempting fallback model: {fallback_model}")
+                    self.hf_client = InferenceClient(
+                        model=fallback_model,
+                        token=self.hf_token
+                    )
+                    # Quick test to verify the model works
+                    test_response = self.hf_client.text_generation(
+                        prompt="Hello, this is a test.",
+                        max_new_tokens=5
+                    )
+                    print(f"Successfully initialized fallback model: {fallback_model}")
+                    self.model_name = fallback_model
+                    break
+                except Exception as fallback_error:
+                    print(f"Fallback model failed ({fallback_model}): {fallback_error}")
+                    self.hf_client = None
+
+            # If all models fail, we'll use a rule-based response generator
+            if self.hf_client is None:
+                print("WARNING: All models failed. Using rule-based fallback for responses.")
+                self.model_name = "rule-based-fallback"
+
    def break_down_question(self, question: str) -> list:
        """
        Use an LLM to break down a complex question into key search terms or sub-questions.
-
+
        Args:
            question (str): The original question
-
+
        Returns:
            list: A list of key search terms or sub-questions
        """
        try:
            print(f"Breaking down question with LLM: {question[:50]}...")
-
+
            # Create a prompt that asks the LLM to break down the question
            prompt = f"""
            Please break down this question into 2-3 key search queries that would help find information to answer it.
            Return ONLY the search queries, one per line, with no additional text or explanations.
-
+
            Question: {question}
            """
-
+
            # Call the Hugging Face model to get the breakdown
            response = self.hf_client.text_generation(
                prompt=prompt,
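A note on the fallback list in the hunk above: `google/flan-t5-small` is a text2text-generation model and `distilbert-base-uncased` is a fill-mask model, so the `text_generation` smoke test would be expected to fail for them even when the Inference API is reachable. A minimal sketch (not part of the commit) of pre-filtering candidates by pipeline tag, assuming only the public `huggingface_hub` API (`model_info` and its `pipeline_tag` field); model names and the helper name are illustrative:

# Hypothetical helper, not in app.py: keep only candidates whose pipeline tag
# matches the text-generation endpoint before running the smoke test.
from huggingface_hub import InferenceClient, model_info

def first_usable_text_generation_model(candidates, token=None):
    for model_id in candidates:
        try:
            if model_info(model_id, token=token).pipeline_tag != "text-generation":
                print(f"Skipping {model_id}: not a text-generation model")
                continue
            client = InferenceClient(model=model_id, token=token)
            client.text_generation(prompt="Hello, this is a test.", max_new_tokens=5)  # smoke test
            return model_id, client
        except Exception as e:
            print(f"{model_id} unavailable: {e}")
    return None, None

With a filter like this, the fallback loop only attempts models that can actually serve the endpoint, instead of burning an exception per incompatible model.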
@@ -49,30 +100,30 @@ class BasicAgent:
                repetition_penalty=1.1,
                do_sample=True
            )
-
+
            # Extract the search terms from the response
            search_terms = response.strip().split('\n')
            search_terms = [term.strip() for term in search_terms if term.strip()]
-
+
            # Limit to 3 search terms maximum
            search_terms = search_terms[:3]
-
+
            print(f"Question broken down into {len(search_terms)} search terms: {search_terms}")
            return search_terms
-
+
        except Exception as e:
            print(f"Error breaking down question: {e}")
            # If there's an error, return the original question as a fallback
            return [question]
-
+
    def search_internet(self, query: str) -> str:
        """
        Search the internet for information using Wikipedia's API.
        This is a simple implementation that returns search results as text.
-
+
        Args:
            query (str): The search query
-
+
        Returns:
            str: Search results as text
        """
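The term-parsing step in the hunk above is just a newline split plus a cap of three; a tiny worked example (values illustrative) of what it yields on a typical completion:

# Worked example of the parsing in break_down_question.
response = "capital of France\nParis founding date\n\nFrance geography\n"
search_terms = [t.strip() for t in response.strip().split('\n') if t.strip()][:3]
print(search_terms)  # ['capital of France', 'Paris founding date', 'France geography']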
@@ -82,88 +133,88 @@ class BasicAgent:
            headers = {
                'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
            }
-
+
            # Step 1: Search for relevant articles
            search_url = f"https://en.wikipedia.org/w/api.php?action=query&list=search&srsearch={query}&format=json"
            search_response = requests.get(search_url, headers=headers, timeout=10)
            search_response.raise_for_status()
            search_data = search_response.json()
-
+
            # Check if we found any search results
            if 'query' not in search_data or 'search' not in search_data['query'] or not search_data['query']['search']:
                return "No relevant information found."
-
+
            # Get the title of the first (most relevant) result
            first_result = search_data['query']['search'][0]
            page_title = first_result['title']
-
+
            # Step 2: Fetch the content of the most relevant article
            content_url = f"https://en.wikipedia.org/w/api.php?action=query&prop=extracts&exintro=1&explaintext=1&titles={page_title}&format=json"
            content_response = requests.get(content_url, headers=headers, timeout=10)
            content_response.raise_for_status()
            content_data = content_response.json()
-
+
            # Extract the page content
            pages = content_data['query']['pages']
            page_id = list(pages.keys())[0]
-
+
            if 'extract' in pages[page_id]:
                extract = pages[page_id]['extract']
                # Limit extract length to avoid very long responses
                if len(extract) > 1000:
                    extract = extract[:1000] + "..."
-
+
                result = f"Wikipedia article: {page_title}\n\n{extract}"
-
+
                # Also get a few more related article titles
                related_titles = []
                for item in search_data['query']['search'][1:4]:  # Get next 3 results
                    related_titles.append(item['title'])
-
+
                if related_titles:
                    result += "\n\nRelated topics:\n"
                    for title in related_titles:
                        result += f"- {title}\n"
-
+
                return result
            else:
                return "Found a relevant page, but couldn't extract its content."
-
+
        except Exception as e:
            print(f"Error searching internet: {e}")
            return f"Error performing internet search: {str(e)}"
-
+
    def __call__(self, question: str) -> str:
        print(f"Agent received question (first 50 chars): {question[:50]}...")
-
+
        # Use LLM to break down the question into key search terms
        search_terms = self.break_down_question(question)
-
+
        # Search for information using each search term
        all_results = []
        for term in search_terms:
            result = self.search_internet(term)
            if result and result != "No relevant information found." and not result.startswith("Error"):
                all_results.append(result)
-
+
        # Create a response based on collected search results
        if all_results:
            # Join the results with clear separation
            combined_results = "\n\n--- Next Search Result ---\n\n".join(all_results)
-
+
            # Use Hugging Face model to synthesize a coherent answer from the search results
            try:
                synthesis_prompt = f"""
                Based on the following search results, please provide a comprehensive answer to this question:
-
+
                Question: {question}
-
+
                Search Results:
                {combined_results}
-
+
                Answer:
                """
-
+
                # Call the Hugging Face model to synthesize an answer
                response = self.hf_client.text_generation(
                    prompt=synthesis_prompt,
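One caveat in the hunk above: `query` and `page_title` are interpolated straight into the URLs, so a term containing spaces, `&`, or non-ASCII characters can produce a malformed request. A sketch of the same two-step MediaWiki flow (same `action=query` endpoints and parameters as the diff) that lets `requests` handle the URL encoding via `params`; the function name and User-Agent string are illustrative:

# Sketch: search-then-extract against the MediaWiki API with safe encoding.
import requests

WIKI_API = "https://en.wikipedia.org/w/api.php"
HEADERS = {"User-Agent": "BasicAgent/0.1 (course exercise)"}

def wiki_lookup(query: str, timeout: int = 10) -> str:
    # Step 1: full-text search; requests URL-encodes srsearch for us.
    r = requests.get(WIKI_API, headers=HEADERS, timeout=timeout,
                     params={"action": "query", "list": "search",
                             "srsearch": query, "format": "json"})
    r.raise_for_status()
    hits = r.json().get("query", {}).get("search", [])
    if not hits:
        return "No relevant information found."
    title = hits[0]["title"]
    # Step 2: plain-text intro extract for the top hit.
    r = requests.get(WIKI_API, headers=HEADERS, timeout=timeout,
                     params={"action": "query", "prop": "extracts", "exintro": 1,
                             "explaintext": 1, "titles": title, "format": "json"})
    r.raise_for_status()
    page = next(iter(r.json()["query"]["pages"].values()))
    return f"Wikipedia article: {title}\n\n{page.get('extract', '')[:1000]}"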
@@ -172,11 +223,11 @@ class BasicAgent:
                    repetition_penalty=1.05,
                    do_sample=True
                )
-
+
                answer = response.strip()
                print("Agent returning synthesized answer from search results.")
                return answer
-
+
            except Exception as e:
                print(f"Error synthesizing answer: {e}")
                # Fallback to returning the raw search results
@@ -189,16 +240,17 @@ class BasicAgent:
        print("Agent returning default answer as searches found no useful information.")
        return answer

-def run_and_submit_all( profile: gr.OAuthProfile | None):
+
+def run_and_submit_all(profile: gr.OAuthProfile | None):
    """
    Fetches all questions, runs the BasicAgent on them, submits all answers,
    and displays the results.
    """
    # --- Determine HF Space Runtime URL and Repo URL ---
-    space_id = os.getenv("SPACE_ID")
+    space_id = os.getenv("SPACE_ID")  # Get the SPACE_ID for sending link to the code

    if profile:
-        username= f"{profile.username}"
+        username = f"{profile.username}"
        print(f"User logged in: {username}")
    else:
        print("User not logged in.")
@@ -225,16 +277,16 @@ def run_and_submit_all( profile: gr.OAuthProfile | None):
        response.raise_for_status()
        questions_data = response.json()
        if not questions_data:
-            print("Fetched questions list is empty.")
-            return "Fetched questions list is empty or invalid format.", None
+            print("Fetched questions list is empty.")
+            return "Fetched questions list is empty or invalid format.", None
        print(f"Fetched {len(questions_data)} questions.")
    except requests.exceptions.RequestException as e:
        print(f"Error fetching questions: {e}")
        return f"Error fetching questions: {e}", None
    except requests.exceptions.JSONDecodeError as e:
-        print(f"Error decoding JSON response from questions endpoint: {e}")
-        print(f"Response text: {response.text[:500]}")
-        return f"Error decoding server response for questions: {e}", None
+        print(f"Error decoding JSON response from questions endpoint: {e}")
+        print(f"Response text: {response.text[:500]}")
+        return f"Error decoding server response for questions: {e}", None
    except Exception as e:
        print(f"An unexpected error occurred fetching questions: {e}")
        return f"An unexpected error occurred fetching questions: {e}", None
@@ -254,14 +306,14 @@ def run_and_submit_all( profile: gr.OAuthProfile | None):
            answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
            results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
        except Exception as e:
-            print(f"Error running agent on task {task_id}: {e}")
-            results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": f"AGENT ERROR: {e}"})
+            print(f"Error running agent on task {task_id}: {e}")
+            results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": f"AGENT ERROR: {e}"})

    if not answers_payload:
        print("Agent did not produce any answers to submit.")
        return "Agent did not produce any answers to submit.", pd.DataFrame(results_log)

-    # 4. Prepare Submission
+    # 4. Prepare Submission
    submission_data = {"username": username.strip(), "agent_code": agent_code, "answers": answers_payload}
    status_update = f"Agent finished. Submitting {len(answers_payload)} answers for user '{username}'..."
    print(status_update)
@@ -312,7 +364,7 @@ def run_and_submit_all( profile: gr.OAuthProfile | None):

# --- Build Gradio Interface using Blocks ---
with gr.Blocks() as demo:
-    gr.Markdown("# Basic Agent Evaluation Runner (Attempt #
+    gr.Markdown("# Basic Agent Evaluation Runner (Attempt #3)")
    gr.Markdown(
        """
        **Instructions:**
@@ -342,10 +394,10 @@ with gr.Blocks() as demo:
    )

if __name__ == "__main__":
-    print("\n" + "-"*30 + " App Starting " + "-"*30)
+    print("\n" + "-" * 30 + " App Starting " + "-" * 30)
    # Check for SPACE_HOST and SPACE_ID at startup for information
    space_host_startup = os.getenv("SPACE_HOST")
-    space_id_startup = os.getenv("SPACE_ID")
+    space_id_startup = os.getenv("SPACE_ID")  # Get SPACE_ID at startup

    if space_host_startup:
        print(f"✅ SPACE_HOST found: {space_host_startup}")
@@ -353,14 +405,14 @@ if __name__ == "__main__":
    else:
        print("ℹ️ SPACE_HOST environment variable not found (running locally?).")

-    if space_id_startup:
+    if space_id_startup:  # Print repo URLs if SPACE_ID is found
        print(f"✅ SPACE_ID found: {space_id_startup}")
        print(f" Repo URL: https://huggingface.co/spaces/{space_id_startup}")
        print(f" Repo Tree URL: https://huggingface.co/spaces/{space_id_startup}/tree/main")
    else:
        print("ℹ️ SPACE_ID environment variable not found (running locally?). Repo URL cannot be determined.")

-    print("-"*(60 + len(" App Starting ")) + "\n")
+    print("-" * (60 + len(" App Starting ")) + "\n")

    print("Launching Gradio Interface for Basic Agent Evaluation...")
-    demo.launch(debug=True, share=False)
+    demo.launch(debug=True, share=False)
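For quick local verification outside the Space, a hypothetical harness like the following (not part of the commit) exercises the agent end to end without going through Gradio or the scoring endpoint; it assumes app.py is on the import path and that HF_TOKEN is exported if the primary model requires authentication:

# Hypothetical local smoke test for the agent defined in app.py.
from app import BasicAgent

if __name__ == "__main__":
    agent = BasicAgent()
    print(agent("What is the capital of France?"))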