import os
import gradio as gr
import requests
import inspect
import pandas as pd
from smolagents import tool, Tool, CodeAgent, DuckDuckGoSearchTool, HfApiModel, ToolCallingAgent, VisitWebpageTool, SpeechToTextTool, FinalAnswerTool
from dotenv import load_dotenv
import heapq
from collections import Counter
import re
from io import BytesIO
from youtube_transcript_api import YouTubeTranscriptApi
from langchain_community.tools.tavily_search import TavilySearchResults
from langchain_community.document_loaders import WikipediaLoader
from langchain_community.utilities import WikipediaAPIWrapper
from langchain_community.document_loaders import ArxivLoader
# (Keep Constants as is)
# --- Constants ---
DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"

# Load environment variables
load_dotenv()
import io
import contextlib
import traceback
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
class CodeLlamaTool(Tool):
    name = "code_llama_tool"
    description = "Solves reasoning/code questions using Meta Code Llama 7B Instruct"
    inputs = {
        "question": {
            "type": "string",
            "description": "The question requiring a code-based or reasoning-based solution"
        }
    }
    output_type = "string"

    def __init__(self):
        super().__init__()  # initialize the smolagents Tool base class
        self.model_id = "codellama/CodeLlama-7b-Instruct-hf"
        token = os.getenv("HF_TOKEN")
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_id, token=token)
        self.model = AutoModelForCausalLM.from_pretrained(
            self.model_id,
            device_map="auto",
            torch_dtype="auto",
            token=token
        )
        self.pipeline = pipeline(
            "text-generation",
            model=self.model,
            tokenizer=self.tokenizer,
            max_new_tokens=512,
            temperature=0.2,
            truncation=True
        )
    def forward(self, question: str) -> str:
        prompt = f"""You are an AI that uses Python code to answer questions.
Question: {question}
Instructions:
- If solving requires code, use a block like <tool>code</tool>.
- Always end with <final>FINAL ANSWER</final> containing the final number or string.
Example:
Question: What is 5 * sqrt(36)?
Answer:
<tool>
import math
print(5 * math.sqrt(36))
</tool>
<final>30.0</final>
Answer:"""
        response = self.pipeline(prompt)[0]["generated_text"]
        return self.parse_and_execute(response)
    def parse_and_execute(self, response: str) -> str:
        try:
            # Extract and run code if it exists
            if "<tool>" in response and "</tool>" in response:
                code = response.split("<tool>")[1].split("</tool>")[0].strip()
                result = self._run_code(code)
                return f"FINAL ANSWER (code output): {result}"
            # Extract the final result directly
            elif "<final>" in response and "</final>" in response:
                final = response.split("<final>")[1].split("</final>")[0].strip()
                return f"FINAL ANSWER: {final}"
            return f"Could not extract final answer.\n\n{response}"
        except Exception as e:
            return f"Error in parse_and_execute: {str(e)}\n\nFull response:\n{response}"

    def _run_code(self, code: str) -> str:
        buffer = io.StringIO()
        try:
            with contextlib.redirect_stdout(buffer):
                exec(code, {})
            return buffer.getvalue().strip()
        except Exception:
            return f"Error executing code:\n{traceback.format_exc()}"
class WikiSearchTool(Tool):
    name = "wiki_search"
    description = "Search Wikipedia for a query and return up to 2 results."
    inputs = {
        "query": {"type": "string", "description": "The search term for Wikipedia."}
    }
    output_type = "string"

    def forward(self, query: str) -> str:
        search_docs = WikipediaLoader(query=query, load_max_docs=2).load()
        formatted_search_docs = "\n\n---\n\n".join(
            [
                f'<Document source="{doc.metadata.get("source", "Wikipedia")}" page="{doc.metadata.get("page", "")}">\n{doc.page_content}\n</Document>'
                for doc in search_docs
            ]
        )
        return formatted_search_docs
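# Hedged usage sketch (assumes the `wikipedia` package that WikipediaLoader
# relies on is installed; the query is illustrative only):
#
#   print(WikiSearchTool().forward("Alan Turing"))
#   # -> up to two <Document ...>...</Document> blocks separated by "---"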
class StringReverseTool(Tool):
    name = "reverse_message"
    description = "When you receive a strange-looking text, try to reverse it and perform the action described in the reversed message."
    inputs = {
        "message": {
            "type": "string",
            "description": "A message that looks strange and can be reversed to get actions to execute."
        }
    }
    output_type = "string"

    def forward(self, message: str) -> str:
        return message[::-1]
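# Example: StringReverseTool().forward("olleh")  # -> "hello"
# The tool only reverses the string; interpreting the reversed
# instruction is left to the calling agent.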
class KeywordsExtractorTool(Tool):
    """Extracts the top 5 keywords from a given text based on frequency."""
    name = "keywords_extractor"
    description = "This tool returns the 5 most frequent keywords that occur in the provided block of text."
    inputs = {
        "text": {
            "type": "string",
            "description": "Text to analyze for keywords.",
        }
    }
    output_type = "string"

    def forward(self, text: str) -> str:
        try:
            all_words = re.findall(r'\b\w+\b', text.lower())
            stopwords = {'a', 'and', 'of', 'is', 'in', 'to', 'the'}
            filtered_words = [w for w in all_words if w not in stopwords]
            word_counts = Counter(filtered_words)
            k = 5
            # heapq.nlargest returns (word, count) pairs; format them as a string
            # to match the declared output_type.
            top_keywords = heapq.nlargest(k, word_counts.items(), key=lambda x: x[1])
            return ", ".join(f"{word} ({count})" for word, count in top_keywords)
        except Exception as e:
            return f"Error during extracting most common words: {e}"
class ParseExcelToJsonTool(Tool):
    """
    A tool for fetching and parsing an Excel file into structured JSON data.
    """
    name = "parse_excel_to_json"
    description = (
        "For a given task_id, fetches an Excel file from a remote URL, "
        "parses its sheets, and returns the data as a structured JSON object. "
        "Each sheet's data is returned as a list of dictionaries, with each dictionary "
        "representing a row (limited to the first 20 rows). "
        "Useful for extracting structured information from Excel files."
    )
    inputs = {
        "task_id": {
            "type": "string",
            "description": "The task ID used to construct the URL for fetching the Excel file.",
        }
    }
    # The tool returns a dictionary; smolagents does not authorize "json" as an
    # output type, so "object" is used for dict-shaped results.
    output_type = "object"

    def forward(self, task_id: str) -> dict:
        """
        Fetches and parses an Excel file from a URL based on the task_id.
        The Tool base class's __call__ dispatches to this method, so no
        __call__ override is needed.
        """
        url = f"https://agents-course-unit4-scoring.hf.space/files/{task_id}"
        try:
            response = requests.get(url, timeout=100)
            response.raise_for_status()  # Raise an HTTPError for bad responses (4xx or 5xx)
            xls_content = pd.ExcelFile(BytesIO(response.content))
            json_sheets = {}
            for sheet_name in xls_content.sheet_names:
                df = xls_content.parse(sheet_name)
                df = df.dropna(how="all")  # Drop rows that are entirely NaN
                # Limit to the first 20 rows for efficiency and to avoid overwhelming the context
                rows = df.head(20).to_dict(orient="records")
                json_sheets[sheet_name] = rows
            return {
                "task_id": task_id,
                "sheets": json_sheets,
                "status": "Success"
            }
        except requests.exceptions.RequestException as e:
            return {
                "task_id": task_id,
                "sheets": {},
                "status": f"Network or HTTP error: {str(e)}"
            }
        except Exception as e:
            return {
                "task_id": task_id,
                "sheets": {},
                "status": f"Error in parsing Excel file: {str(e)}"
            }
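# Hedged usage sketch (the task_id below is a placeholder, not a real task):
#   result = ParseExcelToJsonTool()("some-task-id")
#   if result["status"] == "Success":
#       for sheet, rows in result["sheets"].items():
#           print(sheet, len(rows), "rows")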
from langchain_community.document_loaders import PyMuPDFLoader
from docx import Document as DocxDocument
import openpyxl
class AnalyseAttachmentTool(Tool):
    """
    A tool for analyzing various attachment types (PY, PDF, TXT, DOCX, XLSX)
    and extracting their text content.
    """
    name = "analyze_attachment"
    description = (
        "Analyzes attachments including PY, PDF, TXT, DOCX, and XLSX files and returns text content. "
        "Useful for understanding the content of various document types. "
        "The output is limited to the first 3000 characters for readability."
    )
    inputs = {
        "file_path": {
            "type": "string",
            "description": "Local path to the attachment file (e.g., 'documents/report.pdf').",
        }
    }
    output_type = "string"

    def forward(self, file_path: str) -> str:
        """
        Executes the attachment analysis. This method is called internally by the tool.
        """
        if not os.path.exists(file_path):
            return f"File not found: {file_path}"
        try:
            ext = os.path.splitext(file_path)[1].lower()
            content = ""
            if ext == ".pdf":
                loader = PyMuPDFLoader(file_path)
                documents = loader.load()
                content = "\n\n".join([doc.page_content for doc in documents])
            elif ext in (".txt", ".py"):
                with open(file_path, "r", encoding="utf-8") as file:
                    content = file.read()
            elif ext == ".docx":
                doc = DocxDocument(file_path)
                content = "\n".join([para.text for para in doc.paragraphs])
            elif ext == ".xlsx":
                wb = openpyxl.load_workbook(file_path, data_only=True)
                for sheet in wb:
                    content += f"Sheet: {sheet.title}\n"
                    for row in sheet.iter_rows(values_only=True):
                        content += "\t".join([str(cell) if cell is not None else "" for cell in row]) + "\n"
            else:
                return "Unsupported file format. Please use PY, PDF, TXT, DOCX, or XLSX."
            return content[:3000]
        except Exception as e:
            return f"An error occurred while processing the file: {str(e)}"
    def __call__(self, file_path: str) -> str:
        """
        Makes the instance callable directly, invoking the forward method.
        """
        return self.forward(file_path)
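# Hedged usage sketch (the path is a placeholder):
#   print(AnalyseAttachmentTool()("downloads/report.pdf")[:200])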
import base64
from PIL import Image

# Define image analysis tool
class ImageAnalysisTool(Tool):
    """
    A tool for analyzing images using a hosted Hugging Face model.
    """
    name = "image_analysis"
    description = (
        "Analyzes an image provided via a URL and returns a textual description of its content. "
        "This tool is useful for understanding the visual content of an image."
    )
    inputs = {
        "image_url": {
            "type": "string",
            "description": "The URL of the image to be analyzed (e.g., 'https://example.com/image.jpg').",
        }
    }
    output_type = "string"

    # API_URL could be a class attribute if it is constant, or an instance
    # attribute if it could vary per instance. It is kept inside forward here
    # for directness.
    def forward(self, image_url: str) -> str:
        """
        Executes the image analysis by sending the image URL to the Hugging Face API.
        """
        API_URL = "https://api-inference.huggingface.co/models/llava-hf/llava-1.5-7b-hf"
        # The hosted Inference API generally requires authentication; pass the
        # HF token if one is available in the environment.
        token = os.getenv("HF_TOKEN")
        headers = {"Authorization": f"Bearer {token}"} if token else {}
        try:
            response = requests.post(API_URL, headers=headers, json={"inputs": image_url})
            response.raise_for_status()  # Raise an HTTPError for bad responses (4xx or 5xx)
            # Assume the response is a list of dictionaries, with
            # 'generated_text' as the key for the description.
            data = response.json()
            if data and isinstance(data, list) and 'generated_text' in data[0]:
                return data[0]['generated_text']
            else:
                return f"Unexpected API response format: {response.text}"
        except requests.exceptions.RequestException as e:
            return f"An error occurred during the API request: {e}"
        except IndexError:
            return "API response did not contain expected 'generated_text'."
        except Exception as e:
            return f"An unexpected error occurred: {e}"
    def __call__(self, image_url: str) -> str:
        """
        Makes the instance callable directly, invoking the forward method for convenience.
        """
        return self.forward(image_url)
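# Hedged usage sketch (placeholder URL; a real call needs network access and,
# in practice, a valid HF_TOKEN in the environment):
#   ImageAnalysisTool()("https://example.com/image.jpg")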
class VideoTranscriptionTool(Tool):
    """Fetch transcripts from YouTube videos."""
    name = "transcript_video"
    description = "Fetch the text transcript of a YouTube video, with optional timestamps."
    inputs = {
        "url": {"type": "string", "description": "YouTube video URL or ID"},
        "include_timestamps": {"type": "boolean", "description": "Whether timestamps should be included in the output", "nullable": True}
    }
    output_type = "string"

    def forward(self, url: str, include_timestamps: bool = False) -> str:
        if "youtube.com/watch" in url:
            video_id = url.split("v=")[1].split("&")[0]
        elif "youtu.be/" in url:
            video_id = url.split("youtu.be/")[1].split("?")[0]
        elif len(url.strip()) == 11:  # Bare 11-character video ID
            video_id = url.strip()
        else:
            return f"YouTube URL or ID: {url} is invalid!"
        try:
            transcription = YouTubeTranscriptApi.get_transcript(video_id)
            if include_timestamps:
                formatted_transcription = []
                for part in transcription:
                    timestamp = f"{int(part['start']//60)}:{int(part['start']%60):02d}"
                    formatted_transcription.append(f"[{timestamp}] {part['text']}")
                return "\n".join(formatted_transcription)
            else:
                return " ".join([part['text'] for part in transcription])
        except Exception as e:
            return f"Error in extracting YouTube transcript: {str(e)}"
class BasicAgent:
    def __init__(self):
        try:
            token = os.environ.get("HF_API_TOKEN")
            self.model = HfApiModel(temperature=0.1, token=token)
        except Exception as e:
            raise RuntimeError(f"Error initializing model: {e}")

        try:
            # Initialize built-in tool instances
            self.search_tool = DuckDuckGoSearchTool()
            self.wiki_search_tool = WikiSearchTool()
            self.str_reverse_tool = StringReverseTool()
            self.keywords_extract_tool = KeywordsExtractorTool()
            self.speech_to_text_tool = SpeechToTextTool()
            self.visit_webpage_tool = VisitWebpageTool()
            self.final_answer_tool = FinalAnswerTool()
        except Exception as e:
            raise RuntimeError(f"Error initializing tools: {e}")

        # Custom tools defined above
        self.video_transcription_tool = VideoTranscriptionTool()
        self.image_analysis_tool = ImageAnalysisTool()
        self.analyse_attachment_tool = AnalyseAttachmentTool()
        self.code_llama_tool = CodeLlamaTool()
        self.parse_excel_to_json_tool = ParseExcelToJsonTool()
        system_prompt_template = """
You are my general AI assistant. Your task is to answer the question I asked.
First, provide an explanation of your reasoning, step by step, to arrive at the answer.
Then, return your final answer in a single line, formatted as follows: "FINAL ANSWER: [YOUR FINAL ANSWER]".
[YOUR FINAL ANSWER] should be a number, a string, or a comma-separated list of numbers and/or strings, depending on the question.
If the answer is a number, do not use commas or units (e.g., $, %) unless specified.
If the answer is a string, do not use articles or abbreviations (e.g., for cities), and write digits in plain text unless specified.
If the answer is a comma-separated list, apply the above rules for each element based on whether it is a number or a string.
"""
        # Create web agent with image analysis capability
        try:
            self.web_agent = ToolCallingAgent(
                tools=[
                    self.search_tool,
                    self.visit_webpage_tool,
                    self.image_analysis_tool
                ],
                model=self.model,
                max_steps=10,
                name="web_search_agent",
                description="Runs web searches and analyzes images",
            )
        except Exception as e:
            raise RuntimeError(f"Error initializing web agent: {e}")

        # Create main agent with all capabilities
        try:
            self.agent = CodeAgent(
                model=self.model,
                tools=[
                    self.search_tool,
                    self.wiki_search_tool,
                    self.str_reverse_tool,
                    self.keywords_extract_tool,
                    self.speech_to_text_tool,
                    self.visit_webpage_tool,
                    self.final_answer_tool,
                    self.video_transcription_tool,
                    self.code_llama_tool,
                    self.parse_excel_to_json_tool,
                    self.image_analysis_tool,
                    self.analyse_attachment_tool
                ],
                add_base_tools=True  # Adds smolagents' default tools; check that this doesn't duplicate the ones above.
            )
        except Exception as e:
            raise RuntimeError(f"Error initializing main agent: {e}")

        # Update the system prompt. It is generally better to pass the system
        # prompt directly if possible, or to manage it through the prompt
        # templates defined by smolagents; since smolagents supplies its own
        # system prompt, this appends to it.
        self.agent.prompt_templates["system_prompt"] = self.agent.prompt_templates["system_prompt"] + system_prompt_template
    def __call__(self, question: str) -> str:
        print(f"Agent received question (first 50 chars): {question[:50]}...")
        # Route image-related queries to the web agent; everything else goes to the main agent
        if any(keyword in question.lower() for keyword in ["image", "picture", "photo", "screenshot", "diagram"]):
            print("Using web agent for image-related query")
            answer = self.web_agent.run(question)
        else:
            print("Using main agent")
            answer = self.agent.run(question)
        print(f"Agent returning answer: {answer}")
        return answer
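# Hedged usage sketch (instantiating BasicAgent loads Code Llama locally, so
# this is expensive; the question is a placeholder):
#   agent = BasicAgent()
#   print(agent("What is the capital of France?"))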
def run_and_submit_all(profile: gr.OAuthProfile | None):
    """
    Fetches all questions, runs the BasicAgent on them, submits all answers,
    and displays the results.
    """
    # --- Determine HF Space Runtime URL and Repo URL ---
    space_id = os.getenv("SPACE_ID")  # Get the SPACE_ID for linking to the code

    if profile:
        username = f"{profile.username}"
        print(f"User logged in: {username}")
    else:
        print("User not logged in.")
        return "Please Login to Hugging Face with the button.", None

    api_url = DEFAULT_API_URL
    questions_url = f"{api_url}/questions"
    submit_url = f"{api_url}/submit"

    # 1. Instantiate Agent (modify this part to create your agent)
    try:
        agent = BasicAgent()
    except Exception as e:
        print(f"Error instantiating agent: {e}")
        return f"Error initializing agent: {e}", None
    # For an app running as a Hugging Face Space, this link points to your codebase (useful for others, so please keep it public)
    agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main"
    print(agent_code)
    # 2. Fetch Questions
    print(f"Fetching questions from: {questions_url}")
    try:
        response = requests.get(questions_url, timeout=15)
        response.raise_for_status()
        questions_data = response.json()
        if not questions_data:
            print("Fetched questions list is empty.")
            return "Fetched questions list is empty or invalid format.", None
        print(f"Fetched {len(questions_data)} questions.")
    except requests.exceptions.RequestException as e:
        print(f"Error fetching questions: {e}")
        return f"Error fetching questions: {e}", None
    except requests.exceptions.JSONDecodeError as e:
        print(f"Error decoding JSON response from questions endpoint: {e}")
        print(f"Response text: {response.text[:500]}")
        return f"Error decoding server response for questions: {e}", None
    except Exception as e:
        print(f"An unexpected error occurred fetching questions: {e}")
        return f"An unexpected error occurred fetching questions: {e}", None

    # 3. Run your Agent
    results_log = []
    answers_payload = []
    print(f"Running agent on {len(questions_data)} questions...")
    for item in questions_data:
        task_id = item.get("task_id")
        question_text = item.get("question")
        if not task_id or question_text is None:
            print(f"Skipping item with missing task_id or question: {item}")
            continue
        try:
            submitted_answer = agent(question_text)
            answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
            results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
        except Exception as e:
            print(f"Error running agent on task {task_id}: {e}")
            results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": f"AGENT ERROR: {e}"})

    if not answers_payload:
        print("Agent did not produce any answers to submit.")
        return "Agent did not produce any answers to submit.", pd.DataFrame(results_log)
    # 4. Prepare Submission
    submission_data = {"username": username.strip(), "agent_code": agent_code, "answers": answers_payload}
    status_update = f"Agent finished. Submitting {len(answers_payload)} answers for user '{username}'..."
    print(status_update)

    # 5. Submit
    print(f"Submitting {len(answers_payload)} answers to: {submit_url}")
    try:
        response = requests.post(submit_url, json=submission_data, timeout=60)
        response.raise_for_status()
        result_data = response.json()
        final_status = (
            f"Submission Successful!\n"
            f"User: {result_data.get('username')}\n"
            f"Overall Score: {result_data.get('score', 'N/A')}% "
            f"({result_data.get('correct_count', '?')}/{result_data.get('total_attempted', '?')} correct)\n"
            f"Message: {result_data.get('message', 'No message received.')}"
        )
        print("Submission successful.")
        results_df = pd.DataFrame(results_log)
        return final_status, results_df
    except requests.exceptions.HTTPError as e:
        error_detail = f"Server responded with status {e.response.status_code}."
        try:
            error_json = e.response.json()
            error_detail += f" Detail: {error_json.get('detail', e.response.text)}"
        except requests.exceptions.JSONDecodeError:
            error_detail += f" Response: {e.response.text[:500]}"
        status_message = f"Submission Failed: {error_detail}"
        print(status_message)
        results_df = pd.DataFrame(results_log)
        return status_message, results_df
    except requests.exceptions.Timeout:
        status_message = "Submission Failed: The request timed out."
        print(status_message)
        results_df = pd.DataFrame(results_log)
        return status_message, results_df
    except requests.exceptions.RequestException as e:
        status_message = f"Submission Failed: Network error - {e}"
        print(status_message)
        results_df = pd.DataFrame(results_log)
        return status_message, results_df
    except Exception as e:
        status_message = f"An unexpected error occurred during submission: {e}"
        print(status_message)
        results_df = pd.DataFrame(results_log)
        return status_message, results_df
# --- Build Gradio Interface using Blocks ---
with gr.Blocks() as demo:
    gr.Markdown("# Basic Agent Evaluation Runner")
    gr.Markdown(
        """
        **Instructions:**
        1. Please clone this space, then modify the code to define your agent's logic, the tools, the necessary packages, etc.
        2. Log in to your Hugging Face account using the button below. This uses your HF username for submission.
        3. Click 'Run Evaluation & Submit All Answers' to fetch questions, run your agent, submit answers, and see the score.
        ---
        **Disclaimers:**
        Once you click the "Run Evaluation & Submit All Answers" button, it can take quite some time (this is the time it takes the agent to go through all the questions).
        This space provides a basic setup and is intentionally sub-optimal to encourage you to develop your own, more robust solution. For instance, to avoid the long-running submit button, you could cache the answers and submit them in a separate action, or even answer the questions asynchronously.
        """
    )

    gr.LoginButton()

    run_button = gr.Button("Run Evaluation & Submit All Answers")

    status_output = gr.Textbox(label="Run Status / Submission Result", lines=5, interactive=False)
    # Removed max_rows=10 from the DataFrame constructor
    results_table = gr.DataFrame(label="Questions and Agent Answers", wrap=True)

    run_button.click(
        fn=run_and_submit_all,
        outputs=[status_output, results_table]
    )
if __name__ == "__main__":
    print("\n" + "-"*30 + " App Starting " + "-"*30)
    # Check for SPACE_HOST and SPACE_ID at startup for information
    space_host_startup = os.getenv("SPACE_HOST")
    space_id_startup = os.getenv("SPACE_ID")  # Get SPACE_ID at startup

    if space_host_startup:
        print(f"✅ SPACE_HOST found: {space_host_startup}")
        print(f"   Runtime URL should be: https://{space_host_startup}.hf.space")
    else:
        print("ℹ️ SPACE_HOST environment variable not found (running locally?).")

    if space_id_startup:  # Print repo URLs if SPACE_ID is found
        print(f"✅ SPACE_ID found: {space_id_startup}")
        print(f"   Repo URL: https://huggingface.co/spaces/{space_id_startup}")
        print(f"   Repo Tree URL: https://huggingface.co/spaces/{space_id_startup}/tree/main")
    else:
        print("ℹ️ SPACE_ID environment variable not found (running locally?). Repo URL cannot be determined.")

    print("-"*(60 + len(" App Starting ")) + "\n")
    print("Launching Gradio Interface for Basic Agent Evaluation...")
    demo.launch(debug=True, share=False)