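"""Improved Agent Evaluation Runner.

Gradio app that runs a tool-calling LangChain agent (ImprovedAgent) against the
agents-course-unit4-scoring API: it fetches questions, answers them with async
batching, rate limiting, and caching, and submits the results for scoring.
"""
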
import asyncio
import inspect
import json
import os
import time
from typing import Any, Dict, List, Optional

import gradio as gr
import pandas as pd
import requests
from dotenv import load_dotenv
from langchain_community.chat_models import ChatHuggingFace
from langchain_community.llms import HuggingFaceEndpoint
from langchain_core.messages import AIMessage, HumanMessage
from langchain_core.tools import StructuredTool

from tools import (absolute, add, divide, exponential, floor_divide,
                   get_current_time_in_timezone, logarithm, modulus, multiply,
                   power, roman_calculator_converter, square_root, subtract,
                   web_search)

# --- Constants ---
DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
MAX_AGENT_ITERATIONS = 15
MAX_CONCURRENT_REQUESTS = 5  # Limit concurrent requests to avoid overwhelming the API

load_dotenv()
HUGGINGFACEHUB_API_TOKEN = os.getenv("HUGGINGFACEHUB_API_TOKEN")

# Global cache for answers
answer_cache = {}
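# answer_cache holds two kinds of entries: "<task_id>_<hash(question)>" -> a single
# cached answer string, and "user_<username>" -> the full answers payload for a user.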


class ImprovedAgent:
    def __init__(self):
        if not HUGGINGFACEHUB_API_TOKEN:
            raise ValueError("Missing Hugging Face API token. Please set HUGGINGFACEHUB_API_TOKEN.")
        print("ImprovedAgent initialized.")

        # Initialize LLM with better parameters
        self.llm = HuggingFaceEndpoint(
            repo_id="Qwen/Qwen2.5-Coder-32B-Instruct",
            huggingfacehub_api_token=HUGGINGFACEHUB_API_TOKEN,
            temperature=0.1,  # Lower temperature for more consistent responses
            max_new_tokens=1024,
            timeout=30,
        )
        self.chat = ChatHuggingFace(llm=self.llm, verbose=False)

        # Initialize tools
        self.tools = [
            multiply, add, subtract, power, divide, modulus,
            square_root, floor_divide, absolute, logarithm,
            exponential, web_search, roman_calculator_converter,
            get_current_time_in_timezone,
        ]
        self.chat_with_tools = self.chat.bind_tools(self.tools)
        print(f"Total tools available: {len(self.tools)}")

        # Create tool mapping for easier access
        self.tool_map = {tool.name: tool for tool in self.tools}

    def _extract_tool_calls(self, response) -> List[Dict]:
        """Extract tool calls from the response."""
        tool_calls = []
        if hasattr(response, 'tool_calls') and response.tool_calls:
            for tool_call in response.tool_calls:
                tool_calls.append({
                    'name': tool_call['name'],
                    'args': tool_call['args'],
                })
        return tool_calls

    def _execute_tool_calls(self, tool_calls: List[Dict]) -> List[str]:
        """Execute tool calls and return results."""
        results = []
        for tool_call in tool_calls:
            tool_name = tool_call['name']
            tool_args = tool_call['args']
            if tool_name in self.tool_map:
                try:
                    tool = self.tool_map[tool_name]
                    result = tool.invoke(tool_args)
                    results.append(f"Tool {tool_name} result: {result}")
                except Exception as e:
                    results.append(f"Tool {tool_name} error: {str(e)}")
            else:
                results.append(f"Unknown tool: {tool_name}")
        return results

    async def answer(self, question: str) -> str:
        """Improved answer method with better error handling and tool usage."""
        print(f"Processing question: {question[:100]}...")
        try:
            # Create system prompt for better instruction following
            system_prompt = """You are a helpful AI assistant with access to various tools.
When answering questions, use the appropriate tools when needed and provide clear, concise answers.
If you need to perform calculations, use the math tools available.
If you need current information, use the web search tool.
Always provide a final answer after using tools."""

            messages = [
                HumanMessage(content=f"{system_prompt}\n\nQuestion: {question}")
            ]

            # Initial response
            response = await asyncio.to_thread(self.chat_with_tools.invoke, messages)

            # Handle tool calls if present
            max_iterations = 3
            iteration = 0
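            # Up to max_iterations tool-calling rounds: run any requested tools,
            # feed the results back to the model, then ask it again for a final answer.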
            while iteration < max_iterations:
                tool_calls = self._extract_tool_calls(response)
                if not tool_calls:
                    break

                # Execute tool calls
                tool_results = self._execute_tool_calls(tool_calls)

                # Add tool results to conversation
                messages.append(AIMessage(content=response.content))
                messages.append(HumanMessage(content=f"Tool results: {'; '.join(tool_results)}. Please provide a final answer based on these results."))

                # Get next response
                response = await asyncio.to_thread(self.chat_with_tools.invoke, messages)
                iteration += 1

            # Extract final answer
            final_answer = response.content.strip()

            # Clean up the response - remove any tool call artifacts
            if "Tool " in final_answer and "result:" in final_answer:
                # Try to extract just the final answer part
                lines = final_answer.split('\n')
                for line in reversed(lines):
                    if line.strip() and not line.startswith('Tool ') and 'result:' not in line:
                        final_answer = line.strip()
                        break

            return final_answer

        except Exception as e:
            print(f"Error in answer method: {e}")
            return f"Error processing question: {str(e)}"

    def answer_sync(self, question: str) -> str:
        """Synchronous version of the answer method."""
        try:
            return asyncio.run(self.answer(question))
        except Exception as e:
            print(f"Error in sync answer: {e}")
            return f"Error: {str(e)}"


async def process_questions_batch(agent, questions_batch, semaphore):
    """Process a batch of questions with rate limiting."""
    results = []

    async def process_single_question(task_id, question):
        async with semaphore:
            try:
                # Check cache first
                cache_key = f"{task_id}_{hash(question)}"
                if cache_key in answer_cache:
                    print(f"Using cached answer for task {task_id}")
                    return task_id, question, answer_cache[cache_key], None

                answer = await agent.answer(question)

                # Cache the result
                answer_cache[cache_key] = answer
                return task_id, question, answer, None
            except Exception as e:
                print(f"Error processing task {task_id}: {e}")
                return task_id, question, None, str(e)

    # Build one coroutine per valid question; the shared semaphore limits concurrency
    tasks = []
    for item in questions_batch:
        task_id = item.get("task_id")
        question_text = item.get("question")
        if task_id and question_text is not None:
            tasks.append(process_single_question(task_id, question_text))

    if tasks:
        results = await asyncio.gather(*tasks, return_exceptions=True)
    return results


async def run_agent_async_improved(agent, questions_data):
    """Improved async processing with batching and caching."""
    results_log, answers_payload = [], []

    # Create semaphore for rate limiting
    semaphore = asyncio.Semaphore(MAX_CONCURRENT_REQUESTS)

    # Process questions in batches
    batch_size = 10
    batches = [questions_data[i:i + batch_size] for i in range(0, len(questions_data), batch_size)]
    print(f"Processing {len(questions_data)} questions in {len(batches)} batches...")

    for i, batch in enumerate(batches):
        print(f"Processing batch {i+1}/{len(batches)} ({len(batch)} questions)...")
        try:
            batch_results = await process_questions_batch(agent, batch, semaphore)

            for result in batch_results:
                if isinstance(result, Exception):
                    print(f"Batch processing error: {result}")
                    continue

                task_id, question, answer, error = result
                if error:
                    print(f"Error in task {task_id}: {error}")
                    results_log.append({
                        "Task ID": task_id,
                        "Question": question[:100] + "..." if len(question) > 100 else question,
                        "Submitted Answer": f"ERROR: {error}"
                    })
                else:
                    answers_payload.append({"task_id": task_id, "submitted_answer": answer})
                    results_log.append({
                        "Task ID": task_id,
                        "Question": question[:100] + "..." if len(question) > 100 else question,
                        "Submitted Answer": answer[:200] + "..." if len(answer) > 200 else answer
                    })

            # Small delay between batches to be respectful
            if i < len(batches) - 1:
                await asyncio.sleep(1)
        except Exception as e:
            print(f"Error processing batch {i+1}: {e}")
            # Continue with next batch
            continue

    return results_log, answers_payload


def cache_answers(profile: gr.OAuthProfile | None):
    """Cache answers without submitting."""
    if not profile:
        return "Please log in to Hugging Face first.", None

    username = profile.username
    print(f"Caching answers for user: {username}")

    # Fetch questions
    api_url = DEFAULT_API_URL
    questions_url = f"{api_url}/questions"
    try:
        response = requests.get(questions_url, timeout=15)
        response.raise_for_status()
        questions_data = response.json()
        if not questions_data:
            return "No questions found.", None
        print(f"Fetched {len(questions_data)} questions for caching.")

        # Initialize agent
        agent = ImprovedAgent()

        # Process questions
        results_log, answers_payload = asyncio.run(run_agent_async_improved(agent, questions_data))

        # Store in global cache with username
        answer_cache[f"user_{username}"] = answers_payload

        status = f"Cached {len(answers_payload)} answers for user {username}. Ready to submit!"
        results_df = pd.DataFrame(results_log)
        return status, results_df
    except Exception as e:
        print(f"Error caching answers: {e}")
        return f"Error caching answers: {e}", None


def submit_cached_answers(profile: gr.OAuthProfile | None):
    """Submit previously cached answers."""
    if not profile:
        return "Please log in to Hugging Face first.", None

    username = profile.username
    cache_key = f"user_{username}"
    if cache_key not in answer_cache:
        return "No cached answers found. Please run 'Cache Answers' first.", None

    answers_payload = answer_cache[cache_key]
    if not answers_payload:
        return "No answers to submit.", None

    # Get space info
    space_id = os.getenv("SPACE_ID")
    agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main" if space_id else "Unknown"

    # Submit
    api_url = DEFAULT_API_URL
    submit_url = f"{api_url}/submit"
    submission_data = {
        "username": username.strip(),
        "agent_code": agent_code,
        "answers": answers_payload
    }
    try:
        print(f"Submitting {len(answers_payload)} cached answers...")
        response = requests.post(submit_url, json=submission_data, timeout=60)
        response.raise_for_status()
        result_data = response.json()
        final_status = (
            f"Submission Successful!\n"
            f"User: {result_data.get('username')}\n"
            f"Overall Score: {result_data.get('score', 'N/A')}% "
            f"({result_data.get('correct_count', '?')}/{result_data.get('total_attempted', '?')} correct)\n"
            f"Message: {result_data.get('message', 'No message received.')}"
        )
        # Clear cache after successful submission
        if cache_key in answer_cache:
            del answer_cache[cache_key]
        return final_status, None
    except Exception as e:
        print(f"Submission error: {e}")
        return f"Submission failed: {e}", None


def run_and_submit_all(profile: gr.OAuthProfile | None):
    """Original function - now improved with better error handling."""
    if not profile:
        return "Please log in to Hugging Face first.", None

    username = profile.username
    print(f"User logged in: {username}")

    api_url = DEFAULT_API_URL
    questions_url = f"{api_url}/questions"
    submit_url = f"{api_url}/submit"

    # Initialize agent
    try:
        agent = ImprovedAgent()
    except Exception as e:
        print(f"Error initializing agent: {e}")
        return f"Error initializing agent: {e}", None

    # Get space info
    space_id = os.getenv("SPACE_ID")
    agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main" if space_id else "Unknown"

    # Fetch questions
    try:
        print(f"Fetching questions from: {questions_url}")
        response = requests.get(questions_url, timeout=15)
        response.raise_for_status()
        questions_data = response.json()
        if not questions_data:
            return "No questions found.", None
        print(f"Fetched {len(questions_data)} questions.")
    except Exception as e:
        print(f"Error fetching questions: {e}")
        return f"Error fetching questions: {e}", None

    # Process questions
    try:
        results_log, answers_payload = asyncio.run(run_agent_async_improved(agent, questions_data))
    except Exception as e:
        print(f"Error processing questions: {e}")
        return f"Error processing questions: {e}", None

    if not answers_payload:
        return "No answers generated.", pd.DataFrame(results_log) if results_log else None

    # Submit answers
    submission_data = {
        "username": username.strip(),
        "agent_code": agent_code,
        "answers": answers_payload
    }
    try:
        print(f"Submitting {len(answers_payload)} answers...")
        response = requests.post(submit_url, json=submission_data, timeout=60)
        response.raise_for_status()
        result_data = response.json()
        final_status = (
            f"Submission Successful!\n"
            f"User: {result_data.get('username')}\n"
            f"Overall Score: {result_data.get('score', 'N/A')}% "
            f"({result_data.get('correct_count', '?')}/{result_data.get('total_attempted', '?')} correct)\n"
            f"Message: {result_data.get('message', 'No message received.')}"
        )
        results_df = pd.DataFrame(results_log)
        return final_status, results_df
    except Exception as e:
        print(f"Submission error: {e}")
        results_df = pd.DataFrame(results_log)
        return f"Submission failed: {e}", results_df


# --- Build Gradio Interface ---
with gr.Blocks(title="Improved Agent Evaluation") as demo:
    gr.Markdown("# Improved Agent Evaluation Runner")
    gr.Markdown(
        """
**Instructions:**

1. Log in to your Hugging Face account using the button below.
2. **Recommended**: Use "Cache Answers" to process all questions first, then "Submit Cached Answers" to submit them.
3. **Alternative**: Use "Run & Submit All" for the original one-step process.

**Improvements:**
- ✅ Async processing with rate limiting
- ✅ Answer caching for faster resubmissions
- ✅ Better error handling and recovery
- ✅ Batch processing to avoid timeouts
- ✅ Improved tool usage and response parsing

---
"""
    )

    gr.LoginButton()

    with gr.Row():
        cache_button = gr.Button("Cache Answers", variant="secondary")
        submit_button = gr.Button("Submit Cached Answers", variant="primary")
        run_all_button = gr.Button("Run & Submit All", variant="secondary")

    status_output = gr.Textbox(label="Status", lines=6, interactive=False)
    results_table = gr.DataFrame(label="Questions and Agent Answers", wrap=True)

    # Wire up the buttons
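    # Note: each handler takes a `gr.OAuthProfile | None` parameter, which Gradio
    # fills in automatically for the logged-in user, so no explicit `inputs` are listed.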
    cache_button.click(
        fn=cache_answers,
        outputs=[status_output, results_table]
    )
    submit_button.click(
        fn=submit_cached_answers,
        outputs=[status_output, results_table]
    )
    run_all_button.click(
        fn=run_and_submit_all,
        outputs=[status_output, results_table]
    )


if __name__ == "__main__":
    print("\n" + "-" * 30 + " Improved App Starting " + "-" * 30)
    space_host = os.getenv("SPACE_HOST")
    space_id = os.getenv("SPACE_ID")

    if space_host:
        print(f"✅ SPACE_HOST: {space_host}")
        print(f"   Runtime URL: https://{space_host}.hf.space")
    else:
        print("ℹ️ Running locally - SPACE_HOST not found.")

    if space_id:
        print(f"✅ SPACE_ID: {space_id}")
        print(f"   Repo URL: https://huggingface.co/spaces/{space_id}")
    else:
        print("ℹ️ SPACE_ID not found.")

    print("-" * 76 + "\n")
    print("Launching Improved Gradio Interface...")
    demo.launch(debug=True, share=False)