# AgentDemo / app.py
# Python's OS interface for accessing environment variables
import os
# Introspection utilities; handy if you want to auto-wrap plain functions as tools later.
import inspect
# HTTP client for making REST calls to the scoring endpoints
import requests
# Parses CSV/Excel files
import pandas as pd
# Gradio - provides the web front-end you see in the Space: text boxes, logs, the "Run Agent" button, etc.
import gradio as gr
# smolagents - minimalist agent framework for LLMs with tools
# CodeAgent - orchestrates a ReAct-style loop and logs each step
# Tool - a base class for custom tools (smolagents also ships a @tool decorator)
# InferenceClientModel - wrapper for HF's Serverless Inference API so you don't need to stand up your own TGI/LLM endpoint
from smolagents import CodeAgent, DuckDuckGoSearchTool, Tool, InferenceClientModel
# Programmatic huggingface-cli login, so the app can pull private models, call paid-tier inference, and push artefacts
from huggingface_hub import login
# Quick helper to pull LangChain's built-in tools so you can blend them with smolagents tools if you wish (see the adapter sketch after the agent is constructed below).
from langchain.agents import load_tools
# Configuration constant
# Unit 4 scoring micro-service where your agent submits answers and receives a JSON score.
# --- Constants ---
DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
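# Illustrative, hedged sketch (not wired into the agent below): one way to define a custom
# smolagents tool by subclassing the imported Tool base class. The tool name, inputs, and
# behaviour here are assumptions chosen purely to demonstrate the pattern the bootstrap
# prompt alludes to ("create your own tools").
class FileSizeTool(Tool):
    name = "file_size"
    description = "Returns the size of a local file in bytes."
    inputs = {"path": {"type": "string", "description": "Path to the file on disk."}}
    output_type = "string"

    def forward(self, path: str) -> str:
        # Delegate to the standard library; any OSError surfaces to the agent as a tool failure.
        return str(os.path.getsize(path))
# To use it, instantiate it and pass it alongside the search tool, e.g. tools=[FileSizeTool(), ...].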
# --- Basic Agent Definition ---
# ---- THIS IS WHERE YOU CAN BUILD WHAT YOU WANT ----
# This class is a ready-to-run wrapper that:
# 1. Authenticates to the Hub
# 2. Spins up a server-side Qwen-32B LLM.
# 3. Gives it a DuckDuckGo search plug-in plus smolagents' standard library
# 4. Primes it with strict grading instructions.
# 5. Exposes a clean, callable interface for whatever front-end (Gradio, FastAPI, etc.) you bolt on.
class BasicAgent:
def __init__(self):
        # Pull an HF access token from the Space's secrets or your local shell so the app can download private models, call paid-tier Inference endpoints, and push artefacts.
hf_token = os.getenv("HUGGINGFACE_HUB_TOKEN") or os.getenv("HF_TOKEN")
        # If a token was found, log in to the HF Hub with it
if hf_token:
login(token=hf_token)
else:
try:
login()
except Exception as e:
raise Exception(
# helpful, course-style message
"Authentication failed. Please enter:\n"
"1. Run 'huggingface-cli login' in your terminal, or\n"
"2. Set HUGGINGFACE_HUB_TOKEN environment variable with your token, or\n"
"3. Get a token from https://huggingface.co/settings/tokens"
) from e
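        # Hedged, optional sanity check: to confirm the login actually worked before spending
        # inference credits, huggingface_hub's whoami() can be called here. Left commented out
        # because it adds a network round-trip on every agent construction.
        # from huggingface_hub import whoami
        # print(f"Logged in to the Hub as: {whoami().get('name')}")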
        # Wraps the serverless inference endpoint for the chosen model
# Initialize the model
# InferenceClientModel handles throttling, batching, and streaming under the hood
        self.model = InferenceClientModel("Qwen/Qwen2.5-Coder-32B-Instruct")
# Add a first tool
# Initialize the search tool
        # DuckDuckGoSearchTool - gives the agent web-search powers so it can pull fresh facts during its reasoning loop.
self.search_tool = DuckDuckGoSearchTool()
        # smolagents' flagship class -
        # CodeAgent follows a ReAct-style loop: it literally writes Python code, executes it in a sandbox, inspects the result, then decides its next step
        self.agent = CodeAgent(
            model=self.model,
            tools=[self.search_tool],
            # add_base_tools drops in smolagents' small default toolbox so many tasks can be solved without defining any extra tools here.
            add_base_tools=True,
        )
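        # Hedged sketch of the LangChain blending mentioned next to the imports above:
        # smolagents can wrap a LangChain tool via Tool.from_langchain(). The "serpapi" tool
        # and its API-key environment variable are assumptions for illustration only, so the
        # lines stay commented out.
        # lc_tool = load_tools(["serpapi"], serpapi_api_key=os.getenv("SERPAPI_API_KEY"))[0]
        # self.agent.tools[lc_tool.name] = Tool.from_langchain(lc_tool)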
        # Send a single "bootstrap" run whose only job is to lock in behaviour rules:
        # The returned text is captured in self.response.
self.response = self.agent.run(
"""
You are a general AI assistant.
I will ask you a question. Report your thoughts, and finish your answer with the following template: [FINAL ANSWER].
YOUR FINAL ANSWER should be a number OR as few words as possible OR a comma separated list of numbers and/or strings.
If you are asked for a number, do not use commas to write your number, nor units such as $ or percent signs, unless specified otherwise.
If you are asked for a string, do not use articles or abbreviations (e.g. for cities), and write digits in plain text unless specified otherwise.
If you are asked for a comma separated list, apply the above rules depending on whether each element of the list is a number or a string.
You have access to the following tools:
Tool Name: search_tool, description: lets you search and browse the internet to access the most up-to-date information available.
If you require more tools to get a correct answer, create your own tools to utilize.
""")
# Turning BasicAgent into a callable object
# It means you can drop it straight into Gradio (or any other framework) without wrapping it in a standalone function.
# Debug prints show the round-trip in the server logs.
def __call__(self, question: str) -> str:
print(f"Agent received question:")
response = self.agent.run(question)
# the reply is generated on-the-fly, not hard coded.
print(f"Agent returning answer: {response}")
return response
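# Hedged usage sketch: because BasicAgent is callable, a quick local smoke test looks like the
# commented lines below. It is not executed here since it would spend inference credits, and
# the sample question is an arbitrary placeholder.
# if __name__ == "__main__":
#     demo_agent = BasicAgent()
#     print(demo_agent("What is the capital of France?"))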
# 1. Check if the user is logged in
# 2. Download questions from a grading API.
# 3. Use the BasicAgent to generate answers
# 4. Submit those answers back to the API.
# 5. Return the grading results + a full log for UI display (e.g. Gradio Table)
# Includes detailed logging, robust error handling, and submission payload formatting
def run_and_submit_all(profile: gr.OAuthProfile | None):
"""
    Fetches all questions, runs the BasicAgent on them, submits all answers, and displays the results.
"""
# --- Determine HF Space Runtime URL and Repo URL ---
# Authenticate user and runtime info
# Grabbing space_id from the environment lets the app dynamically construct a URL to your codebase.
    # This will be included in the submission for transparency (important in peer-reviewed courses).
space_id = os.getenv("SPACE_ID") # Get the SPACE_ID for sending link to the code
# If the gradio OAuth profile object is present, extract the username.
if profile:
username = f"{profile.username}"
print(f"User logged in: {username}")
# Otherwise, early exit with a friendly error message
else:
print("User not logged in.")
return "Please login to Hugging Face with the button.",None
    # --- Prepare API endpoints ---
    # Uses the provided scoring endpoint (defaulting to the course's hosted backend)
    # Constructs two URLs:
    api_url = DEFAULT_API_URL
    # URL to fetch the question bank.
    questions_url = f"{api_url}/questions"
    # URL to POST answers for grading
    submit_url = f"{api_url}/submit"
    # 1. Instantiate Agent (modify this part to create your agent)
# Tries to spin up your BasicAgent class from earlier.
# Includes token validation, model loading, tool setup, and system prompt injection.
# If this fails, the app gracefully exits, returning a user-visible error.
try:
agent = BasicAgent()
except Exception as e:
print(f"Error instantiating agent: {e}")
return f"Error initialiazing agent: {e}", None
    # In the case of an app running as an HF Space, this link points to your codebase
    # (useful for others, so please keep it public)
    # Builds a link to your code repo on the HF Hub (public Space)
    agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main"
    # Gets submitted with the answers for transparency
print(agent_code)
# 2. Fetch Questions
# --- FETCH QUESTIONS FROM THE BACKEND ---
print(f"Fetching questions from: {questions_url}")
# Tries to GET the questions from the course's scoring server
try:
        # Timeout and error handling ensure the app does not hang or crash.
        response = requests.get(questions_url, timeout=15)
questions_data = response.json()
        # handles edge cases like empty responses, malformed JSON, and network errors
# Empty response handling
if not questions_data:
print("Fetched questions list is empty.")
return "Fetched questions list is empty or invalid format.", None
print(f"Fetched {len(question_data) questions.}")
except requests.exceptions,RequestException as e:
print(f"Error fetching questions: {e}")
return f"Error fetching questions: {e}", None
    except requests.exceptions.JSONDecodeError as e:
print(f"Error decoding JSON response from questions endpoint: {e}")
print(f"Response text: {response.text[:500]}")
return f"Error decoding server response for questions: {e}, None"
except Exception as e:
print(f"An unexpected error occured fetching questions: {e}")
return f"An unexpected error occurred fetching questions: {e}", None
# 3. Run your agent.
# Loop through questions and generate answers
results_log = [] # Used to make a DataFrame for UI display (question + answer)
answers_payload = [] # sent to grading API in the final submission
for item in questions_data:
task_id = item.get("task_id")
question_text = item.get("question")
if not task_id or question_text is None:
print(f"Skipping item with missing task_id or question: {item}")
continue
try:
submitted_answer = agent(question_text)
            answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
except Exception as e:
print(f"Erron running agent on task {task_id}: {e}")
results_log.append