# Python's OS interface for accessing environment variables
import os
# Introspection utilities; handy if you want to auto-wrap functions as tools later.
import inspect
# HTTP client for making REST calls to endpoints
import requests
# Parses CSV/Excel files
import pandas as pd
# Gradio - provides the web front-end you see in the Space: text boxes, logs, the "Run Agent" button, etc.
import gradio as gr
# smolagents - minimalist agent framework for LLMs with tools
# CodeAgent - orchestrates the ReAct loop and logs each step
# Tool - a base class for custom tools (there is also a @tool decorator)
# InferenceClientModel - wrapper for HF's Serverless Inference API so you don't need to stand up your own TGI/LLM endpoint
from smolagents import CodeAgent, DuckDuckGoSearchTool, Tool, InferenceClientModel
# Programmatic huggingface-cli login, so the app can pull private models, call paid-tier inference, and push artefacts
from huggingface_hub import login
# Quick helper to pull LangChain's built-in tools so you can blend them with smolagents tools if you wish.
from langchain.agents import load_tools

# --- Constants ---
# Unit 4 scoring micro-service where your agent submits answers and receives a JSON score.
DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"


# --- Basic Agent Definition ---
# ---- THIS IS WHERE YOU CAN BUILD WHAT YOU WANT ----
class BasicAgent:
    def __init__(self):
        # Pull an HF access token from the Space's secrets or your local shell so you can
        # download private models, call paid-tier Inference endpoints, and push artefacts.
        hf_token = os.getenv("HUGGINGFACE_HUB_TOKEN") or os.getenv("HF_TOKEN")

        # If a token is available, log in to the HF Hub with it; otherwise fall back to the cached CLI login.
        if hf_token:
            login(token=hf_token)
        else:
            try:
                login()
            except Exception as e:
                # Helpful, course-style message
                raise RuntimeError(
                    "Authentication failed. Please do one of the following:\n"
                    "1. Run 'huggingface-cli login' in your terminal, or\n"
                    "2. Set the HUGGINGFACE_HUB_TOKEN environment variable with your token, or\n"
                    "3. Get a token from https://huggingface.co/settings/tokens"
                ) from e

        # Initialize the model.
        # InferenceClientModel wraps the serverless inference endpoint for the chosen model
        # and handles throttling, batching, and streaming under the hood.
        self.model = InferenceClientModel("Qwen/Qwen2.5-Coder-32B-Instruct")

        # Add a first tool.
        # DuckDuckGoSearchTool gives the agent web search, so it can pull fresh facts during its reasoning loop.
        self.search_tool = DuckDuckGoSearchTool()

        # smolagents' flagship class. CodeAgent follows a ReAct-style loop: it literally writes
        # Python code, executes it in a sandbox, inspects the result, then decides its next step.
        self.agent = CodeAgent(
            model=self.model,
            tools=[self.search_tool],
            # Drops in a small standard library (Python REPL, JSON loader, etc.)
            # so you can solve many tasks without defining anything else.
            add_base_tools=True,  # python_repl, browser, math, etc.
        )

        # Send a single "bootstrap" run whose only job is to lock in the behaviour rules:
        self.response = self.agent.run(
            """
            You are a general AI assistant. I will ask you a question. Report your thoughts, and
            finish your answer with the following template: FINAL ANSWER: [YOUR FINAL ANSWER].
            YOUR FINAL ANSWER should be a number OR as few words as possible OR a comma separated
            list of numbers and/or strings. If you are asked for a number, do not use commas to
            write your number, nor units such as $ or percent signs, unless specified otherwise.
            If you are asked for a string, do not use articles or abbreviations (e.g. for cities),
            and write the digits in plain text unless specified otherwise. If you are asked for a
            comma separated list, apply the above rules depending on whether the element to be put
            in the list is a number or a string.
            You have access to the following tools:
            Tool Name: search_tool, description: lets you search and browse the internet for the
            most up-to-date information available. If you require more tools to get a correct
            answer, create your own tools to utilize.
            """
        )
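

# The import comments above mention smolagents' Tool base class and @tool decorator, and the
# bootstrap prompt tells the model it may create its own tools. Below is a minimal sketch of
# what a custom tool could look like; the tool name, its body, and the idea of passing it to
# CodeAgent alongside the search tool are illustrative assumptions, not part of the original
# course template.
from smolagents import tool


@tool
def fetch_url_text(url: str) -> str:
    """Fetch a web page or API endpoint and return its raw text body.

    Args:
        url: The HTTP(S) address to request.
    """
    # Hypothetical helper for illustration only; BasicAgent above does not use it.
    return requests.get(url, timeout=30).text


# Such a tool could then be handed to the agent next to the search tool, e.g.:
#   CodeAgent(model=self.model, tools=[self.search_tool, fetch_url_text], add_base_tools=True)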