"""
HuggingFace and Gradio Agent Template
Requirements:
pip install -r requirements.txt
"""
import datetime
import os

import gradio as gr
import pytz
import yaml
from huggingface_hub import InferenceClient
from smolagents import CodeAgent, HfApiModel, tool

from Gradio_UI import GradioUI
from tools.final_answer import FinalAnswerTool
# Example requirements.txt content (save this separately)
REQUIREMENTS = """
gradio>=4.0.0
huggingface-hub>=0.19.0
smolagents
pytz
pyyaml
"""
# Basic working tool example
@tool
def calculator(operation: str) -> str:
    """A simple calculator tool that safely evaluates basic math expressions.

    Args:
        operation: A math expression using only digits, spaces, parentheses and + - * / .

    Returns:
        str: The evaluated result or an error message.
    """
    try:
        allowed_chars = set("0123456789+-*/ .()")
        if not all(c in allowed_chars for c in operation):
            return "Error: Only basic math operations allowed"
        # eval is restricted to arithmetic characters and an empty builtins namespace
        result = eval(operation, {"__builtins__": {}})
        return f"Result: {result}"
    except Exception as e:
        return f"Error calculating {operation}: {str(e)}"
@tool
def get_time(timezone: str = "UTC") -> str:
    """Get the current time in a specified timezone.

    Args:
        timezone: An IANA timezone name such as "UTC" or "Europe/Paris".

    Returns:
        str: The formatted current time or an error message.
    """
    try:
        tz = pytz.timezone(timezone)
        current_time = datetime.datetime.now(tz)
        return f"Current time in {timezone}: {current_time.strftime('%Y-%m-%d %H:%M:%S %Z')}"
    except Exception as e:
        return f"Error getting time for {timezone}: {str(e)}"
# Example HuggingFace tool
@tool
def text_generation(prompt: str) -> str:
    """Generate text using a HuggingFace model.

    Args:
        prompt: Text prompt for generation

    Returns:
        str: Generated text or error message
    """
    try:
        # Using the HF Inference API
        client = InferenceClient()
        # You can change the model to any text-generation model available on HF
        response = client.text_generation(
            prompt,
            model="google/gemma-7b-it",  # Example model
            max_new_tokens=100,
            temperature=0.7,
        )
        return response
    except Exception as e:
        return f"Error generating text: {str(e)}"
# Create default prompts.yaml
DEFAULT_PROMPTS = """
system_prompt: |-
  You are an expert assistant who can solve tasks using Python code and available tools.
  You proceed step by step using 'Thought:', 'Code:', and 'Observation:' sequences.

  Here's an example:

  Task: "Calculate 23 * 45 and generate a short story about the number"

  Thought: First, I'll calculate the multiplication.
  Code:
  ```py
  result = calculator("23 * 45")
  print(result)
  ```<end_code>
  Observation: Result: 1035

  Thought: Now I'll generate a short story about this number.
  Code:
  ```py
  story = text_generation(f"Write a very short story about the number {1035}")
  final_answer(f"The calculation result is 1035.\\nHere's a story about it:\\n{story}")
  ```<end_code>

  You have access to these tools:
  - calculator: Evaluates basic math expressions
  - get_time: Gets current time in any timezone
  - text_generation: Generates text using a HuggingFace model
  - final_answer: Returns the final answer to the user

  Rules:
  1. Always use 'Thought:', 'Code:', and end with '<end_code>'
  2. Only use defined variables
  3. Pass arguments directly to tools
  4. Use print() to save intermediate results
  5. End with the final_answer tool

[... rest of the prompts.yaml content remains the same ...]
"""
def ensure_files():
    """Create necessary files if they don't exist."""
    if not os.path.exists("prompts.yaml"):
        with open("prompts.yaml", "w") as f:
            f.write(DEFAULT_PROMPTS)
    if not os.path.exists("requirements.txt"):
        with open("requirements.txt", "w") as f:
            f.write(REQUIREMENTS)
def initialize_agent() -> CodeAgent:
    """Initialize and return a working CodeAgent."""
    # Ensure necessary files exist
    ensure_files()

    # Initialize tools
    final_answer = FinalAnswerTool()

    # Initialize model
    model = HfApiModel(
        max_tokens=2096,
        temperature=0.5,
        model_id='Qwen/Qwen2.5-Coder-32B-Instruct',
        custom_role_conversions=None,
    )

    # Load prompts
    with open("prompts.yaml", "r") as f:
        prompt_templates = yaml.safe_load(f)

    # Create agent
    agent = CodeAgent(
        model=model,
        tools=[
            final_answer,
            calculator,
            get_time,
            text_generation,
            # Add new tools here
        ],
        max_steps=6,
        verbosity_level=1,
        grammar=None,
        planning_interval=None,
        name=None,
        description=None,
        prompt_templates=prompt_templates,
    )
    return agent
def main():
    """Run the agent with the Gradio UI."""
    try:
        agent = initialize_agent()
        GradioUI(agent).launch()
    except Exception as e:
        print(f"Error starting agent: {str(e)}")
        raise


if __name__ == "__main__":
    main()
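# To run locally: `python app.py` launches the Gradio UI.
# Quick smoke test without the UI (a minimal sketch):
#   agent = initialize_agent()
#   print(agent.run("What time is it in Asia/Tokyo?"))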