import os
# from transformers import pipeline # No longer directly using pipeline here
# NEW IMPORTS for smolagents
from smolagents import CodeAgent, DuckDuckGoSearchTool
from smolagents import TransformersModel # To use your local Hugging Face model
class GaiaAgent:
    """Agent wrapper pairing a local Hugging Face model with smolagents tooling.

    Builds a ``CodeAgent`` backed by a ``TransformersModel`` and a single
    DuckDuckGo search tool, and exposes one entry point for running tasks.
    """

    def __init__(self, model_id: str = "google/flan-t5-large"):
        """Initialize the LLM backend and the code agent.

        Args:
            model_id: Hugging Face model identifier (default: Flan-T5 large).
        """
        # Flan-T5 is a seq2seq model, so the underlying pipeline is asked
        # for the 'text2text-generation' task explicitly.
        # NOTE(review): device_map="auto" may be needed on memory-constrained
        # GPU setups — confirm against the deployment environment.
        self.llm_model = TransformersModel(
            model_id=model_id,
            task="text2text-generation",
        )
        # Wire the model into a CodeAgent whose only tool is web search.
        self.agent = CodeAgent(
            model=self.llm_model,
            tools=[DuckDuckGoSearchTool()],
            add_base_tools=False,
            verbose=True,
        )

    def process_task(self, task_description: str) -> str:
        """Run the agent on *task_description*; return its answer or an error string.

        Failures are surfaced as text rather than propagated, so callers
        always receive a string.
        """
        try:
            return self.agent.run(task_description)
        except Exception as e:
            return f"An error occurred during agent processing: {e}"