# NOTE(review): the three lines below were a scraped Hugging Face Spaces
# status banner ("Spaces: Sleeping Sleeping"), not code — kept as comments.
import os
# from transformers import pipeline  # No longer directly using pipeline here
# NEW IMPORTS for smolagents
from smolagents import CodeAgent, DuckDuckGoSearchTool
from smolagents import TransformersModel  # To use your local Hugging Face model
class GaiaAgent:
    """Agent for GAIA-style tasks backed by a local Hugging Face model.

    Wraps a smolagents ``CodeAgent`` around a locally loaded LLM
    (``TransformersModel``) with a DuckDuckGo web-search tool.
    """

    def __init__(self, model_id: str = "google/flan-t5-large") -> None:
        """Load the local model and build the code agent.

        Args:
            model_id: Hugging Face model identifier to load locally.
        """
        # Flan-T5 is a seq2seq model, hence the explicit
        # 'text2text-generation' task hint passed through to the pipeline.
        # NOTE(review): confirm the pinned smolagents version accepts a
        # `task` keyword on TransformersModel — recent releases load
        # causal-LM checkpoints only and may reject this kwarg.
        self.llm_model = TransformersModel(
            model_id=model_id,
            task="text2text-generation",
            # device_map="auto" may be needed on GPU or for large checkpoints.
        )
        # NOTE(review): newer smolagents versions take `verbosity_level`
        # rather than `verbose` — verify against the installed version.
        self.agent = CodeAgent(
            model=self.llm_model,
            tools=[DuckDuckGoSearchTool()],
            add_base_tools=False,
            verbose=True,
        )

    def process_task(self, task_description: str) -> str:
        """Run the agent on one task and return its answer as a string.

        This is a top-level boundary: any exception raised by the agent is
        caught and reported as an error string instead of propagating.
        """
        try:
            return self.agent.run(task_description)
        except Exception as e:  # boundary: report, don't crash the caller
            return f"An error occurred during agent processing: {e}"