import os
from smolagents import CodeAgent, DuckDuckGoSearchTool
from smolagents import TransformersModel
class GaiaAgent:
    def __init__(self, model_id: str = "HuggingFaceH4/zephyr-7b-beta"):  # <-- CHANGE MODEL HERE
        # Local Hugging Face model wrapped for smolagents.
        self.llm_model = TransformersModel(
            model_id=model_id,
            # For Zephyr (a causal LM), the default `AutoModelForCausalLM` works,
            # and `task="text-generation"` is appropriate for the pipeline.
            task="text-generation",
            # You might need device_map="auto" if you hit memory issues or have a GPU:
            # device_map="auto",
        )
        # Code-writing agent with DuckDuckGo web search as its only tool.
        self.agent = CodeAgent(
            model=self.llm_model,
            tools=[DuckDuckGoSearchTool()],
            add_base_tools=False,
            verbose=True,  # note: recent smolagents releases expose this as `verbosity_level`
        )

    def process_task(self, task_description: str) -> str:
        """Run the agent on a single task and return its answer, or an error message."""
        try:
            response = self.agent.run(task_description)
            return response
        except Exception as e:
            return f"An error occurred during agent processing: {e}"