# import smolagents
from smolagents import CodeAgent, HfApiModel, InferenceClientModel, WebSearchTool
import math
import os
import numpy
import xlrd
#model_id = "Qwen/Qwen2.5-Coder-32B-Instruct"
#model_id = 'meta-llama/Llama-3.3-70B-Instruct'
#model = HfApiModel(model_id=model_id, token="HUGGINGFACEHUB_API_TOKEN")
#agent = CodeAgent(tools=[], model=model, add_base_tools=True)
#max_steps=5
#import os
class BasicAgent:
    """Adapts smolagents.CodeAgent to the HF course template API."""

    def __init__(self):
        model_id = "meta-llama/Meta-Llama-3-70B-Instruct"   # correct repo name
        hf_token = os.getenv("HUGGINGFACEHUB_API_TOKEN")    # read the real secret
        if not hf_token:
            raise RuntimeError("HUGGINGFACEHUB_API_TOKEN not set in Space secrets")
        model = HfApiModel(model_id=model_id, token=hf_token)
        # add_base_tools=True already provides search, the Python interpreter, etc.
        self.agent = CodeAgent(tools=[], model=model, add_base_tools=True)

    def __call__(self, question: str) -> str:
        """One question in → one plain-text answer out."""
        # Replace .run with whatever method actually returns the answer string.
        return self.agent.run(question)
# agent.run(
#     "At what temperature and for how long should I bake French baguettes made with type 65 flour?",
# )
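
# Quick local smoke test -- a minimal sketch, not part of the course template.
# It assumes HUGGINGFACEHUB_API_TOKEN is already exported in the environment and
# that CodeAgent.run returns the final answer as a plain string; the sample
# question below is purely illustrative.
if __name__ == "__main__":
    demo_agent = BasicAgent()
    sample_question = "What is the capital of France?"  # hypothetical example
    print(demo_agent(sample_question))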