# agent.py
from llama_index.llms.huggingface_api import HuggingFaceInferenceAPI
from llama_index.core.agent.workflow import AgentWorkflow
from llama_index.core.tools import FunctionTool
from langchain_community.tools import DuckDuckGoSearchRun, WikipediaQueryRun
from langchain_community.utilities import WikipediaAPIWrapper
from langchain_experimental.tools.python.tool import PythonREPLTool
from langchain_community.document_loaders import YoutubeLoader
# Define all tool functions with type annotations
def search_duckduckgo(query: str) -> str:
    """Use DuckDuckGo to search the internet."""
    return DuckDuckGoSearchRun().run(query)


def search_wikipedia(query: str) -> str:
    """Use Wikipedia to look up facts."""
    return WikipediaQueryRun(api_wrapper=WikipediaAPIWrapper()).run(query)


def run_python(code: str) -> str:
    """Execute Python code and return its output."""
    return PythonREPLTool().run(code)


def get_youtube_transcript(url: str) -> str:
    """Extract the transcript from a YouTube video."""
    loader = YoutubeLoader.from_youtube_url(url, add_video_info=False)
    docs = loader.load()
    return " ".join(doc.page_content for doc in docs)

# Build tool wrappers
TOOLS = [
    FunctionTool.from_defaults(search_duckduckgo),
    FunctionTool.from_defaults(search_wikipedia),
    FunctionTool.from_defaults(run_python),
    FunctionTool.from_defaults(get_youtube_transcript),
]

# Load LLM
llm = HuggingFaceInferenceAPI(model_name="Qwen/Qwen2.5-Coder-32B-Instruct")
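# Note (assumption about client defaults): Inference API calls generally need a
# Hugging Face token, picked up from the local HF login / HF_TOKEN environment
# variable or passed explicitly via the token argument.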
# Create LlamaIndex agent
agent = AgentWorkflow.from_tools_or_functions(
    TOOLS,
    llm=llm,
    system_prompt=(
        "You are a helpful and smart AI agent that solves tasks "
        "using reasoning and external tools."
    ),
)

# Optional: support context (stateful runs)
from llama_index.core.workflow import Context

ctx = Context(agent)


async def answer_question(question: str) -> str:
    """Run the agent on a single question, reusing the shared context."""
    response = await agent.run(user_msg=question, ctx=ctx)
    return str(response)
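

# Example usage (illustrative sketch): the sample question is a placeholder,
# and running this requires Hugging Face Inference API credentials plus the
# tool dependencies installed.
if __name__ == "__main__":
    import asyncio

    print(asyncio.run(answer_question("What is the capital of France?")))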