from llama_index.llms.huggingface_api import HuggingFaceInferenceAPI
from llama_index.core.agent.workflow import AgentWorkflow
from llama_index.core.tools import FunctionTool

from langchain_community.tools import DuckDuckGoSearchRun, WikipediaQueryRun
from langchain_community.utilities import WikipediaAPIWrapper
from langchain_experimental.tools.python.tool import PythonREPLTool
from langchain_community.document_loaders import YoutubeLoader


def search_duckduckgo(query: str) -> str:
    """Use DuckDuckGo to search the internet."""
    return DuckDuckGoSearchRun().run(query)


def search_wikipedia(query: str) -> str:
    """Use Wikipedia to look up facts."""
    # WikipediaQueryRun needs a real WikipediaAPIWrapper, not None.
    return WikipediaQueryRun(api_wrapper=WikipediaAPIWrapper()).run(query)


def run_python(code: str) -> str:
    """Execute Python code and return output."""
    return PythonREPLTool().run(code)


def get_youtube_transcript(url: str) -> str:
    """Extract transcript from YouTube video."""
    loader = YoutubeLoader.from_youtube_url(url, add_video_info=False)
    docs = loader.load()
    return " ".join(doc.page_content for doc in docs)


# Wrap the plain functions as LlamaIndex tools the agent can call.
TOOLS = [
    FunctionTool.from_defaults(search_duckduckgo),
    FunctionTool.from_defaults(search_wikipedia),
    FunctionTool.from_defaults(run_python),
    FunctionTool.from_defaults(get_youtube_transcript),
]


# LLM served through the Hugging Face Inference API.
llm = HuggingFaceInferenceAPI(model_name="Qwen/Qwen2.5-Coder-32B-Instruct")


agent = AgentWorkflow.from_tools_or_functions(
    TOOLS,
    llm=llm,
    system_prompt="You are a helpful and smart AI agent that solves tasks using reasoning and external tools.",
)


from llama_index.core.workflow import Context

# A Context keeps the agent's state across runs.
ctx = Context(agent)


async def answer_question(question: str) -> str:
    """Run the agent on a single question."""
    # AgentWorkflow.run is awaitable; pass the shared context so state persists.
    response = await agent.run(user_msg=question, ctx=ctx)
    return str(response)
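

# Minimal usage sketch (assumption: this module is run directly as a script;
# the sample question below is purely illustrative).
if __name__ == "__main__":
    import asyncio

    sample_question = "What is the capital of Australia?"
    print(asyncio.run(answer_question(sample_question)))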