dawid-lorek committed on
Commit
59ff18d
·
verified ·
1 Parent(s): 3a12a2c

Create agent.py

Browse files
Files changed (1) hide show
  1. agent.py +54 -0
agent.py ADDED
@@ -0,0 +1,54 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# agent.py

from llama_index.llms.huggingface_api import HuggingFaceInferenceAPI
from llama_index.core.agent.workflow import AgentWorkflow
from llama_index.core.tools import FunctionTool

from langchain_community.document_loaders import YoutubeLoader
from langchain_community.tools import DuckDuckGoSearchRun, WikipediaQueryRun
from langchain_community.utilities import WikipediaAPIWrapper
from langchain_experimental.tools.python.tool import PythonREPLTool
9
+
10
+ # Define all tool functions with type annotations
11
+
12
def search_duckduckgo(query: str) -> str:
    """Search the web with DuckDuckGo and return the raw result text."""
    search_tool = DuckDuckGoSearchRun()
    return search_tool.run(query)
15
+
16
def search_wikipedia(query: str) -> str:
    """Look up *query* on Wikipedia and return the article summary text.

    Bug fix: the original passed ``api_wrapper=None``, which fails
    pydantic validation — ``WikipediaQueryRun`` requires a
    ``WikipediaAPIWrapper`` instance, so every call raised before the
    query was ever sent.
    """
    wrapper = WikipediaAPIWrapper()
    return WikipediaQueryRun(api_wrapper=wrapper).run(query)
19
+
20
def run_python(code: str) -> str:
    """Run a snippet of Python source in a REPL tool and return its output."""
    repl = PythonREPLTool()
    return repl.run(code)
23
+
24
def get_youtube_transcript(url: str) -> str:
    """Fetch a YouTube video's transcript and return it as one string."""
    documents = YoutubeLoader.from_youtube_url(url, add_video_info=False).load()
    transcript_parts = [document.page_content for document in documents]
    return " ".join(transcript_parts)
29
+
30
# Wrap each plain function as a LlamaIndex FunctionTool.
TOOLS = [
    FunctionTool.from_defaults(fn)
    for fn in (
        search_duckduckgo,
        search_wikipedia,
        run_python,
        get_youtube_transcript,
    )
]
37
+
38
# System prompt handed to the agent on every run.
SYSTEM_PROMPT = "You are a helpful and smart AI agent that solves tasks using reasoning and external tools."

# Remote LLM served through the Hugging Face Inference API.
llm = HuggingFaceInferenceAPI(model_name="Qwen/Qwen2.5-Coder-32B-Instruct")

# LlamaIndex agent that wires the tools to the model.
agent = AgentWorkflow.from_tools_or_functions(
    TOOLS,
    llm=llm,
    system_prompt=SYSTEM_PROMPT,
)
47
+
48
# Optional conversation state so consecutive runs can share context.
from llama_index.core.workflow import Context

ctx = Context(agent)
51
+
52
async def answer_question(question: str) -> str:
    """Run the agent on a single question and return its final answer as text.

    Bug fix: ``AgentWorkflow`` has no ``arun`` method — its async entry
    point is ``run()``, which returns an awaitable workflow handler.
    The module-level ``ctx`` is passed so repeated calls share
    conversation state (the original created ``ctx`` but never used it).
    """
    response = await agent.run(user_msg=question, ctx=ctx)
    # str() normalizes the AgentOutput object to match the declared return type.
    return str(response)