dawid-lorek committed on
Commit 92b0d1a · verified · 1 Parent(s): 59ff18d

Update agent.py

Files changed (1):
  1. agent.py +8 -19
agent.py CHANGED
@@ -1,33 +1,29 @@
 # agent.py

-from llama_index.llms.huggingface_api import HuggingFaceInferenceAPI
+from llama_index.llms.openai import OpenAI
 from llama_index.core.agent.workflow import AgentWorkflow
 from llama_index.core.tools import FunctionTool
+
+# LangChain-based tools
 from langchain_community.tools import DuckDuckGoSearchRun, WikipediaQueryRun
 from langchain_experimental.tools.python.tool import PythonREPLTool
 from langchain_community.document_loaders import YoutubeLoader

-# Define all tool functions with type annotations
-
+# Tool wrappers
 def search_duckduckgo(query: str) -> str:
-    """Use DuckDuckGo to search the internet."""
     return DuckDuckGoSearchRun().run(query)

 def search_wikipedia(query: str) -> str:
-    """Use Wikipedia to look up facts."""
     return WikipediaQueryRun(api_wrapper=None).run(query)

 def run_python(code: str) -> str:
-    """Execute Python code and return output."""
     return PythonREPLTool().run(code)

 def get_youtube_transcript(url: str) -> str:
-    """Extract transcript from YouTube video."""
     loader = YoutubeLoader.from_youtube_url(url, add_video_info=False)
     docs = loader.load()
-    return " ".join(doc.page_content for doc in docs)
+    return " ".join(d.page_content for d in docs)

-# Build tool wrappers
 TOOLS = [
     FunctionTool.from_defaults(search_duckduckgo),
     FunctionTool.from_defaults(search_wikipedia),
@@ -35,20 +31,13 @@ TOOLS = [
     FunctionTool.from_defaults(get_youtube_transcript),
 ]

-# Load LLM
-llm = HuggingFaceInferenceAPI(model_name="Qwen/Qwen2.5-Coder-32B-Instruct")
-
-# Create LlamaIndex agent
+llm = OpenAI(model="gpt-4")
 agent = AgentWorkflow.from_tools_or_functions(
     TOOLS,
     llm=llm,
-    system_prompt="You are a helpful and smart AI agent that solves tasks using reasoning and external tools."
+    system_prompt="You are a helpful AI agent using external tools."
 )

-# Optional: support context (stateful runs)
-from llama_index.core.workflow import Context
-ctx = Context(agent)
-
+import asyncio
 async def answer_question(question: str) -> str:
-    """Run the agent on a single question."""
     return await agent.arun(question)
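
A minimal usage sketch for the updated module, not part of this commit: it assumes agent.py is importable from the current directory and that OpenAI credentials are configured in the environment; the file name run_agent.py and the sample question are hypothetical.

# run_agent.py — hypothetical caller for the module changed in this commit
import asyncio

from agent import answer_question  # async helper defined at the end of agent.py

async def main() -> None:
    # Any question works here; this string is only an illustration.
    answer = await answer_question("What is the capital of Australia?")
    print(answer)

if __name__ == "__main__":
    # answer_question is a coroutine, so drive it with asyncio.run
    asyncio.run(main())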