Guillaume Fradet
committed on
Commit
·
b48ba9d
1
Parent(s):
b59053d
add custom prompt and tools
Browse files- app.py +42 -8
- prompt.py +58 -0
- requirements.txt +3 -1
app.py
CHANGED
@@ -3,10 +3,14 @@ import gradio as gr
|
|
3 |
import requests
|
4 |
import pandas as pd
|
5 |
|
6 |
-
from llama_index.
|
7 |
-
from llama_index.core.agent.workflow import ReActAgent
|
8 |
from llama_index.core.workflow import Context
|
9 |
-
from llama_index.core.agent.workflow import AgentStream, ToolCallResult
|
|
|
|
|
|
|
|
|
|
|
10 |
|
11 |
# (Keep Constants as is)
|
12 |
# --- Constants ---
|
@@ -24,19 +28,49 @@ class BasicAgent:
|
|
24 |
return fixed_answer
|
25 |
|
26 |
class LLamaIndexAgent:
|
27 |
-
def __init__(self,
|
|
|
|
|
|
|
|
|
|
|
28 |
llm = HuggingFaceInferenceAPI(model_name=model_name)
|
29 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
30 |
self.ctx = Context(self.agent)
|
31 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
32 |
|
33 |
async def __call__(self, question: str) -> str:
|
34 |
print(f"Agent received question (first 50 chars): {question[:50]}...")
|
35 |
|
36 |
handler = self.agent.run(question, ctx=self.ctx)
|
37 |
async for ev in handler.stream_events():
|
38 |
-
if isinstance(ev, ToolCallResult):
|
39 |
-
|
40 |
if isinstance(ev, AgentStream):
|
41 |
print(f"{ev.delta}", end="", flush=True)
|
42 |
|
|
|
3 |
import requests
|
4 |
import pandas as pd
|
5 |
|
6 |
+
from llama_index.core import PromptTemplate
|
|
|
7 |
from llama_index.core.workflow import Context
|
8 |
+
from llama_index.core.agent.workflow import ReActAgent, AgentStream, ToolCallResult
|
9 |
+
from llama_index.llms.huggingface_api import HuggingFaceInferenceAPI
|
10 |
+
from llama_index.tools.wikipedia import WikipediaToolSpec
|
11 |
+
from llama_index.tools.duckduckgo import DuckDuckGoSearchToolSpec
|
12 |
+
|
13 |
+
from prompt import custom_react_system_header_str
|
14 |
|
15 |
# (Keep Constants as is)
|
16 |
# --- Constants ---
|
|
|
28 |
return fixed_answer
|
29 |
|
30 |
class LLamaIndexAgent:
|
31 |
+
def __init__(self,
|
32 |
+
model_name="Qwen/Qwen2.5-Coder-32B-Instruct",
|
33 |
+
show_tools_desc=True,
|
34 |
+
show_prompt=True):
|
35 |
+
|
36 |
+
# LLM definition
|
37 |
llm = HuggingFaceInferenceAPI(model_name=model_name)
|
38 |
+
print(f"LLamaIndexAgent initialized with model \"{model_name}\"")
|
39 |
+
|
40 |
+
# tools definition
|
41 |
+
tool_spec_list = []
|
42 |
+
# tool_spec_list += WikipediaToolSpec().to_tool_list()
|
43 |
+
tool_spec_list += DuckDuckGoSearchToolSpec().to_tool_list()
|
44 |
+
|
45 |
+
# agent definition
|
46 |
+
self.agent = ReActAgent(llm=llm, tools=tool_spec_list)
|
47 |
+
|
48 |
+
# update default prompt with a custom one
|
49 |
+
custom_react_system_header = PromptTemplate(custom_react_system_header_str)
|
50 |
+
self.agent.update_prompts({"react_header": custom_react_system_header})
|
51 |
+
|
52 |
+
# context definition
|
53 |
self.ctx = Context(self.agent)
|
54 |
+
|
55 |
+
if show_tools_desc:
|
56 |
+
for i, tool in enumerate(tool_spec_list):
|
57 |
+
print("\n" + "="*30 + f" Tool {i+1} " + "="*30)
|
58 |
+
print(tool.metadata.description)
|
59 |
+
|
60 |
+
if show_prompt:
|
61 |
+
prompt_dict = self.agent.get_prompts()
|
62 |
+
for k, v in prompt_dict.items():
|
63 |
+
print("\n" + "="*30 + f" Prompt: {k} " + "="*30)
|
64 |
+
print(v.template)
|
65 |
+
|
66 |
|
67 |
async def __call__(self, question: str) -> str:
|
68 |
print(f"Agent received question (first 50 chars): {question[:50]}...")
|
69 |
|
70 |
handler = self.agent.run(question, ctx=self.ctx)
|
71 |
async for ev in handler.stream_events():
|
72 |
+
# if isinstance(ev, ToolCallResult):
|
73 |
+
# print(f"\nCall {ev.tool_name} with {ev.tool_kwargs}\nReturned: {ev.tool_output}")
|
74 |
if isinstance(ev, AgentStream):
|
75 |
print(f"{ev.delta}", end="", flush=True)
|
76 |
|
prompt.py
ADDED
@@ -0,0 +1,58 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# custom ReAct prompt where we add the GAIA team prompting example at the beginning
# NOTE: {tool_desc} and {tool_names} are filled in by LlamaIndex via str.format;
# literal braces in the JSON examples are escaped as {{ }}.

custom_react_system_header_str = """\

You are a general AI assistant.
I will ask you a question. Report your thoughts, and finish your answer with the following template: FINAL ANSWER: [YOUR FINAL ANSWER].
YOUR FINAL ANSWER should be a number OR as few words as possible OR a comma separated list of numbers and/or strings.
If you are asked for a number, don't use comma to write your number neither use units such as $ or percent sign unless specified otherwise.
If you are asked for a string, don't use articles, neither abbreviations (e.g. for cities), and write the digits in plain text unless specified otherwise.
If you are asked for a comma separated list, apply the above rules depending of whether the element to be put in the list is a number or a string.

## Tools

You have access to a wide variety of tools. You are responsible for using the tools in any sequence you deem appropriate to complete the task at hand.
This may require breaking the task into subtasks and using different tools to complete each subtask.

You have access to the following tools:
{tool_desc}


## Output Format

Please answer in the same language as the question and use the following format:

```
Thought: The current language of the user is: (user's language). I need to use a tool to help me answer the question.
Action: tool name (one of {tool_names}) if using a tool.
Action Input: the input to the tool, in a JSON format representing the kwargs (e.g. {{"input": "hello world", "num_beams": 5}})
```

Please ALWAYS start with a Thought.

NEVER surround your response with markdown code markers. You may use code markers within your response if you need to.

Please use a valid JSON format for the Action Input. Do NOT do this {{'input': 'hello world', 'num_beams': 5}}.

If this format is used, the tool will respond in the following format:

```
Observation: tool response
```

You should keep repeating the above format till you have enough information to answer the question without using any more tools. At that point, you MUST respond in one of the following two formats:

```
Thought: I can answer without using any more tools. I'll use the user's language to answer
Answer: [your answer here (In the same language as the user's question)]
```

```
Thought: I cannot answer the question with the provided tools.
Answer: [your answer here (In the same language as the user's question)]
```

## Current Conversation

Below is the current conversation consisting of interleaving human and assistant messages.
"""
|
requirements.txt
CHANGED
@@ -1,4 +1,6 @@
|
|
1 |
gradio
|
2 |
requests
|
3 |
llama-index
|
4 |
-
llama-index-llms-huggingface-api
|
|
|
|
|
|
1 |
gradio
|
2 |
requests
|
3 |
llama-index
|
4 |
+
llama-index-llms-huggingface-api
|
5 |
+
llama-index-tools-wikipedia
|
6 |
+
llama-index-tools-duckduckgo
|