added custom agent
main.py CHANGED

@@ -1,4 +1,5 @@
 from utils import create_index, get_agent_chain, get_prompt_and_tools, get_search_index
+from utils import get_custom_agent, get_prompt_and_tools_for_custom_agent
 question_starters = ['who', 'why', 'what', 'how', 'where', 'when', 'which', 'whom', 'whose']


@@ -9,9 +10,13 @@ def index():
 def run(question):
     index = get_search_index()

-    prompt, tools = get_prompt_and_tools()
+    # prompt, tools = get_prompt_and_tools()

-    agent_chain = get_agent_chain(prompt, tools)
+    # agent_chain = get_agent_chain(prompt, tools)
+
+    prompt, tools = get_prompt_and_tools_for_custom_agent()
+
+    agent_chain = get_custom_agent(prompt, tools)

     result = None
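The rest of run() is outside this hunk, so the call into agent_chain is not shown. Since get_custom_agent returns a LangChain AgentExecutor with conversation memory attached, the invocation would typically look something like the sketch below; this is a hedged guess, and the actual code in main.py may differ.

    # Hypothetical continuation of run() -- not part of this diff.
    # The executor takes the user question as "input"; "chat_history" is supplied by memory.
    result = agent_chain({"input": question})
    answer = result["output"]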
utils.py CHANGED

@@ -102,7 +102,7 @@ def generate_answer(question) -> str:
     global chat_history, gpt_3_5_index
     gpt_3_5_chain = get_qa_chain(gpt_3_5_index)
     result = gpt_3_5_chain(
-        {"question": question, "chat_history": chat_history, "vectordbkwargs": {"search_distance": 0.
+        {"question": question, "chat_history": chat_history, "vectordbkwargs": {"search_distance": 0.6}})
     chat_history = [(question, result["answer"])]
     sources = []

@@ -147,7 +147,69 @@ def get_tools():
         Tool(
             name="Vectorstore",
             func=generate_answer,
-            description="useful for when you need to answer questions about the coursera course
+            description="useful for when you need to answer questions about the coursera course on 3D Printing.",
             return_direct=True
         )]
     return tools
+
+def get_custom_agent(prompt, tools):
+
+    llm_chain = LLMChain(llm=gpt_3_5, prompt=prompt)
+
+    output_parser = CustomOutputParser()
+    tool_names = [tool.name for tool in tools]
+    agent = LLMSingleActionAgent(
+        llm_chain=llm_chain,
+        output_parser=output_parser,
+        stop=["\nObservation:"],
+        allowed_tools=tool_names
+    )
+    agent_executor = AgentExecutor.from_agent_and_tools(agent=agent, tools=tools, verbose=True, memory=memory,
+                                                        intermediate_steps=True)
+    return agent_executor
+
+def get_prompt_and_tools_for_custom_agent():
+    template = """
+Have a conversation with a human, answering the following questions as best you can.
+Always try to use Vectorstore first.
+Your name is Coursera QA Bot because you are a personal assistant of a Coursera Course: The 3D Printing Evolution. You have access to the following tools:
+
+{tools}
+
+To answer for the new input, use the following format:
+
+New Input: the input question you must answer
+Thought: Do I need to use a tool? Yes
+Action: the action to take, should be one of [{tool_names}]
+Action Input: the input to the action
+Observation: the result of the action
+... (this Thought/Action/Action Input/Observation can repeat N times)
+Thought: I now know the final answer
+Final Answer: the final answer to the original input question. SOURCES: the sources referred to find the final answer
+
+
+When you have a response to say to the Human and DO NOT need to use a tool:
+1. DO NOT return "SOURCES" if you did not use any tool.
+2. You MUST use this format:
+```
+Thought: Do I need to use a tool? No
+AI: [your response here]
+```
+
+Begin! Remember to speak as a personal assistant when giving your final answer.
+ALWAYS return a "SOURCES" part in your answer, if you used any tool.
+
+Previous conversation history:
+{chat_history}
+New input: {input}
+{agent_scratchpad}
+SOURCES:"""
+    tools = get_tools()
+    prompt = CustomPromptTemplate(
+        template=template,
+        tools=tools,
+        # This omits the `agent_scratchpad`, `tools`, and `tool_names` variables because those are generated dynamically
+        # This includes the `intermediate_steps` variable because that is needed
+        input_variables=["input", "intermediate_steps", "chat_history"]
+    )
+    return prompt, tools
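Not part of this commit: utils.py also references CustomPromptTemplate and CustomOutputParser, whose definitions are outside the diff. A minimal sketch of what they usually look like in the standard LangChain custom-LLM-agent pattern follows; the actual definitions in utils.py may differ, and the "AI:" branch is only an assumption based on the no-tool format in the template above.

    import re
    from typing import List, Union

    from langchain.agents import AgentOutputParser, Tool
    from langchain.prompts import StringPromptTemplate
    from langchain.schema import AgentAction, AgentFinish


    class CustomPromptTemplate(StringPromptTemplate):
        template: str
        tools: List[Tool]

        def format(self, **kwargs) -> str:
            # Fold the (action, observation) pairs into the agent_scratchpad
            intermediate_steps = kwargs.pop("intermediate_steps")
            thoughts = ""
            for action, observation in intermediate_steps:
                thoughts += action.log
                thoughts += f"\nObservation: {observation}\nThought: "
            kwargs["agent_scratchpad"] = thoughts
            # Expose tool descriptions and names to the {tools} / {tool_names} slots
            kwargs["tools"] = "\n".join(f"{t.name}: {t.description}" for t in self.tools)
            kwargs["tool_names"] = ", ".join(t.name for t in self.tools)
            return self.template.format(**kwargs)


    class CustomOutputParser(AgentOutputParser):
        def parse(self, llm_output: str) -> Union[AgentAction, AgentFinish]:
            # Stop if the model produced a final answer or a direct "AI:" reply
            if "Final Answer:" in llm_output:
                return AgentFinish(
                    return_values={"output": llm_output.split("Final Answer:")[-1].strip()},
                    log=llm_output,
                )
            if "AI:" in llm_output:
                return AgentFinish(
                    return_values={"output": llm_output.split("AI:")[-1].strip()},
                    log=llm_output,
                )
            # Otherwise extract the Action / Action Input pair for the tool call
            regex = r"Action\s*\d*\s*:(.*?)\nAction\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)"
            match = re.search(regex, llm_output, re.DOTALL)
            if not match:
                raise ValueError(f"Could not parse LLM output: `{llm_output}`")
            action = match.group(1).strip()
            action_input = match.group(2).strip(" ").strip('"')
            return AgentAction(tool=action, tool_input=action_input, log=llm_output)

The prompt template declares only "input", "intermediate_steps", and "chat_history" as input_variables because, in this pattern, agent_scratchpad, tools, and tool_names are filled in by CustomPromptTemplate.format() rather than by the caller.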
|