guillaumefrd committed on
Commit
79ec94d
·
1 Parent(s): cecab48

code interpreter + post process answer

Browse files
Files changed (4) hide show
  1. app.py +19 -3
  2. config.py +1 -1
  3. prompt.py +3 -3
  4. requirements.txt +2 -1
app.py CHANGED
@@ -9,6 +9,7 @@ from llama_index.core.agent.workflow import ReActAgent, AgentStream, ToolCallRes
9
  from llama_index.llms.huggingface_api import HuggingFaceInferenceAPI # customized to support different provider
10
  from llama_index.tools.wikipedia import WikipediaToolSpec
11
  from llama_index.tools.duckduckgo import DuckDuckGoSearchToolSpec
 
12
 
13
  from prompt import custom_react_system_header_str
14
  from config import HF_MODEL_NAME, HF_PROVIDER
@@ -44,6 +45,7 @@ class LLamaIndexAgent:
44
  tool_spec_list = []
45
  # tool_spec_list += WikipediaToolSpec().to_tool_list()
46
  tool_spec_list += DuckDuckGoSearchToolSpec().to_tool_list()
 
47
 
48
  # agent definition
49
  self.agent = ReActAgent(llm=llm, tools=tool_spec_list)
@@ -67,7 +69,9 @@ class LLamaIndexAgent:
67
  print(v.template)
68
 
69
  async def __call__(self, question: str) -> str:
70
- print(f"Agent received question (first 50 chars): {question[:50]}...")
 
 
71
 
72
  handler = self.agent.run(question, ctx=self.ctx)
73
  async for ev in handler.stream_events():
@@ -77,6 +81,13 @@ class LLamaIndexAgent:
77
  print(f"{ev.delta}", end="", flush=True)
78
 
79
  response = await handler
 
 
 
 
 
 
 
80
  print("\n\n"+"-"*50)
81
  print(f"Agent returning with answer: {response}")
82
  return response
@@ -144,9 +155,7 @@ async def run_and_submit_all(profile: gr.OAuthProfile | None):
144
  print(f"Skipping item with missing task_id or question: {item}")
145
  continue
146
  try:
147
- # submitted_answer = agent(question_text)
148
  submitted_answer = await agent(question_text)
149
- submitted_answer = str(submitted_answer) # cast AgentOutput to str
150
  answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
151
  results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
152
  agent.ctx.clear() # clear context for next question
@@ -240,6 +249,13 @@ with gr.Blocks() as demo:
240
 
241
  if __name__ == "__main__":
242
  print("\n" + "-"*30 + " App Starting " + "-"*30)
 
 
 
 
 
 
 
243
  # Check for SPACE_HOST and SPACE_ID at startup for information
244
  space_host_startup = os.getenv("SPACE_HOST")
245
  space_id_startup = os.getenv("SPACE_ID") # Get SPACE_ID at startup
 
9
  from llama_index.llms.huggingface_api import HuggingFaceInferenceAPI # customized to support different provider
10
  from llama_index.tools.wikipedia import WikipediaToolSpec
11
  from llama_index.tools.duckduckgo import DuckDuckGoSearchToolSpec
12
+ from llama_index.tools.code_interpreter import CodeInterpreterToolSpec
13
 
14
  from prompt import custom_react_system_header_str
15
  from config import HF_MODEL_NAME, HF_PROVIDER
 
45
  tool_spec_list = []
46
  # tool_spec_list += WikipediaToolSpec().to_tool_list()
47
  tool_spec_list += DuckDuckGoSearchToolSpec().to_tool_list()
48
+ tool_spec_list += CodeInterpreterToolSpec().to_tool_list()
49
 
50
  # agent definition
51
  self.agent = ReActAgent(llm=llm, tools=tool_spec_list)
 
69
  print(v.template)
70
 
71
  async def __call__(self, question: str) -> str:
72
+ print("\n\n"+"*"*50)
73
+ print(f"Agent received question: {question}")
74
+ print("*"*50)
75
 
76
  handler = self.agent.run(question, ctx=self.ctx)
77
  async for ev in handler.stream_events():
 
81
  print(f"{ev.delta}", end="", flush=True)
82
 
83
  response = await handler
84
+
85
+ # post-process the response (cast AgentOutput to str and keep only what's after "FINAL ANSWER:" for the exact match)
86
+ response = str(response)
87
+ try:
88
+ response = response.split("FINAL ANSWER:")[-1].strip()
89
+ except:
90
+ print('Could not split response on "FINAL ANSWER:"')
91
  print("\n\n"+"-"*50)
92
  print(f"Agent returning with answer: {response}")
93
  return response
 
155
  print(f"Skipping item with missing task_id or question: {item}")
156
  continue
157
  try:
 
158
  submitted_answer = await agent(question_text)
 
159
  answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
160
  results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
161
  agent.ctx.clear() # clear context for next question
 
249
 
250
  if __name__ == "__main__":
251
  print("\n" + "-"*30 + " App Starting " + "-"*30)
252
+
253
+ # set hard-coded values to run locally
254
+ if not os.getenv("SPACE_HOST"):
255
+ os.environ["SPACE_HOST"] = "guillaumefrd-agents-final-assignment.hf.space"
256
+ if not os.getenv("SPACE_ID"):
257
+ os.environ["SPACE_ID"] = "guillaumefrd/agents_final_assignment"
258
+
259
  # Check for SPACE_HOST and SPACE_ID at startup for information
260
  space_host_startup = os.getenv("SPACE_HOST")
261
  space_id_startup = os.getenv("SPACE_ID") # Get SPACE_ID at startup
config.py CHANGED
@@ -1,2 +1,2 @@
1
- HF_MODEL_NAME = "google/gemma-3-27b-it"
2
  HF_PROVIDER = "nebius"
 
1
+ HF_MODEL_NAME = "google/gemma-3-27b-it" # multimodal (need to handle image input)
2
  HF_PROVIDER = "nebius"
prompt.py CHANGED
@@ -3,7 +3,7 @@
3
  custom_react_system_header_str = """\
4
 
5
  You are a general AI assistant.
6
- I will ask you a question. Report your thoughts, and finish your answer with the following template: FINAL ANSWER: [YOUR FINAL ANSWER].
7
  YOUR FINAL ANSWER should be a number OR as few words as possible OR a comma separated list of numbers and/or strings.
8
  If you are asked for a number, don't use comma to write your number neither use units such as $ or percent sign unless specified otherwise.
9
  If you are asked for a string, don't use articles, neither abbreviations (e.g. for cities), and write the digits in plain text unless specified otherwise.
@@ -44,12 +44,12 @@ You should keep repeating the above format till you have enough information to a
44
 
45
  ```
46
  Thought: I can answer without using any more tools. I'll use the user's language to answer
47
- Answer: [your answer here (In the same language as the user's question)]
48
  ```
49
 
50
  ```
51
  Thought: I cannot answer the question with the provided tools.
52
- Answer: [your answer here (In the same language as the user's question)]
53
  ```
54
 
55
  ## Current Conversation
 
3
  custom_react_system_header_str = """\
4
 
5
  You are a general AI assistant.
6
+ A human will ask you a question. Report your Thoughts, Actions, Observations as described in ## Output Format, and finish your answer with the following template: FINAL ANSWER: [YOUR FINAL ANSWER HERE]
7
  YOUR FINAL ANSWER should be a number OR as few words as possible OR a comma separated list of numbers and/or strings.
8
  If you are asked for a number, don't use comma to write your number neither use units such as $ or percent sign unless specified otherwise.
9
  If you are asked for a string, don't use articles, neither abbreviations (e.g. for cities), and write the digits in plain text unless specified otherwise.
 
44
 
45
  ```
46
  Thought: I can answer without using any more tools. I'll use the user's language to answer
47
+ FINAL ANSWER: [YOUR FINAL ANSWER HERE (In the same language as the user's question)]
48
  ```
49
 
50
  ```
51
  Thought: I cannot answer the question with the provided tools.
52
+ FINAL ANSWER: [YOUR FINAL ANSWER HERE (In the same language as the user's question)]
53
  ```
54
 
55
  ## Current Conversation
requirements.txt CHANGED
@@ -3,4 +3,5 @@ requests
3
  llama-index
4
  llama-index-llms-huggingface-api @ git+https://github.com/guillaumefrd/llama_index.git@add-provider-HF-API#subdirectory=llama-index-integrations/llms/llama-index-llms-huggingface-api
5
  llama_index.tools.wikipedia
6
- llama_index.tools.duckduckgo
 
 
3
  llama-index
4
  llama-index-llms-huggingface-api @ git+https://github.com/guillaumefrd/llama_index.git@add-provider-HF-API#subdirectory=llama-index-integrations/llms/llama-index-llms-huggingface-api
5
  llama_index.tools.wikipedia
6
+ llama_index.tools.duckduckgo
7
+ llama_index.tools.code_interpreter