wt002 committed on
Commit
ac2433e
·
verified ·
1 Parent(s): 5001e6b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +2 -3
app.py CHANGED
@@ -1,4 +1,3 @@
1
-
2
  import os
3
  import gradio as gr
4
  import requests
@@ -63,7 +62,7 @@ class CodeLlamaTool(Tool):
63
  )
64
 
65
  def forward(self, question: str) -> str:
66
- prompt = f"""You are an AI that uses Python code to answer questions.
67
  Question: {question}
68
  Instructions:
69
  - If solving requires code, use a block like <tool>code</tool>.
@@ -309,7 +308,7 @@ Answer should be a short string, number, or comma-separated list. Keep it brief.
309
  ],
310
  add_base_tools=True
311
  )
312
- self.agent.prompt_templates["system_prompt"] = self.agent.prompt_templates["system_prompt"] + system_prompt
313
 
314
  def _build_safe_prompt(self, history: str, question: str, max_total_tokens=32768, reserve_for_output=2048):
315
  max_input_tokens = max_total_tokens - reserve_for_output
 
 
1
  import os
2
  import gradio as gr
3
  import requests
 
62
  )
63
 
64
  def forward(self, question: str) -> str:
65
+ self.prompt = f"""You are an AI that uses Python code to answer questions.
66
  Question: {question}
67
  Instructions:
68
  - If solving requires code, use a block like <tool>code</tool>.
 
308
  ],
309
  add_base_tools=True
310
  )
311
+ self.agent.prompt_templates["system_prompt"] = self.agent.prompt_templates["system_prompt"] + self.system_prompt
312
 
313
  def _build_safe_prompt(self, history: str, question: str, max_total_tokens=32768, reserve_for_output=2048):
314
  max_input_tokens = max_total_tokens - reserve_for_output