wt002 committed on
Commit
d4165c4
·
verified ·
1 Parent(s): 8a89d81

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +29 -18
app.py CHANGED
@@ -26,20 +26,31 @@ load_dotenv()
26
  import io
27
  import contextlib
28
  from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
 
29
 
30
- class ZephyrToolCallingAgentTool:
31
  name = "zephyr_tool_agent"
32
  description = "Uses Zephyr-7B to answer questions using code or reasoning"
33
 
 
 
 
 
 
 
 
 
34
  def __init__(self):
35
  self.model_id = "HuggingFaceH4/zephyr-7b-beta"
36
- token = os.getenv("HF_TOKEN") # Optional unless private
37
 
38
  self.tokenizer = AutoTokenizer.from_pretrained(self.model_id, token=token)
39
  self.model = AutoModelForCausalLM.from_pretrained(
40
- self.model_id, device_map="auto", torch_dtype="auto", token=token
 
 
 
41
  )
42
-
43
  self.pipeline = pipeline(
44
  "text-generation",
45
  model=self.model,
@@ -48,30 +59,21 @@ class ZephyrToolCallingAgentTool:
48
  temperature=0.2
49
  )
50
 
51
- def _run_code(self, code: str) -> str:
52
- buffer = io.StringIO()
53
- try:
54
- with contextlib.redirect_stdout(buffer):
55
- exec(code, {})
56
- return buffer.getvalue().strip()
57
- except Exception as e:
58
- return f"Error during code execution: {e}"
59
 
60
  def run(self, question: str) -> str:
61
  prompt = f"""You are a helpful assistant. Use code to solve questions that involve calculations.
62
  If code is needed, return a block like <tool>code</tool>. End your answer with <final>answer</final>.
63
 
64
-
65
  Question: {question}
66
  Answer:"""
67
 
68
  result = self.pipeline(prompt)[0]["generated_text"]
69
 
70
- # Process result
71
  if "<tool>" in result and "</tool>" in result:
72
  code = result.split("<tool>")[1].split("</tool>")[0].strip()
73
- output = self._run_code(code)
74
- return f"FINAL ANSWER (code output): {output}"
75
 
76
  elif "<final>" in result and "</final>" in result:
77
  final = result.split("<final>")[1].split("</final>")[0].strip()
@@ -79,6 +81,15 @@ Answer:"""
79
 
80
  return "Could not determine how to respond. No <tool> or <final> block detected."
81
 
 
 
 
 
 
 
 
 
 
82
 
83
  #from smolagents import Tool
84
  #from langchain_community.document_loaders import WikipediaLoader
@@ -253,7 +264,7 @@ class BasicAgent:
253
  video_transcription_tool = VideoTranscriptionTool()
254
 
255
  # ✅ New Mistral-based Tool
256
- mistral_tool = ZephyrToolCallingAgentTool()
257
 
258
  system_prompt = f"""
259
  You are my general AI assistant. Your task is to answer the question I asked.
@@ -272,7 +283,7 @@ If the answer is a comma-separated list, apply the above rules for each element
272
  keywords_extract_tool, speech_to_text_tool,
273
  visit_webpage_tool, final_answer_tool,
274
  parse_excel_to_json, video_transcription_tool,
275
- mistral_tool # 🔧 Add here
276
  ],
277
  add_base_tools=True
278
  )
 
26
  import io
27
  import contextlib
28
  from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
29
+ from smolagents import Tool
30
 
31
+ class ZephyrToolCallingAgentTool(Tool):
32
  name = "zephyr_tool_agent"
33
  description = "Uses Zephyr-7B to answer questions using code or reasoning"
34
 
35
+ inputs = {
36
+ "question": {
37
+ "type": "string",
38
+ "description": "The user's question involving reasoning or code execution."
39
+ }
40
+ }
41
+ output_type = "string"
42
+
43
  def __init__(self):
44
  self.model_id = "HuggingFaceH4/zephyr-7b-beta"
45
+ token = os.getenv("HF_TOKEN")
46
 
47
  self.tokenizer = AutoTokenizer.from_pretrained(self.model_id, token=token)
48
  self.model = AutoModelForCausalLM.from_pretrained(
49
+ self.model_id,
50
+ device_map="auto",
51
+ torch_dtype="auto",
52
+ token=token
53
  )
 
54
  self.pipeline = pipeline(
55
  "text-generation",
56
  model=self.model,
 
59
  temperature=0.2
60
  )
61
 
62
+ def _run(self, question: str) -> str:
63
+ return self.run(question)
 
 
 
 
 
 
64
 
65
  def run(self, question: str) -> str:
66
  prompt = f"""You are a helpful assistant. Use code to solve questions that involve calculations.
67
  If code is needed, return a block like <tool>code</tool>. End your answer with <final>answer</final>.
68
 
 
69
  Question: {question}
70
  Answer:"""
71
 
72
  result = self.pipeline(prompt)[0]["generated_text"]
73
 
 
74
  if "<tool>" in result and "</tool>" in result:
75
  code = result.split("<tool>")[1].split("</tool>")[0].strip()
76
+ return self._run_code(code)
 
77
 
78
  elif "<final>" in result and "</final>" in result:
79
  final = result.split("<final>")[1].split("</final>")[0].strip()
 
81
 
82
  return "Could not determine how to respond. No <tool> or <final> block detected."
83
 
84
+ def _run_code(self, code: str) -> str:
85
+ buffer = io.StringIO()
86
+ try:
87
+ with contextlib.redirect_stdout(buffer):
88
+ exec(code, {})
89
+ return f"FINAL ANSWER (code output): {buffer.getvalue().strip()}"
90
+ except Exception as e:
91
+ return f"Error during code execution: {e}"
92
+
93
 
94
  #from smolagents import Tool
95
  #from langchain_community.document_loaders import WikipediaLoader
 
264
  video_transcription_tool = VideoTranscriptionTool()
265
 
266
  # ✅ New Zephyr-based Tool
267
+ zephyr_tool = ZephyrToolCallingAgentTool()
268
 
269
  system_prompt = f"""
270
  You are my general AI assistant. Your task is to answer the question I asked.
 
283
  keywords_extract_tool, speech_to_text_tool,
284
  visit_webpage_tool, final_answer_tool,
285
  parse_excel_to_json, video_transcription_tool,
286
+ zephyr_tool # 🔧 Add here
287
  ],
288
  add_base_tools=True
289
  )