Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -35,11 +35,25 @@ class MistralToolCallingAgentTool:
|
|
35 |
|
36 |
def __init__(self):
|
37 |
self.model_id = "mistralai/Mistral-7B-Instruct-v0.3"
|
38 |
-
|
39 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
40 |
self.pipeline = pipeline(
|
41 |
-
"text-generation",
|
42 |
-
|
|
|
|
|
|
|
43 |
)
|
44 |
|
45 |
def _run_code(self, code: str) -> str:
|
@@ -49,7 +63,7 @@ class MistralToolCallingAgentTool:
|
|
49 |
exec(code, {})
|
50 |
return buffer.getvalue().strip()
|
51 |
except Exception as e:
|
52 |
-
return f"Error during execution: {e}"
|
53 |
|
54 |
def run(self, question: str) -> str:
|
55 |
prompt = f"""You are a helpful assistant. Use code to solve questions that involve calculations.
|
@@ -57,19 +71,21 @@ If code is needed, return a block like <tool>code</tool>. End your answer with <
|
|
57 |
|
58 |
Question: {question}
|
59 |
Answer:"""
|
|
|
60 |
result = self.pipeline(prompt)[0]["generated_text"]
|
61 |
|
62 |
# Process result
|
63 |
if "<tool>" in result and "</tool>" in result:
|
64 |
code = result.split("<tool>")[1].split("</tool>")[0].strip()
|
65 |
output = self._run_code(code)
|
66 |
-
return f"
|
67 |
|
68 |
elif "<final>" in result and "</final>" in result:
|
69 |
final = result.split("<final>")[1].split("</final>")[0].strip()
|
70 |
return f"FINAL ANSWER: {final}"
|
71 |
|
72 |
-
return "
|
|
|
73 |
|
74 |
#from smolagents import Tool
|
75 |
#from langchain_community.document_loaders import WikipediaLoader
|
|
|
35 |
|
36 |
def __init__(self):
    """Load the Mistral-7B-Instruct model, tokenizer, and generation pipeline.

    Reads the HF_TOKEN environment variable for gated-model access on the
    Hugging Face Hub.

    Raises:
        EnvironmentError: if HF_TOKEN is not set in the environment.
        RuntimeError: if the tokenizer or model fails to load (chained to
            the underlying cause).
    """
    self.model_id = "mistralai/Mistral-7B-Instruct-v0.3"
    token = os.getenv("HF_TOKEN")

    if token is None:
        raise EnvironmentError("HF_TOKEN is not set in environment variables.")

    try:
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_id, token=token)
        self.model = AutoModelForCausalLM.from_pretrained(
            self.model_id, device_map="auto", torch_dtype="auto", token=token
        )
    except Exception as e:
        # Chain the original exception (`from e`) so the real cause — auth
        # failure, network error, out-of-memory — stays in the traceback
        # instead of being flattened into the message string.
        raise RuntimeError(f"Error loading Mistral model: {e}") from e

    self.pipeline = pipeline(
        "text-generation",
        model=self.model,
        tokenizer=self.tokenizer,
        max_new_tokens=512,
        temperature=0.2
    )
|
58 |
|
59 |
def _run_code(self, code: str) -> str:
|
|
|
63 |
exec(code, {})
|
64 |
return buffer.getvalue().strip()
|
65 |
except Exception as e:
|
66 |
+
return f"Error during code execution: {e}"
|
67 |
|
68 |
def run(self, question: str) -> str:
    """Answer *question*, optionally by generating and executing Python code.

    The model is prompted to reply with either a <tool>code</tool> block
    (code that is executed via self._run_code) or a <final>answer</final>
    block (a direct answer).

    Args:
        question: the user's question, interpolated into the prompt.

    Returns:
        A "FINAL ANSWER ..." string on success, or an explanatory message
        when the model produced neither a <tool> nor a <final> block.
    """
    prompt = f"""You are a helpful assistant. Use code to solve questions that involve calculations.
If code is needed, return a block like <tool>code</tool>. End your answer with <final>answer</final>.

Question: {question}
Answer:"""

    result = self.pipeline(prompt)[0]["generated_text"]

    # BUG FIX: a transformers text-generation pipeline returns the prompt
    # echoed at the front of "generated_text" by default, and the prompt
    # itself contains the literal markers "<tool>code</tool>" and "<final>".
    # Scanning the full text therefore ALWAYS matched the prompt's own
    # example markers (extracting the word "code" as the tool code). Parse
    # only the continuation the model actually generated.
    generated = result[len(prompt):] if result.startswith(prompt) else result

    if "<tool>" in generated and "</tool>" in generated:
        code = generated.split("<tool>")[1].split("</tool>")[0].strip()
        output = self._run_code(code)
        return f"FINAL ANSWER (code output): {output}"

    elif "<final>" in generated and "</final>" in generated:
        final = generated.split("<final>")[1].split("</final>")[0].strip()
        return f"FINAL ANSWER: {final}"

    return "Could not determine how to respond. No <tool> or <final> block detected."
|
88 |
+
|
89 |
|
90 |
#from smolagents import Tool
|
91 |
#from langchain_community.document_loaders import WikipediaLoader
|