Spaces: EtienneB committed · 353e950
1 Parent(s): e89e56c
updates

Files changed:
- agent.py +3 -62
- scrapbook.py +62 -0
agent.py CHANGED
@@ -76,71 +76,12 @@ def build_graph():
     # Bind tools to LLM
     llm_with_tools = llm.bind_tools(tools)
 
-
-    """
-    Clean up the answer to remove common prefixes and formatting
-    that models often add but that can cause exact match failures.
-
-    Args:
-        answer: The raw answer from the model
-
-    Returns:
-        The cleaned answer as a string
-    """
-    # Convert non-string types to strings
-    if not isinstance(answer, str):
-        # Handle numeric types (float, int)
-        if isinstance(answer, float):
-            # Format floating point numbers properly
-            # Check if it's an integer value in float form (e.g., 12.0)
-            if answer.is_integer():
-                formatted_answer = str(int(answer))
-            else:
-                # For currency values that might need formatting
-                if abs(answer) >= 1000:
-                    formatted_answer = f"${answer:,.2f}"
-                else:
-                    formatted_answer = str(answer)
-            return formatted_answer
-        elif isinstance(answer, int):
-            return str(answer)
-        else:
-            # For any other type
-            return str(answer)
-
-    # Now we know answer is a string, so we can safely use string methods
-    # Normalize whitespace
-    answer = answer.strip()
-
-    # Remove common prefixes and formatting that models add
-    prefixes_to_remove = [
-        "The answer is ",
-        "Answer: ",
-        "Final answer: ",
-        "The result is ",
-        "To answer this question: ",
-        "Based on the information provided, ",
-        "According to the information: ",
-    ]
-
-    for prefix in prefixes_to_remove:
-        if answer.startswith(prefix):
-            answer = answer[len(prefix):].strip()
-
-    # Remove quotes if they wrap the entire answer
-    if (answer.startswith('"') and answer.endswith('"')) or (answer.startswith("'") and answer.endswith("'")):
-        answer = answer[1:-1].strip()
-
-    return answer
-
+
     def assistant(state: MessagesState):
         messages_with_system_prompt = [sys_msg] + state["messages"]
         llm_response = llm_with_tools.invoke(messages_with_system_prompt)
-
-
-        clean_text = clean_answer(llm_response.content)
-
-        return {"messages": [AIMessage(content=json.dumps(clean_text, ensure_ascii=False))]}
+
+        return {"messages": [AIMessage(content=json.dumps(llm_response.content, ensure_ascii=False))]}
 
     # --- Graph Definition ---
     builder = StateGraph(MessagesState)
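The new assistant node above no longer post-processes the model output; it serializes llm_response.content directly. As a minimal sketch, assuming the standard LangGraph tool-calling wiring (the continuation of build_graph() is not shown in this diff), the node would typically be registered like this:

# Sketch only: assumed continuation of build_graph(), not part of this commit.
from langgraph.graph import StateGraph, MessagesState, START
from langgraph.prebuilt import ToolNode, tools_condition

builder = StateGraph(MessagesState)
builder.add_node("assistant", assistant)              # LLM node from the diff above
builder.add_node("tools", ToolNode(tools))            # runs any requested tool calls
builder.add_edge(START, "assistant")
builder.add_conditional_edges("assistant", tools_condition)  # route to "tools" or END
builder.add_edge("tools", "assistant")                # feed tool results back to the LLM
graph = builder.compile()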
scrapbook.py ADDED
@@ -0,0 +1,62 @@
+def clean_answer(answer: any) -> str:
+    """
+    Clean up the answer to remove common prefixes and formatting
+    that models often add but that can cause exact match failures.
+
+    Args:
+        answer: The raw answer from the model
+
+    Returns:
+        The cleaned answer as a string
+    """
+    # Convert non-string types to strings
+    if not isinstance(answer, str):
+        # Handle numeric types (float, int)
+        if isinstance(answer, float):
+            # Format floating point numbers properly
+            # Check if it's an integer value in float form (e.g., 12.0)
+            if answer.is_integer():
+                formatted_answer = str(int(answer))
+            else:
+                # For currency values that might need formatting
+                if abs(answer) >= 1000:
+                    formatted_answer = f"${answer:,.2f}"
+                else:
+                    formatted_answer = str(answer)
+            return formatted_answer
+        elif isinstance(answer, int):
+            return str(answer)
+        else:
+            # For any other type
+            return str(answer)
+
+    # Now we know answer is a string, so we can safely use string methods
+    # Normalize whitespace
+    answer = answer.strip()
+
+    # Remove common prefixes and formatting that models add
+    prefixes_to_remove = [
+        "The answer is ",
+        "Answer: ",
+        "Final answer: ",
+        "The result is ",
+        "To answer this question: ",
+        "Based on the information provided, ",
+        "According to the information: ",
+    ]
+
+    for prefix in prefixes_to_remove:
+        if answer.startswith(prefix):
+            answer = answer[len(prefix):].strip()
+
+    # Remove quotes if they wrap the entire answer
+    if (answer.startswith('"') and answer.endswith('"')) or (answer.startswith("'") and answer.endswith("'")):
+        answer = answer[1:-1].strip()
+
+    return answer
+
+
+
+# Clean the answer
+clean_text = clean_answer(llm_response.content)
+
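For reference, a hedged illustration of what clean_answer does to typical model outputs. Note that the trailing lines of scrapbook.py reference an undefined llm_response, so importing the module as-is would raise a NameError; the sketch below assumes the function has been copied into the current scope. The example inputs are made up, not data from this Space:

# Illustrative only: assumes clean_answer is defined in the current scope
# (scrapbook.py itself cannot be imported cleanly because its trailing lines
# reference an undefined llm_response).
print(clean_answer('The answer is "Paris"'))  # -> Paris  (prefix and wrapping quotes stripped)
print(clean_answer("Final answer: 42"))       # -> 42
print(clean_answer(12.0))                     # -> 12  (integer-valued float)
print(clean_answer(1234.5))                   # -> $1,234.50  (floats >= 1000 are currency-formatted)
print(clean_answer(7))                        # -> 7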