Update app.py
app.py
CHANGED
@@ -136,32 +136,32 @@ def should_continue(state: AgentState) -> str:
         return "reason"
     return "continue"
 
+
+
 def reasoning_node(state: AgentState) -> AgentState:
-
-
+    import os
+    from langchain.schema import HumanMessage, AIMessage
+    from langchain_community.chat_models import ChatHuggingFace
+    from langchain.prompts import ChatPromptTemplate
+    from langchain_community.llms import HuggingFaceHub
+
+    # Ensure token is available
     token = os.environ.get("HF_TOKEN")
     if not token:
         raise ValueError("Hugging Face API token not found in environment variables")
-
-    history = state["history"]
 
-    #
-    if not isinstance(history[-1], HumanMessage):
-
-
-    # Create the
+    # Defensive: Ensure valid history
+    if not state["history"] or not isinstance(state["history"][-1], HumanMessage):
+        state["history"].append(HumanMessage(content="Continue."))
+
+    # Create the LLM
     llm = HuggingFaceHub(
         repo_id="HuggingFaceH4/zephyr-7b-beta",
         huggingfacehub_api_token=token,
-        model_kwargs={
-            "temperature": 0.1,
-            "max_new_tokens": 500
-        }
+        model_kwargs={"temperature": 0.1, "max_new_tokens": 500}
     )
-
-    # Wrap the LLM in ChatHuggingFace
     chat_model = ChatHuggingFace(llm=llm)
-
+
     # Build prompt
     prompt = ChatPromptTemplate.from_messages([
         ("system", (
@@ -175,32 +175,32 @@ def reasoning_node(state: AgentState) -> AgentState:
         )),
         *state['history']
     ])
-
+
     chain = prompt | chat_model
     response = chain.invoke({
         "context": state['context'],
         "reasoning": state['reasoning'],
         "question": state['question']
     })
-
-    # Parse response
+
     content = response.content
     reasoning, action, action_input = parse_agent_response(content)
-
-    # Update state
+
     state['history'].append(AIMessage(content=content))
     state['reasoning'] += f"\nStep {state['iterations']+1}: {reasoning}"
-
+    state['iterations'] += 1
+
     if "final answer" in action.lower():
         state['history'].append(AIMessage(content=f"FINAL ANSWER: {action_input}"))
     else:
-
+        # Save action tool call in context instead of history
+        state['context']['current_tool'] = {
             "tool": action,
-            "input": action_input
-
-
-
+            "input": action_input
+        }
+
     return state
+
 
 
 def tool_node(state: AgentState) -> AgentState:
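The updated node leans on `parse_agent_response`, which is defined elsewhere in app.py and not shown in this diff. Below is a minimal sketch of what such a parser might look like, assuming a ReAct-style `Thought:` / `Action:` / `Action Input:` reply layout; only the function name and the `(reasoning, action, action_input)` return shape come from the diff, the rest is an assumption:

```python
import re

def parse_agent_response(content: str) -> tuple[str, str, str]:
    """Split a model reply into (reasoning, action, action_input).

    Assumed reply layout (not confirmed by the diff):
        Thought: <free-form reasoning>
        Action: <tool name, or "Final Answer">
        Action Input: <argument for the tool, or the answer text>
    """
    thought = re.search(r"Thought:\s*(.*?)(?=\nAction:|\Z)", content, re.DOTALL)
    action = re.search(r"Action:\s*(.*)", content)
    action_input = re.search(r"Action Input:\s*(.*)", content, re.DOTALL)

    if action is None:
        # No explicit action: treat the whole reply as a final answer so the
        # `"final answer" in action.lower()` branch in reasoning_node still
        # terminates the loop instead of looping forever.
        return content.strip(), "Final Answer", content.strip()

    return (
        thought.group(1).strip() if thought else "",
        action.group(1).strip(),
        action_input.group(1).strip() if action_input else "",
    )
```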
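For a sense of how these nodes run, functions shaped like `reasoning_node`, `tool_node`, and `should_continue` are typically compiled into a LangGraph `StateGraph`. Here is a minimal wiring sketch, assuming `AgentState` is a `TypedDict` with the fields the diff touches; the graph construction and the routing map are assumptions for illustration, not part of this commit:

```python
from typing import TypedDict
from langgraph.graph import StateGraph, END

class AgentState(TypedDict):
    # Fields inferred from the diff; the real definition lives in app.py.
    question: str
    context: dict
    reasoning: str
    iterations: int
    history: list

# reasoning_node, tool_node, and should_continue as defined in app.py.
graph = StateGraph(AgentState)
graph.add_node("reason", reasoning_node)
graph.add_node("action", tool_node)
graph.set_entry_point("reason")

# should_continue returns "reason" or "continue" (see the first hunk);
# how those labels map onto targets is an assumption here.
graph.add_conditional_edges(
    "reason",
    should_continue,
    {"continue": "action", "reason": END},
)
graph.add_edge("action", "reason")

agent = graph.compile()
result = agent.invoke({
    "question": "What is the capital of France?",
    "context": {},
    "reasoning": "",
    "iterations": 0,
    "history": [],
})
```

With wiring like this, the agent alternates reason and tool steps until the router stops it or the final-answer branch in `reasoning_node` fires.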