Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -141,11 +141,11 @@ def should_continue(state: AgentState) -> str:
|
|
141 |
def reasoning_node(state: AgentState) -> AgentState:
|
142 |
import os
|
143 |
from langchain.schema import HumanMessage, AIMessage
|
144 |
-
from
|
145 |
-
from langchain.prompts import ChatPromptTemplate
|
146 |
from langchain_community.llms import HuggingFaceHub
|
|
|
147 |
|
148 |
-
# Ensure token is available
|
149 |
token = os.environ.get("HF_TOKEN")
|
150 |
if not token:
|
151 |
raise ValueError("Hugging Face API token not found in environment variables")
|
@@ -154,39 +154,40 @@ def reasoning_node(state: AgentState) -> AgentState:
|
|
154 |
if not state["history"] or not isinstance(state["history"][-1], HumanMessage):
|
155 |
state["history"].append(HumanMessage(content="Continue."))
|
156 |
|
157 |
-
# Create the LLM
|
158 |
llm = HuggingFaceHub(
|
159 |
repo_id="HuggingFaceH4/zephyr-7b-beta",
|
160 |
-
huggingfacehub_api_token=
|
161 |
model_kwargs={"temperature": 0.1, "max_new_tokens": 500}
|
162 |
)
|
163 |
|
164 |
-
|
165 |
-
|
166 |
-
|
167 |
-
|
168 |
-
|
169 |
-
|
170 |
-
|
171 |
-
|
172 |
-
|
173 |
-
|
174 |
-
|
175 |
-
|
176 |
-
|
177 |
-
|
178 |
-
|
179 |
-
|
180 |
-
chain = prompt | chat_model
|
181 |
response = chain.invoke({
|
182 |
-
"context": state[
|
183 |
-
"reasoning": state[
|
184 |
-
"question": state[
|
185 |
})
|
186 |
|
187 |
-
content = response
|
|
|
|
|
188 |
reasoning, action, action_input = parse_agent_response(content)
|
189 |
|
|
|
190 |
state['history'].append(AIMessage(content=content))
|
191 |
state['reasoning'] += f"\nStep {state['iterations']+1}: {reasoning}"
|
192 |
state['iterations'] += 1
|
@@ -194,7 +195,6 @@ def reasoning_node(state: AgentState) -> AgentState:
|
|
194 |
if "final answer" in action.lower():
|
195 |
state['history'].append(AIMessage(content=f"FINAL ANSWER: {action_input}"))
|
196 |
else:
|
197 |
-
# Save action tool call in context instead of history
|
198 |
state['context']['current_tool'] = {
|
199 |
"tool": action,
|
200 |
"input": action_input
|
@@ -202,6 +202,7 @@ def reasoning_node(state: AgentState) -> AgentState:
|
|
202 |
|
203 |
return state
|
204 |
|
|
|
205 |
|
206 |
|
207 |
def tool_node(state: AgentState) -> AgentState:
|
|
|
141 |
def reasoning_node(state: AgentState) -> AgentState:
    """Run one reasoning step of the agent loop.

    Builds a flattened (non-chat) prompt from the current context, reasoning
    trace and question, queries Zephyr-7B on the Hugging Face Hub, parses the
    model's ``Reasoning / Action / Action Input`` reply, and records the step
    in ``state``.

    Args:
        state: Mutable agent state; must provide the ``history``, ``context``,
            ``reasoning``, ``question`` and ``iterations`` keys.

    Returns:
        The same ``state`` object, updated in place.

    Raises:
        ValueError: If the ``HF_TOKEN`` environment variable is not set.
    """
    import os
    from langchain.schema import HumanMessage, AIMessage
    from langchain.prompts import PromptTemplate
    from langchain_community.llms import HuggingFaceHub

    # Ensure HF token is available before attempting any remote call.
    token = os.environ.get("HF_TOKEN")
    if not token:
        raise ValueError("Hugging Face API token not found in environment variables")

    # The loop expects the last turn to be a human message; pad if needed.
    if not state["history"] or not isinstance(state["history"][-1], HumanMessage):
        state["history"].append(HumanMessage(content="Continue."))

    # Create the LLM (Zephyr-7B hosted on the Hub).
    llm = HuggingFaceHub(
        repo_id="HuggingFaceH4/zephyr-7b-beta",
        huggingfacehub_api_token=token,
        model_kwargs={"temperature": 0.1, "max_new_tokens": 500}
    )

    # Flattened prompt (not chat-based).
    flat_prompt = PromptTemplate.from_template(
        "You're an expert problem solver. Analyze the question, select the best tool, "
        "and provide reasoning.\n\n"
        "Context:\n{context}\n\n"
        "Reasoning Steps:\n{reasoning}\n\n"
        "Question:\n{question}\n\n"
        "Response Format:\nReasoning: [Your analysis]\nAction: [Tool name OR 'Final Answer']\n"
        "Action Input: [Input for tool OR final response]"
    )

    # Compose prompt and model with the runnable (`|`) operator — the same
    # style this file already uses elsewhere — instead of the deprecated
    # LLMChain wrapper. An LLM runnable returns the raw completion string,
    # so no ["text"] unwrapping is needed.
    chain = flat_prompt | llm

    content = chain.invoke({
        "context": state["context"],
        "reasoning": state["reasoning"],
        "question": state["question"]
    })

    # Parse the Reasoning / Action / Action Input sections of the reply.
    reasoning, action, action_input = parse_agent_response(content)

    # Record the step in the running state.
    state['history'].append(AIMessage(content=content))
    state['reasoning'] += f"\nStep {state['iterations']+1}: {reasoning}"
    state['iterations'] += 1

    if "final answer" in action.lower():
        state['history'].append(AIMessage(content=f"FINAL ANSWER: {action_input}"))
    else:
        # Save the pending tool call in context instead of history so the
        # tool node can pick it up on the next graph step.
        state['context']['current_tool'] = {
            "tool": action,
            "input": action_input
        }

    return state
|
207 |
|
208 |
def tool_node(state: AgentState) -> AgentState:
|