wt002 committed on
Commit
6561573
·
verified ·
1 Parent(s): 3778af2

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +28 -27
app.py CHANGED
@@ -141,11 +141,11 @@ def should_continue(state: AgentState) -> str:
141
  def reasoning_node(state: AgentState) -> AgentState:
142
  import os
143
  from langchain.schema import HumanMessage, AIMessage
144
- from langchain_community.chat_models import ChatHuggingFace
145
- from langchain.prompts import ChatPromptTemplate
146
  from langchain_community.llms import HuggingFaceHub
 
147
 
148
- # Ensure token is available
149
  token = os.environ.get("HF_TOKEN")
150
  if not token:
151
  raise ValueError("Hugging Face API token not found in environment variables")
@@ -154,39 +154,40 @@ def reasoning_node(state: AgentState) -> AgentState:
154
  if not state["history"] or not isinstance(state["history"][-1], HumanMessage):
155
  state["history"].append(HumanMessage(content="Continue."))
156
 
157
- # Create the LLM
158
  llm = HuggingFaceHub(
159
  repo_id="HuggingFaceH4/zephyr-7b-beta",
160
- huggingfacehub_api_token=os.environ["HF_TOKEN"],
161
  model_kwargs={"temperature": 0.1, "max_new_tokens": 500}
162
  )
163
 
164
- chat_model = ChatHuggingFace(llm=llm)
165
-
166
- # Build prompt
167
- prompt = ChatPromptTemplate.from_messages([
168
- ("system", (
169
- "You're an expert problem solver. Analyze the question, select the best tool, "
170
- "and provide reasoning. Available tools: duckduckgo_search, wikipedia_search, "
171
- "arxiv_search, document_qa, python_execution.\n\n"
172
- "Current Context:\n{context}\n\n"
173
- "Reasoning Steps:\n{reasoning}\n\n"
174
- "Response Format:\nReasoning: [Your analysis]\nAction: [Tool name OR 'Final Answer']\n"
175
- "Action Input: [Input for tool OR final response]"
176
- )),
177
- *state['history']
178
- ])
179
-
180
- chain = prompt | chat_model
181
  response = chain.invoke({
182
- "context": state['context'],
183
- "reasoning": state['reasoning'],
184
- "question": state['question']
185
  })
186
 
187
- content = response.content
 
 
188
  reasoning, action, action_input = parse_agent_response(content)
189
 
 
190
  state['history'].append(AIMessage(content=content))
191
  state['reasoning'] += f"\nStep {state['iterations']+1}: {reasoning}"
192
  state['iterations'] += 1
@@ -194,7 +195,6 @@ def reasoning_node(state: AgentState) -> AgentState:
194
  if "final answer" in action.lower():
195
  state['history'].append(AIMessage(content=f"FINAL ANSWER: {action_input}"))
196
  else:
197
- # Save action tool call in context instead of history
198
  state['context']['current_tool'] = {
199
  "tool": action,
200
  "input": action_input
@@ -202,6 +202,7 @@ def reasoning_node(state: AgentState) -> AgentState:
202
 
203
  return state
204
 
 
205
 
206
 
207
  def tool_node(state: AgentState) -> AgentState:
 
141
  def reasoning_node(state: AgentState) -> AgentState:
142
  import os
143
  from langchain.schema import HumanMessage, AIMessage
144
+ from langchain.prompts import PromptTemplate
 
145
  from langchain_community.llms import HuggingFaceHub
146
+ from langchain.chains import LLMChain
147
 
148
+ # Ensure HF token is available
149
  token = os.environ.get("HF_TOKEN")
150
  if not token:
151
  raise ValueError("Hugging Face API token not found in environment variables")
 
154
  if not state["history"] or not isinstance(state["history"][-1], HumanMessage):
155
  state["history"].append(HumanMessage(content="Continue."))
156
 
157
+ # Create the LLM (Zephyr-7B)
158
  llm = HuggingFaceHub(
159
  repo_id="HuggingFaceH4/zephyr-7b-beta",
160
+ huggingfacehub_api_token=token,
161
  model_kwargs={"temperature": 0.1, "max_new_tokens": 500}
162
  )
163
 
164
+ # Flattened prompt (not chat-based)
165
+ flat_prompt = PromptTemplate.from_template(
166
+ "You're an expert problem solver. Analyze the question, select the best tool, "
167
+ "and provide reasoning.\n\n"
168
+ "Context:\n{context}\n\n"
169
+ "Reasoning Steps:\n{reasoning}\n\n"
170
+ "Question:\n{question}\n\n"
171
+ "Response Format:\nReasoning: [Your analysis]\nAction: [Tool name OR 'Final Answer']\n"
172
+ "Action Input: [Input for tool OR final response]"
173
+ )
174
+
175
+ # Build the chain
176
+ chain = LLMChain(prompt=flat_prompt, llm=llm)
177
+
178
+ # Run the chain
 
 
179
  response = chain.invoke({
180
+ "context": state["context"],
181
+ "reasoning": state["reasoning"],
182
+ "question": state["question"]
183
  })
184
 
185
+ content = response["text"]
186
+
187
+ # Parse response
188
  reasoning, action, action_input = parse_agent_response(content)
189
 
190
+ # Update state
191
  state['history'].append(AIMessage(content=content))
192
  state['reasoning'] += f"\nStep {state['iterations']+1}: {reasoning}"
193
  state['iterations'] += 1
 
195
  if "final answer" in action.lower():
196
  state['history'].append(AIMessage(content=f"FINAL ANSWER: {action_input}"))
197
  else:
 
198
  state['context']['current_tool'] = {
199
  "tool": action,
200
  "input": action_input
 
202
 
203
  return state
204
 
205
+
206
 
207
 
208
  def tool_node(state: AgentState) -> AgentState: