wt002 committed · Commit 40de8fd · verified · 1 Parent(s): 415844f

Update app.py

Files changed (1)
  1. app.py  +38 -35
app.py CHANGED
@@ -28,6 +28,9 @@ from langchain_core.messages import HumanMessage, AIMessage, ToolMessage
 from langchain_core.tools import tool
 from typing import Dict, List, TypedDict, Annotated
 import operator
+from langchain_community.llms import HuggingFaceHub
+from langchain_community.chat_models import ChatHuggingFace
+
 
 # ====== Tool Definitions ======
 @tool
@@ -125,43 +128,44 @@ def should_continue(state: AgentState) -> str:
 
 def reasoning_node(state: AgentState) -> AgentState:
     """Agent reasoning and tool selection"""
-    from langchain_community.chat_models import ChatHuggingFace
-    from langchain.schema import SystemMessage
+    # Get Hugging Face API token from environment
+    token = os.environ.get("HF_API_TOKEN")
+    if not token:
+        raise ValueError("Hugging Face API token not found in environment variables")
+
+    # Create the underlying LLM model
+    llm = HuggingFaceHub(
+        repo_id="HuggingFaceH4/zephyr-7b-beta",
+        huggingfacehub_api_token=token,
+        model_kwargs={
+            "temperature": 0.1,
+            "max_new_tokens": 500
+        }
+    )
+
+    # Wrap the LLM in ChatHuggingFace
+    chat_model = ChatHuggingFace(llm=llm)
 
     # Build prompt
     prompt = ChatPromptTemplate.from_messages([
-        SystemMessage(content=(
-            "You are an intelligent AI assistant. Follow this process:\n"
-            "1. Analyze the question: {question}\n"
-            "2. Review context: {context}\n"
-            "3. Reasoning Steps:\n{reasoning}\n"
-            "4. Select ONE tool to use next OR provide FINAL ANSWER\n\n"
-            "Available Tools:\n"
-            "- duckduckgo_search: For current information\n"
-            "- wikipedia_search: For factual knowledge\n"
-            "- arxiv_search: For academic topics\n"
-            "- document_qa: For questions about documents\n"
-            "- python_execution: For calculations/code\n\n"
-            "Response Format:\n"
-            "Reasoning: [Your step-by-step analysis]\n"
-            "Action: [Tool name OR 'Final Answer']\n"
-            "Action Input: [Tool parameters OR final response]"
+        ("system", (
+            "You're an expert problem solver. Analyze the question, select the best tool, "
+            "and provide reasoning. Available tools: duckduckgo_search, wikipedia_search, "
+            "arxiv_search, document_qa, python_execution.\n\n"
+            "Current Context:\n{context}\n\n"
+            "Reasoning Steps:\n{reasoning}\n\n"
+            "Response Format:\nReasoning: [Your analysis]\nAction: [Tool name OR 'Final Answer']\n"
+            "Action Input: [Input for tool OR final response]"
         )),
         *state['history']
     ])
 
-    # Initialize model
-    llm = ChatHuggingFace(
-        model_name="HuggingFaceH4/zephyr-7b-beta",
-        temperature=0.3
-    )
-
-    # Generate response
-    response = llm.invoke(prompt.format_messages(
-        question=state['question'],
-        context=state['context'],
-        reasoning=state['reasoning']
-    ))
+    chain = prompt | chat_model
+    response = chain.invoke({
+        "context": state['context'],
+        "reasoning": state['reasoning'],
+        "question": state['question']
+    })
 
     # Parse response
     content = response.content
@@ -172,17 +176,16 @@ def reasoning_node(state: AgentState) -> AgentState:
     state['reasoning'] += f"\nStep {state['iterations']+1}: {reasoning}"
 
     if "final answer" in action.lower():
-        state['history'].append(AIMessage(
-            content=f"FINAL ANSWER: {action_input}"
-        ))
+        state['history'].append(AIMessage(content=f"FINAL ANSWER: {action_input}"))
     else:
         state['history'].append({
-            "role": "action_request",
             "tool": action,
-            "input": action_input
+            "input": action_input,
+            "role": "action_request"
         })
 
     return state
+
 
 def tool_node(state: AgentState) -> AgentState:
     """Execute selected tool and update state"""