wt002 committed
Commit 0a73fdc · verified · 1 Parent(s): 6561573

Update app.py

Files changed (1)
  1. app.py +31 -34
app.py CHANGED
@@ -137,57 +137,55 @@ def should_continue(state: AgentState) -> str:
     return "continue"
 
 
-
 def reasoning_node(state: AgentState) -> AgentState:
     import os
+    from langchain_google_genai import ChatGoogleGenerativeAI
     from langchain.schema import HumanMessage, AIMessage
-    from langchain.prompts import PromptTemplate
-    from langchain_community.llms import HuggingFaceHub
-    from langchain.chains import LLMChain
+    from langchain.prompts import ChatPromptTemplate
 
-    # Ensure HF token is available
-    token = os.environ.get("HF_TOKEN")
-    if not token:
-        raise ValueError("Hugging Face API token not found in environment variables")
+    # Explicitly load the Google API key
+    GOOGLE_API_KEY = os.getenv("GOOGLE_API_KEY")
+    if not GOOGLE_API_KEY:
+        raise ValueError("GOOGLE_API_KEY not set in environment variables.")
 
-    # Defensive: Ensure valid history
+    # Defensive: Ensure valid message history
     if not state["history"] or not isinstance(state["history"][-1], HumanMessage):
         state["history"].append(HumanMessage(content="Continue."))
 
-    # Create the LLM (Zephyr-7B)
-    llm = HuggingFaceHub(
-        repo_id="HuggingFaceH4/zephyr-7b-beta",
-        huggingfacehub_api_token=token,
-        model_kwargs={"temperature": 0.1, "max_new_tokens": 500}
-    )
-
-    # Flattened prompt (not chat-based)
-    flat_prompt = PromptTemplate.from_template(
-        "You're an expert problem solver. Analyze the question, select the best tool, "
-        "and provide reasoning.\n\n"
-        "Context:\n{context}\n\n"
-        "Reasoning Steps:\n{reasoning}\n\n"
-        "Question:\n{question}\n\n"
-        "Response Format:\nReasoning: [Your analysis]\nAction: [Tool name OR 'Final Answer']\n"
-        "Action Input: [Input for tool OR final response]"
+    # Initialize Gemini model
+    llm = ChatGoogleGenerativeAI(
+        model="gemini-1.5-flash",
+        temperature=0.1,
+        google_api_key=GOOGLE_API_KEY
     )
 
-    # Build the chain
-    chain = LLMChain(prompt=flat_prompt, llm=llm)
-
-    # Run the chain
+    # Build prompt
+    prompt = ChatPromptTemplate.from_messages([
+        ("system", (
+            "You're an expert problem solver. Analyze the question, select the best tool, "
+            "and provide reasoning. Available tools: duckduckgo_search, wikipedia_search, "
+            "arxiv_search, document_qa, python_execution.\n\n"
+            "Current Context:\n{context}\n\n"
+            "Reasoning Steps:\n{reasoning}\n\n"
+            "Response Format:\nReasoning: [Your analysis]\nAction: [Tool name OR 'Final Answer']\n"
+            "Action Input: [Input for tool OR final response]"
+        )),
+        *state['history']
+    ])
+
+    chain = prompt | llm
+
+    # Invoke chain with inputs
     response = chain.invoke({
         "context": state["context"],
         "reasoning": state["reasoning"],
         "question": state["question"]
     })
 
-    content = response["text"]
-
-    # Parse response
+    content = response.content
+
     reasoning, action, action_input = parse_agent_response(content)
 
-    # Update state
+    # Update agent state
     state['history'].append(AIMessage(content=content))
     state['reasoning'] += f"\nStep {state['iterations']+1}: {reasoning}"
     state['iterations'] += 1
@@ -202,7 +200,6 @@ def reasoning_node(state: AgentState) -> AgentState:
 
     return state
 
-
 
 
 def tool_node(state: AgentState) -> AgentState:
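
For reference, the following is a minimal standalone sketch of the pattern this commit adopts (a ChatPromptTemplate piped into ChatGoogleGenerativeAI), using the same gemini-1.5-flash settings shown in the diff. The in-line history message and the empty context/reasoning values are hypothetical placeholders for illustration only, not part of app.py.

import os

from langchain.prompts import ChatPromptTemplate
from langchain.schema import HumanMessage
from langchain_google_genai import ChatGoogleGenerativeAI

# Same key handling as the commit: fail fast if the key is missing.
GOOGLE_API_KEY = os.getenv("GOOGLE_API_KEY")
if not GOOGLE_API_KEY:
    raise ValueError("GOOGLE_API_KEY not set in environment variables.")

llm = ChatGoogleGenerativeAI(
    model="gemini-1.5-flash",
    temperature=0.1,
    google_api_key=GOOGLE_API_KEY,
)

# {context} and {reasoning} remain template variables; prior turns are spliced in
# as concrete message objects, mirroring *state['history'] in the commit.
prompt = ChatPromptTemplate.from_messages([
    ("system", "Context:\n{context}\n\nReasoning Steps:\n{reasoning}"),
    HumanMessage(content="What is the capital of France?"),  # hypothetical history turn
])

chain = prompt | llm  # LCEL: the formatted prompt feeds the chat model
response = chain.invoke({"context": "", "reasoning": ""})
print(response.content)  # chat models return a message object, hence .content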