cheremnm committed
Commit 64d1ce9 · verified · 1 Parent(s): e2d6379

Update app.py

Files changed (1): app.py (+9 -5)
app.py CHANGED
@@ -5,6 +5,10 @@ import chromadb # High-performance vector database for storing/querying dense v
 from dotenv import load_dotenv # Loading environment variables from a .env file
 import json # Parsing and handling JSON data
 
+# Supressing logging to langchain. Remove or comment this block to see logs when debugging
+import logging
+logging.getLogger("langchain").setLevel(logging.ERROR)
+
 # LangChain imports
 from langchain_openai import ChatOpenAI
 from langchain_core.documents import Document # Document data structures
@@ -137,7 +141,7 @@ Examples:
 
     chain = expand_prompt | llm | StrOutputParser()
     expanded_query = chain.invoke({"query": state['query'], "query_feedback":state["query_feedback"]})
-    print("expanded_query", expanded_query)
+    # print("expanded_query", expanded_query) #uncomment this line to see expanded query
    state["expanded_query"] = expanded_query
    return state
 
@@ -185,7 +189,7 @@ def retrieve_context(state):
 
    ]
    state['context'] = context # Complete the code to define the key for storing the context
-    print("Extracted context with metadata:", context) # Debugging: Print the extracted context
+    #print("Extracted context with metadata:", context) # Debugging: Print the extracted context
    #print(f"Groundedness loop count: {state['groundedness_loop_count']}")
    return state
 
@@ -221,7 +225,7 @@ If the context does not contain enough information to answer accurately, clearly
        "feedback": state.get('feedback', 'No feedback provided') # add feedback to the prompt
    })
    state['response'] = response
-    print("intermediate response: ", response)
+    #print("intermediate response: ", response) #uncomment this line to see intermediate response
 
    return state
 
@@ -337,7 +341,7 @@ Focus on biblical coherence, faith-based reasoning, and alignment with the theme
 
    # Store response suggestions in a structured format
    feedback = f"Previous Response: {state['response']}\nSuggestions: {chain.invoke({'query': state['query'], 'response': state['response']})}"
-    print("feedback: ", feedback)
+    #print("feedback: ", feedback) #uncomment this line to see feedback
    print(f"State: {state}")
    state['feedback'] = feedback
    return state
@@ -370,7 +374,7 @@ Focus on biblical coherence, faith-based reasoning, and alignment with the theme
 
    # Store refinement suggestions without modifying the original expanded query
    query_feedback = f"Previous Expanded Query: {state['expanded_query']}\nSuggestions: {chain.invoke({'query': state['query'], 'expanded_query': state['expanded_query']})}"
-    print("query_feedback: ", query_feedback)
+    #print("query_feedback: ", query_feedback)
    print(f"Groundedness loop count: {state['groundedness_loop_count']}")
    state['query_feedback'] = query_feedback
    return state
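
Note: if the goal is to silence this output without editing app.py each time, one alternative is to route the debug prints through the standard logging module and control verbosity with an environment variable. The sketch below is not part of this commit; the APP_DEBUG variable name and the simplified expand_query() stub are hypothetical, shown only to illustrate the pattern.

# Minimal sketch (assumption, not the committed code): gate debug output behind a logger
# instead of commenting prints in and out.
import logging
import os

# Silence LangChain's logger, mirroring the block added in this commit.
logging.getLogger("langchain").setLevel(logging.ERROR)

# Root logging level is driven by a hypothetical APP_DEBUG environment variable.
logging.basicConfig(level=logging.DEBUG if os.getenv("APP_DEBUG") == "1" else logging.WARNING)
logger = logging.getLogger("app")

def expand_query(state: dict) -> dict:
    expanded_query = "..."  # stand-in for chain.invoke({...}) in app.py
    logger.debug("expanded_query: %s", expanded_query)  # emitted only when APP_DEBUG=1
    state["expanded_query"] = expanded_query
    return state

print(expand_query({"query": "example"})["expanded_query"])

With this arrangement, running the app normally stays quiet, while setting APP_DEBUG=1 re-enables the diagnostic output without touching the source.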