cheremnm committed on
Commit 0b40db8 · verified · 1 Parent(s): 831f068

Upload app.py with huggingface_hub

Files changed (1)
  1. app.py +764 -0
app.py ADDED
@@ -0,0 +1,764 @@
+ # Import necessary libraries
+ import os  # Interacting with the operating system (reading/writing files)
+ import chromadb  # Vector database for storing/querying dense embeddings
+ from dotenv import load_dotenv  # Loading environment variables from a .env file
+ import json  # Parsing and handling JSON data
+
+ # LangChain imports
+ from langchain_core.documents import Document  # Document data structure
+ from langchain_core.runnables import RunnablePassthrough  # Composable runnable pipelines
+ from langchain_core.output_parsers import StrOutputParser  # String output parser
+ from langchain_core.prompts import ChatPromptTemplate  # Template for chat prompts
+ from langchain_core.tools import tool  # Decorator for defining agent tools
+ from langchain.chains.query_constructor.base import AttributeInfo  # Base classes for query construction
+ from langchain.retrievers.self_query.base import SelfQueryRetriever  # Self-querying retrievers
+ from langchain.retrievers.document_compressors import LLMChainExtractor, CrossEncoderReranker  # Document compressors
+ from langchain.retrievers import ContextualCompressionRetriever  # Contextual compression retrievers
+ from langchain.agents import create_tool_calling_agent, AgentExecutor  # Tool-calling agent and executor
+
+ # LangChain community & experimental imports
+ from langchain_community.vectorstores import Chroma  # Chroma vector store integration
+ from langchain_community.document_loaders import PyPDFDirectoryLoader, PyPDFLoader  # Document loaders for PDFs
+ from langchain_community.cross_encoders import HuggingFaceCrossEncoder  # Cross-encoders from HuggingFace
+ from langchain_experimental.text_splitter import SemanticChunker  # Experimental text-splitting methods
+ from langchain.text_splitter import (
+     CharacterTextSplitter,  # Splitting text by characters
+     RecursiveCharacterTextSplitter  # Recursive splitting of text by characters
+ )
+
+ # LangChain OpenAI imports
+ from langchain_openai import ChatOpenAI, OpenAIEmbeddings  # OpenAI chat model and embeddings
+ from langchain_openai import AzureOpenAIEmbeddings, AzureChatOpenAI  # Azure OpenAI variants
+
+ # LlamaParse & LlamaIndex imports
+ from llama_parse import LlamaParse  # Document parsing library
+ from llama_index.core import Settings, SimpleDirectoryReader  # Core functionalities of LlamaIndex
+
+ # LangGraph import
+ from langgraph.graph import StateGraph, END, START  # State graph for agent workflows
+
+ # Pydantic import
+ from pydantic import BaseModel  # Pydantic for data validation
+
+ # Typing imports
+ from typing import Dict, List, Tuple, Any, TypedDict  # Python typing for function annotations
+
+ # Other utilities
+ import numpy as np  # NumPy for numerical operations
+ from groq import Groq  # Groq client (used for Llama Guard input filtering)
+ from mem0 import MemoryClient  # mem0 client for long-term conversational memory
+ import streamlit as st  # Streamlit for the web UI
+ from datetime import datetime  # Timestamps for stored interactions
+
+ #====================================SETUP=====================================#
+ # Fetch secrets from the environment (Hugging Face Spaces secrets or a local .env)
+ load_dotenv()  # No-op if no .env file is present
+ api_key = os.environ.get("API_KEY")
+ endpoint = os.environ.get("OPENAI_API_BASE")
+ llama_api_key = os.environ.get("GROQ_API_KEY")
+ MEM0_api_key = os.environ.get("MEM0_API_KEY")
+
+ # Initialize the OpenAI embeddings model
+ embedding_model = OpenAIEmbeddings(
+     openai_api_base=endpoint,
+     openai_api_key=api_key,
+     model='text-embedding-ada-002'
+ )
+
+ # Initialize the Chat OpenAI model with the provided endpoint and API key;
+ # streaming is disabled so responses are returned in full.
+ llm = ChatOpenAI(
+     openai_api_base=endpoint,
+     openai_api_key=api_key,
+     model="gpt-4o-mini",
+     streaming=False
+ )
+
+ # Set the LLM and embedding model in the LlamaIndex settings.
+ Settings.llm = llm
+ Settings.embed_model = embedding_model
+
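+ # Optional sanity check (illustrative): warn early if an expected secret is
+ # missing, so a misconfigured Space fails loudly instead of midway through a query.
+ for _name, _value in [("API_KEY", api_key), ("OPENAI_API_BASE", endpoint),
+                       ("GROQ_API_KEY", llama_api_key), ("MEM0_API_KEY", MEM0_api_key)]:
+     if not _value:
+         print(f"Warning: expected environment variable {_name} is not set.")
+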
+ #================================Creating LangGraph agent======================#
+
+ class AgentState(TypedDict):
+     query: str  # The current user query
+     expanded_query: str  # The expanded version of the user query
+     context: List[Dict[str, Any]]  # Retrieved documents (content and metadata)
+     response: str  # The generated response to the user query
+     precision_score: float  # The precision score of the response
+     groundedness_score: float  # The groundedness score of the response
+     groundedness_loop_count: int  # Counter for groundedness refinement loops
+     precision_loop_count: int  # Counter for precision refinement loops
+     feedback: str  # Suggestions for improving the response
+     query_feedback: str  # Suggestions for improving the expanded query
+     groundedness_check: bool  # Whether the groundedness check has passed
+     loop_max_iter: int  # Maximum number of refinement iterations
+
+ def expand_query(state: Dict[str, Any]) -> Dict[str, Any]:
+     """
+     Expands the user query to improve retrieval of Bible and spiritual information.
+
+     Args:
+         state: Workflow state containing at least 'query' and 'query_feedback'.
+
+     Returns:
+         Workflow state with an additional 'expanded_query' key.
+     """
+     print("---------Expanding Query---------")
+     system_message = '''You are an assistant that reformulates vague or short user questions into detailed, domain-specific queries related to the Bible and spiritual questions.
+
+ Examples:
+ - Input: "David and Goliath?"
+   Expanded: "What is the significance of the story of David and Goliath in the context of faith, courage, and divine intervention?"
+
+ - Input: "What does Jesus say about love?"
+   Expanded: "What teachings did Jesus offer about love in the New Testament, and how do passages like John 13:34–35 and Matthew 22:37–39 reflect those teachings?"
+
+ - Input: "Genesis creation"
+   Expanded: "How does the Book of Genesis describe the creation of the world, and what are the main theological interpretations of the seven days of creation?"
+
+ - Input: "End times?"
+   Expanded: "What does the Bible say about the end times, and how do texts like the Book of Revelation, Daniel, and Matthew 24 contribute to Christian eschatology?"
+
+ - Input: "Women in the Bible"
+   Expanded: "What roles do women play in the Bible, and how are figures such as Mary, Ruth, Esther, and Deborah portrayed in biblical narratives?"
+ '''
+     expand_prompt = ChatPromptTemplate.from_messages([
+         ("system", system_message),
+         ("user", "Expand this query: {query} using the feedback: {query_feedback}")
+     ])
+
+     chain = expand_prompt | llm | StrOutputParser()
+     expanded_query = chain.invoke({"query": state['query'], "query_feedback": state["query_feedback"]})
+     print("expanded_query", expanded_query)
+     state["expanded_query"] = expanded_query
+     return state
+
+
+ # Connect to the persistent Chroma collection built offline
+ chroma_client = chromadb.PersistentClient(path="./combined")
+
+ vector_store = Chroma(
+     client=chroma_client,  # Pass the persistent client created above
+     collection_name="combined",
+     embedding_function=embedding_model,
+ )
+
+ # Create a retriever from the vector store
+ retriever = vector_store.as_retriever(
+     search_type='similarity',
+     search_kwargs={'k': 3}
+ )
+
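+ # Quick smoke test (illustrative, commented out; assumes the "./combined"
+ # collection already holds embedded chunks of the source texts):
+ # docs = retriever.invoke("What is the purpose of life?")
+ # print([d.metadata for d in docs])  # expect up to k=3 matches
+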
+ def retrieve_context(state):
+     """
+     Retrieves context from the vector store using the expanded or original query.
+
+     Args:
+         state (Dict): The current state of the workflow, containing the query and expanded query.
+
+     Returns:
+         Dict: The updated state with the retrieved context.
+     """
+     print("---------retrieve_context---------")
+     query = state['expanded_query']  # Use the expanded query for retrieval
+
+     # Retrieve documents from the vector store
+     docs = retriever.invoke(query)
+     print("Retrieved documents:", docs)  # Debugging: print the raw docs object
+
+     # Extract both page_content and metadata from each document, preferring the
+     # original (pre-chunked) content when it is stored in the metadata
+     context = [
+         {
+             "content": doc.metadata.get("original_content", doc.page_content),
+             "metadata": doc.metadata
+         }
+         for doc in docs
+     ]
+     state['context'] = context  # Store the retrieved context in the workflow state
+     print("Extracted context with metadata:", context)  # Debugging: print the extracted context
+     return state
+
+
+ def craft_response(state):
+     """
+     Generates a response using the retrieved context, focusing on biblical teachings and spiritual guidance.
+
+     Args:
+         state (Dict): The current state of the workflow, containing the query and retrieved context.
+
+     Returns:
+         Dict: The updated state with the generated response.
+     """
+     print("---------craft_response---------")
+     system_message = '''You are a helpful AI assistant trained to support users in understanding biblical teachings and spiritual guidance, using context retrieved from the Bible and the book *The Purpose Driven Life* by Rick Warren.
+ Your responses must strictly adhere to the retrieved context, which is extracted from biblical texts such as the CSB Bible, theological commentaries, or trusted religious sources.
+
+ Do not speculate, interpret creatively, or introduce knowledge not found in the provided context. Focus only on scriptural passages, interpretations, historical backgrounds, or theological themes directly supported by the retrieved content.
+ If the context does not contain enough information to answer accurately, clearly state that. Aim for clarity, scriptural accuracy, and relevance to the user's query.
+ '''
+
+     response_prompt = ChatPromptTemplate.from_messages([
+         ("system", system_message),
+         ("user", "Query: {query}\nContext: {context}\n\nfeedback: {feedback}")
+     ])
+
+     # Parse to a plain string so the downstream scoring prompts receive clean text
+     chain = response_prompt | llm | StrOutputParser()
+     response = chain.invoke({
+         "query": state['query'],
+         "context": "\n".join([doc["content"] for doc in state['context']]),
+         "feedback": state.get('feedback', 'No feedback provided')  # Include refinement feedback in the prompt
+     })
+     state['response'] = response
+     print("intermediate response: ", response)
+
+     return state
+
+
+ def score_groundedness(state):
+     """
+     Checks whether the response is grounded in the retrieved context.
+
+     Args:
+         state (Dict): The current state of the workflow, containing the response and context.
+
+     Returns:
+         Dict: The updated state with the groundedness score.
+     """
+     print("---------check_groundedness---------")
+     system_message = '''You are evaluating whether an AI-generated response is grounded in the retrieved context
+ provided from biblical texts (such as the CSB Bible) and the book *The Purpose Driven Life* by Rick Warren.
+ The context includes scripture, commentary, and theological content.
+
+ Your task is to assign a groundedness score between 0 and 1, where:
+
+ - 1.0 means the response is fully supported by the context,
+ - 0.0 means the response is entirely unsupported.
+
+ Be strict: if even a part of the response is not traceable to the context, reduce the score. Provide only
+ the numeric score.'''
+
+     groundedness_prompt = ChatPromptTemplate.from_messages([
+         ("system", system_message),
+         ("user", "Context: {context}\nResponse: {response}\n\nGroundedness score:")
+     ])
+
+     chain = groundedness_prompt | llm | StrOutputParser()
+     groundedness_score = float(chain.invoke({
+         "context": "\n".join([doc["content"] for doc in state['context']]),
+         "response": state['response']
+     }))
+     print("groundedness_score: ", groundedness_score)
+     state['groundedness_loop_count'] += 1
+     print("#########Groundedness Incremented###########")
+     state['groundedness_score'] = groundedness_score
+
+     return state
+
+
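+ # Note: the float(...) casts in score_groundedness and check_precision assume the
+ # model returns a bare number. A more defensive parse (illustrative sketch) could
+ # extract the first numeric token instead:
+ # import re
+ # def parse_score(text: str) -> float:
+ #     match = re.search(r"\d*\.?\d+", text)
+ #     return float(match.group()) if match else 0.0
+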
+ def check_precision(state: Dict) -> Dict:
+     """
+     Checks whether the response precisely addresses the user's query.
+
+     Args:
+         state (Dict): The current state of the workflow, containing the query and response.
+
+     Returns:
+         Dict: The updated state with the precision score.
+     """
+     print("---------check_precision---------")
+     system_message = '''You are assessing whether an AI-generated response precisely answers the user's query,
+ within the domain of biblical interpretation and spiritual guidance drawn from the Bible and *The Purpose Driven Life*.
+
+ Provide a precision score between 0 and 1:
+
+ - 1.0: The response fully and directly answers the query with clear relevance.
+ - 0.0: The response is vague, unrelated, or fails to address the query.
+
+ Only return a numeric score.'''
+
+     precision_prompt = ChatPromptTemplate.from_messages([
+         ("system", system_message),
+         ("user", "Query: {query}\nResponse: {response}\n\nPrecision score:")
+     ])
+
+     chain = precision_prompt | llm | StrOutputParser()
+     precision_score = float(chain.invoke({
+         "query": state['query'],
+         "response": state['response']
+     }))
+     state['precision_score'] = precision_score
+     print("precision_score:", precision_score)
+     state['precision_loop_count'] += 1
+     print("#########Precision Incremented###########")
+
+     return state
+
+
+ def refine_response(state: Dict) -> Dict:
+     """
+     Suggests improvements for the generated response.
+
+     Args:
+         state (Dict): The current state of the workflow, containing the query and response.
+
+     Returns:
+         Dict: The updated state with response refinement suggestions.
+     """
+     print("---------refine_response---------")
+     system_message = '''You are an expert assistant helping to improve AI-generated answers related to biblical interpretation and spiritual guidance.
+ Evaluate the response and suggest constructive improvements to enhance accuracy, specificity, and completeness.
+ Do not rewrite the response. Instead, point out what is vague, missing, or could be better explained.
+ Focus on biblical coherence, faith-based reasoning, and alignment with the themes and tone of the source texts.'''
+
+     refine_response_prompt = ChatPromptTemplate.from_messages([
+         ("system", system_message),
+         ("user", "Query: {query}\nResponse: {response}\n\n"
+                  "What improvements can be made to enhance accuracy and completeness?")
+     ])
+
+     chain = refine_response_prompt | llm | StrOutputParser()
+
+     # Store response suggestions in a structured format
+     feedback = f"Previous Response: {state['response']}\nSuggestions: {chain.invoke({'query': state['query'], 'response': state['response']})}"
+     print("feedback: ", feedback)
+     print(f"State: {state}")
+     state['feedback'] = feedback
+     return state
+
+
+ def refine_query(state: Dict) -> Dict:
+     """
+     Suggests improvements for the expanded query.
+
+     Args:
+         state (Dict): The current state of the workflow, containing the query and expanded query.
+
+     Returns:
+         Dict: The updated state with query refinement suggestions.
+     """
+     print("---------refine_query---------")
+     system_message = '''You are an expert assistant helping to improve AI-generated query reformulations related to biblical interpretation and spiritual guidance, based on the Bible and *The Purpose Driven Life*.
+ Evaluate the expanded query and suggest constructive improvements to enhance scriptural accuracy, theological clarity, and spiritual relevance.
+ Do not rewrite the expanded query itself. Instead, point out what is vague, theologically weak, misaligned with the source material, or could be better supported by the context.
+ Focus on biblical coherence, faith-based reasoning, and alignment with the themes and tone of the source texts.'''
+
+     refine_query_prompt = ChatPromptTemplate.from_messages([
+         ("system", system_message),
+         ("user", "Original Query: {query}\nExpanded Query: {expanded_query}\n\n"
+                  "What improvements can be made for a better search?")
+     ])
+
+     chain = refine_query_prompt | llm | StrOutputParser()
+
+     # Store refinement suggestions without modifying the original expanded query
+     query_feedback = f"Previous Expanded Query: {state['expanded_query']}\nSuggestions: {chain.invoke({'query': state['query'], 'expanded_query': state['expanded_query']})}"
+     print("query_feedback: ", query_feedback)
+     print(f"Groundedness loop count: {state['groundedness_loop_count']}")
+     state['query_feedback'] = query_feedback
+     return state
+
+
+ def should_continue_groundedness(state):
+     """Decides if groundedness is sufficient or needs improvement."""
+     print("---------should_continue_groundedness---------")
+     print("groundedness loop count: ", state['groundedness_loop_count'])
+
+     # Threshold logic: the groundedness score should be at least 0.8
+     if state['groundedness_score'] >= 0.8:
+         print("Moving to precision")
+         return "check_precision"
+     else:
+         # Stop refining once the loop budget (loop_max_iter) is exhausted
+         if state['groundedness_loop_count'] > state['loop_max_iter']:
+             print("Maximum groundedness iterations reached")
+             return "max_iterations_reached"
+         else:
+             print("---------Groundedness Score Threshold Not Met. Refining Response-----------")
+             return "refine_response"
+
+
+ def should_continue_precision(state: Dict) -> str:
+     """Decides if precision is sufficient or needs improvement."""
+     print("---------should_continue_precision---------")
+     print("precision loop count: ", state['precision_loop_count'])
+
+     # Threshold for an acceptable precision score
+     if state['precision_score'] >= 0.8:
+         return "pass"  # Precision is sufficient; complete the workflow
+     else:
+         # Check if the maximum number of refinement attempts has been reached
+         if state['precision_loop_count'] > state['loop_max_iter']:
+             return "max_iterations_reached"
+         else:
+             print("---------Precision Score Threshold Not Met. Refining Query-----------")
+             return "refine_query"
+
+
+ def max_iterations_reached(state: Dict) -> Dict:
+     """Handles the case where the maximum number of refinement iterations is reached."""
+     print("---------max_iterations_reached---------")
+     state['response'] = "We need more context to provide an accurate answer."
+     return state
+
+
+ def create_workflow() -> StateGraph:
+     """Creates the workflow for the AI spiritual agent."""
+     workflow = StateGraph(AgentState)  # Workflow state follows the AgentState schema
+
+     # Add processing nodes
+     workflow.add_node("expand_query", expand_query)  # Step 1: Expand the user query
+     workflow.add_node("retrieve_context", retrieve_context)  # Step 2: Retrieve relevant documents
+     workflow.add_node("craft_response", craft_response)  # Step 3: Generate a response from the retrieved context
+     workflow.add_node("score_groundedness", score_groundedness)  # Step 4: Evaluate how grounded the response is
+     workflow.add_node("refine_response", refine_response)  # Step 5: Improve the response if it is weakly grounded
+     workflow.add_node("check_precision", check_precision)  # Step 6: Evaluate response precision
+     workflow.add_node("refine_query", refine_query)  # Step 7: Improve the query if the response lacks precision
+     workflow.add_node("max_iterations_reached", max_iterations_reached)  # Step 8: Handle the iteration cap
+
+     # Main flow edges
+     workflow.add_edge(START, "expand_query")
+     workflow.add_edge("expand_query", "retrieve_context")
+     workflow.add_edge("retrieve_context", "craft_response")
+     workflow.add_edge("craft_response", "score_groundedness")
+
+     # Groundedness logic
+     workflow.add_conditional_edges(
+         "score_groundedness",
+         should_continue_groundedness,
+         {
+             "check_precision": "check_precision",
+             "refine_response": "refine_response",
+             "max_iterations_reached": "max_iterations_reached"
+         }
+     )
+
+     # Edge to reprocess the refined response
+     workflow.add_edge("refine_response", "craft_response")
+
+     # Precision logic
+     workflow.add_conditional_edges(
+         "check_precision",
+         should_continue_precision,
+         {
+             "pass": END,
+             "refine_query": "refine_query",
+             "max_iterations_reached": "max_iterations_reached"
+         }
+     )
+
+     # Edge to re-expand the refined query and re-enter the flow
+     workflow.add_edge("refine_query", "expand_query")
+     workflow.add_edge("max_iterations_reached", END)
+
+     return workflow
+
+
+ #=========================== Defining the agentic RAG tool ====================#
+ # Compile the graph once at import time so every tool call reuses it
+ WORKFLOW_APP = create_workflow().compile()
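+ # Optional visualization (illustrative; requires the optional grandalf dependency):
+ # print(WORKFLOW_APP.get_graph().draw_ascii())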
+ @tool
+ def agentic_rag(query: str) -> Dict[str, Any]:
+     """
+     Runs the RAG-based agent workflow to produce a grounded, precise response.
+     """
+     if not query or not isinstance(query, str):
+         return {"error": "Invalid or empty query provided"}
+     inputs = {
+         "query": query,  # The original user query
+         "expanded_query": "",  # Populated by expand_query with the reformulated query
+         "context": [],  # Retrieved documents (initially empty)
+         "response": "",  # Populated by craft_response with the generated answer
+         "precision_score": 0.0,  # Set by check_precision (0 to 1)
+         "groundedness_score": 0.0,  # Set by score_groundedness (0 to 1)
+         "groundedness_loop_count": 0,  # Incremented on each groundedness refinement loop
+         "precision_loop_count": 0,  # Incremented on each precision refinement loop
+         "feedback": "",  # Populated by refine_response with improvement suggestions
+         "query_feedback": "",  # Populated by refine_query with query improvement suggestions
+         "loop_max_iter": 5  # Maximum refinement iterations per check
+     }
+     output = WORKFLOW_APP.invoke(inputs)
+     return output
+
+
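+ # Illustrative direct invocation (bypassing the agent), e.g. for debugging:
+ # result = agentic_rag.invoke({"query": "What does Jesus say about love?"})
+ # print(result["response"])
+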
+ #================================ Guardrails ===========================#
+ llama_guard_client = Groq(api_key=llama_api_key)
+
+ # Function to filter user input with Llama Guard
+ def filter_input_with_llama_guard(user_input, model="meta-llama/llama-guard-4-12b"):
+     """
+     Filters user input using Llama Guard to check whether it is safe.
+
+     Parameters:
+     - user_input: The input provided by the user.
+     - model: The Llama Guard model to be used for filtering (default is "meta-llama/llama-guard-4-12b").
+
+     Returns:
+     - Llama Guard's verdict string, or None on error.
+     """
+     try:
+         # Send the user input to Llama Guard for classification
+         response = llama_guard_client.chat.completions.create(
+             messages=[{"role": "user", "content": user_input}],
+             model=model,
+         )
+         # Return the verdict text
+         return response.choices[0].message.content.strip()
+     except Exception as e:
+         print(f"Error with Llama Guard: {e}")
+         return None
+
+
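+ # Llama Guard typically replies "safe", or "unsafe" followed by a hazard category
+ # code such as "S6"; the Streamlit UI below normalizes and checks that string.
+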
+ #============================= Adding Memory to the agent using mem0 ===============================#
+
+ class SpiritualBot:
+     def __init__(self):
+         """
+         Initialize the SpiritualBot class, setting up memory, the LLM client, tools, and the agent executor.
+         """
+         # Initialize a memory client to store and retrieve user interactions
+         self.memory = MemoryClient(api_key=MEM0_api_key)
+
+         # Initialize the OpenAI client using the provided credentials
+         self.client = ChatOpenAI(
+             model_name="gpt-4o-mini",  # Chat model used for agent reasoning
+             api_key=api_key,  # API key for authentication
+             openai_api_base=endpoint,
+             temperature=0  # Controls randomness in responses; 0 ensures deterministic results
+         )
+
+         # Define the tools available to the chatbot
+         tools = [agentic_rag]
+
+         # Define the system prompt to set the behavior of the chatbot
+         system_prompt = """You are a compassionate and knowledgeable Spiritual Assistant.
+ Your purpose is to help users explore biblical teachings and spiritual insights, drawing only from the Bible and *The Purpose Driven Life* by Rick Warren.
+
+ Guidelines for Interaction:
+ - Maintain a respectful, thoughtful, and non-judgmental tone at all times.
+ - Ground every response in scripture or the provided spiritual context; never speculate or invent theology.
+ - Use the agentic_rag tool to retrieve contextually relevant passages and interpretations from trusted sources.
+ - If a user asks a vague question, gently encourage them to clarify their spiritual needs or the passage of interest.
+ - When possible, help the user reflect on how biblical principles can apply to personal growth, purpose, and everyday life.
+ - Avoid doctrinal debates or denominational bias; focus on shared themes of purpose, love, faith, and spiritual growth.
+ - If you cannot answer based on the given sources, humbly acknowledge the limitation and suggest scripture or topics the user might explore further.
+
+ Your goal is to walk alongside users on their spiritual journey, offering encouragement, insight, and biblical grounding.
+ """
+
+         # Build the prompt template for the agent
+         prompt = ChatPromptTemplate.from_messages([
+             ("system", system_prompt),  # System instructions
+             ("human", "{input}"),  # Placeholder for human input
+             ("placeholder", "{agent_scratchpad}")  # Placeholder for intermediate reasoning steps
+         ])
+
+         # Create an agent capable of interacting with tools and executing tasks
+         agent = create_tool_calling_agent(self.client, tools, prompt)
+
+         # Wrap the agent in an executor to manage tool interactions and execution flow
+         self.agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
+
+
+     def store_customer_interaction(self, user_id: str, message: str, response: str, metadata: Dict = None):
+         """
+         Store a customer interaction in memory for future reference.
+
+         Args:
+             user_id (str): Unique identifier for the customer.
+             message (str): Customer's query or message.
+             response (str): Chatbot's response.
+             metadata (Dict, optional): Additional metadata for the interaction.
+         """
+         if metadata is None:
+             metadata = {}
+
+         # Add a timestamp to the metadata for tracking purposes
+         metadata["timestamp"] = datetime.now().isoformat()
+
+         # Format the conversation for storage
+         conversation = [
+             {"role": "user", "content": message},
+             {"role": "assistant", "content": response}
+         ]
+
+         # Store the interaction in the memory client
+         self.memory.add(
+             conversation,
+             user_id=user_id,
+             output_format="v1.1",
+             metadata=metadata
+         )
+
+
+     def get_relevant_history(self, user_id: str, query: str) -> List[Dict]:
+         """
+         Retrieve past interactions relevant to the current query.
+
+         Args:
+             user_id (str): Unique identifier for the customer.
+             query (str): The customer's current query.
+
+         Returns:
+             List[Dict]: A list of relevant past interactions.
+         """
+         return self.memory.search(
+             query=query,  # Search for interactions related to the query
+             user_id=user_id,  # Restrict the search to the specific user
+             limit=5  # Return at most five past interactions
+         )
+
+
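+     # Each search hit is assumed to be a dict whose "memory" field holds the
+     # stored text (per the mem0 client's v1.1 output format); handle_customer_query
+     # below reads that field when rebuilding conversational context.
+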
+     def handle_customer_query(self, user_id: str, query: str) -> str:
+         """
+         Process a customer's query and provide a response, taking into account past interactions.
+
+         Args:
+             user_id (str): Unique identifier for the customer.
+             query (str): Customer's query.
+
+         Returns:
+             str: Chatbot's response.
+         """
+         # Retrieve relevant past interactions for context
+         relevant_history = self.get_relevant_history(user_id, query)
+
+         # Build a context string from the relevant history; each mem0 hit stores
+         # a single consolidated memory of the exchange in its "memory" field
+         context = "Previous relevant interactions:\n"
+         for memory in relevant_history:
+             context += f"- {memory['memory']}\n"
+             context += "---\n"
+
+         # Print context for debugging purposes
+         print("Context: ", context)
+
+         # Prepare a prompt combining past context and the current query
+         prompt = f"""
+         Context:
+         {context}
+
+         Current customer query: {query}
+
+         Provide a helpful response that takes into account any relevant past interactions.
+         """
+
+         # Generate a response using the agent
+         response = self.agent_executor.invoke({"input": prompt})
+
+         # Store the current interaction for future reference
+         self.store_customer_interaction(
+             user_id=user_id,
+             message=query,
+             response=response["output"],
+             metadata={"type": "support_query"}
+         )
+
+         # Return the chatbot's response
+         return response['output']
+
+
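+ # Illustrative standalone usage (outside Streamlit):
+ # bot = SpiritualBot()
+ # print(bot.handle_customer_query("demo_user", "What is the purpose of life?"))
+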
+ #===================== User interface using Streamlit ===========================#
+ def spiritual_assistant_streamlit():
+     """
+     A Streamlit-based UI for the Spiritual Assistant agent.
+     """
+     st.title("📖 Spiritual Assistant")
+     st.caption("Ask questions about the Bible, Jesus, faith, and Christian life. Type 'exit' to end the conversation.")
+
+     # Initialize session state for chat history and user_id if they don't exist
+     if 'chat_history' not in st.session_state:
+         st.session_state.chat_history = []
+     if 'user_id' not in st.session_state:
+         st.session_state.user_id = None
+
+     # Login form: only shown if the user is not logged in
+     if st.session_state.user_id is None:
+         with st.form("login_form", clear_on_submit=True):
+             user_id = st.text_input("Please enter your name to begin:")
+             submit_button = st.form_submit_button("Login")
+             if submit_button and user_id:
+                 st.session_state.user_id = user_id
+                 st.session_state.chat_history.append({
+                     "role": "assistant",
+                     "content": f"Welcome, {user_id}! How can I guide you on your spiritual path today?"
+                 })
+                 st.session_state.login_submitted = True
+         if st.session_state.get("login_submitted", False):
+             st.session_state.pop("login_submitted")
+             st.rerun()
+     else:
+         # Replay the conversation so far
+         for message in st.session_state.chat_history:
+             with st.chat_message(message["role"]):
+                 st.write(message["content"])
+
+         user_query = st.chat_input("Type your question here (or 'exit' to end)...")
+
+         if user_query:
+             if user_query.lower() == "exit":
+                 st.session_state.chat_history.append({"role": "user", "content": "exit"})
+                 with st.chat_message("user"):
+                     st.write("exit")
+                 goodbye_msg = "Goodbye! May your path be filled with peace and happiness!"
+                 st.session_state.chat_history.append({"role": "assistant", "content": goodbye_msg})
+                 with st.chat_message("assistant"):
+                     st.write(goodbye_msg)
+                 st.session_state.user_id = None
+                 st.rerun()
+                 return
+
+             st.session_state.chat_history.append({"role": "user", "content": user_query})
+             with st.chat_message("user"):
+                 st.write(user_query)
+
+             # Screen the input with Llama Guard before answering; guard against a
+             # None verdict if the filtering call failed
+             filtered_result = filter_input_with_llama_guard(user_query)
+             filtered_result = (filtered_result or "").replace("\n", " ").upper()
+
+             # Allow input that Llama Guard marks safe, or whose flagged category
+             # is S0/S6/S7 (treated as acceptable for this app)
+             if filtered_result == "SAFE" or filtered_result.endswith(("S0", "S6", "S7")):
+                 try:
+                     if 'chatbot' not in st.session_state:
+                         st.session_state.chatbot = SpiritualBot()
+                     response = st.session_state.chatbot.handle_customer_query(
+                         st.session_state.user_id, user_query)
+                     with st.chat_message("assistant"):
+                         st.write(response)
+                     st.session_state.chat_history.append({"role": "assistant", "content": response})
+                 except Exception as e:
+                     error_msg = f"Sorry, I encountered an error while processing your query. Please try again. Error: {str(e)}"
+                     st.write(error_msg)
+                     st.session_state.chat_history.append({"role": "assistant", "content": error_msg})
+             else:
+                 inappropriate_msg = "I apologize, but I cannot process that input as it may be inappropriate. Please try again."
+                 st.write(inappropriate_msg)
+                 st.session_state.chat_history.append({"role": "assistant", "content": inappropriate_msg})
+
+ if __name__ == "__main__":
+     spiritual_assistant_streamlit()