# Import necessary libraries
import os # Interacting with the operating system (reading/writing files)
os.environ["CHROMADB_TELEMETRY"] = "0" # Disable Chroma telemetry reporting
import chromadb # High-performance vector database for storing/querying dense vectors
from dotenv import load_dotenv # Loading environment variables from a .env file
import json # Parsing and handling JSON data
# LangChain imports
from langchain_openai import ChatOpenAI
from langchain_core.documents import Document # Document data structures
from langchain_core.runnables import RunnablePassthrough # LangChain core library for running pipelines
from langchain_core.output_parsers import StrOutputParser # String output parser
from langchain.prompts import ChatPromptTemplate # Template for chat prompts
from langchain.chains.query_constructor.base import AttributeInfo # Base classes for query construction
from langchain.retrievers.self_query.base import SelfQueryRetriever # Base classes for self-querying retrievers
from langchain.retrievers.document_compressors import LLMChainExtractor, CrossEncoderReranker # Document compressors
from langchain.retrievers import ContextualCompressionRetriever # Contextual compression retrievers
# LangChain community & experimental imports
from langchain_community.vectorstores import Chroma # Implementations of vector stores like Chroma
from langchain_community.document_loaders import PyPDFDirectoryLoader, PyPDFLoader # Document loaders for PDFs
from langchain_community.cross_encoders import HuggingFaceCrossEncoder # Cross-encoders from HuggingFace
from langchain_experimental.text_splitter import SemanticChunker # Experimental text splitting methods
from langchain.text_splitter import (
    CharacterTextSplitter, # Splitting text by characters
    RecursiveCharacterTextSplitter # Recursive splitting of text by characters
)
from langchain_core.tools import tool # Decorator for exposing functions as agent tools
from langchain.agents import create_tool_calling_agent, AgentExecutor
# LangChain OpenAI imports
from langchain_openai import AzureOpenAIEmbeddings, AzureChatOpenAI # Azure OpenAI embeddings and chat models
from langchain_openai import OpenAIEmbeddings # OpenAI embeddings for text vectors
# LlamaParse & LlamaIndex imports
from llama_parse import LlamaParse # Document parsing library
from llama_index.core import Settings, SimpleDirectoryReader # Core functionality of LlamaIndex
# LangGraph import
from langgraph.graph import StateGraph, END, START # State graph for building the agent workflow
# Pydantic import
from pydantic import BaseModel # Pydantic for data validation
# Typing imports
from typing import Dict, List, Tuple, Any, TypedDict # Python typing for function annotations
# Other utilities
import numpy as np # Numpy for numerical operations
from groq import Groq # Groq client, used here to call Llama Guard
from mem0 import MemoryClient # Long-term conversation memory
import streamlit as st # Web UI framework
from datetime import datetime # Timestamps for stored interactions
#====================================SETUP=====================================#
load_dotenv() # Load variables from a local .env file, if present
# Fetch secrets from Hugging Face Spaces (or the local environment)
api_key = os.environ.get("API_KEY")
endpoint = os.environ.get("OPENAI_API_BASE")
llama_api_key = os.environ.get("GROQ_API_KEY")
MEM0_api_key = os.environ.get("MEM0_API_KEY")
# Initialize the OpenAI Embeddings
embedding_model = OpenAIEmbeddings(
    openai_api_base=endpoint,
    openai_api_key=api_key,
    model='text-embedding-ada-002'
)
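# A minimal sanity check (hypothetical usage; requires valid credentials):
# text-embedding-ada-002 maps each input string to a 1536-dimensional vector,
# which is what Chroma stores and compares during similarity search.
# vec = embedding_model.embed_query("In the beginning God created the heavens and the earth.")
# assert len(vec) == 1536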
# Initialize the Chat OpenAI model
llm = ChatOpenAI(
    openai_api_base=endpoint,
    openai_api_key=api_key,
    model="gpt-4o-mini",
    streaming=False
)
# This initializes the Chat OpenAI model with the provided endpoint, API key, and model name.
# Set the LLM and embedding model in the LlamaIndex settings.
Settings.llm = llm
Settings.embed_model = embedding_model
#================================Creating Langgraph agent======================#
class AgentState(TypedDict):
    query: str # The current user query
    expanded_query: str # The expanded version of the user query
    context: List[Dict[str, Any]] # Retrieved documents (content and metadata)
    response: str # The generated response to the user query
    precision_score: float # The precision score of the response
    groundedness_score: float # The groundedness score of the response
    groundedness_loop_count: int # Counter for groundedness refinement loops
    precision_loop_count: int # Counter for precision refinement loops
    feedback: str # Suggestions for improving the response
    query_feedback: str # Suggestions for improving the expanded query
    groundedness_check: bool # Whether the groundedness check has passed
    loop_max_iter: int # Maximum number of refinement iterations per loop
def expand_query(state: Dict[str, Any]) -> Dict[str, Any]:
    """
    Expands the user query to improve retrieval of Bible and spiritual information.
    Args:
        state: Workflow state containing at least 'query' and 'query_feedback'.
    Returns:
        Workflow state with an additional 'expanded_query' key.
    """
    print("---------Expanding Query---------")
    system_message = '''You are an assistant that reformulates vague or short user questions into detailed, domain-specific queries related to Bible and spiritual questions.
Examples:
- Input: "David and Goliath?"
  Expanded: "What is the significance of the story of David and Goliath in the context of faith, courage, and divine intervention?"
- Input: "What does Jesus say about love?"
  Expanded: "What teachings did Jesus offer about love in the New Testament, and how do passages like John 13:34-35 and Matthew 22:37-39 reflect those teachings?"
- Input: "Genesis creation"
  Expanded: "How does the Book of Genesis describe the creation of the world, and what are the main theological interpretations of the seven days of creation?"
- Input: "End times?"
  Expanded: "What does the Bible say about the end times, and how do texts like the Book of Revelation, Daniel, and Matthew 24 contribute to Christian eschatology?"
- Input: "Women in the Bible"
  Expanded: "What roles do women play in the Bible, and how are figures such as Mary, Ruth, Esther, and Deborah portrayed in biblical narratives?"
'''
    expand_prompt = ChatPromptTemplate.from_messages([
        ("system", system_message),
        ("user", "Expand this query: {query} using the feedback: {query_feedback}")
    ])
    chain = expand_prompt | llm | StrOutputParser()
    expanded_query = chain.invoke({"query": state['query'], "query_feedback": state["query_feedback"]})
    #print("expanded_query", expanded_query) # Uncomment this line to see the expanded query
    state["expanded_query"] = expanded_query
    return state
chroma_client = chromadb.PersistentClient(path="./combined")
vector_store = Chroma(
    client=chroma_client, # Pass the persistent client created above
    collection_name="combined",
    embedding_function=embedding_model,
)
# Create a retriever from the vector store
retriever = vector_store.as_retriever(
    search_type='similarity',
    search_kwargs={'k': 3}
)
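# Note: this assumes the "./combined" Chroma collection was already populated by a
# separate ingestion step (e.g., loading PDFs with PyPDFLoader, splitting them with
# RecursiveCharacterTextSplitter, and calling vector_store.add_documents). A quick
# retrieval smoke test, sketched under that assumption:
# docs = retriever.invoke("What does the Bible say about purpose?")
# for doc in docs:
#     print(doc.metadata.get("source"), doc.page_content[:80])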
def retrieve_context(state):
    """
    Retrieves context from the vector store using the expanded or original query.
    Args:
        state (Dict): The current state of the workflow, containing the query and expanded query.
    Returns:
        Dict: The updated state with the retrieved context.
    """
    print("---------retrieve_context---------")
    query = state['expanded_query'] # Use the expanded query for retrieval
    #print("Query used for retrieval:", query) # Debugging: print the query
    # Retrieve documents from the vector store
    docs = retriever.invoke(query)
    #print("Retrieved documents:", docs) # Debugging: print the raw docs object
    # Extract both page_content and metadata from each document
    context = [
        {
            "content": doc.metadata.get("original_content", doc.page_content),
            "metadata": doc.metadata
        }
        for doc in docs
    ]
    state['context'] = context # Store the retrieved context in the state
    #print("Extracted context with metadata:", context) # Debugging: print the extracted context
    print(f"Groundedness loop count: {state['groundedness_loop_count']}")
    return state
def craft_response(state):
    """
    Generates a response using the retrieved context, focusing on biblical teachings and spiritual guidance.
    Args:
        state (Dict): The current state of the workflow, containing the query and retrieved context.
    Returns:
        Dict: The updated state with the generated response.
    """
    print("---------craft_response---------")
    system_message = '''You are a helpful AI assistant trained to support users in understanding biblical teachings and spiritual guidance, using context retrieved from the Bible and the book *The Purpose Driven Life* by Rick Warren.
Your responses must strictly adhere to the retrieved context, which is extracted from biblical texts such as the CSB Bible, theological commentaries, or trusted religious sources.
Do not speculate, interpret creatively, or introduce knowledge not found in the provided context. Focus only on scriptural passages, interpretations, historical backgrounds, or theological themes directly supported by the retrieved content.
If the context does not contain enough information to answer accurately, clearly state that. Aim for clarity, scriptural accuracy, and relevance to the user's query.
'''
    response_prompt = ChatPromptTemplate.from_messages([
        ("system", system_message),
        ("user", "Query: {query}\nContext: {context}\n\nfeedback: {feedback}")
    ])
    chain = response_prompt | llm | StrOutputParser() # Parse to a plain string so the scoring prompts receive text, not a message object
    response = chain.invoke({
        "query": state['query'],
        "context": "\n".join([doc["content"] for doc in state['context']]),
        "feedback": state.get('feedback', 'No feedback provided') # Add feedback to the prompt
    })
    state['response'] = response
    #print("intermediate response: ", response) # Uncomment this line to see the intermediate response
    return state
def score_groundedness(state):
    """
    Checks whether the response is grounded in the retrieved context.
    Args:
        state (Dict): The current state of the workflow, containing the response and context.
    Returns:
        Dict: The updated state with the groundedness score.
    """
    print("---------check_groundedness---------")
    system_message = '''You are evaluating whether an AI-generated response is grounded in the retrieved context
provided from biblical texts (such as the CSB Bible) and the book *The Purpose Driven Life* by Rick Warren.
The context includes scripture, commentary, and theological content.
Your task is to assign a groundedness score between 0 and 1, where:
- 1.0 means the response is fully supported by the context,
- 0.0 means the response is entirely unsupported.
Be strict: if even a part of the response is not traceable to the context, reduce the score. Provide only
the numeric score.'''
    groundedness_prompt = ChatPromptTemplate.from_messages([
        ("system", system_message),
        ("user", "Context: {context}\nResponse: {response}\n\nGroundedness score:")
    ])
    chain = groundedness_prompt | llm | StrOutputParser()
    groundedness_score = float(chain.invoke({
        "context": "\n".join([doc["content"] for doc in state['context']]),
        "response": state['response']
    }).strip())
    print("groundedness_score: ", groundedness_score)
    state['groundedness_loop_count'] += 1
    print("#########Groundedness Incremented###########")
    state['groundedness_score'] = groundedness_score
    return state
def check_precision(state: Dict) -> Dict:
    """
    Checks whether the response precisely addresses the user's query.
    Args:
        state (Dict): The current state of the workflow, containing the query and response.
    Returns:
        Dict: The updated state with the precision score.
    """
    print("---------check_precision---------")
    system_message = '''You are assessing whether an AI-generated response precisely answers the user's query,
within the domain of biblical interpretation and spiritual guidance drawn from the Bible and *The Purpose Driven Life*.
Provide a precision score between 0 and 1:
- 1.0: The response fully and directly answers the query with clear relevance.
- 0.0: The response is vague, unrelated, or fails to address the query.
Only return a numeric score.'''
    precision_prompt = ChatPromptTemplate.from_messages([
        ("system", system_message),
        ("user", "Query: {query}\nResponse: {response}\n\nPrecision score:")
    ])
    chain = precision_prompt | llm | StrOutputParser()
    precision_score = float(chain.invoke({
        "query": state['query'],
        "response": state['response']
    }).strip())
    state['precision_score'] = precision_score
    print("precision_score:", precision_score)
    state['precision_loop_count'] += 1
    print("#########Precision Incremented###########")
    return state
def refine_response(state: Dict) -> Dict:
    """
    Suggests improvements for the generated response.
    Args:
        state (Dict): The current state of the workflow, containing the query and response.
    Returns:
        Dict: The updated state with response refinement suggestions.
    """
    print("---------refine_response---------")
    system_message = '''You are an expert assistant helping to improve AI-generated answers related to biblical interpretation and spiritual guidance.
Evaluate the response and suggest constructive improvements to enhance accuracy, specificity, and completeness.
Do not rewrite the response. Instead, point out what is vague, missing, or could be better explained.
Focus on biblical coherence, faith-based reasoning, and alignment with the themes and tone of the source texts.'''
    refine_response_prompt = ChatPromptTemplate.from_messages([
        ("system", system_message),
        ("user", "Query: {query}\nResponse: {response}\n\n"
                 "What improvements can be made to enhance accuracy and completeness?")
    ])
    chain = refine_response_prompt | llm | StrOutputParser()
    # Store response suggestions in a structured format
    feedback = f"Previous Response: {state['response']}\nSuggestions: {chain.invoke({'query': state['query'], 'response': state['response']})}"
    #print("feedback: ", feedback) # Uncomment this line to see the feedback
    #print(f"State: {state}")
    state['feedback'] = feedback
    return state
def refine_query(state: Dict) -> Dict:
    """
    Suggests improvements for the expanded query.
    Args:
        state (Dict): The current state of the workflow, containing the query and expanded query.
    Returns:
        Dict: The updated state with query refinement suggestions.
    """
    print("---------refine_query---------")
    system_message = '''You are an expert assistant helping to improve AI-generated query reformulations related to biblical interpretation and spiritual guidance, based on the Bible and *The Purpose Driven Life*.
Evaluate the expanded query and suggest constructive improvements to enhance scriptural accuracy, theological clarity, and spiritual relevance.
Do not rewrite the expanded query itself. Instead, point out what is vague, theologically weak, misaligned with the source material, or could be better supported by the context.
Focus on biblical coherence, faith-based reasoning, and alignment with the themes and tone of the source texts.'''
    refine_query_prompt = ChatPromptTemplate.from_messages([
        ("system", system_message),
        ("user", "Original Query: {query}\nExpanded Query: {expanded_query}\n\n"
                 "What improvements can be made for a better search?")
    ])
    chain = refine_query_prompt | llm | StrOutputParser()
    # Store refinement suggestions without modifying the original expanded query
    query_feedback = f"Previous Expanded Query: {state['expanded_query']}\nSuggestions: {chain.invoke({'query': state['query'], 'expanded_query': state['expanded_query']})}"
    #print("query_feedback: ", query_feedback)
    print(f"Groundedness loop count: {state['groundedness_loop_count']}")
    state['query_feedback'] = query_feedback
    return state
def should_continue_groundedness(state):
    """Decides if groundedness is sufficient or needs improvement."""
    print("---------should_continue_groundedness---------")
    print("groundedness loop count: ", state['groundedness_loop_count'])
    # Threshold logic: groundedness score should be at least 0.8
    if state['groundedness_score'] >= 0.8:
        print("Moving to precision")
        return "check_precision"
    else:
        # Stop refining once the maximum number of loops (loop_max_iter) is exceeded
        if state['groundedness_loop_count'] > state['loop_max_iter']:
            print("Maximum groundedness iterations reached")
            return "max_iterations_reached"
        else:
            print("---------Groundedness Score Threshold Not Met. Refining Response-----------")
            return "refine_response"

def should_continue_precision(state: Dict) -> str:
    """Decides if precision is sufficient or needs improvement."""
    print("---------should_continue_precision---------")
    print("precision loop count: ", state['precision_loop_count'])
    # Threshold for acceptable precision score
    if state['precision_score'] >= 0.8:
        return "pass" # Complete the workflow
    else:
        # Check if maximum refinement attempts have been reached
        if state['precision_loop_count'] > state['loop_max_iter']:
            return "max_iterations_reached"
        else:
            print("---------Precision Score Threshold Not Met. Refining Query-----------")
            return "refine_query"

def max_iterations_reached(state: Dict) -> Dict:
    """Handles the case where max iterations are reached."""
    print("---------max_iterations_reached---------")
    state['response'] = "We need more context to provide an accurate answer."
    return state
def create_workflow() -> StateGraph:
    """Creates the updated workflow for the AI spiritual agent."""
    workflow = StateGraph(AgentState) # Workflow state follows the AgentState schema
    # Add processing nodes
    workflow.add_node("expand_query", expand_query) # Step 1: Expand the user query
    workflow.add_node("retrieve_context", retrieve_context) # Step 2: Retrieve relevant documents
    workflow.add_node("craft_response", craft_response) # Step 3: Generate a response based on retrieved data
    workflow.add_node("score_groundedness", score_groundedness) # Step 4: Evaluate response grounding
    workflow.add_node("refine_response", refine_response) # Step 5: Improve the response if it is weakly grounded
    workflow.add_node("check_precision", check_precision) # Step 6: Evaluate response precision
    workflow.add_node("refine_query", refine_query) # Step 7: Improve the query if the response lacks precision
    workflow.add_node("max_iterations_reached", max_iterations_reached) # Step 8: Handle max iterations
    # Main flow edges
    workflow.add_edge(START, "expand_query")
    workflow.add_edge("expand_query", "retrieve_context")
    workflow.add_edge("retrieve_context", "craft_response")
    workflow.add_edge("craft_response", "score_groundedness")
    # Groundedness logic
    workflow.add_conditional_edges(
        "score_groundedness",
        should_continue_groundedness,
        {
            "check_precision": "check_precision",
            "refine_response": "refine_response",
            "max_iterations_reached": "max_iterations_reached"
        }
    )
    # Edge to reprocess the refined response
    workflow.add_edge("refine_response", "craft_response")
    # Precision logic
    workflow.add_conditional_edges(
        "check_precision",
        should_continue_precision,
        {
            "pass": END,
            "refine_query": "refine_query",
            "max_iterations_reached": "max_iterations_reached"
        }
    )
    # Edge to re-expand the refined query and re-enter the flow
    workflow.add_edge("refine_query", "expand_query")
    workflow.add_edge("max_iterations_reached", END)
    return workflow
#=========================== Defining the agentic rag tool ====================#
WORKFLOW_APP = create_workflow().compile()
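# Optional debugging aid (assumes a langgraph version that exposes get_graph):
# render the compiled graph as a Mermaid diagram to verify the node/edge wiring.
# print(WORKFLOW_APP.get_graph().draw_mermaid())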
@tool
def agentic_rag(query: str) -> Dict[str, Any]:
    """
    Runs the RAG-based agent workflow to produce a grounded, precise response to the query.
    """
    if not query or not isinstance(query, str):
        return {"error": "Invalid or empty query provided"}
    inputs = {
        "query": query,
        "expanded_query": "", # Populated by expand_query with the reformulated query
        "context": [], # Retrieved documents (initially empty)
        "response": "", # Populated by craft_response with the generated answer
        "precision_score": 0.0, # Computed by check_precision (between 0 and 1)
        "groundedness_score": 0.0, # Computed by score_groundedness (between 0 and 1)
        "groundedness_loop_count": 0, # Number of groundedness refinement loops, incremented in score_groundedness
        "precision_loop_count": 0, # Number of precision refinement loops, incremented in check_precision
        "feedback": "", # Populated by refine_response with suggestions for improving the response
        "query_feedback": "", # Populated by refine_query with suggestions for improving the expanded query
        "loop_max_iter": 5 # Maximum number of refinement iterations per loop
    }
    output = WORKFLOW_APP.invoke(inputs, config={"recursion_limit": 50})
    return output
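# Because agentic_rag is registered as a LangChain tool (via @tool), direct calls go
# through .invoke with a dict of arguments. A hypothetical standalone test:
# result = agentic_rag.invoke({"query": "What does Jesus say about love?"})
# print(result["response"])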
#================================ Guardrails ===========================#
llama_guard_client = Groq(api_key=llama_api_key)
# Function to screen user input with Llama Guard
def filter_input_with_llama_guard(user_input, model="meta-llama/llama-guard-4-12b"):
    """
    Screens user input using Llama Guard to ensure it is safe.
    Parameters:
    - user_input: The input provided by the user.
    - model: The Llama Guard model to be used for screening (default is "meta-llama/llama-guard-4-12b").
    Returns:
    - Llama Guard's verdict string, or None if the request fails.
    """
    try:
        # Send the user input to Llama Guard for classification
        response = llama_guard_client.chat.completions.create(
            messages=[{"role": "user", "content": user_input}],
            model=model,
        )
        # Return the verdict
        return response.choices[0].message.content.strip()
    except Exception as e:
        print(f"Error with Llama Guard: {e}")
        return None
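# Llama Guard returns a verdict rather than a rewritten input: "safe" for benign
# queries, or "unsafe" followed by the violated category code (e.g., "S1") for
# harmful ones. The Streamlit UI below normalizes this verdict before deciding
# whether to answer. A hypothetical check:
# verdict = filter_input_with_llama_guard("What is the meaning of Psalm 23?")
# print(verdict)  # expected: "safe"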
#============================= Adding Memory to the agent using mem0 ===============================#
class SpiritualBot:
    def __init__(self):
        """
        Initialize the SpiritualBot class, setting up memory, the LLM client, tools, and the agent executor.
        """
        # Initialize a memory client to store and retrieve user interactions
        self.memory = MemoryClient(api_key=MEM0_api_key)
        # Initialize the OpenAI client using the provided credentials
        self.client = ChatOpenAI(
            model_name="gpt-4o-mini", # Specify the model to use
            api_key=api_key, # API key for authentication
            openai_api_base=endpoint,
            temperature=0 # Controls randomness in responses; 0 ensures deterministic results
        )
        # Define tools available to the chatbot (here, the agentic RAG workflow)
        tools = [agentic_rag]
        # Define the system prompt to set the behavior of the chatbot
        system_prompt = """You are a compassionate and knowledgeable Spiritual Assistant.
Your purpose is to help users explore biblical teachings and spiritual insights, drawing only from the Bible and *The Purpose Driven Life* by Rick Warren.
Guidelines for Interaction:
- Maintain a respectful, thoughtful, and non-judgmental tone at all times.
- Ground every response in scripture or the provided spiritual context; never speculate or invent theology.
- Use the agentic_rag tool to retrieve contextually relevant passages and interpretations from trusted sources.
- If a user asks a vague question, gently encourage them to clarify their spiritual needs or the passage of interest.
- When possible, help the user reflect on how biblical principles can apply to personal growth, purpose, and everyday life.
- Avoid doctrinal debates or denominational bias; focus on shared themes of purpose, love, faith, and spiritual growth.
- If you cannot answer based on the given sources, humbly acknowledge the limitation and suggest scripture or topics the user might explore further.
Your goal is to walk alongside users on their spiritual journey, offering encouragement, insight, and biblical grounding.
"""
        # Build the prompt template for the agent
        prompt = ChatPromptTemplate.from_messages([
            ("system", system_prompt), # System instructions
            ("human", "{input}"), # Placeholder for human input
            ("placeholder", "{agent_scratchpad}") # Placeholder for intermediate reasoning steps
        ])
        # Create an agent capable of interacting with tools and executing tasks
        agent = create_tool_calling_agent(self.client, tools, prompt)
        # Wrap the agent in an executor to manage tool interactions and execution flow
        self.agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=False) # Change to True to trace agent steps
    def store_customer_interaction(self, user_id: str, message: str, response: str, metadata: Dict = None):
        """
        Store customer interaction in memory for future reference.
        Args:
            user_id (str): Unique identifier for the customer.
            message (str): Customer's query or message.
            response (str): Chatbot's response.
            metadata (Dict, optional): Additional metadata for the interaction.
        """
        if metadata is None:
            metadata = {}
        # Add a timestamp to the metadata for tracking purposes
        metadata["timestamp"] = datetime.now().isoformat()
        # Format the conversation for storage
        conversation = [
            {"role": "user", "content": message},
            {"role": "assistant", "content": response}
        ]
        # Store the interaction in the memory client
        self.memory.add(
            conversation,
            user_id=user_id,
            output_format="v1.1",
            metadata=metadata
        )
    def get_relevant_history(self, user_id: str, query: str) -> List[Dict]:
        """
        Retrieve past interactions relevant to the current query.
        Args:
            user_id (str): Unique identifier for the customer.
            query (str): The customer's current query.
        Returns:
            List[Dict]: A list of relevant past interactions.
        """
        return self.memory.search(
            query=query, # Search for interactions related to the query
            user_id=user_id, # Restrict the search to the specific user
            limit=5 # Return at most five past interactions
        )
    def handle_customer_query(self, user_id: str, query: str) -> str:
        """
        Process a customer's query and provide a response, taking into account past interactions.
        Args:
            user_id (str): Unique identifier for the customer.
            query (str): Customer's query.
        Returns:
            str: Chatbot's response.
        """
        # Retrieve relevant past interactions for context
        relevant_history = self.get_relevant_history(user_id, query)
        # Build a context string from the relevant history
        context = "Previous relevant interactions:\n"
        for memory in relevant_history:
            # Each mem0 search hit carries a single 'memory' string summarizing a past exchange
            context += f"- {memory['memory']}\n"
            context += "---\n"
        # Print context for debugging purposes
        #print("Context: ", context)
        # Prepare a prompt combining past context and the current query
        prompt = f"""
        Context:
        {context}
        Current customer query: {query}
        Provide a helpful response that takes into account any relevant past interactions.
        """
        # Generate a response using the agent
        response = self.agent_executor.invoke({"input": prompt})
        # Store the current interaction for future reference
        self.store_customer_interaction(
            user_id=user_id,
            message=query,
            response=response["output"],
            metadata={"type": "support_query"}
        )
        # Return the chatbot's response
        return response['output']
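# Hypothetical standalone usage outside Streamlit (assumes API_KEY, OPENAI_API_BASE,
# GROQ_API_KEY, and MEM0_API_KEY are all set in the environment):
# bot = SpiritualBot()
# print(bot.handle_customer_query("demo_user", "What is the purpose of life?"))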
#=====================User Interface using Streamlit ===========================#
def spiritual_assistant_streamlit():
    """
    A Streamlit-based UI for the Spiritual Assistant agent.
    """
    st.title("Welcome to the Spiritual Assistant!")
    st.write("You can ask questions about the Bible, Jesus, faith, and Christian life.")
    st.write("Type 'exit' to end the conversation.\n")
    # Initialize session state for chat history and user_id if they don't exist
    if 'chat_history' not in st.session_state:
        st.session_state.chat_history = []
    if 'user_id' not in st.session_state:
        st.session_state.user_id = None
    # Login form: only shown if the user is not logged in
    if st.session_state.user_id is None:
        with st.form("login_form", clear_on_submit=True):
            user_id = st.text_input("Please enter your name to begin:")
            submit_button = st.form_submit_button("Login")
            st.write("""**Privacy Notice:**
User data remains private.
All processing occurs **within the current session**.
No user data is **stored**, **shared**, or used for **model training** or any other purpose.
""")
            if submit_button and user_id:
                st.session_state.user_id = user_id
                st.session_state.chat_history.append({
                    "role": "assistant",
                    "content": f"""**Welcome, {user_id}!**
How can I guide you in your spiritual path today?
---
**Privacy Notice:**
User questions remain private.
All processing occurs **within the current session**.
No user data is **stored**, **shared**, or used for **model training** or any other purpose.
"""
                })
                st.session_state.login_submitted = True
        if st.session_state.get("login_submitted", False):
            st.session_state.pop("login_submitted")
            st.rerun()
    else:
        for message in st.session_state.chat_history:
            with st.chat_message(message["role"]):
                st.write(message["content"])
        user_query = st.chat_input("Type your question here (or 'exit' to end)...")
        if user_query:
            if user_query.lower() == "exit":
                st.session_state.chat_history.append({"role": "user", "content": "exit"})
                with st.chat_message("user"):
                    st.write("exit")
                goodbye_msg = "Sayonara! May your path be filled with peace and happiness!"
                st.session_state.chat_history.append({"role": "assistant", "content": goodbye_msg})
                with st.chat_message("assistant"):
                    st.write(goodbye_msg)
                st.session_state.user_id = None
                st.rerun()
                return
            st.session_state.chat_history.append({"role": "user", "content": user_query})
            with st.chat_message("user"):
                st.write(user_query)
            # Screen the query with Llama Guard; treat a failed guardrail call (None) as unsafe
            filtered_result = filter_input_with_llama_guard(user_query)
            filtered_result = (filtered_result or "").replace("\n", " ").upper()
            if filtered_result in ["SAFE", "S0", "S6", "S7"]:
                try:
                    if 'chatbot' not in st.session_state:
                        st.session_state.chatbot = SpiritualBot()
                    response = st.session_state.chatbot.handle_customer_query(
                        st.session_state.user_id, user_query)
                    with st.chat_message("assistant"):
                        st.write(response)
                    st.session_state.chat_history.append({"role": "assistant", "content": response})
                except Exception as e:
                    error_msg = f"Sorry, I encountered an error while processing your query. Please try again. Error: {str(e)}"
                    with st.chat_message("assistant"):
                        st.write(error_msg)
                    st.session_state.chat_history.append({"role": "assistant", "content": error_msg})
            else:
                inappropriate_msg = "I apologize, but I cannot process that input as it may be inappropriate. Please try again."
                with st.chat_message("assistant"):
                    st.write(inappropriate_msg)
                st.session_state.chat_history.append({"role": "assistant", "content": inappropriate_msg})
if __name__ == "__main__":
    spiritual_assistant_streamlit()