import os
import subprocess

import pandas as pd
import spacy
from langchain.chains import RetrievalQA
from langchain.docstore.document import Document
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.vectorstores import FAISS
from langchain_google_genai import ChatGoogleGenerativeAI
from mistralai import Mistral
from pydantic import BaseModel, ValidationError, validator
from smolagents import CodeAgent, DuckDuckGoSearchTool, LiteLLMModel, ManagedAgent

# Import chains and tools
from classification_chain import get_classification_chain
from cleaner_chain import get_cleaner_chain
from refusal_chain import get_refusal_chain
from tailor_chain import get_tailor_chain

# Initialize Mistral API client
mistral_api_key = os.environ.get("MISTRAL_API_KEY")
client = Mistral(api_key=mistral_api_key)

gemini_llm = ChatGoogleGenerativeAI(
    model="gemini-1.5-pro",
    temperature=0.5,
    max_retries=2,
    google_api_key=os.environ.get("GEMINI_API_KEY"),
    # Additional parameters or safety_settings can be added here if needed
)

# smolagents agents need their own model wrapper; LiteLLMModel routes to Gemini
# via LiteLLM (model id taken from the previously commented-out initialization).
# This instance backs the web-search agents in do_web_search() below.
pydantic_agent = LiteLLMModel(model_id="gemini/gemini-pro", api_key=os.environ.get("GEMINI_API_KEY"))
# Load spaCy model for NER and download it if not already installed
def install_spacy_model():
    try:
        spacy.load("en_core_web_sm")
        print("spaCy model 'en_core_web_sm' is already installed.")
    except OSError:
        print("Downloading spaCy model 'en_core_web_sm'...")
        subprocess.run(["python", "-m", "spacy", "download", "en_core_web_sm"], check=True)
        print("spaCy model 'en_core_web_sm' downloaded successfully.")

install_spacy_model()
nlp = spacy.load("en_core_web_sm")

# Function to extract the main topic from the query using spaCy NER
def extract_main_topic(query: str) -> str:
    doc = nlp(query)
    main_topic = None
    for ent in doc.ents:
        if ent.label_ in ["ORG", "PRODUCT", "PERSON", "GPE", "TIME"]:
            main_topic = ent.text
            break
    if not main_topic:
        for token in doc:
            if token.pos_ in ["NOUN", "PROPN"]:
                main_topic = token.text
                break
    return main_topic if main_topic else "this topic"
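
# Illustrative behavior (exact output depends on the spaCy model version):
#   extract_main_topic("Tell me about yoga retreats in India") -> "India" (first matching entity)
#   extract_main_topic("how do I start")                       -> "this topic" (no entity or noun found)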

# Pydantic model to handle string input validation
class QueryInput(BaseModel):
    query: str

    # Validator to ensure the query is always a string
    @validator('query')
    def check_query_is_string(cls, v):
        if not isinstance(v, str):
            raise ValueError("Query must be a valid string.")
        return v

# Function to classify query based on wellness topics
def classify_query(query: str) -> str:
    wellness_keywords = ["box breathing", "meditation", "yoga", "mindfulness", "breathing exercises"]
    if any(keyword in query.lower() for keyword in wellness_keywords):
        return "Wellness"
    # Fall back to the classification chain if no keyword matches
    class_result = classification_chain.invoke({"query": query})
    return class_result.get("text", "").strip()

# Function to moderate text using Mistral moderation API (sync version)
def moderate_text(query: str) -> str:
    try:
        # Use Pydantic to validate that the query is a string
        QueryInput(query=query)
    except ValidationError as e:
        print(f"Error validating text: {e}")
        # Treat malformed input as out of scope so run_pipeline refuses it
        return "OutOfScope"
    
    # Call the Mistral moderation API
    response = client.classifiers.moderate_chat(
        model="mistral-moderation-latest",
        inputs=[{"role": "user", "content": query}]
    )
    
    # Check if harmful categories are present in the response
    if hasattr(response, 'results') and response.results:
        categories = response.results[0].categories
        if categories.get("violence_and_threats", False) or \
           categories.get("hate_and_discrimination", False) or \
           categories.get("dangerous_and_criminal_content", False) or \
           categories.get("selfharm", False):
            return "OutOfScope"
    
    return query
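
# For reference, an assumed (simplified) shape of the moderation response;
# consult the Mistral docs for the authoritative schema:
#   response.results[0].categories -> {"violence_and_threats": False,
#                                      "hate_and_discrimination": False, ...}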


# Function to build or load the vector store from CSV data
def build_or_load_vectorstore(csv_path: str, store_dir: str) -> FAISS:
    if os.path.exists(store_dir):
        print(f"DEBUG: Found existing FAISS store at '{store_dir}'. Loading...")
        embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/multi-qa-mpnet-base-dot-v1")
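        # Note: recent LangChain versions require allow_dangerous_deserialization=True
        # when loading a pickled FAISS index; add it here if load_local() raises.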
        vectorstore = FAISS.load_local(store_dir, embeddings)
        return vectorstore
    else:
        print(f"DEBUG: Building new store from CSV: {csv_path}")
        df = pd.read_csv(csv_path)
        df = df.loc[:, ~df.columns.str.contains('^Unnamed')]
        df.columns = df.columns.str.strip()
        if "Answer" in df.columns:
            df.rename(columns={"Answer": "Answers"}, inplace=True)
        if "Question" not in df.columns and "Question " in df.columns:
            df.rename(columns={"Question ": "Question"}, inplace=True)
        if "Question" not in df.columns or "Answers" not in df.columns:
            raise ValueError("CSV must have 'Question' and 'Answers' columns.")
        docs = []
        for _, row in df.iterrows():
            q = str(row["Question"])
            ans = str(row["Answers"])
            doc = Document(page_content=ans, metadata={"question": q})
            docs.append(doc)
        embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/multi-qa-mpnet-base-dot-v1")
        vectorstore = FAISS.from_documents(docs, embedding=embeddings)
        vectorstore.save_local(store_dir)
        return vectorstore
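
# Expected CSV layout (illustrative rows, not real data). Note that an "Answer"
# column and a trailing-space "Question " header are both normalized above:
#
#   Question,Answers
#   "What is box breathing?","A paced breathing technique: inhale, hold, exhale, hold..."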

# Function to build RAG chain
def build_rag_chain(vectorstore: FAISS) -> RetrievalQA:
    """Build RAG chain using the Gemini LLM directly without a custom class."""
    try:
        retriever = vectorstore.as_retriever(search_type="similarity", search_kwargs={"k": 3})
        chain = RetrievalQA.from_chain_type(
            llm=gemini_llm,  # Directly use the ChatGoogleGenerativeAI instance
            chain_type="stuff",
            retriever=retriever,
            return_source_documents=True
        )
        return chain
    except Exception as e:
        raise RuntimeError(f"Error building RAG chain: {str(e)}")
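
# Example call (illustrative):
#   chain = build_rag_chain(vectorstore)
#   out = chain({"query": "What is box breathing?"})
#   out["result"]            -> the generated answer
#   out["source_documents"]  -> the k=3 retrieved Documents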

# Function to perform web search using DuckDuckGo
def do_web_search(query: str) -> str:
    search_tool = DuckDuckGoSearchTool()
    web_agent = CodeAgent(tools=[search_tool], model=pydantic_agent)
    managed_web_agent = ManagedAgent(agent=web_agent, name="web_search", description="Runs web search for you.")
    manager_agent = CodeAgent(tools=[], model=pydantic_agent, managed_agents=[managed_web_agent])
    
    search_query = f"Give me relevant info: {query}"
    response = manager_agent.run(search_query)
    return response

# Function to combine web and knowledge base responses
def merge_responses(kb_answer: str, web_answer: str) -> str:
    # Merge both answers with a cohesive response
    final_answer = f"Knowledge Base Answer: {kb_answer}\n\nWeb Search Result: {web_answer}"
    return final_answer.strip()

# Orchestrate the entire workflow
def run_pipeline(query: str) -> str:
    # Moderate the query for harmful content
    moderated_query = moderate_text(query)
    if moderated_query == "OutOfScope":
        return "Sorry, this query contains harmful or inappropriate content."

    # Classify the query (keyword shortcut first, then the classification chain)
    classification = classify_query(moderated_query)

    if classification == "OutOfScope":
        refusal_text = refusal_chain.run({"topic": "this topic"})
        final_refusal = tailor_chain.run({"response": refusal_text})
        return final_refusal.strip()

    if classification == "Wellness":
        rag_result = wellness_rag_chain({"query": moderated_query})
        csv_answer = rag_result["result"].strip()
        web_answer = ""  # Empty if we found an answer from the knowledge base
        if not csv_answer:
            web_answer = do_web_search(moderated_query)
        final_merged = merge_responses(csv_answer, web_answer)
        final_answer = tailor_chain.run({"response": final_merged})
        return final_answer.strip()

    if classification == "Brand":
        rag_result = brand_rag_chain({"query": moderated_query})
        csv_answer = rag_result["result"].strip()
        final_merged = merge_responses(csv_answer, "")
        final_answer = tailor_chain.run({"response": final_merged})
        return final_answer.strip()

    refusal_text = refusal_chain.run({"topic": "this topic"})
    final_refusal = tailor_chain.run({"response": refusal_text})
    return final_refusal.strip()

# Initialize chains
classification_chain = get_classification_chain()
refusal_chain = get_refusal_chain()
tailor_chain = get_tailor_chain()
cleaner_chain = get_cleaner_chain()

wellness_csv = "AIChatbot.csv"
brand_csv = "BrandAI.csv"
wellness_store_dir = "faiss_wellness_store"
brand_store_dir = "faiss_brand_store"

wellness_vectorstore = build_or_load_vectorstore(wellness_csv, wellness_store_dir)
brand_vectorstore = build_or_load_vectorstore(brand_csv, brand_store_dir)

wellness_rag_chain = build_rag_chain(wellness_vectorstore)
brand_rag_chain = build_rag_chain(brand_vectorstore)

# Public entry point: thin wrapper so callers (e.g. a UI layer) need only one function
def run_with_chain(query: str) -> str:
    return run_pipeline(query)
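
# Minimal manual smoke test (illustrative; assumes MISTRAL_API_KEY and
# GEMINI_API_KEY are set and the two CSV files above exist):
if __name__ == "__main__":
    print(run_with_chain("What is box breathing and how do I practice it?"))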