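"""Business Support Chatbot.

A Gradio app that answers customer questions from the PDFs in the
business_docs folder: documents are chunked, embedded, and indexed in FAISS,
and an 8-bit-quantized Mistral-7B-Instruct model generates grounded answers.
"""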
# Dependencies: gradio, transformers, accelerate, bitsandbytes, pypdf,
# langchain, langchain-community, sentence-transformers, faiss-cpu
import gradio as gr
import os
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.document_loaders import PyPDFLoader
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import FAISS
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

# Configuration
DOCS_DIR = "business_docs"
EMBEDDING_MODEL = "sentence-transformers/all-MiniLM-L6-v2"
MODEL_NAME = "mistralai/Mistral-7B-Instruct-v0.1"
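# Note: Mistral models are gated on the Hugging Face Hub; you may need to
# accept the license and authenticate (e.g. `huggingface-cli login`) first.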

# Initialize components once at startup
def initialize_system():
    # Load and process PDFs from business_docs folder
    if not os.path.exists(DOCS_DIR):
        raise FileNotFoundError(f"Business documents folder '{DOCS_DIR}' not found")
        
    pdf_files = [os.path.join(DOCS_DIR, f) for f in os.listdir(DOCS_DIR) if f.lower().endswith(".pdf")]
    if not pdf_files:
        raise ValueError(f"No PDF files found in {DOCS_DIR} folder")

    # Process documents
    text_splitter = RecursiveCharacterTextSplitter(
        chunk_size=1000,
        chunk_overlap=200
    )
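    # ~1,000-character chunks with 200 characters of overlap keep each chunk
    # self-contained while preserving sentences that straddle a boundary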
    
    texts = []
    for pdf in pdf_files:
        loader = PyPDFLoader(pdf)
        pages = loader.load_and_split(text_splitter)
        texts.extend(pages)

    # Create vector store
    embeddings = HuggingFaceEmbeddings(model_name=EMBEDDING_MODEL)
    vector_store = FAISS.from_documents(texts, embeddings)
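    # The index is rebuilt in memory on every start; for larger corpora,
    # persisting it with vector_store.save_local() and reloading it with
    # FAISS.load_local() would avoid re-embedding on each launch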
    
    # Load the LLM with 8-bit quantization to cut memory use (requires a CUDA
    # GPU plus the accelerate and bitsandbytes packages)
    tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
    model = AutoModelForCausalLM.from_pretrained(
        MODEL_NAME,
        device_map="auto",
        quantization_config=BitsAndBytesConfig(load_in_8bit=True)
    )
    
    return vector_store, model, tokenizer

# Initialize system components
try:
    vector_store, model, tokenizer = initialize_system()
    print("System initialized successfully with business documents")
except Exception as e:
    print(f"Initialization error: {str(e)}")
    raise

# Response generation with context
def generate_response(query):
    # Retrieve relevant context
    docs = vector_store.similarity_search(query, k=3)
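    # k=3 balances recall against prompt length; raise it if answers
    # frequently miss relevant context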
    context = "\n".join([doc.page_content for doc in docs])
    
    # Build a Mistral [INST] prompt. The tokenizer prepends the <s> BOS token
    # itself, so it is omitted here to avoid doubling it; the text is kept
    # flush-left so Python indentation doesn't leak into the prompt.
    prompt = f"""[INST] You are a customer support agent.
Answer ONLY using information from the provided business documents.
If unsure, say "I don't have information about that."

Context: {context}
Question: {query} [/INST]"""
    
    # Generate response
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    outputs = model.generate(
        **inputs,
        max_new_tokens=500,
        temperature=0.3,
        do_sample=True,
        pad_token_id=tokenizer.eos_token_id  # Mistral has no pad token; silences a generate() warning
    )
    text = tokenizer.decode(outputs[0], skip_special_tokens=True)
    # Everything after the closing [/INST] tag is the model's answer
    return text.split("[/INST]")[-1].strip()

# Chat interface
with gr.Blocks() as demo:
    gr.Markdown("## Business Support Chatbot\nAsk questions about our services!")
    
    chatbot = gr.Chatbot(label="Conversation")
    msg = gr.Textbox(label="Type your question")
    clear = gr.Button("Clear History")
    
    def respond(message, chat_history):
        try:
            response = generate_response(message)
        except Exception as e:
            print(f"Generation error: {e}")  # log for debugging; keep the user-facing message generic
            response = "Sorry, I'm having trouble answering right now. Please try again later."
        chat_history.append((message, response))
        return "", chat_history
    
    msg.submit(respond, [msg, chatbot], [msg, chatbot])
    clear.click(lambda: None, None, chatbot, queue=False)

demo.launch()
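
# Expected layout before launch (file names here are illustrative):
#   business_docs/
#       faq.pdf
#       policies.pdf
# Run with e.g. `python app.py`; Gradio serves on http://127.0.0.1:7860 by default.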