Spaces: Build error
import gradio as gr
import os
from langchain.document_loaders import PyPDFLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.vectorstores import FAISS
from transformers import AutoModelForCausalLM, AutoTokenizer
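# NOTE (assumption): on a Hugging Face Space, these imports imply requirements such as
# gradio, langchain, transformers, torch, sentence-transformers (for HuggingFaceEmbeddings),
# faiss-cpu (for the FAISS vector store), pypdf (for PyPDFLoader), plus accelerate and
# bitsandbytes for device_map="auto" / load_in_8bit below.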

# Configuration
DOCS_DIR = "business_docs"
EMBEDDING_MODEL = "sentence-transformers/all-MiniLM-L6-v2"
MODEL_NAME = "mistralai/Mistral-7B-Instruct-v0.1"

# Initialize components once at startup
def initialize_system():
    # Load and process PDFs from business_docs folder
    if not os.path.exists(DOCS_DIR):
        raise FileNotFoundError(f"Business documents folder '{DOCS_DIR}' not found")

    pdf_files = [os.path.join(DOCS_DIR, f) for f in os.listdir(DOCS_DIR) if f.endswith(".pdf")]
    if not pdf_files:
        raise ValueError(f"No PDF files found in {DOCS_DIR} folder")

    # Process documents
    text_splitter = RecursiveCharacterTextSplitter(
        chunk_size=1000,
        chunk_overlap=200
    )
    texts = []
    for pdf in pdf_files:
        loader = PyPDFLoader(pdf)
        pages = loader.load_and_split(text_splitter)
        texts.extend(pages)

    # Create vector store
    embeddings = HuggingFaceEmbeddings(model_name=EMBEDDING_MODEL)
    vector_store = FAISS.from_documents(texts, embeddings)

    # Load model with quantization for faster inference
    tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
    model = AutoModelForCausalLM.from_pretrained(
        MODEL_NAME,
        device_map="auto",
        load_in_8bit=True
    )
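    # NOTE (assumption): load_in_8bit needs bitsandbytes installed; recent transformers
    # releases deprecate this flag in favor of quantization_config=BitsAndBytesConfig(...).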
    return vector_store, model, tokenizer

# Initialize system components
try:
    vector_store, model, tokenizer = initialize_system()
    print("System initialized successfully with business documents")
except Exception as e:
    print(f"Initialization error: {str(e)}")
    raise

# Response generation with context
def generate_response(query):
    # Retrieve relevant context
    docs = vector_store.similarity_search(query, k=3)
    context = "\n".join([doc.page_content for doc in docs])

    # Create instruction prompt
    prompt = f"""<s>[INST] You are a customer support agent.
Answer ONLY using information from the provided business documents.
If unsure, say "I don't have information about that."
Context: {context}
Question: {query} [/INST]"""
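    # [INST] ... [/INST] is the Mistral-Instruct prompt format; the answer is pulled out
    # after the closing [/INST] tag when the output is decoded below.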

    # Generate response
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    outputs = model.generate(
        **inputs,
        max_new_tokens=500,
        temperature=0.3,
        do_sample=True
    )
    return tokenizer.decode(outputs[0], skip_special_tokens=True).split("[/INST]")[-1].strip()

# Chat interface
with gr.Blocks() as demo:
    gr.Markdown("## Business Support Chatbot\nAsk questions about our services!")
    chatbot = gr.Chatbot(label="Conversation")
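    # NOTE (assumption): tuple-style (user, bot) history matches older Gradio Chatbot
    # releases; newer versions prefer gr.Chatbot(type="messages") with role/content dicts.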
    msg = gr.Textbox(label="Type your question")
    clear = gr.Button("Clear History")

    def respond(message, chat_history):
        try:
            response = generate_response(message)
        except Exception as e:
            print(f"Generation error: {e}")  # surface the failure in the Space logs
            response = "Sorry, I'm having trouble answering right now. Please try again later."
        chat_history.append((message, response))
        return "", chat_history
    msg.submit(respond, [msg, chatbot], [msg, chatbot])
    clear.click(lambda: None, None, chatbot, queue=False)

demo.launch()