Spaces: Build error
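A build error on Spaces at this stage usually comes from the dependency list rather than the app code itself. A requirements.txt sketch covering every import used below (package names inferred from the imports; versions left unpinned, and accelerate/bitsandbytes assumed to be needed for the device_map="auto" + 4-bit load):

    gradio
    torch
    transformers
    accelerate
    bitsandbytes
    langchain
    langchain-community
    sentence-transformers
    faiss-cpu
    pypdf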
import gradio as gr
import os
import torch
from langchain_community.document_loaders import PyPDFLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import FAISS
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
# Configuration
DOCS_DIR = "business_docs"
EMBEDDING_MODEL = "sentence-transformers/all-MiniLM-L6-v2"
MODEL_NAME = "microsoft/phi-2"
def initialize_system():
    # Document verification
    if not os.path.exists(DOCS_DIR):
        raise FileNotFoundError(f"Missing {DOCS_DIR} folder")
    pdf_files = [os.path.join(DOCS_DIR, f)
                 for f in os.listdir(DOCS_DIR)
                 if f.endswith(".pdf")]
    if not pdf_files:
        raise ValueError(f"No PDFs found in {DOCS_DIR}")
    # Document processing
    text_splitter = RecursiveCharacterTextSplitter(
        chunk_size=800,
        chunk_overlap=100
    )
    texts = []
    for pdf in pdf_files:
        loader = PyPDFLoader(pdf)
        pages = loader.load_and_split(text_splitter)
        texts.extend(pages)
    # Create embeddings
    embeddings = HuggingFaceEmbeddings(
        model_name=EMBEDDING_MODEL,
        model_kwargs={'device': 'cpu'}
    )

    # Vector store
    vector_store = FAISS.from_documents(texts, embeddings)
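    # Note: FAISS.from_documents builds the index in memory, so it is
    # recomputed on every Space restart; fine for a small document set.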
    # Model loading
    tokenizer = AutoTokenizer.from_pretrained(
        MODEL_NAME,
        trust_remote_code=True,
        padding_side="left"
    )
    # Passing load_in_4bit directly to from_pretrained is deprecated; use a
    # BitsAndBytesConfig instead. 4-bit quantization also requires a GPU and
    # the bitsandbytes package, so this will fail on CPU-only hardware.
    model = AutoModelForCausalLM.from_pretrained(
        MODEL_NAME,
        trust_remote_code=True,
        device_map="auto",
        quantization_config=BitsAndBytesConfig(
            load_in_4bit=True,
            bnb_4bit_compute_dtype=torch.float16
        )
    )
    return vector_store, model, tokenizer
try:
    vector_store, model, tokenizer = initialize_system()
    print("System initialized successfully ✅")
except Exception as e:
    print(f"Initialization failed ❌: {str(e)}")
    raise
def generate_response(query):
    try:
        # Context retrieval
        docs = vector_store.similarity_search(query, k=2)
        context = "\n".join([d.page_content for d in docs])

        # Phi-2 optimized prompt
        prompt = f"""<|system|>
You are a customer service bot. Answer only using:
{context}
- Max 3 sentences
- If unsure: "I'll check with the team"
</s>
<|user|>
{query}</s>
<|assistant|>"""
        inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
        outputs = model.generate(
            **inputs,
            max_new_tokens=200,
            do_sample=True,  # temperature has no effect unless sampling is on
            temperature=0.1,
            pad_token_id=tokenizer.eos_token_id
        )
        response = tokenizer.decode(outputs[0], skip_special_tokens=True)
        return response.split("<|assistant|>")[-1].strip()
    except Exception:
        return "Please try again later."
# Gradio interface
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    gr.Markdown("# Customer Support Chatbot")
    chatbot = gr.Chatbot()
    msg = gr.Textbox(label="Ask about our services")
    clear = gr.ClearButton([msg, chatbot])

    def respond(message, history):
        response = generate_response(message)
        history.append((message, response))
        return "", history

    msg.submit(respond, [msg, chatbot], [msg, chatbot])

demo.launch()
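Note: Phi-2's model card does not define chat special tokens, so the <|system|>/<|user|>/<|assistant|> tags above are just literal text to the model. The card's documented QA format is a plain "Instruct: ... Output:" prompt; a sketch in that style, reusing the same context/query variables (untested):

    prompt = f"""Instruct: You are a customer service bot. Answer in at most
    3 sentences, using only this context:
    {context}
    Question: {query}
    Output:"""
    # then split the decoded text on "Output:" instead of "<|assistant|>"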