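"""Customer Care Assistant: a small RAG app that indexes the PDFs in
business_docs/ into a FAISS store using MiniLM sentence embeddings, then
answers questions with a 4-bit quantized Phi-2 model behind a Gradio chat UI."""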
import gradio as gr
import os
# NOTE: in LangChain >= 0.1 these classes live in the langchain_community
# package (langchain_community.document_loaders, .embeddings, .vectorstores).
from langchain.document_loaders import PyPDFLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.vectorstores import FAISS
from transformers import AutoModelForCausalLM, AutoTokenizer
# Configuration
DOCS_DIR = "business_docs"
EMBEDDING_MODEL = "sentence-transformers/all-MiniLM-L6-v2"
MODEL_NAME = "microsoft/phi-2"
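# Assumptions: phi-2 needs trust_remote_code on older transformers releases,
# and 4-bit loading needs the bitsandbytes package plus a CUDA GPU at runtime.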
# Initialize system components
def initialize_system():
    # Load and process PDFs
    if not os.path.exists(DOCS_DIR):
        raise FileNotFoundError(f"'{DOCS_DIR}' folder not found")

    pdf_files = [os.path.join(DOCS_DIR, f) for f in os.listdir(DOCS_DIR)
                 if f.endswith(".pdf")]
    if not pdf_files:
        raise FileNotFoundError(f"No PDF files found in '{DOCS_DIR}'")

    # Split each PDF into overlapping chunks so retrieval stays focused
    text_splitter = RecursiveCharacterTextSplitter(
        chunk_size=1000,
        chunk_overlap=200
    )
    texts = []
    for pdf in pdf_files:
        loader = PyPDFLoader(pdf)
        pages = loader.load_and_split(text_splitter)
        texts.extend(pages)

    # Create vector store
    embeddings = HuggingFaceEmbeddings(model_name=EMBEDDING_MODEL)
    vector_store = FAISS.from_documents(texts, embeddings)

    # Load Phi-2 model with 4-bit quantization
    # (load_in_4bit is deprecated in newer transformers releases in favor of
    # quantization_config=BitsAndBytesConfig(load_in_4bit=True))
    tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, trust_remote_code=True)
    model = AutoModelForCausalLM.from_pretrained(
        MODEL_NAME,
        trust_remote_code=True,
        device_map="auto",
        load_in_4bit=True
    )
    return vector_store, model, tokenizer
try:
    vector_store, model, tokenizer = initialize_system()
    print("System ready with business documents loaded")
except Exception as e:
    raise RuntimeError(f"Initialization failed: {e}") from e
# Response generation
def generate_response(query):
    # Retrieve the top-3 most relevant chunks for the query
    docs = vector_store.similarity_search(query, k=3)
    context = "\n".join(doc.page_content for doc in docs)

    # Phi-2-style instruct prompt grounded in the retrieved context
    prompt = f"""Instruct: Answer the customer's question using only the provided context.
If you don't know the answer, say 'I need to check with our team about that.'
Context: {context}
Question: {query}
Answer:"""

    # Generate response
    inputs = tokenizer(prompt, return_tensors="pt",
                       return_attention_mask=False).to(model.device)
    outputs = model.generate(
        **inputs,
        max_new_tokens=300,
        temperature=0.2,
        repetition_penalty=1.2,
        do_sample=True
    )

    # Decode, then keep only the text after the final "Answer:" marker
    full_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
    answer = full_text.split("Answer:")[-1].strip()
    return answer.split("\n\n")[0]  # Return first paragraph
# Chat interface (tuple-style history, as used by the Gradio 3.x/4.x Chatbot)
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    gr.Markdown("# Customer Care Assistant")
    gr.Markdown("Ask questions about our products/services")

    chatbot = gr.Chatbot(height=400)
    msg = gr.Textbox(label="Type your question here...")
    clear = gr.Button("Clear History")

    def respond(message, chat_history):
        try:
            response = generate_response(message)
            if not response:
                response = "I need to verify that information. Please contact [email protected]"
        except Exception:
            response = "Apologies, I'm experiencing technical difficulties. Please try again later."
        chat_history.append((message, response))
        return "", chat_history

    msg.submit(respond, [msg, chatbot], [msg, chatbot])
    clear.click(lambda: None, None, chatbot, queue=False)

demo.launch(server_name="0.0.0.0", server_port=7860)