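"""Gradio customer-support chatbot: indexes the PDFs in business_docs/ with
MiniLM embeddings + FAISS, then answers queries with microsoft/phi-2 using
the top retrieved chunks as context."""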
import gradio as gr
import os
import torch
from langchain_community.document_loaders import PyPDFLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import FAISS
from transformers import AutoModelForCausalLM, AutoTokenizer
# Configuration
DOCS_DIR = "business_docs"
EMBEDDING_MODEL = "sentence-transformers/all-MiniLM-L6-v2"
MODEL_NAME = "microsoft/phi-2"
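# NOTE: phi-2 is roughly 2.7B parameters (~5.5 GB of weights in fp16), so a
# GPU is assumed here; on CPU-only hardware a smaller model may be needed.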
def initialize_system():
    # Document processing
    if not os.path.exists(DOCS_DIR):
        raise FileNotFoundError(f"Missing {DOCS_DIR} folder")

    pdf_files = [os.path.join(DOCS_DIR, f)
                 for f in os.listdir(DOCS_DIR)
                 if f.endswith(".pdf")]

    text_splitter = RecursiveCharacterTextSplitter(
        chunk_size=1000,  # Increased chunk size for better context
        chunk_overlap=200
    )

    texts = []
    for pdf in pdf_files:
        loader = PyPDFLoader(pdf)
        pages = loader.load_and_split(text_splitter)
        texts.extend(pages)

    # Create embeddings
    embeddings = HuggingFaceEmbeddings(
        model_name=EMBEDDING_MODEL,
        model_kwargs={'device': 'cpu'}
    )

    # Vector store
    vector_store = FAISS.from_documents(texts, embeddings)

    # Load model with memory optimization
    tokenizer = AutoTokenizer.from_pretrained(
        MODEL_NAME,
        trust_remote_code=True,
        padding_side="left"
    )
    model = AutoModelForCausalLM.from_pretrained(
        MODEL_NAME,
        trust_remote_code=True,
        torch_dtype=torch.float16,
        device_map="auto",
        low_cpu_mem_usage=True
    )
    return vector_store, model, tokenizer
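# Build the index and load the model once at import time so every request
# reuses them; any failure is printed and re-raised so it shows in the logs.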
try:
    vector_store, model, tokenizer = initialize_system()
    print("✅ System initialized successfully")
    if torch.cuda.is_available():
        print(f"Memory usage: {torch.cuda.memory_allocated()/1024**3:.1f}GB")
except Exception as e:
    print(f"❌ Initialization failed: {str(e)}")
    raise
def generate_response(query):
    try:
        # Context retrieval
        docs = vector_store.similarity_search(query, k=3)
        context = "\n".join([d.page_content for d in docs])

        # Optimized prompt
        prompt = f"""<|system|>
You are a customer service expert. Answer using:
{context}
- Be concise (2-3 sentences)
- If information is missing: "Let me check with the team"
</s>
<|user|>{query}</s>
<|assistant|>"""

        # Move inputs to the model's device (device_map="auto" may place it on GPU)
        inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
        outputs = model.generate(
            **inputs,  # passes attention_mask along with input_ids
            max_new_tokens=300,
            temperature=0.3,
            do_sample=True,
            pad_token_id=tokenizer.eos_token_id
        )
        response = tokenizer.decode(outputs[0], skip_special_tokens=True)
        return response.split("<|assistant|>")[-1].strip()
    except Exception as e:
        print(f"Generation error: {e}")  # log the cause instead of swallowing it
        return "Please try again later."
# Enhanced interface
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    gr.Markdown("# Enterprise Customer Support")
    with gr.Row():
        chatbot = gr.Chatbot(height=500, label="Conversation")
    with gr.Row():
        msg = gr.Textbox(placeholder="Ask about our services...", scale=7)
        submit_btn = gr.Button("Send", variant="primary", scale=1)
    clear = gr.ClearButton([msg, chatbot])

    def respond(message, history):
        response = generate_response(message)
        history.append((message, response))
        return "", history

    submit_btn.click(respond, [msg, chatbot], [msg, chatbot])
    msg.submit(respond, [msg, chatbot], [msg, chatbot])

demo.launch(server_port=7860)
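# Note: 7860 is Gradio's default port and the one Hugging Face Spaces
# typically expects an app to listen on.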