import os
import streamlit as st
import PyPDF2
from pdfminer.high_level import extract_text
from transformers import AutoTokenizer
from sentence_transformers import SentenceTransformer
import faiss
import numpy as np
from groq import Groq
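# Assumed dependencies for a requirements.txt (not pinned in the original):
# streamlit, PyPDF2, pdfminer.six, transformers, sentence-transformers,
# faiss-cpu, numpy, groq
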
# --- Helper Functions ---

def extract_text_from_pdf(pdf_path):
    try:
        text = ""
        with open(pdf_path, 'rb') as file:
            pdf_reader = PyPDF2.PdfReader(file)
            for page in pdf_reader.pages:
                page_text = page.extract_text()
                if page_text:
                    text += page_text
        return text
    except Exception as e:
        st.warning(f"PyPDF2 failed with error: {e}. Trying pdfminer.six...")
        return extract_text(pdf_path)
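
# chunk_text_with_tokenizer splits the extracted text into overlapping token
# windows. With the defaults below (chunk_size=150, chunk_overlap=30) the
# window advances 150 - 30 = 120 tokens per step, so consecutive chunks share
# 30 tokens of context.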
def chunk_text_with_tokenizer(text, tokenizer, chunk_size=150, chunk_overlap=30):
    tokens = tokenizer.tokenize(text)
    chunks = []
    start = 0
    while start < len(tokens):
        end = min(start + chunk_size, len(tokens))
        chunk_tokens = tokens[start:end]
        chunk_text = tokenizer.convert_tokens_to_string(chunk_tokens)
        chunks.append(chunk_text)
        start += chunk_size - chunk_overlap
    return chunks
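
# Note: chunk_overlap must stay smaller than chunk_size; otherwise
# `start += chunk_size - chunk_overlap` would never advance and the loop
# above would not terminate.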
def retrieve_relevant_chunks(question, index, embeddings_model, text_chunks, k=3):
    question_embedding = embeddings_model.encode([question])[0]
    D, I = index.search(np.array([question_embedding]), k)
    # FAISS pads I with -1 when the index holds fewer than k vectors; without
    # this guard, text_chunks[-1] would silently return the last chunk.
    relevant_chunks = [text_chunks[i] for i in I[0] if i != -1]
    return relevant_chunks
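
# IndexFlatL2.search returns squared-L2 distances in D (unused here) and row
# indices in I, ordered nearest-first, so the retrieved chunks come back
# ranked by similarity to the question.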
def generate_answer_with_groq(question, context):
    prompt = f"Based on the following context, answer the question: '{question}'\n\nContext:\n{context}"
    model_name = "llama-3.3-70b-versatile"  # Adjust model if needed
    try:
        groq_client = Groq(api_key=os.environ.get("GROQ_API_KEY"))
        response = groq_client.chat.completions.create(
            model=model_name,
            messages=[{"role": "user", "content": prompt}]
        )
        return response.choices[0].message.content
    except Exception as e:
        st.error(f"Error generating answer with Groq API: {e}")
        return "I'm sorry, I couldn't generate an answer at this time."
# --- Streamlit UI & Logic ---

st.set_page_config(page_title="SMEHelpBot 🤖", layout="wide")
st.title("🤖 SMEHelpBot – Your AI Assistant for Small Businesses")
# GROQ API key check
GROQ_API_KEY = st.secrets.get("GROQ_API_KEY") or os.getenv("GROQ_API_KEY")
if not GROQ_API_KEY:
    st.error("❌ Please set GROQ_API_KEY in the environment or in .streamlit/secrets.toml")
    st.stop()
os.environ["GROQ_API_KEY"] = GROQ_API_KEY
# File uploader (a single file, matching accept_multiple_files=False)
uploaded_pdf = st.file_uploader("📄 Upload a PDF document for the SME knowledge base", type=["pdf"], accept_multiple_files=False)
# Text input for question
user_question = st.text_input("💬 Ask your question about the SME documents:")
# Button to trigger processing (also fires automatically once both inputs are set)
if st.button("Get Answer") or (user_question and uploaded_pdf):
    if not uploaded_pdf:
        st.warning("Please upload a PDF file first.")
    elif not user_question:
        st.warning("Please enter a question.")
    else:
        with st.spinner("Processing PDF and generating answer..."):
            # Save uploaded file temporarily so PyPDF2/pdfminer can read it from disk
            temp_path = f"/tmp/{uploaded_pdf.name}"
            with open(temp_path, "wb") as f:
                f.write(uploaded_pdf.getbuffer())

            # Extract text
            pdf_text = extract_text_from_pdf(temp_path)

            # Tokenizer + Chunk
            tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')
            text_chunks = chunk_text_with_tokenizer(pdf_text, tokenizer)

            # Embeddings
            embedding_model = SentenceTransformer('all-mpnet-base-v2')
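            # Possible optimization (an assumption, not in the original code):
            # the tokenizer and embedding model reload on every rerun; they
            # could be cached with Streamlit's resource cache, e.g.
            #   @st.cache_resource
            #   def load_embedding_model():
            #       return SentenceTransformer('all-mpnet-base-v2')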
            # Check text_chunks before encoding: testing a non-empty NumPy
            # array with `not` raises "truth value of an array is ambiguous".
            if not text_chunks:
                st.error("No text chunks found to create embeddings.")
            else:
                all_embeddings = embedding_model.encode(text_chunks)
                # Create FAISS index
                embedding_dim = all_embeddings[0].shape[0]
                index = faiss.IndexFlatL2(embedding_dim)
                index.add(np.array(all_embeddings))
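                # IndexFlatL2 performs exact brute-force search, which is fine
                # for a single uploaded PDF; an approximate index would only
                # matter at much larger scale.
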
                # Retrieve relevant chunks
                relevant_chunks = retrieve_relevant_chunks(user_question, index, embedding_model, text_chunks)
                context = "\n\n".join(relevant_chunks)

                # Generate answer with Groq
                answer = generate_answer_with_groq(user_question, context)

                # Display outputs
                st.markdown("### Extracted Text Snippet:")
                st.write(pdf_text[:500] + "...")
                st.markdown("### Sample Text Chunks:")
                for i, chunk in enumerate(text_chunks[:3]):
                    st.write(f"Chunk {i+1}: {chunk[:200]}...")
                st.markdown("### Answer:")
                st.success(answer)
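
# To run locally (assuming this file is saved as app.py):
#   streamlit run app.py
# with the key supplied either as an environment variable (GROQ_API_KEY=...)
# or in .streamlit/secrets.toml:
#   GROQ_API_KEY = "your-key-here"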