import streamlit as st
import pandas as pd
import tempfile
import os

# LangChain components (in LangChain >= 0.1 these live under
# langchain_community; the import paths below follow the classic layout).
from langchain.document_loaders import DataFrameLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.vectorstores import FAISS
from langchain.chains import RetrievalQA
from langchain.llms import HuggingFacePipeline
from transformers import pipeline, AutoTokenizer, AutoModelForSeq2SeqLM
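
# Assumed runtime dependencies (not declared in this file): streamlit, pandas,
# openpyxl (the Excel engine pd.read_excel uses for .xlsx/.xlsm), langchain,
# transformers, sentence-transformers, and faiss-cpu. A rough install line:
#   pip install streamlit pandas openpyxl langchain transformers sentence-transformers faiss-cpu
# Launch with:
#   streamlit run <this_file>.py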

def preprocess_excel(file_path: str) -> pd.DataFrame:
    """Load the 'Data Base' sheet, using Excel row 5 as the header row."""
    df_raw = pd.read_excel(file_path, sheet_name='Data Base', header=None)
    df = df_raw.iloc[4:].copy()                  # skip the four banner rows above the table
    df.columns = df.iloc[0]                      # first remaining row holds the column names
    df = df[1:]                                  # drop that header row from the data
    df.dropna(how='all', inplace=True)           # discard fully empty rows
    df.dropna(axis=1, how='all', inplace=True)   # discard fully empty columns
    df.reset_index(drop=True, inplace=True)
    return df
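
# Minimal usage sketch (assumes a workbook whose 'Data Base' sheet matches the
# layout preprocess_excel expects; the file name is hypothetical):
#   df = preprocess_excel("inspections.xlsm")
#   print(df.head())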

def build_vectorstore_from_dataframe(df: pd.DataFrame):
    df = df.fillna("")  # work on a copy so the caller's DataFrame is not mutated
    # Flatten each row into a single pipe-delimited string for embedding.
    df['combined_text'] = df.apply(lambda row: ' | '.join(str(cell) for cell in row), axis=1)

    docs_loader = DataFrameLoader(df[['combined_text']], page_content_column='combined_text')
    documents = docs_loader.load()

    splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=150)
    split_docs = splitter.split_documents(documents)

    embeddings = HuggingFaceEmbeddings(
        model_name="sentence-transformers/all-MiniLM-L6-v2",
        model_kwargs={"device": "cpu"},
        encode_kwargs={"normalize_embeddings": False}
    )
    vectorstore = FAISS.from_documents(split_docs, embeddings)
    return vectorstore
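
# Optional: the index can be persisted between sessions instead of re-embedded
# on every upload. A sketch, assuming a local "faiss_index" directory (newer
# LangChain versions also require allow_dangerous_deserialization=True on load):
#   vectorstore.save_local("faiss_index")
#   vectorstore = FAISS.load_local("faiss_index", embeddings)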

def create_qa_pipeline(vectorstore):
    # flan-t5-base is a small seq2seq model that runs acceptably on CPU.
    model_id = "google/flan-t5-base"
    tokenizer = AutoTokenizer.from_pretrained(model_id)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_id)

    gen_pipeline = pipeline("text2text-generation", model=model, tokenizer=tokenizer, max_length=512)
    llm = HuggingFacePipeline(pipeline=gen_pipeline)

    retriever = vectorstore.as_retriever()
    qa = RetrievalQA.from_chain_type(llm=llm, retriever=retriever, chain_type="stuff", return_source_documents=False)
    return qa
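
# The chain also works outside Streamlit, e.g. as a quick smoke test
# (question text is illustrative):
#   qa = create_qa_pipeline(build_vectorstore_from_dataframe(df))
#   print(qa.run("How many backlog items are marked Yes?"))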

st.set_page_config(page_title="Excel-Aware RAG Chatbot", layout="wide")
st.title("📊 Excel-Aware RAG Chatbot (Professional QA)")

with st.sidebar:
    uploaded_file = st.file_uploader("Upload your Excel file (.xlsx or .xlsm with 'Data Base' sheet)", type=["xlsx", "xlsm"])
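
# NOTE: Streamlit reruns this entire script on every interaction, so the block
# below re-reads and re-indexes the workbook for each message. A sketch of how
# the expensive steps could be cached (get_qa is a hypothetical helper):
#   @st.cache_resource
#   def get_qa(file_bytes: bytes):
#       ...  # preprocess, embed, and build the RetrievalQA chain once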

if uploaded_file is not None:
    with st.spinner("Processing and indexing your Excel sheet..."):
        # Persist the upload to disk so pandas can read it; keep the original extension.
        suffix = os.path.splitext(uploaded_file.name)[1] or ".xlsx"
        with tempfile.NamedTemporaryFile(delete=False, suffix=suffix) as tmp_file:
            tmp_file.write(uploaded_file.read())
            tmp_path = tmp_file.name

        try:
            cleaned_df = preprocess_excel(tmp_path)
            vectorstore = build_vectorstore_from_dataframe(cleaned_df)
            qa = create_qa_pipeline(vectorstore)
            st.success("✅ File processed and chatbot ready! Ask your questions below.")

            if "chat_history" not in st.session_state:
                st.session_state.chat_history = []

            with st.chat_message("assistant"):
                st.markdown("How can I help you with the inspection data?")

            user_prompt = st.chat_input("Ask a question like 'How many backlog items are marked Yes?' or 'List overdue inspections'.")

            if user_prompt:
                st.chat_message("user").markdown(user_prompt)
                with st.chat_message("assistant"):
                    with st.spinner("Thinking..."):
                        try:
                            answer = qa.run(user_prompt)
                            st.markdown(f"**Answer:** {answer}")
                            st.session_state.chat_history.append((user_prompt, answer))
                        except Exception as e:
                            st.error(f"Error: {e}")
        except Exception as e:
            st.error(f"Error processing file: {e}")
        finally:
            os.remove(tmp_path)
else:
    st.info("Upload a file to get started.")