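"""Streamlit chat front end for the Appian IND Application Assistant.

Loads preprocessed documents into a vector store and answers questions about
Investigational New Drug (IND) applications through a RAG chain. Run with
`streamlit run <this file>` after the preprocessing step has produced
preprocessed_docs.json.
"""
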
import os

# Disable Streamlit's file watcher so edits on disk don't trigger auto-reloads.
# This env var maps to the `server.fileWatcherType` config option and must be
# set before streamlit is imported.
os.environ["STREAMLIT_SERVER_FILE_WATCHER_TYPE"] = "none"

import streamlit as st
from ind_checklist_stlit import load_preprocessed_data, init_vector_store, create_rag_chain

# Path to the JSON file produced by the offline preprocessing step
PREPROCESSED_FILE = "preprocessed_docs.json"

# Cache answers keyed on the question text so repeated questions skip the RAG
# pipeline. Note that st.cache_data hashes only the arguments: the chain read
# from session_state is not part of the cache key.
@st.cache_data
def cached_response(question: str) -> str:
    """Return the answer for a question, recomputing only on a cache miss."""
    return st.session_state.rag_chain.invoke({"question": question})["response"]

def main():
    st.title("Appian IND Application Assistant")
    st.markdown("Chat about Investigational New Drug Applications")

    # Button to clear chat history; st.rerun() restarts the script so the
    # emptied history renders immediately
    if st.button("Clear Chat History"):
        st.session_state.messages = []
        st.rerun()

    # Initialize session state
    if "messages" not in st.session_state:
        st.session_state.messages = []

    # Build the RAG chain once per session; session_state keeps it alive across reruns
    if "rag_chain" not in st.session_state:
        if not os.path.exists(PREPROCESSED_FILE):
            st.error(f"❌ Preprocessed file '{PREPROCESSED_FILE}' not found. Please run preprocessing first.")
            return  # Stop execution if preprocessed data is missing

        with st.spinner("πŸ”„ Initializing knowledge base..."):
            documents = load_preprocessed_data(PREPROCESSED_FILE)
            vectorstore = init_vector_store(documents)
            st.session_state.rag_chain = create_rag_chain(vectorstore.as_retriever())

    # Display chat history
    for message in st.session_state.messages:
        with st.chat_message(message["role"]):
            st.markdown(message["content"])

    # Chat input and response handling
    if prompt := st.chat_input("Ask about IND requirements"):
        st.session_state.messages.append({"role": "user", "content": prompt})

        # Display user message
        with st.chat_message("user"):
            st.markdown(prompt)

        # Generate the assistant's response (served from cache for repeat questions)
        with st.chat_message("assistant"):
            response = cached_response(prompt)
            st.markdown(response)

        # Store bot response in chat history
        st.session_state.messages.append({"role": "assistant", "content": response})

if __name__ == "__main__":
    main()