File size: 3,080 Bytes
d2f1b7c
b102c35
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
# streamlit_app.py
"""Streamlit front-end for the Business Q&A assistant.

Wires a chat UI around the `respond` function defined in app.py.
Importing app.py is expected to load the model, tokenizer, and data
as module-level side effects.
"""
import streamlit as st
import sys
import os

# Load variables (API keys, etc.) from a local .env file before any
# code that reads os.environ runs.
from dotenv import load_dotenv
load_dotenv()

# Make the directory containing this file importable so app.py
# (assumed to live alongside streamlit_app.py) can be imported.
sys.path.append(os.path.dirname(os.path.abspath(__file__)))

# Import the core Q&A logic; fail fast with a visible error if the
# import (and therefore model loading) does not succeed.
try:
    from app import respond, model_id
    print("Successfully imported respond function from app.py")
except ImportError as e:
    st.error(f"Error importing core logic from app.py: {e}")
    st.stop()  # Halt the script run — nothing below can work without app.py

# Page config must be the first Streamlit UI call on the page.
st.set_page_config(page_title="Business Q&A Assistant")

st.title(f"Business Q&A Assistant with {model_id}")
st.write("Ask questions about the business (details from Google Sheet) or general knowledge (via search).")

# Chat history is kept in session_state so it persists across
# Streamlit reruns within a single user session.
if "messages" not in st.session_state:
    st.session_state.messages = []

# Re-render the full conversation on every rerun.
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])
    # Accept user input
    if prompt := st.chat_input("Your Question"):
        # Add user message to chat history
        st.session_state.messages.append({"role": "user", "content": prompt})
        # Display user message in chat message container
        with st.chat_message("user"):
            st.markdown(prompt)

        # Get the current chat history in the format your respond function expects
        # Gradio's history is [(user, bot), (user, bot), ...]
        # Streamlit's session state is a list of dicts [{"role": "user", "content": "..."}]
        # We need to convert Streamlit's history format to Gradio's format for your respond function
        gradio_chat_history = []
        # Start from the second message if the first was from the system/initial state
        # Or just iterate through pairs, skipping the latest user prompt for history pass
        # The respond function expects history *before* the current turn
        history_for_respond = []
        # Iterate through messages, excluding the very last user prompt which is the current input
        for i in range(len(st.session_state.messages) - 1):
            if st.session_state.messages[i]["role"] == "user" and st.session_state.messages[i+1]["role"] == "assistant":
                history_for_respond.append((st.session_state.messages[i]["content"], st.st