Futuresony committed on
Commit
b102c35
·
verified ·
1 Parent(s): f348ee7

Update streamlit_app.py

Browse files
Files changed (1) hide show
  1. streamlit_app.py +60 -91
streamlit_app.py CHANGED
@@ -1,92 +1,61 @@
1
# streamlit_app.py
"""Streamlit front-end for the business Q&A assistant.

Bridges Streamlit's dict-based chat history to the Gradio-style
(user, bot) tuple history that app.respond() expects, and renders
the conversation with st.chat_message widgets.
"""
import os
import sys

import streamlit as st

# set_page_config must be the FIRST Streamlit command executed in the
# script; if the import below fails and st.error()/st.stop() ran first,
# Streamlit would raise StreamlitAPIException. Hence it is hoisted here.
st.set_page_config(page_title="Business Q&A Assistant")

# Make app.py importable (assumed to live alongside this file).
sys.path.append(os.path.dirname(os.path.abspath(__file__)))

# app.py is expected to load the model, tokenizer, etc. at import time
# and expose respond(message, history) plus the model_id string.
try:
    from app import respond, model_id
    print("Successfully imported respond function from app.py")
except ImportError as e:
    st.error(f"Error importing core logic from app.py: {e}")
    st.stop()  # Without the core logic the app cannot run.

st.title(f"Business Q&A Assistant with {model_id}")
st.write("Ask questions about the business (details from Google Sheet) or general knowledge (via search).")

# Chat history lives in session state, which persists across reruns for
# a single user session.
if "messages" not in st.session_state:
    st.session_state.messages = []

# Replay the conversation so far on every rerun.
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

# Accept user input.
if prompt := st.chat_input("Your Question"):
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.markdown(prompt)

    # Convert Streamlit's [{"role": ..., "content": ...}] history into
    # the Gradio-style [(user, bot), ...] pairs that respond() expects.
    # The current prompt is excluded: respond() receives it separately
    # as its first argument.
    history_for_respond = []
    msgs = st.session_state.messages
    for i in range(len(msgs) - 1):
        if msgs[i]["role"] == "user" and msgs[i + 1]["role"] == "assistant":
            history_for_respond.append((msgs[i]["content"], msgs[i + 1]["content"]))

    with st.spinner("Thinking..."):
        # respond() is expected to return ("", updated_chat_history);
        # guard against unexpected return shapes so one bad call does
        # not crash the UI.
        response_tuple = respond(prompt, history_for_respond)
        if isinstance(response_tuple, tuple) and len(response_tuple) >= 2:
            _, updated_gradio_history = response_tuple
        else:
            print(f"Warning: respond function returned unexpected value: {response_tuple}")
            updated_gradio_history = history_for_respond + [
                (prompt, "Sorry, I couldn't get a valid response.")
            ]

    # The last entry of the returned Gradio history should be the current
    # turn's (user_prompt, bot_response); extract the reply from it.
    if updated_gradio_history and updated_gradio_history[-1][0] == prompt:
        bot_response = updated_gradio_history[-1][1]
    else:
        # Fallback if the history structure is unexpected.
        bot_response = "Sorry, I couldn't get a response from the model."
        print("Warning: respond function returned history in an unexpected format.")

    st.session_state.messages.append({"role": "assistant", "content": bot_response})
    with st.chat_message("assistant"):
        st.markdown(bot_response)

# Clear-history control, mirroring the Gradio app's clear button.
if st.button("Clear Chat History"):
    st.session_state.messages = []
    st.rerun()  # st.experimental_rerun() was deprecated and removed in Streamlit >= 1.27.
 
1
  # streamlit_app.py
2
+ import streamlit as st
3
+ import sys
4
+ import os
5
+
6
+ # *** Add these two lines at the very top ***
7
+ from dotenv import load_dotenv
8
+ load_dotenv() # Load variables from .env file
9
+
10
+ # Add the directory containing app.py to the Python path
11
+ # This assumes app.py is in the same directory as streamlit_app.py
12
+ sys.path.append(os.path.dirname(os.path.abspath(__file__)))
13
+
14
+ # Import your respond function and any necessary global variables from app.py
15
+ # Make sure app.py loads the model, tokenizer, etc. when imported
16
+ try:
17
+ from app import respond, model_id # Import your main function and model_id
18
+ # You might also need to import other things if respond relies on globals directly
19
+ # from app import model, tokenizer, embedder, nlp, data, descriptions, embeddings, ...
20
+ print("Successfully imported respond function from app.py")
21
+ except ImportError as e:
22
+ st.error(f"Error importing core logic from app.py: {e}")
23
+ st.stop() # Stop the app if the core logic can't be loaded
24
+
25
+ # Set Streamlit page config
26
+ st.set_page_config(page_title="Business Q&A Assistant")
27
+
28
+ st.title(f"Business Q&A Assistant with {model_id}")
29
+ st.write("Ask questions about the business (details from Google Sheet) or general knowledge (via search).")
30
+
31
+ # Initialize chat history in Streamlit's session state
32
+ # Session state persists across reruns for a single user session
33
+ if "messages" not in st.session_state:
34
+ st.session_state.messages = []
35
+
36
+ # Display chat messages from history on app rerun
37
+ for message in st.session_state.messages:
38
+ with st.chat_message(message["role"]):
39
+ st.markdown(message["content"])
40
+
41
+ # Accept user input
42
+ if prompt := st.chat_input("Your Question"):
43
+ # Add user message to chat history
44
+ st.session_state.messages.append({"role": "user", "content": prompt})
45
+ # Display user message in chat message container
46
+ with st.chat_message("user"):
47
+ st.markdown(prompt)
48
+
49
+ # Get the current chat history in the format your respond function expects
50
+ # Gradio's history is [(user, bot), (user, bot), ...]
51
+ # Streamlit's session state is a list of dicts [{"role": "user", "content": "..."}]
52
+ # We need to convert Streamlit's history format to Gradio's format for your respond function
53
+ gradio_chat_history = []
54
+ # Start from the second message if the first was from the system/initial state
55
+ # Or just iterate through pairs, skipping the latest user prompt for history pass
56
+ # The respond function expects history *before* the current turn
57
+ history_for_respond = []
58
+ # Iterate through messages, excluding the very last user prompt which is the current input
59
+ for i in range(len(st.session_state.messages) - 1):
60
+ if st.session_state.messages[i]["role"] == "user" and st.session_state.messages[i+1]["role"] == "assistant":
61
+ history_for_respond.append((st.session_state.messages[i]["content"], st.st