import io
import re
import sys
from typing import Any, Callable

import streamlit as st

from utils import final_function

# Build the retrieval chain and its conversation memory once, at startup
qa, memory = final_function()
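# `final_function` lives in utils.py, which is not shown here; it is expected to
# return a LangChain ConversationalRetrievalChain and its conversation memory.
# A minimal sketch of what it might look like -- the embedding model, LLM, and
# Deep Lake dataset path below are assumptions, not the app's actual values:
#
#   from langchain.chains import ConversationalRetrievalChain
#   from langchain.chat_models import ChatOpenAI
#   from langchain.embeddings import OpenAIEmbeddings
#   from langchain.memory import ConversationBufferMemory
#   from langchain.vectorstores import DeepLake
#
#   def final_function():
#       db = DeepLake(
#           dataset_path="hub://<org>/<dataset>",  # hypothetical path
#           embedding=OpenAIEmbeddings(),
#           read_only=True,
#       )
#       memory = ConversationBufferMemory(
#           memory_key="chat_history", return_messages=True
#       )
#       qa = ConversationalRetrievalChain.from_llm(
#           ChatOpenAI(),
#           retriever=db.as_retriever(),
#           return_source_documents=True,
#           verbose=True,
#       )
#       return qa, memory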
def capture_and_display_output(func: Callable[..., Any], *args: Any, **kwargs: Any) -> Any:
    # Redirect standard output into a buffer so the chain's verbose trace can be captured
    original_stdout = sys.stdout
    sys.stdout = output_catcher = io.StringIO()

    # Run the given function and capture its printed output
    response = func(*args, **kwargs)

    # Restore the original standard output
    sys.stdout = original_stdout

    # Clean the captured output: strip ANSI escape sequences (terminal colour codes)
    output_text = output_catcher.getvalue()
    clean_text = re.sub(r"\x1b\[[0-?]*[ -/]*[@-~]", "", output_text)
    # Custom CSS for the response box
    st.markdown("""
    <style>
        .response-value {
            border: 2px solid #6c757d;
            border-radius: 5px;
            padding: 20px;
            background-color: #f8f9fa;
            color: #3d3d3d;
            font-size: 20px; /* adjust this value to change the text size */
            font-family: monospace;
        }
    </style>
    """, unsafe_allow_html=True)
    # Show the cleaned verbose trace inside an expander
    with st.expander("See Langchain Thought Process"):
        # Display the cleaned text in Streamlit as code
        st.code(clean_text)

    return response
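# Usage: wrap any callable whose printed output should surface in the UI.
# The wrapped function's return value is passed straight through, e.g.:
#   response = capture_and_display_output(qa, {"question": "..."})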
# Initialize chat history
if "messages" not in st.session_state:
    st.session_state.messages = []

# Display chat messages from history on app rerun
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])
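# Streamlit reruns this script from top to bottom on every interaction, so the
# loop above replays the stored conversation on each rerun.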
def chat_ui(qa):
    # Accept user input
    if prompt := st.chat_input(
        "Ask me questions: How can I retrieve data from Deep Lake in Langchain?"
    ):
        # Add user message to chat history
        st.session_state.messages.append({"role": "user", "content": prompt})

        # Display user message in chat message container
        with st.chat_message("user"):
            st.markdown(prompt)

        # Display assistant response in chat message container
        with st.chat_message("assistant"):
            message_placeholder = st.empty()
            full_response = ""

            # Load the memory variables, which include the chat history
            memory_variables = memory.load_memory_variables({})

            # Predict the AI's response in the conversation
            with st.spinner("Searching course material"):
                response = capture_and_display_output(
                    qa, {"question": prompt, "chat_history": memory_variables}
                )

            # Display the chat response
            full_response += response["answer"]
            message_placeholder.markdown(full_response + "▌")
            message_placeholder.markdown(full_response)
            # Display the top two retrieved sources
            with st.expander("See Resources"):
                for doc in response["source_documents"][:2]:
                    source = doc.metadata
                    st.write(f"Title: {source['title'].split('·')[0].strip()}")
                    st.write(f"Source: {source['source']}")
                    st.write(f"Relevance to Query: {source['relevance_score'] * 100}%")
        # Append the assistant message to session state
        st.session_state.messages.append(
            {"role": "assistant", "content": full_response}
        )
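        # Note (assumption): nothing above writes the new turn back into
        # `memory`, and the chain is called with chat_history passed explicitly.
        # If utils.final_function does not attach the memory to the chain
        # internally, follow-up questions will not see earlier turns; a hedged
        # fix would be:
        #   memory.save_context({"question": prompt}, {"answer": full_response})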
# Run the app, passing in the ConversationalRetrievalChain
chat_ui(qa)