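"""Streamlit chat demo over a Vectara corpus.

All configuration (customer id, corpus ids, API key, UI text, sample
questions) is read from environment variables in launch_bot() below.
"""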
from omegaconf import OmegaConf
from query import VectaraQuery
import os
import streamlit as st
from PIL import Image
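

# Interpret a config value as a boolean. Values read from environment
# variables arrive as strings, so "true"/"True" must be matched explicitly.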
def isTrue(x) -> bool:
    if isinstance(x, bool):
        return x
    return x.strip().lower() == 'true'
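

# Entry point: builds the config and the VectaraQuery client on the first
# run, caches both in st.session_state, and renders the sidebar and chat UI.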
def launch_bot():
    def generate_response(question):
        response = vq.submit_query(question)
        return response

    def generate_streaming_response(question):
        response = vq.submit_query_streaming(question)
        return response
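
    # Render the assistant's answer (streamed token by token when
    # cfg.streaming is set, otherwise as a single blocking call) and
    # append it to the chat history.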
    def generate_and_display_response(question):
        if cfg.streaming:
            stream = generate_streaming_response(question)
            response = st.write_stream(stream)
        else:
            with st.spinner("Thinking..."):
                response = generate_response(question)
            st.write(response)
        message = {"role": "assistant", "content": response}
        st.session_state.messages.append(message)
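
    # Record the user's question in the chat history, echo it in the
    # chat area, and trigger the assistant's response.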
    def submit_question(question):
        st.session_state.messages.append({"role": "user", "content": question})
        with st.chat_message("user"):
            st.write(question)
        generate_and_display_response(question)
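
    # Build the configuration once per session; every value comes from an
    # environment variable.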
    if 'cfg' not in st.session_state:
        cfg = OmegaConf.create({
            'customer_id': str(os.environ['customer_id']),
            # corpus_ids is a comma-separated list of corpus ids, e.g. "3" or "3,5"
            'corpus_ids': str(os.environ['corpus_ids']).split(','),
            'api_key': str(os.environ['api_key']),
            'title': os.environ['title'],
            'description': os.environ['description'],
            'source_data_desc': os.environ['source_data_desc'],
            'streaming': isTrue(os.environ.get('streaming', False)),
            # questions is a Python list literal with the sample questions
            'questions': list(eval(os.environ['questions'])),
            'prompt_name': os.environ.get('prompt_name', None)
        })
        st.session_state.cfg = cfg
        st.session_state.vq = VectaraQuery(cfg.api_key, cfg.customer_id, cfg.corpus_ids, cfg.prompt_name)
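
    # On Streamlit reruns, reuse the cached config and query client.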
    cfg = st.session_state.cfg
    vq = st.session_state.vq
    st.set_page_config(page_title=cfg.title, layout="wide")
    print(cfg)

    # left side content
    with st.sidebar:
        image = Image.open('Vectara-logo.png')
        st.markdown(f"## Welcome to {cfg.title}\n\n"
                    f"This demo uses Retrieval Augmented Generation to answer questions about {cfg.source_data_desc}\n\n")
        st.markdown("---")
        st.markdown(
            "## How does this work?\n"
            "This app was built with [Vectara](https://vectara.com).\n"
            "Vectara's [Indexing API](https://docs.vectara.com/docs/api-reference/indexing-apis/indexing) was used to ingest the data into a Vectara corpus (or index).\n\n"
            "This app uses Vectara's [Chat API](https://docs.vectara.com/docs/console-ui/vectara-chat-overview) to query the corpus and present the results to you, answering your question.\n\n"
        )
        st.markdown("---")
        st.image(image, width=250)
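
    # Main page: title, description, sample-question buttons, and the chat.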
st.markdown(f"<center> <h2> Vectara chat demo: {cfg.title} </h2> </center>", unsafe_allow_html=True)
st.markdown(f"<center> <h4> {cfg.description} <h4> </center>", unsafe_allow_html=True)
if "messages" not in st.session_state.keys():
st.session_state.messages = [{"role": "assistant", "content": "How may I help you?"}]
for question in cfg.questions:
st.button(question, on_click=lambda q=question: submit_question(q))

    # Display chat messages
    for message in st.session_state.messages:
        with st.chat_message(message["role"]):
            st.write(message["content"])

    # User-provided prompt
    if prompt := st.chat_input():
        submit_question(prompt)


if __name__ == "__main__":
    launch_bot()