from omegaconf import OmegaConf
from query import VectaraQuery
import os
import gradio as gr
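# Gradio chat app for a Vectara-backed RAG assistant: configuration is read from
# environment variables and queries are routed through VectaraQuery (query.py).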
def isTrue(x) -> bool:
    # Accept either a real bool or a string such as "True"/"true" from the environment.
    if isinstance(x, bool):
        return x
    return x.strip().lower() == 'true'
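# Build the app configuration from environment variables (on Hugging Face Spaces
# these are typically set as Space secrets/variables).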
corpus_ids = str(os.environ['corpus_ids']).split(',')
cfg = OmegaConf.create({
    'customer_id': str(os.environ['customer_id']),
    'corpus_ids': corpus_ids,
    'api_key': str(os.environ['api_key']),
    'title': os.environ['title'],
    'description': os.environ['description'],
    'source_data_desc': os.environ['source_data_desc'],
    'streaming': isTrue(os.environ.get('streaming', False)),
    'prompt_name': os.environ.get('prompt_name', None),
    'examples': os.environ.get('examples', None)
})
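# VectaraQuery (defined in the local query.py) wraps the Vectara API calls for the
# configured customer and corpora; submit_query / submit_query_streaming are used below.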
vq = VectaraQuery(cfg.api_key, cfg.customer_id, cfg.corpus_ids, cfg.prompt_name)
def respond(message, history):
    if cfg.streaming:
        # Call stream response and stream output
        stream = vq.submit_query_streaming(message)
        outputs = ""
        for output in stream:
            outputs += output
            yield outputs
    else:
        # Call non-stream response and return message output
        response = vq.submit_query(message)
        yield response
cfg.description = f'''
<table>
  <tr>
    <td style="width: 33%; vertical-align: bottom;"> <img src="https://github.com/david-oplatka/chatbot-streamlit/blob/main/Vectara-logo.png?raw=true"> </td>
    <td style="width: 34%; vertical-align: middle;"> <h1>{cfg.title}</h1> </td>
    <td style="width: 33%; vertical-align: bottom; text-align: left"> This demo uses Retrieval Augmented Generation from <a href="https://vectara.com/">Vectara</a><br>to ask questions about {cfg.source_data_desc}. </td>
  </tr>
</table>
<center> <h2>{cfg.description}</h2></center>
'''
css = """
table {
    border: none;
    width: 100%;
    table-layout: fixed;
    border-collapse: separate;
}
td {
    border: none;
    text-align: center;
}
img {
    width: 50%;
}
h1 {
    font-size: 3em; /* Adjust the size as needed */
}
"""
if cfg.examples:
    app_examples = [example.strip() for example in cfg.examples.split(",")]
else:
    app_examples = None
demo = gr.ChatInterface(respond, description=cfg.description, css=css,
                        chatbot=gr.Chatbot(value=[[None, "How may I help you?"]]),
                        examples=app_examples)
if __name__ == "__main__":
    demo.launch()
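# To run this app outside the Space, the same configuration must be provided via
# environment variables; a hypothetical setup (all values are placeholders, and the
# filename app.py is an assumption) could look like:
#   export customer_id="<vectara-customer-id>"
#   export corpus_ids="<corpus-id-1>,<corpus-id-2>"
#   export api_key="<vectara-api-key>"
#   export title="My Chatbot"
#   export description="Ask questions about my data"
#   export source_data_desc="my source data"
#   export streaming="True"        # optional; defaults to non-streaming
#   python app.py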