File size: 2,793 Bytes
e763e8a
 
 
0d179e3
 
11a9727
 
 
 
 
e763e8a
 
 
 
 
 
 
 
 
55c7d01
4e308cb
e763e8a
0d179e3
d14928c
fed1aac
 
 
 
 
d41ae8b
 
fed1aac
 
 
 
 
 
 
bd92b29
fed1aac
44da4e7
1c2b3eb
93d3a3d
 
bd0797d
f3ae818
bd0797d
93d3a3d
 
44da4e7
9538882
 
1c2b3eb
bd0797d
 
 
1c2b3eb
bd0797d
1c2b3eb
bd0797d
 
 
 
8fc43f7
1c2b3eb
 
93d3a3d
 
 
 
 
 
 
 
 
a148c7b
4e308cb
4e8b18f
b279c78
4e8b18f
50d6f71
d933daa
1c2b3eb
b6c9ea3
0d179e3
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
from omegaconf import OmegaConf
from query import VectaraQuery
import os
import gradio as gr

def isTrue(x) -> bool:
    """Coerce *x* to a boolean.

    Real booleans pass through unchanged; anything else is treated as a
    string and matches (case-insensitively, ignoring surrounding
    whitespace) against the literal 'true'.
    """
    return x if isinstance(x, bool) else x.strip().lower() == 'true'

# Build the app configuration from environment variables.
# Required vars (KeyError if missing): corpus_ids, customer_id, api_key,
# title, description, source_data_desc. Optional: streaming (defaults off),
# prompt_name, examples (comma-separated list of example prompts).
corpus_ids = str(os.environ['corpus_ids']).split(',')  # one or more corpus ids, comma-separated
cfg = OmegaConf.create({
    'customer_id': str(os.environ['customer_id']),
    'corpus_ids': corpus_ids,
    'api_key': str(os.environ['api_key']),
    'title': os.environ['title'],
    'description': os.environ['description'],
    'source_data_desc': os.environ['source_data_desc'],
    # isTrue handles both the bool default and a "true"/"false" env string.
    'streaming': isTrue(os.environ.get('streaming', False)),
    'prompt_name': os.environ.get('prompt_name', None),
    'examples': os.environ.get('examples', None)
})

# Single query client shared by all chat requests.
vq = VectaraQuery(cfg.api_key, cfg.customer_id, cfg.corpus_ids, cfg.prompt_name)

def respond(message, history):
    """Chat handler for gr.ChatInterface: yield the assistant's reply.

    In streaming mode, yields progressively longer prefixes of the answer
    as chunks arrive; otherwise yields the complete answer once.
    ``history`` is supplied by Gradio but is not used here.
    """
    if not cfg.streaming:
        # Non-streaming: one query, one complete answer.
        yield vq.submit_query(message)
        return

    # Streaming: accumulate chunks and re-yield the growing text so the
    # UI updates in place as the response arrives.
    accumulated = ""
    for chunk in vq.submit_query_streaming(message):
        accumulated += chunk
        yield accumulated


# Replace the plain-text description with an HTML header: a three-column
# table (logo | title | RAG blurb) followed by the original description.
cfg.description = f'''
                <table>
                  <tr>
                    <td style="width: 33%;"> <img src="https://github.com/david-oplatka/chatbot-streamlit/blob/main/Vectara-logo.png?raw=true"> </td>
                    <td style="width: 34%;"> <h1>{cfg.title}</h1> </td>
                    <td style="width: 33%;"> <p>This demo uses Retrieval Augmented Generation to ask questions about {cfg.source_data_desc}<p> </td>
                  </tr>
                </table>
                <center> <h2>{cfg.description}</h2></center>
                '''

css = """
table {
  width: 100%;
  table-layout: fixed;
}
td {
  border: none;
  text-align: center;
  vertical-align: middle;
}
img {
  width: 40%;
}
"""                  

# cfg.title = f'''<center> <img src="https://github.com/david-oplatka/chatbot-streamlit/blob/main/Vectara-logo.png?raw=true" width="200px" height="40px">
#                 <h1>{cfg.title}</h1> </center>
#                 '''

# cfg.description = f'''<center> <h2>{cfg.description}</h2>
#                       <br>
#                       This demo uses Retrieval Augmented Generation to ask questions about {cfg.source_data_desc}</center>
#                       '''

# Optional comma-separated example prompts for the chat UI; None hides
# the examples row entirely.
app_examples = None
if cfg.examples:
    app_examples = [prompt.strip() for prompt in cfg.examples.split(",")]

# Seed the chatbot with a greeting message (assistant side only).
demo = gr.ChatInterface(respond, description = cfg.description, css = css,
                        chatbot = gr.Chatbot(value = [[None, "How may I help you?"]], scale=2), examples = app_examples)


if __name__ == "__main__":
    demo.launch()