from omegaconf import OmegaConf
from query import VectaraQuery
import os
import gradio as gr

def isTrue(x) -> bool:
    """Interpret a bool, or a string such as 'True'/'true', as a boolean."""
    if isinstance(x, bool):
        return x
    return x.strip().lower() == 'true'
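
# All configuration comes from environment variables. A hypothetical local
# setup (placeholder values, not real credentials) might look like:
#   export corpus_keys="corpus_1,corpus_2"
#   export api_key="..."
#   export title="My Assistant"
#   export source_data_desc="the product documentation"
#   export streaming="True"                       # optional; defaults to False
#   export prompt_name="..."                      # optional
#   export examples="What is X?, How do I Y?"     # optional, comma-separated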

corpus_keys = str(os.environ['corpus_keys']).split(',')
cfg = OmegaConf.create({
    'corpus_keys': corpus_keys,
    'api_key': str(os.environ['api_key']),
    'title': os.environ['title'],
    'source_data_desc': os.environ['source_data_desc'],
    'streaming': isTrue(os.environ.get('streaming', False)),
    'prompt_name': os.environ.get('prompt_name', None),
    'examples': os.environ.get('examples', None)
})

vq = VectaraQuery(cfg.api_key, cfg.corpus_keys, cfg.prompt_name)
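
# Note: this app assumes the local query module's VectaraQuery exposes
# submit_query(message) -> str and submit_query_streaming(message) -> an
# iterator of text chunks; respond() below relies on exactly those two calls.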

def respond(message, history):
    if cfg.streaming:
        # Stream the response, yielding each chunk as it arrives
        stream = vq.submit_query_streaming(message)
        for output in stream:
            yield output
    else:
        # Fetch the full response in a single call and yield it once
        response = vq.submit_query(message)
        yield response

def vote(data: gr.LikeData):
    if data.liked:
        print("Received Thumbs up")
    else:
        print("Received Thumbs down")

heading_html = f'''
<table>
    <tr>
        <td style="width: 80%; text-align: left; vertical-align: middle;">
            <h1>Vectara AI Assistant: {cfg.title}</h1>
        </td>
        <td style="width: 20%; text-align: right; vertical-align: middle;">
            <img src="https://github.com/david-oplatka/chatbot-streamlit/blob/main/Vectara-logo.png?raw=true">
        </td>
    </tr>
    <tr>
        <td colspan="2" style="font-size: 16px;">This demo uses Retrieval Augmented Generation from <a href="https://vectara.com/">Vectara</a> to ask questions about {cfg.source_data_desc}.</td>
    </tr>
</table>
'''

bot_css = """
table { border: none; width: 100%; table-layout: fixed; border-collapse: separate; }
td { vertical-align: middle; border: none; }
img { width: 75%; }
h1 { font-size: 2em; /* adjust the size as needed */ }
"""

if cfg.examples:
    app_examples = [example.strip() for example in cfg.examples.split(",")]
else:
    app_examples = None

with gr.Blocks(css=bot_css) as demo:
    gr.HTML(heading_html)
    chatbot = gr.Chatbot(value=[[None, "How may I help you?"]])
    msg = gr.Textbox(label="Message")
    clear = gr.Button("Clear")

    def user(message, history):
        # Append the user's message to the history and clear the textbox
        return "", history + [[message, None]]

    def bot(history):
        # Generate the assistant's reply for the most recent user message
        message = history[-1][0]
        bot_message = respond(message, history)

        if cfg.streaming:
            # Accumulate streamed chunks, updating the last history entry
            # so the reply renders progressively in the UI
            full_response = ""
            for chunk in bot_message:
                full_response += chunk
                history[-1][1] = full_response
                yield history
        else:
            # respond() is a generator even in the non-streaming case,
            # so take its single yielded value
            history[-1][1] = next(bot_message)
            yield history

    msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(
        bot, chatbot, chatbot, api_name="bot_response"
    )
    chatbot.like(vote, None, None)

    clear.click(lambda: None, None, chatbot, queue=False)

    if app_examples:
        # Clicking an example populates the message box for the user to submit
        gr.Examples(app_examples, inputs=msg)

if __name__ == "__main__":
    demo.launch()
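
# To run locally (with the environment variables above set):
#   python app.py
# By default Gradio serves the app at http://localhost:7860.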