Update app.py
app.py CHANGED
@@ -23,6 +23,8 @@ MODELS = [
     "mistralai/Mixtral-8x7B-Instruct-v0.1",
     "mistralai/Mistral-Nemo-Instruct-2407",
     "meta-llama/Meta-Llama-3.1-8B-Instruct",
+    "meta-llama/Meta-Llama-3.1-70B-Instruct",
+    "meta-llama/Meta-Llama-3.1-8B-Instruct",
     "meta-llama/Meta-Llama-3.1-70B-Instruct"
 ]
 
@@ -64,22 +66,11 @@ def chatbot_interface(message, history, model, temperature, num_calls, use_embed
         for response in respond(message, history, model, temperature, num_calls, use_embeddings, system_prompt):
             history[-1] = (message, response)
             yield history
-    except gr.CancelledError:
-        yield history
     except Exception as e:
         logging.error(f"Unexpected error in chatbot_interface: {str(e)}")
         history[-1] = (message, f"An unexpected error occurred: {str(e)}")
         yield history
 
-def retry_last_response(history, model, temperature, num_calls, use_embeddings, system_prompt):
-    if not history:
-        return history
-
-    last_user_msg = history[-1][0]
-    history = history[:-1]  # Remove the last response
-
-    return chatbot_interface(last_user_msg, history, model, temperature, num_calls, use_embeddings, system_prompt)
-
 def respond(message, history, model, temperature, num_calls, use_embeddings, system_prompt):
     logging.info(f"User Query: {message}")
     logging.info(f"Model Used: {model}")
@@ -88,7 +79,6 @@ def respond(message, history, model, temperature, num_calls, use_embeddings, sys
     try:
         for main_content, sources in get_response_with_search(message, model, num_calls, temperature, use_embeddings, system_prompt):
            response = f"{main_content}\n\n{sources}"
-            first_line = response.split('\n')[0] if response else ''
            yield response
     except Exception as e:
         logging.error(f"Error with {model}: {str(e)}")
@@ -194,65 +184,52 @@ def initial_conversation():
         "To get started, ask me a question!")
     ]
 
-    [22 removed lines (old 197-218) were rendered blank in this view]
+demo = gr.ChatInterface(
+    chatbot_interface,
+    additional_inputs=[
+        gr.Dropdown(choices=MODELS, label="Select Model", value=MODELS[2]),
+        gr.Slider(minimum=0.1, maximum=1.0, value=0.2, step=0.1, label="Temperature"),
+        gr.Slider(minimum=1, maximum=5, value=1, step=1, label="Number of API Calls"),
+        gr.Checkbox(label="Use Embeddings", value=True),
+        gr.Textbox(label="System Prompt", value=DEFAULT_SYSTEM_PROMPT, lines=5),
+    ],
+    title="AI-powered Web Search Assistant",
+    description="Ask questions and get answers from web search results.",
+    theme=gr.themes.Soft(
+        primary_hue="orange",
+        secondary_hue="amber",
+        neutral_hue="gray",
+        font=[gr.themes.GoogleFont("Exo"), "ui-sans-serif", "system-ui", "sans-serif"]
+    ).set(
+        body_background_fill_dark="#0c0505",
+        block_background_fill_dark="#0c0505",
+        block_border_width="1px",
+        block_title_background_fill_dark="#1b0f0f",
+        input_background_fill_dark="#140b0b",
+        button_secondary_background_fill_dark="#140b0b",
+        border_color_accent_dark="#1b0f0f",
+        border_color_primary_dark="#1b0f0f",
+        background_fill_secondary_dark="#0c0505",
+        color_accent_soft_dark="transparent",
+        code_background_fill_dark="#140b0b"
+    ),
+    css=css,
+    examples=[
+        ["What are the latest developments in artificial intelligence?"],
+        ["Can you explain the basics of quantum computing?"],
+        ["What are the current global economic trends?"]
+    ],
+    cache_examples=False,
+    analytics_enabled=False,
+    textbox=gr.Textbox(placeholder="Ask a question", container=False, scale=7),
+    chatbot=gr.Chatbot(
         show_copy_button=True,
         likeable=True,
         layout="bubble",
         height=400,
         value=initial_conversation()
     )
-
-    with gr.Row():
-        msg = gr.Textbox(placeholder="Ask a question", container=False, scale=7)
-        submit = gr.Button("Submit")
-
-    with gr.Accordion("Advanced Options", open=False):
-        model = gr.Dropdown(choices=MODELS, label="Select Model", value=MODELS[2])
-        temperature = gr.Slider(minimum=0.1, maximum=1.0, value=0.2, step=0.1, label="Temperature")
-        num_calls = gr.Slider(minimum=1, maximum=5, value=1, step=1, label="Number of API Calls")
-        use_embeddings = gr.Checkbox(label="Use Embeddings", value=True)
-        system_prompt = gr.Textbox(label="System Prompt", value=DEFAULT_SYSTEM_PROMPT, lines=5)
-
-    clear = gr.Button("Clear")
-    retry = gr.Button("Retry Last Response")
-
-    # Set up event handlers
-    submit.click(chatbot_interface, [msg, chatbot, model, temperature, num_calls, use_embeddings, system_prompt], chatbot)
-    msg.submit(chatbot_interface, [msg, chatbot, model, temperature, num_calls, use_embeddings, system_prompt], chatbot)
-    clear.click(lambda: None, None, chatbot, queue=False)
-    retry.click(retry_last_response, [chatbot, model, temperature, num_calls, use_embeddings, system_prompt], chatbot)
-
-    chatbot.like(vote, None, None)
-
-    gr.Examples(
-        examples=[
-            ["What are the latest developments in artificial intelligence?"],
-            ["Can you explain the basics of quantum computing?"],
-            ["What are the current global economic trends?"]
-        ],
-        inputs=msg
-    )
+)
 
 if __name__ == "__main__":
     demo.launch(share=True)
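Editor's note: the heart of this commit is the swap from a hand-wired gr.Blocks layout to a single gr.ChatInterface call. As a reference point, here is a minimal, self-contained sketch of that wiring; the reply generator and the model names are illustrative placeholders, not this app's respond() pipeline. The constraint it demonstrates: additional_inputs are passed to the function positionally after (message, history), so the component order must mirror the parameter order of chatbot_interface.

import gradio as gr

# Illustrative stand-in for the app's respond() pipeline. gr.ChatInterface
# calls this as fn(message, history, *additional_inputs), so the parameter
# order below must match the component order in additional_inputs.
def reply(message, history, model, temperature, num_calls, use_embeddings, system_prompt):
    partial = ""
    for word in f"[{model}, T={temperature}] {message}".split():
        partial += word + " "
        yield partial  # each yield replaces the streamed assistant reply

demo = gr.ChatInterface(
    reply,
    additional_inputs=[
        gr.Dropdown(choices=["model-a", "model-b"], value="model-a", label="Select Model"),
        gr.Slider(minimum=0.1, maximum=1.0, value=0.2, step=0.1, label="Temperature"),
        gr.Slider(minimum=1, maximum=5, value=1, step=1, label="Number of API Calls"),
        gr.Checkbox(label="Use Embeddings", value=True),
        gr.Textbox(label="System Prompt", value="You are a helpful assistant.", lines=5),
    ],
)

if __name__ == "__main__":
    demo.launch()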
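Two migration notes worth flagging for anyone adapting this diff. First, gr.ChatInterface comes with its own Retry/Undo/Clear controls, which is presumably why the custom retry_last_response helper, the Retry and Clear buttons, and their .click() handlers were dropped rather than ported. Second, gr.ChatInterface expects its function to yield only the assistant reply (a string), while chatbot_interface still yields the full history list from its Blocks-era contract; a port of this pattern may need to reconcile those two shapes.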
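The theme block in the diff follows Gradio's base-theme-plus-overrides pattern. A minimal sketch of the same idea, with arbitrary stand-in values (attributes suffixed _dark only apply in dark mode):

import gradio as gr

# Build a base theme, then override individual design tokens with .set().
# The token names come from the commit; the values here are placeholders.
theme = gr.themes.Soft(
    primary_hue="orange",
    font=[gr.themes.GoogleFont("Exo"), "ui-sans-serif", "system-ui", "sans-serif"],
).set(
    body_background_fill_dark="#0c0505",  # page background, dark mode only
    block_border_width="1px",             # applies in both light and dark mode
)

with gr.Blocks(theme=theme) as preview:
    gr.Markdown("Theme smoke test")

if __name__ == "__main__":
    preview.launch()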