Ganesh Chintalapati committed
Commit · 7f76d3e
1 Parent(s): db34497
OpenAI Anthropic Gemini vertical
app.py CHANGED
@@ -198,94 +198,127 @@ async def ask_gemini(query: str, history: List[Dict[str, str]]) -> str:
         logger.error(f"Gemini Error: {str(e)}\nStack trace: {traceback.format_exc()}")
         return f"Error: Gemini Error: {str(e)}"
 
-async def query_model(query: str, providers: List[str], history: List[Dict[str, str]]) -> AsyncGenerator[Tuple[str, List[Dict[str, str]]], None]:
+async def query_model(query: str, providers: List[str], history: List[Dict[str, str]]) -> AsyncGenerator[Tuple[str, List[Dict[str, str]], List[Dict[str, str]], List[Dict[str, str]]], None]:
     logger.info(f"Processing query with providers: {providers}")
-    …
+    openai_response = ""
+    anthropic_response = ""
+    gemini_response = ""
+
+    # Initialize chatbot messages for each provider
+    openai_messages = []
+    anthropic_messages = []
+    gemini_messages = []
+
+    # Populate history for each chatbot
+    for msg in history:
+        openai_messages.append({"role": "user", "content": msg["user"]})
+        anthropic_messages.append({"role": "user", "content": msg["user"]})
+        gemini_messages.append({"role": "user", "content": msg["user"]})
+        if msg["bot"]:
+            # Parse the combined response to extract provider-specific responses
+            lines = msg["bot"].split("\n\n")
+            for line in lines:
+                if line.startswith("[OpenAI]:"):
+                    openai_messages.append({"role": "assistant", "content": line[len("[OpenAI]:"):].strip()})
+                elif line.startswith("[Anthropic]:"):
+                    anthropic_messages.append({"role": "assistant", "content": line[len("[Anthropic]:"):].strip()})
+                elif line.startswith("[Gemini]:"):
+                    gemini_messages.append({"role": "assistant", "content": line[len("[Gemini]:"):].strip()})
 
     # Handle OpenAI (streaming)
     if "OpenAI" in providers:
+        openai_messages.append({"role": "user", "content": query})
         async for chunk in ask_openai(query, history):
-            …
+            openai_response += chunk
             # Yield streaming updates for OpenAI
-            …
-            chatbot_messages.append({"role": "assistant", "content": msg["bot"]})
-            chatbot_messages.append({"role": "user", "content": query})
-            chatbot_messages.append({"role": "assistant", "content": streaming_response})
-            yield "", chatbot_messages, history  # Yield partial updates
-        if streaming_response.strip():
-            responses.append(f"[OpenAI]: {streaming_response}")
+            openai_messages[-1] = {"role": "assistant", "content": openai_response}
+            yield "", openai_messages, anthropic_messages, gemini_messages
+        if openai_response.strip() and not openai_response.startswith("Error:"):
+            openai_messages[-1] = {"role": "assistant", "content": openai_response}
 
     # Handle Anthropic (non-streaming)
     if "Anthropic" in providers:
-        …
+        anthropic_messages.append({"role": "user", "content": query})
+        anthropic_response = await ask_anthropic(query, history)
+        if anthropic_response.strip() and not anthropic_response.startswith("Error:"):
+            anthropic_messages.append({"role": "assistant", "content": anthropic_response})
 
     # Handle Gemini (non-streaming)
     if "Gemini" in providers:
-        …
+        gemini_messages.append({"role": "user", "content": query})
+        gemini_response = await ask_gemini(query, history)
+        if gemini_response.strip() and not gemini_response.startswith("Error:"):
+            gemini_messages.append({"role": "assistant", "content": gemini_response})
 
-    # Combine responses
+    # Combine responses for history
+    responses = []
+    if openai_response.strip() and not openai_response.startswith("Error:"):
+        responses.append(f"[OpenAI]: {openai_response}")
+    if anthropic_response.strip() and not anthropic_response.startswith("Error:"):
+        responses.append(f"[Anthropic]: {anthropic_response}")
+    if gemini_response.strip() and not gemini_response.startswith("Error:"):
+        responses.append(f"[Gemini]: {gemini_response}")
+
     combined_response = "\n\n".join(responses) if responses else "No valid responses received."
-    # Update history with the combined response
     updated_history = history + [{"user": query, "bot": combined_response}]
     logger.info(f"Updated history: {updated_history}")
 
     # Yield final response
-    …
-    for msg in updated_history:
-        chatbot_messages.append({"role": "user", "content": msg["user"]})
-        if msg["bot"]:
-            chatbot_messages.append({"role": "assistant", "content": msg["bot"]})
-    yield "", chatbot_messages, updated_history
+    yield "", openai_messages, anthropic_messages, gemini_messages
 
-async def submit_query(query: str, providers: List[str], …
+async def submit_query(query: str, providers: List[str]) -> AsyncGenerator[Tuple[str, List[Dict[str, str]], List[Dict[str, str]], List[Dict[str, str]]], None]:
     if not query.strip():
-        …
+        openai_messages = [{"role": "assistant", "content": "Please enter a query."}]
+        anthropic_messages = [{"role": "assistant", "content": "Please enter a query."}]
+        gemini_messages = [{"role": "assistant", "content": "Please enter a query."}]
+        yield "", openai_messages, anthropic_messages, gemini_messages
         return
 
     if not providers:
-        …
+        openai_messages = [{"role": "assistant", "content": "Please select at least one provider."}]
+        anthropic_messages = [{"role": "assistant", "content": "Please select at least one provider."}]
+        gemini_messages = [{"role": "assistant", "content": "Please select at least one provider."}]
+        yield "", openai_messages, anthropic_messages, gemini_messages
        return
 
-    …
+    # Initialize history
+    history = []
+
+    async for response_chunk, openai_messages, anthropic_messages, gemini_messages in query_model(query, providers, history):
+        yield "", openai_messages, anthropic_messages, gemini_messages
     # Final yield to clear the query textbox
-    yield "", …
+    yield "", openai_messages, anthropic_messages, gemini_messages
 
 # Gradio interface
 def clear_history():
-    return [], []
+    return [], [], []
 
 # Define Gradio interface
 with gr.Blocks(theme=gr.themes.Soft()) as demo:
     gr.Markdown("# Multi-Model Chat")
-    gr.Markdown("Chat with OpenAI, Anthropic, or Gemini. Select providers and …
+    gr.Markdown("Chat with OpenAI, Anthropic, or Gemini. Select providers and compare responses side by side!")
 
     providers = gr.CheckboxGroup(choices=["OpenAI", "Anthropic", "Gemini"], label="Select Providers", value=["OpenAI"])
-    history_state = gr.State(value=[])
-    chatbot = gr.Chatbot(label="Conversation", type="messages")
     query = gr.Textbox(label="Enter your query", placeholder="e.g., What is the capital of the United States?")
-    …
+
+    with gr.Row():
+        openai_chatbot = gr.Chatbot(label="OpenAI", type="messages", scale=1)
+        anthropic_chatbot = gr.Chatbot(label="Anthropic", type="messages", scale=1)
+        gemini_chatbot = gr.Chatbot(label="Gemini", type="messages", scale=1)
+
+    with gr.Row():
+        submit_button = gr.Button("Submit")
+        clear_button = gr.Button("Clear History")
 
     submit_button.click(
         fn=submit_query,
-        inputs=[query, providers …
-        outputs=[query, …
+        inputs=[query, providers],
+        outputs=[query, openai_chatbot, anthropic_chatbot, gemini_chatbot]
     )
     clear_button.click(
         fn=clear_history,
         inputs=[],
-        outputs=[ …
+        outputs=[openai_chatbot, anthropic_chatbot, gemini_chatbot]
     )
 
 # Launch the Gradio app
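The core pattern this commit introduces is a single async generator event handler driving several gr.Chatbot components at once: every yield supplies one value per declared output, so streamed OpenAI chunks can repaint one panel while the other panels hold their last state. Below is a minimal self-contained sketch of that pattern; fake_stream, respond, and the two-panel layout are illustrative stand-ins, not code from this commit:

import asyncio
import gradio as gr

async def fake_stream(text: str):
    # Stand-in for a provider's streaming API (illustrative only).
    for word in text.split():
        await asyncio.sleep(0.05)
        yield word + " "

async def respond(query: str):
    # One messages-format transcript per panel.
    left = [{"role": "user", "content": query},
            {"role": "assistant", "content": ""}]   # placeholder to stream into
    right = [{"role": "user", "content": query}]
    partial = ""
    async for chunk in fake_stream(f"You said: {query}"):
        partial += chunk
        left[-1] = {"role": "assistant", "content": partial}
        # Each yield supplies one value per output: textbox, left panel, right panel.
        yield "", left, right
    right.append({"role": "assistant", "content": partial.strip().upper()})
    yield "", left, right

with gr.Blocks() as demo:
    query = gr.Textbox(label="Query")
    with gr.Row():
        left_bot = gr.Chatbot(label="Streaming panel", type="messages")
        right_bot = gr.Chatbot(label="Final-only panel", type="messages")
    query.submit(fn=respond, inputs=[query], outputs=[query, left_bot, right_bot])

demo.launch()

One design note: the sketch seeds an empty assistant message before streaming so the in-place left[-1] update never touches the user turn, whereas the committed query_model assigns openai_messages[-1] directly after appending the user message, which replaces that user message on the first chunk.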
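History also round-trips through plain text in this design: each turn's bot field stores blocks like "[OpenAI]: ..." joined by "\n\n", and the next call to query_model rebuilds per-provider transcripts by prefix-matching each block. A standalone sketch of that parsing step, assuming the same three prefixes (the helper name split_combined_response is hypothetical, not from the commit):

# Illustrative sketch of the prefix-based parsing used to rebuild
# per-provider history; split_combined_response is not from the commit.
from typing import Dict, List

PREFIXES = ("[OpenAI]:", "[Anthropic]:", "[Gemini]:")

def split_combined_response(bot_text: str) -> Dict[str, List[str]]:
    """Map each provider prefix to the response blocks stored under it."""
    out: Dict[str, List[str]] = {p: [] for p in PREFIXES}
    for block in bot_text.split("\n\n"):
        for prefix in PREFIXES:
            if block.startswith(prefix):
                out[prefix].append(block[len(prefix):].strip())
                break
    return out

combined = "[OpenAI]: Paris.\n\n[Gemini]: The capital of France is Paris."
print(split_combined_response(combined))
# {'[OpenAI]:': ['Paris.'], '[Anthropic]:': [], '[Gemini]:': ['The capital of France is Paris.']}

A consequence of splitting on "\n\n": a provider response that itself contains a blank line is broken into several blocks, and only the first keeps its prefix, so the continuation blocks are dropped when history is rebuilt on the next turn.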