# NOTE(review): the following header is Hugging Face Spaces page metadata
# ("Spaces: Sleeping") captured by extraction — it is not part of the program.
import gradio as gr
import openai
import anthropic
import google.generativeai as genai
import os
import asyncio
from dotenv import load_dotenv

# Load environment variables from a local .env file, then configure each
# provider's SDK once at import time. Missing keys come back as None from
# os.getenv; the per-provider call sites surface the resulting auth errors.
load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")
anthropic_client = anthropic.Anthropic(api_key=os.getenv("ANTHROPIC_API_KEY"))
genai.configure(api_key=os.getenv("GEMINI_API_KEY"))
# Seed for every new conversation.
def initialize_chat():
    """Return a fresh message list containing only the system prompt."""
    system_message = {"role": "system", "content": "You are a helpful assistant."}
    return [system_message]
# Async functions for API calls
async def get_openai_response(messages):
    """Query OpenAI's chat completion endpoint in a worker thread.

    Args:
        messages: Full conversation as a list of {"role", "content"} dicts.

    Returns:
        A ("ChatGPT (OpenAI)", text) tuple. On any failure the text is an
        error description, so callers never see a raised exception.
    """
    try:
        # NOTE(review): this uses the pre-1.0 openai SDK surface
        # (openai.ChatCompletion) — confirm the pinned openai version is < 1.0.
        # asyncio.to_thread runs the blocking SDK call off the event loop;
        # the previous get_event_loop()/run_in_executor pattern is deprecated
        # inside coroutines on Python 3.10+.
        response = await asyncio.to_thread(
            openai.ChatCompletion.create,
            model="gpt-3.5-turbo",
            messages=messages,
            temperature=0.7,
            max_tokens=1000,
        )
        return "ChatGPT (OpenAI)", response.choices[0].message["content"]
    except Exception as e:
        return "ChatGPT (OpenAI)", f"Error: {str(e)}"
async def get_claude_response(messages):
    """Query Anthropic's Messages API in a worker thread.

    Fixes the original behavior of sending only ``messages[-1]`` (which threw
    away all conversation context): the system prompt is passed via the
    ``system=`` parameter and every user/assistant turn goes in ``messages=``,
    as the Anthropic API requires (it does not accept a "system" role turn).

    Args:
        messages: Full conversation as a list of {"role", "content"} dicts.

    Returns:
        A ("Claude (Anthropic)", text) tuple; errors are returned as text.
    """
    try:
        system_prompt = ""
        chat_turns = []
        for m in messages:
            if m["role"] == "system":
                system_prompt = m["content"]
            else:
                chat_turns.append({"role": m["role"], "content": m["content"]})
        # Blocking SDK call moved off the event loop with asyncio.to_thread
        # (the old get_event_loop()/run_in_executor pattern is deprecated).
        response = await asyncio.to_thread(
            anthropic_client.messages.create,
            model="claude-3-5-sonnet-20241022",
            max_tokens=1000,
            temperature=0.7,
            system=system_prompt,
            messages=chat_turns,
        )
        return "Claude (Anthropic)", response.content[0].text
    except Exception as e:
        return "Claude (Anthropic)", f"Error: {str(e)}"
async def get_gemini_response(messages):
    """Query Google's Gemini API in a worker thread.

    Args:
        messages: Full conversation as a list of {"role", "content"} dicts.
            NOTE(review): only the latest user message is forwarded — Gemini
            receives no multi-turn context here; consider a chat session if
            context matters.

    Returns:
        A ("Gemini (Google)", text) tuple; errors are returned as text.
    """
    try:
        model = genai.GenerativeModel("gemini-1.5-pro")
        user_message = messages[-1]["content"]
        # Run the blocking generate_content call off the event loop; the old
        # get_event_loop()/run_in_executor pattern is deprecated in coroutines.
        response = await asyncio.to_thread(
            model.generate_content,
            user_message,
            generation_config={"max_output_tokens": 1000, "temperature": 0.7},
        )
        return "Gemini (Google)", response.text
    except Exception as e:
        return "Gemini (Google)", f"Error: {str(e)}"
# Main async function to query selected models
async def query_selected_models(message, history, use_openai, use_claude, use_gemini):
    """Fan the user's message out to every selected model concurrently.

    Args:
        message: The new user query.
        history: Gradio chat history — a list of (user_text, assistant_text)
            tuples.
        use_openai / use_claude / use_gemini: Checkbox states selecting which
            providers to call.

    Returns:
        ("", updated_history) on success (empty string clears the textbox),
        or an instruction string plus the untouched history when no model
        is selected.
    """
    if not any([use_openai, use_claude, use_gemini]):
        return "Please select at least one model.", history
    # Rebuild the provider-format message list from Gradio's tuple history.
    # BUG FIX: the original enumerate/i%2 reconstruction kept only the user
    # half of even-indexed turns and the assistant half of odd-indexed turns;
    # each (user, assistant) tuple must expand to TWO messages.
    messages = initialize_chat()
    for user_text, assistant_text in history or []:
        messages.append({"role": "user", "content": user_text})
        messages.append({"role": "assistant", "content": assistant_text})
    messages.append({"role": "user", "content": message})
    # One coroutine per selected provider, run concurrently.
    tasks = []
    if use_openai:
        tasks.append(get_openai_response(messages))
    if use_claude:
        tasks.append(get_claude_response(messages))
    if use_gemini:
        tasks.append(get_gemini_response(messages))
    results = await asyncio.gather(*tasks, return_exceptions=True)
    # Format responses. Each task catches its own exceptions and returns a
    # (name, text) tuple, but guard against anything gather surfaces directly
    # (e.g. CancelledError) — the original unconditionally unpacked 2-tuples.
    parts = []
    for result in results:
        if isinstance(result, BaseException):
            parts.append(f"**Error**:\n{result}")
        else:
            model_name, response = result
            parts.append(f"**{model_name}**:\n{response}")
    history.append((message, "\n\n".join(parts).strip()))
    return "", history
# Gradio interface: checkboxes pick providers, a Chatbot shows the merged
# responses, and both the Submit button and Enter in the textbox trigger
# query_selected_models (outputs clear the textbox and update the chat).
with gr.Blocks(theme=gr.themes.Soft(), title="Multi-Model AI Selector") as demo:
    gr.Markdown(
        """
        # Multi-Model AI Chat Interface
        Select one or more models to query and enter your question below. Responses will appear in the chat window.
        """
    )
    # Model selection checkboxes (all enabled by default)
    with gr.Row():
        use_openai = gr.Checkbox(label="ChatGPT (OpenAI)", value=True)
        use_claude = gr.Checkbox(label="Claude (Anthropic)", value=True)
        use_gemini = gr.Checkbox(label="Gemini (Google)", value=True)
    # Chat interface
    chatbot = gr.Chatbot(label="Conversation", height=400)
    msg = gr.Textbox(placeholder="Type your query...", label="Your Query")
    with gr.Row():
        submit = gr.Button("Submit Query")
        clear = gr.Button("Clear Chat")
    # Bind the query function to both the button and Enter in the textbox.
    submit.click(
        fn=query_selected_models,
        inputs=[msg, chatbot, use_openai, use_claude, use_gemini],
        outputs=[msg, chatbot]
    )
    msg.submit(
        fn=query_selected_models,
        inputs=[msg, chatbot, use_openai, use_claude, use_gemini],
        outputs=[msg, chatbot]
    )
    # Clear both the textbox (None -> empty) and the chat history.
    clear.click(
        fn=lambda: (None, []),
        inputs=None,
        outputs=[msg, chatbot]
    )
# Launch the app (commented out for Hugging Face deployment)
# demo.launch()