import gradio as gr
import openai
import anthropic
import google.generativeai as genai
import os
import asyncio
from dotenv import load_dotenv
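
# Assumed dependencies (a sketch, not pinned by this file):
#   pip install gradio "openai>=1.0" anthropic google-generativeai python-dotenv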

# Load API keys from the environment (see the example .env layout below)
load_dotenv()
openai_client = openai.OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
anthropic_client = anthropic.Anthropic(api_key=os.getenv("ANTHROPIC_API_KEY"))
genai.configure(api_key=os.getenv("GEMINI_API_KEY"))
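
# Example .env file (an assumed layout; the variable names must match the
# os.getenv calls above):
#   OPENAI_API_KEY=sk-...
#   ANTHROPIC_API_KEY=sk-ant-...
#   GEMINI_API_KEY=...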

# Initialize conversation history
def initialize_chat():
    return [{"role": "system", "content": "You are a helpful assistant."}]

# Async wrappers: each blocking SDK call runs in a worker thread via
# asyncio.to_thread so the selected providers can be queried concurrently.
async def get_openai_response(messages):
    try:
        response = await asyncio.to_thread(
            openai_client.chat.completions.create,
            model="gpt-3.5-turbo",
            messages=messages,
            temperature=0.7,
            max_tokens=1000
        )
        return "ChatGPT (OpenAI)", response.choices[0].message.content
    except Exception as e:
        return "ChatGPT (OpenAI)", f"Error: {e}"

async def get_claude_response(messages):
    try:
        # Anthropic takes the system prompt as a separate parameter and the
        # remaining turns as alternating user/assistant messages, so pass the
        # full conversation rather than only the latest user message.
        response = await asyncio.to_thread(
            anthropic_client.messages.create,
            model="claude-3-5-sonnet-20241022",
            max_tokens=1000,
            temperature=0.7,
            system=messages[0]["content"],
            messages=messages[1:]
        )
        return "Claude (Anthropic)", response.content[0].text
    except Exception as e:
        return "Claude (Anthropic)", f"Error: {e}"

async def get_gemini_response(messages):
    try:
        # NOTE: only the latest user message is sent here; this call is
        # stateless and does not see earlier turns of the conversation.
        model = genai.GenerativeModel("gemini-1.5-pro")
        user_message = messages[-1]["content"]
        response = await asyncio.to_thread(
            model.generate_content,
            user_message,
            generation_config={"max_output_tokens": 1000, "temperature": 0.7}
        )
        return "Gemini (Google)", response.text
    except Exception as e:
        return "Gemini (Google)", f"Error: {e}"

# Main entry point: query the selected models concurrently and append the
# combined responses to the chat history.
async def query_selected_models(message, history, use_openai, use_claude, use_gemini):
    if not any([use_openai, use_claude, use_gemini]):
        # Surface the warning in the textbox and leave the chat unchanged
        return "Please select at least one model.", history

    # Rebuild the message list from the chat history: each history entry is
    # a (user, assistant) tuple and expands to two messages.
    messages = initialize_chat()
    for user_msg, assistant_msg in history or []:
        messages.append({"role": "user", "content": user_msg})
        messages.append({"role": "assistant", "content": assistant_msg})

    # Append the new user message
    messages.append({"role": "user", "content": message})

    # Create tasks for selected models
    tasks = []
    if use_openai:
        tasks.append(get_openai_response(messages))
    if use_claude:
        tasks.append(get_claude_response(messages))
    if use_gemini:
        tasks.append(get_gemini_response(messages))

    # Run the selected API calls concurrently. Each helper catches its own
    # exceptions and returns an error string, so plain gather is safe here;
    # return_exceptions=True would break the (name, text) unpacking below.
    responses = await asyncio.gather(*tasks)

    # Format responses
    response_text = ""
    for model_name, response in responses:
        response_text += f"**{model_name}**:\n{response}\n\n"

    # Update history
    history.append((message, response_text.strip()))

    return "", history

# Gradio interface
with gr.Blocks(theme=gr.themes.Soft(), title="Multi-Model AI Selector") as demo:
    gr.Markdown(
        """
        # Multi-Model AI Chat Interface
        Select one or more models to query and enter your question below. Responses will appear in the chat window.
        """
    )
    
    # Model selection checkboxes
    with gr.Row():
        use_openai = gr.Checkbox(label="ChatGPT (OpenAI)", value=True)
        use_claude = gr.Checkbox(label="Claude (Anthropic)", value=True)
        use_gemini = gr.Checkbox(label="Gemini (Google)", value=True)

    # Chat interface
    chatbot = gr.Chatbot(label="Conversation", height=400)
    msg = gr.Textbox(placeholder="Type your query...", label="Your Query")
    with gr.Row():
        submit = gr.Button("Submit Query")
        clear = gr.Button("Clear Chat")

    # Bind query function to submit button and textbox (Enter key)
    submit.click(
        fn=query_selected_models,
        inputs=[msg, chatbot, use_openai, use_claude, use_gemini],
        outputs=[msg, chatbot]
    )
    msg.submit(
        fn=query_selected_models,
        inputs=[msg, chatbot, use_openai, use_claude, use_gemini],
        outputs=[msg, chatbot]
    )

    # Clear the textbox and the chat history (return "" rather than None so
    # the textbox is reliably emptied across Gradio versions)
    clear.click(
        fn=lambda: ("", []),
        inputs=None,
        outputs=[msg, chatbot]
    )

# Launch the app (commented out for Hugging Face deployment)
# demo.launch()
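
# For local testing, an entry-point guard like this can be used (an assumed
# addition; the hosting platform is presumed to import and serve `demo`
# itself, in which case this block never runs):
if __name__ == "__main__":
    demo.launch()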