import gradio as gr
import threading
import os
from openai import OpenAI

# Load API keys from environment variables
API_KEY_LLAMA = os.getenv("OPENROUTER_API_KEY1")
API_KEY_GEMMA = os.getenv("OPENROUTER_API_KEY2")
API_KEY_DEEPSEEK1 = os.getenv("OPENROUTER_API_KEY3")
API_KEY_DEEPSEEK2 = os.getenv("OPENROUTER_API_KEY4")

# Initialize an OpenAI client (pointed at OpenRouter) for each API key
llama_client = OpenAI(base_url="https://openrouter.ai/api/v1", api_key=API_KEY_LLAMA)
gemma_client = OpenAI(base_url="https://openrouter.ai/api/v1", api_key=API_KEY_GEMMA)
deepseek_client1 = OpenAI(base_url="https://openrouter.ai/api/v1", api_key=API_KEY_DEEPSEEK1)
deepseek_client2 = OpenAI(base_url="https://openrouter.ai/api/v1", api_key=API_KEY_DEEPSEEK2)
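
# Sanity check (added sketch, not in the original Space): a missing key only
# fails at request time with an opaque auth error, so warn about it up front.
for _name, _key in [
    ("OPENROUTER_API_KEY1", API_KEY_LLAMA),
    ("OPENROUTER_API_KEY2", API_KEY_GEMMA),
    ("OPENROUTER_API_KEY3", API_KEY_DEEPSEEK1),
    ("OPENROUTER_API_KEY4", API_KEY_DEEPSEEK2),
]:
    if not _key:
        print(f"Warning: environment variable {_name} is not set.")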

# Function to query Llama; stores the result under the "Llama" key
def query_llama(user_input, results):
    try:
        response = llama_client.chat.completions.create(
            model="meta-llama/llama-3.2-3b-instruct:free",
            messages=[{"role": "user", "content": user_input}]
        )
        results["Llama"] = response.choices[0].message.content
    except Exception as e:
        results["Llama"] = f"Error: {str(e)}"

# Function to query Gemma; stores the result under the "Gemma" key
def query_gemma(user_input, results):
    try:
        response = gemma_client.chat.completions.create(
            model="google/gemma-2-9b-it:free",
            messages=[{"role": "user", "content": user_input}]
        )
        results["Gemma"] = response.choices[0].message.content
    except Exception as e:
        results["Gemma"] = f"Error: {str(e)}"

# Function to query DeepSeek-1; stores the result under the "DeepSeek1" key
def query_deepseek_1(user_input, results):
    try:
        response = deepseek_client1.chat.completions.create(
            model="deepseek/deepseek-r1:free",
            messages=[{"role": "user", "content": user_input}]
        )
        results["DeepSeek1"] = response.choices[0].message.content
    except Exception as e:
        results["DeepSeek1"] = f"Error: {str(e)}"

# Fan the question out to all three models, then refine with DeepSeek-2
def refine_response(user_input):
    try:
        results = {}

        # Query the three models in parallel threads
        threads = [
            threading.Thread(target=query_llama, args=(user_input, results)),
            threading.Thread(target=query_gemma, args=(user_input, results)),
            threading.Thread(target=query_deepseek_1, args=(user_input, results))
        ]

        # Start all threads
        for thread in threads:
            thread.start()

        # Wait for all threads to finish
        for thread in threads:
            thread.join()
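        # Note (addition): each worker writes to its own dict key, and a single
        # dict assignment is atomic under CPython's GIL, so no lock is needed.
        # If an upstream API can hang, thread.join(timeout=...) plus an
        # is_alive() check would bound the wait; omitted to keep the flow simple.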

        # Keep only successful responses (workers prefix failures with "Error:";
        # a plain substring test would wrongly drop answers that mention "Error")
        valid_responses = {k: v.strip() for k, v in results.items() if v and not v.startswith("Error:")}
        if not valid_responses:
            return "All model queries failed:\n\n" + "\n\n".join(f"{k}: {v}" for k, v in results.items())
        if len(valid_responses) < 2:
            # Not enough material to refine; return what we have
            return "\n\n".join(f"{k} Response: {v}" for k, v in valid_responses.items())

        # Build the refinement prompt from the successful responses only,
        # so error strings never leak into the refiner's input
        improvement_prompt = f"""
Here are AI-generated responses:
Response 1 (Llama): {valid_responses.get("Llama", "N/A")}
Response 2 (Gemma): {valid_responses.get("Gemma", "N/A")}
Response 3 (DeepSeek1): {valid_responses.get("DeepSeek1", "N/A")}
Please improve the clarity and coherence, and generate a refined response.
"""

        # Send to DeepSeek-2 for final refinement
        try:
            refined_completion = deepseek_client2.chat.completions.create(
                model="deepseek/deepseek-r1:free",
                messages=[{"role": "user", "content": improvement_prompt}]
            )
            refined_content = refined_completion.choices[0].message.content
            if refined_content and refined_content.strip():
                return refined_content
            # Refiner returned nothing; fall back to the raw responses instead
            # of a bare failure message
            return "\n\n".join(f"{k} Response: {v}" for k, v in valid_responses.items())
        except Exception as e:
            return f"Error refining response: {str(e)}"
    except Exception as e:
        return f"Unexpected error: {str(e)}"

# Gradio interface
interface = gr.Interface(
    fn=refine_response,
    inputs=gr.Textbox(label="Enter your question"),
    outputs=gr.Textbox(label="AI Response"),
    title="Multi-API AI Chat",
    description="Ask a question and receive a response refined by multiple AI models.",
)

# Run the app
if __name__ == "__main__":
    interface.launch(debug=True)
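
# Usage note (addition): to run locally, set the four OpenRouter keys first,
# e.g. in a shell:
#   export OPENROUTER_API_KEY1=... OPENROUTER_API_KEY2=...
#   export OPENROUTER_API_KEY3=... OPENROUTER_API_KEY4=...
#   python app.py   # filename assumed; Hugging Face Spaces expects app.py
# On a Hugging Face Space, set the same variable names as repository secrets.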