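"""Multi-API AI Chat.

A Gradio app that sends a user question to Llama 3.2, Gemma 2, and
DeepSeek-R1 in parallel via OpenRouter, then asks a second DeepSeek-R1
client to merge the answers into a single refined response.
"""
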
import gradio as gr
import threading
import os
from openai import OpenAI
# Load API Keys from environment variables
API_KEY_LLAMA = os.getenv("OPENROUTER_API_KEY1")
API_KEY_GEMMA = os.getenv("OPENROUTER_API_KEY2")
API_KEY_DEEPSEEK1 = os.getenv("OPENROUTER_API_KEY3")
API_KEY_DEEPSEEK2 = os.getenv("OPENROUTER_API_KEY4")
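
# These keys must be set (e.g. as Hugging Face Space secrets) before the app
# starts, since the OpenAI clients below are created at import time.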
# Initialize OpenAI clients for each API key
llama_client = OpenAI(base_url="https://openrouter.ai/api/v1", api_key=API_KEY_LLAMA)
gemma_client = OpenAI(base_url="https://openrouter.ai/api/v1", api_key=API_KEY_GEMMA)
deepseek_client1 = OpenAI(base_url="https://openrouter.ai/api/v1", api_key=API_KEY_DEEPSEEK1)
deepseek_client2 = OpenAI(base_url="https://openrouter.ai/api/v1", api_key=API_KEY_DEEPSEEK2)
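
# All four clients target the same OpenRouter endpoint; separate keys are
# presumably used to spread free-tier rate limits across accounts.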
# Function to query Llama
def query_llama(user_input, results):
    try:
        response = llama_client.chat.completions.create(
            model="meta-llama/llama-3.2-3b-instruct:free",
            messages=[{"role": "user", "content": user_input}]
        )
        results["Llama"] = response.choices[0].message.content
    except Exception as e:
        results["Llama"] = f"Error: {str(e)}"

# Function to query Gemma
def query_gemma(user_input, results):
    try:
        response = gemma_client.chat.completions.create(
            model="google/gemma-2-9b-it:free",
            messages=[{"role": "user", "content": user_input}]
        )
        results["Gemma"] = response.choices[0].message.content
    except Exception as e:
        results["Gemma"] = f"Error: {str(e)}"

# Function to query DeepSeek-1
def query_deepseek_1(user_input, results):
    try:
        response = deepseek_client1.chat.completions.create(
            model="deepseek/deepseek-r1:free",
            messages=[{"role": "user", "content": user_input}]
        )
        results["DeepSeek1"] = response.choices[0].message.content
    except Exception as e:
        results["DeepSeek1"] = f"Error: {str(e)}"

# Function to refine responses using DeepSeek-2
def refine_response(user_input):
    try:
        results = {}
        # Query all three models in parallel; each worker writes into the
        # shared dict under its own key, so no lock is needed.
        threads = [
            threading.Thread(target=query_llama, args=(user_input, results)),
            threading.Thread(target=query_gemma, args=(user_input, results)),
            threading.Thread(target=query_deepseek_1, args=(user_input, results)),
        ]
        for thread in threads:
            thread.start()
        # Wait for all threads to finish
        for thread in threads:
            thread.join()
        # Keep only responses that are non-empty and not error placeholders
        valid_responses = {
            k: v.strip() for k, v in results.items()
            if v and not v.startswith("Error:")
        }
        if not valid_responses:
            return "All model calls failed:\n" + "\n".join(
                f"{k}: {v}" for k, v in results.items()
            )
        # With fewer than two valid answers there is nothing to merge,
        # so return them as-is instead of refining.
        if len(valid_responses) < 2:
            return "\n\n".join(f"{k} Response: {v}" for k, v in valid_responses.items())
        # Build the refinement prompt from the valid responses only
        improvement_prompt = f"""
Here are AI-generated responses:
Response 1 (Llama): {valid_responses.get("Llama", "N/A")}
Response 2 (Gemma): {valid_responses.get("Gemma", "N/A")}
Response 3 (DeepSeek1): {valid_responses.get("DeepSeek1", "N/A")}
Please improve the clarity and coherence, and generate a refined response.
"""
        # Send to DeepSeek-2 for final refinement
        try:
            refined_completion = deepseek_client2.chat.completions.create(
                model="deepseek/deepseek-r1:free",
                messages=[{"role": "user", "content": improvement_prompt}]
            )
            refined_content = refined_completion.choices[0].message.content
            if refined_content and refined_content.strip():
                return refined_content
            # Refinement came back empty: fall back to the raw responses
            return "Refinement failed, returning best responses:\n\n" + "\n\n".join(
                f"{k} Response: {v}" for k, v in valid_responses.items()
            )
        except Exception as e:
            return f"Error refining response: {str(e)}"
    except Exception as e:
        return f"Unexpected error: {str(e)}"

# Gradio Interface
interface = gr.Interface(
    fn=refine_response,
    inputs=gr.Textbox(label="Enter your question"),
    outputs=gr.Textbox(label="AI Response"),
    title="Multi-API AI Chat",
    description="Ask a question and receive a response refined by multiple AI models.",
)

# Run the app
if __name__ == "__main__":
    interface.launch(debug=True)
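
# Optional smoke test from another process once the app is running, assuming
# Gradio's default API endpoint for an Interface ("/predict"):
#   from gradio_client import Client
#   client = Client("http://127.0.0.1:7860/")
#   print(client.predict("What is retrieval-augmented generation?", api_name="/predict"))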