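"""Multi-Model AI Enhancer.

Sends the user's prompt to Llama, Gemma, and DeepSeek in parallel through
OpenRouter (one API key per model), then asks a second DeepSeek instance to
merge the three answers into a single refined reply, served via Gradio.
"""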
import os
import threading
import gradio as gr
from openai import OpenAI
from dotenv import load_dotenv

# Load API keys from .env file
load_dotenv(override=True)
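
# Expected .env layout (the values below are placeholders, not real keys):
#   OPENROUTER_API_KEY1=sk-or-...   # Llama
#   OPENROUTER_API_KEY2=sk-or-...   # Gemma
#   OPENROUTER_API_KEY3=sk-or-...   # DeepSeek (first pass)
#   OPENROUTER_API_KEY4=sk-or-...   # DeepSeek (refinement)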

# Retrieve API keys (default to "MISSING_KEY" for easier debugging)
API_KEY_LLAMA = os.getenv("OPENROUTER_API_KEY1", "MISSING_KEY")
API_KEY_GEMMA = os.getenv("OPENROUTER_API_KEY2", "MISSING_KEY")
API_KEY_DEEPSEEK1 = os.getenv("OPENROUTER_API_KEY3", "MISSING_KEY")
API_KEY_DEEPSEEK2 = os.getenv("OPENROUTER_API_KEY4", "MISSING_KEY")

# Debugging: Check API key values
print(f"Llama API Key: {API_KEY_LLAMA[:5]}...")  # Show only first 5 characters
print(f"Gemma API Key: {API_KEY_GEMMA[:5]}...")
print(f"DeepSeek API Key 1: {API_KEY_DEEPSEEK1[:5]}...")
print(f"DeepSeek API Key 2: {API_KEY_DEEPSEEK2[:5]}...")

# Ensure all API keys are loaded
if "MISSING_KEY" in [API_KEY_LLAMA, API_KEY_GEMMA, API_KEY_DEEPSEEK1, API_KEY_DEEPSEEK2]:
    raise ValueError("❌ ERROR: One or more API keys are missing from the .env file!")

# Create OpenAI clients for each model
llama_client = OpenAI(base_url="https://openrouter.ai/api/v1", api_key=API_KEY_LLAMA)
gemma_client = OpenAI(base_url="https://openrouter.ai/api/v1", api_key=API_KEY_GEMMA)
deepseek_client1 = OpenAI(base_url="https://openrouter.ai/api/v1", api_key=API_KEY_DEEPSEEK1)
deepseek_client2 = OpenAI(base_url="https://openrouter.ai/api/v1", api_key=API_KEY_DEEPSEEK2)
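
# Note: the four clients differ only in api_key. A more compact equivalent
# (a sketch; the explicit names above are kept for readability):
#   BASE_URL = "https://openrouter.ai/api/v1"
#   clients = {name: OpenAI(base_url=BASE_URL, api_key=key)
#              for name, key in [("llama", API_KEY_LLAMA), ("gemma", API_KEY_GEMMA),
#                                ("deepseek1", API_KEY_DEEPSEEK1), ("deepseek2", API_KEY_DEEPSEEK2)]}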

# Function to query Llama model
def query_llama(user_input, results):
    try:
        completion = llama_client.chat.completions.create(
            model="meta-llama/llama-3.2-3b-instruct:free",
            messages=[{"role": "user", "content": user_input}]
        )
        results["Llama"] = completion.choices[0].message.content
    except Exception as e:
        results["Llama"] = f"Error: {str(e)}"

# Function to query Gemma model
def query_gemma(user_input, results):
    try:
        completion = gemma_client.chat.completions.create(
            model="google/gemma-2-9b-it:free",
            messages=[{"role": "user", "content": user_input}]
        )
        results["Gemma"] = completion.choices[0].message.content
    except Exception as e:
        results["Gemma"] = f"Error: {str(e)}"

# Function to query DeepSeek (First Query)
def query_deepseek_1(user_input, results):
    try:
        completion = deepseek_client1.chat.completions.create(
            model="deepseek/deepseek-r1:free",
            messages=[{"role": "user", "content": user_input}]
        )
        results["DeepSeek1"] = completion.choices[0].message.content
    except Exception as e:
        results["DeepSeek1"] = f"Error: {str(e)}"

# Function to refine responses using DeepSeek (Final API)
def refine_response(user_input):
    try:
        results = {}

        # Create threads for parallel API calls
        threads = [
            threading.Thread(target=query_llama, args=(user_input, results)),
            threading.Thread(target=query_gemma, args=(user_input, results)),
            threading.Thread(target=query_deepseek_1, args=(user_input, results))
        ]

        # Start all threads
        for thread in threads:
            thread.start()

        # Wait for all threads to complete
        for thread in threads:
            thread.join()
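
        # Each worker writes a distinct key and CPython dict assignment is
        # atomic, so sharing `results` across threads without a lock is safe
        # here. join() has no timeout, though; a stalled API call would block
        # this request (thread.join(timeout=...) is one way to bound it).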

        # Keep only non-empty responses that did not fail; matching the
        # "Error:" prefix (rather than any substring) avoids discarding
        # valid answers that merely contain the word "Error"
        valid_responses = {k: v.strip() for k, v in results.items() if v and not v.startswith("Error:")}

        if len(valid_responses) < 2:
            if not valid_responses:
                return "All model queries failed; please check the API keys and try again."
            return "\n\n".join(f"{k} Response: {v}" for k, v in valid_responses.items())

        # Prepare refinement prompt
        improvement_prompt = f"""
        Here are three AI-generated responses:

        Response 1 (Llama): {valid_responses.get("Llama", "N/A")}
        Response 2 (Gemma): {valid_responses.get("Gemma", "N/A")}
        Response 3 (DeepSeek1): {valid_responses.get("DeepSeek1", "N/A")}

        Please combine the best elements of all three, improve clarity, and provide a final refined answer.
        """

        # Query DeepSeek-R1 for refinement using API key 4
        try:
            refined_completion = deepseek_client2.chat.completions.create(
                model="deepseek/deepseek-r1:free",
                messages=[{"role": "user", "content": improvement_prompt}]
            )
            refined_content = refined_completion.choices[0].message.content or ""
            if refined_content.strip():
                return refined_content
            # Refiner returned nothing; fall back to the unrefined answers
            return "\n\n".join(f"{k} Response: {v}" for k, v in valid_responses.items())
        except Exception as e:
            return f"Error refining response: {str(e)}"

    except Exception as e:
        return f"Unexpected error: {str(e)}"

# Create Gradio interface
iface = gr.Interface(
    fn=refine_response, 
    inputs=gr.Textbox(lines=2, placeholder="Ask me anything..."), 
    outputs="text",
    title="Multi-Model AI Enhancer (4 API Keys)",
    description="Llama (API 1) + Gemma (API 2) + DeepSeek (API 3) β†’ Final Refinement with DeepSeek (API 4)"
)

# Launch app
iface.launch()
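
# To run locally (assuming this file is saved as app.py, a hypothetical name):
#   python app.py
# Gradio prints a local URL; open it in a browser to use the interface.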