Athspi committed
Commit bc0925b · verified · 1 Parent(s): ca2eb2a

Update app.py

Files changed (1)
  1. app.py +36 -49
app.py CHANGED
@@ -1,73 +1,59 @@
-import os
-import threading
 import gradio as gr
+import threading
+import os
 from openai import OpenAI
-from dotenv import load_dotenv
-
-# Load API keys from .env file
-load_dotenv(override=True)

-# Retrieve API keys (Default to "MISSING_KEY" for debugging)
-API_KEY_LLAMA = os.getenv("OPENROUTER_API_KEY1", "MISSING_KEY")
-API_KEY_GEMMA = os.getenv("OPENROUTER_API_KEY2", "MISSING_KEY")
-API_KEY_DEEPSEEK1 = os.getenv("OPENROUTER_API_KEY3", "MISSING_KEY")
-API_KEY_DEEPSEEK2 = os.getenv("OPENROUTER_API_KEY4", "MISSING_KEY")
+# Load API Keys from environment variables
+API_KEY_LLAMA = os.getenv("OPENROUTER_API_KEY1")
+API_KEY_GEMMA = os.getenv("OPENROUTER_API_KEY2")
+API_KEY_DEEPSEEK1 = os.getenv("OPENROUTER_API_KEY3")
+API_KEY_DEEPSEEK2 = os.getenv("OPENROUTER_API_KEY4")

-# Debugging: Check API key values
-print(f"Llama API Key: {API_KEY_LLAMA[:5]}...")  # Show only first 5 characters
-print(f"Gemma API Key: {API_KEY_GEMMA[:5]}...")
-print(f"DeepSeek API Key 1: {API_KEY_DEEPSEEK1[:5]}...")
-print(f"DeepSeek API Key 2: {API_KEY_DEEPSEEK2[:5]}...")
-
-# Ensure all API keys are loaded
-if "MISSING_KEY" in [API_KEY_LLAMA, API_KEY_GEMMA, API_KEY_DEEPSEEK1, API_KEY_DEEPSEEK2]:
-    raise ValueError("❌ ERROR: One or more API keys are missing from the .env file!")
-
-# Create OpenAI clients for each model
+# Initialize OpenAI clients for each API key
 llama_client = OpenAI(base_url="https://openrouter.ai/api/v1", api_key=API_KEY_LLAMA)
 gemma_client = OpenAI(base_url="https://openrouter.ai/api/v1", api_key=API_KEY_GEMMA)
 deepseek_client1 = OpenAI(base_url="https://openrouter.ai/api/v1", api_key=API_KEY_DEEPSEEK1)
 deepseek_client2 = OpenAI(base_url="https://openrouter.ai/api/v1", api_key=API_KEY_DEEPSEEK2)

-# Function to query Llama model
+# Function to query Llama
 def query_llama(user_input, results):
     try:
-        completion = llama_client.chat.completions.create(
+        response = llama_client.chat.completions.create(
             model="meta-llama/llama-3.2-3b-instruct:free",
             messages=[{"role": "user", "content": user_input}]
         )
-        results["Llama"] = completion.choices[0].message.content
+        results["Llama"] = response.choices[0].message.content
     except Exception as e:
         results["Llama"] = f"Error: {str(e)}"

-# Function to query Gemma model
+# Function to query Gemma
 def query_gemma(user_input, results):
     try:
-        completion = gemma_client.chat.completions.create(
+        response = gemma_client.chat.completions.create(
             model="google/gemma-2-9b-it:free",
             messages=[{"role": "user", "content": user_input}]
         )
-        results["Gemma"] = completion.choices[0].message.content
+        results["Gemma"] = response.choices[0].message.content
     except Exception as e:
         results["Gemma"] = f"Error: {str(e)}"

-# Function to query DeepSeek (First Query)
+# Function to query DeepSeek-1
 def query_deepseek_1(user_input, results):
     try:
-        completion = deepseek_client1.chat.completions.create(
+        response = deepseek_client1.chat.completions.create(
             model="deepseek/deepseek-r1:free",
             messages=[{"role": "user", "content": user_input}]
         )
-        results["DeepSeek1"] = completion.choices[0].message.content
+        results["DeepSeek1"] = response.choices[0].message.content
     except Exception as e:
         results["DeepSeek1"] = f"Error: {str(e)}"

-# Function to refine responses using DeepSeek (Final API)
+# Function to refine responses using DeepSeek-2
 def refine_response(user_input):
     try:
         results = {}

-        # Create threads for parallel API calls
+        # Start threads for parallel API calls
         threads = [
             threading.Thread(target=query_llama, args=(user_input, results)),
             threading.Thread(target=query_gemma, args=(user_input, results)),
@@ -78,33 +64,33 @@ def refine_response(user_input):
         for thread in threads:
             thread.start()

-        # Wait for all threads to complete
+        # Wait for all threads to finish
         for thread in threads:
             thread.join()

-        # Ensure all responses are received
+        # Filter valid responses
         valid_responses = {k: v.strip() for k, v in results.items() if v and "Error" not in v}
-
         if len(valid_responses) < 2:
             return "\n\n".join(f"{k} Response: {v}" for k, v in valid_responses.items())

-        # Prepare refinement prompt
+        # Prepare refined prompt
         improvement_prompt = f"""
-        Here are three AI-generated responses:
+        Here are AI-generated responses:

         Response 1 (Llama): {results.get("Llama", "N/A")}
         Response 2 (Gemma): {results.get("Gemma", "N/A")}
         Response 3 (DeepSeek1): {results.get("DeepSeek1", "N/A")}

-        Please combine the best elements of all three, improve clarity, and provide a final refined answer.
+        Please improve the clarity and coherence, and generate a refined response.
         """

-        # Query DeepSeek-R1 for refinement using API key 4
+        # Send to DeepSeek-2 for final refinement
         try:
             refined_completion = deepseek_client2.chat.completions.create(
                 model="deepseek/deepseek-r1:free",
                 messages=[{"role": "user", "content": improvement_prompt}]
             )
+
             refined_content = refined_completion.choices[0].message.content
             return refined_content if refined_content.strip() else "Refinement failed, returning best response."
         except Exception as e:
@@ -113,14 +99,15 @@ def refine_response(user_input):
     except Exception as e:
         return f"Unexpected error: {str(e)}"

-# Create Gradio interface
-iface = gr.Interface(
-    fn=refine_response,
-    inputs=gr.Textbox(lines=2, placeholder="Ask me anything..."),
-    outputs="text",
-    title="Multi-Model AI Enhancer (4 API Keys)",
-    description="Llama (API 1) + Gemma (API 2) + DeepSeek (API 3) → Final Refinement with DeepSeek (API 4)"
+# Gradio Interface
+interface = gr.Interface(
+    fn=refine_response,
+    inputs=gr.Textbox(label="Enter your question"),
+    outputs=gr.Textbox(label="AI Response"),
+    title="Multi-API AI Chat",
+    description="Ask a question and receive a response refined by multiple AI models.",
 )

-# Launch app
-iface.launch()
+# Run the app
+if __name__ == "__main__":
+    interface.launch(debug=True)
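
Note: with load_dotenv and the MISSING_KEY check removed, the four OPENROUTER_API_KEY* variables must already exist in the process environment (for example as Space secrets); otherwise each client is silently constructed with api_key=None and every request fails at call time. A minimal startup guard that could be re-added, assuming the same variable names as the commit, is sketched below:

import os

# Fail fast at import time instead of inside a request
# (mirrors the validation this commit removes).
REQUIRED_KEYS = [
    "OPENROUTER_API_KEY1",  # Llama
    "OPENROUTER_API_KEY2",  # Gemma
    "OPENROUTER_API_KEY3",  # DeepSeek, first pass
    "OPENROUTER_API_KEY4",  # DeepSeek, refinement pass
]
missing = [k for k in REQUIRED_KEYS if not os.getenv(k)]
if missing:
    raise ValueError(f"Missing OpenRouter API keys: {', '.join(missing)}")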
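
The fan-out in refine_response works because each worker writes a distinct key into the shared results dict, which is safe for per-key dict assignment under CPython. An equivalent shape using concurrent.futures (a hypothetical refactor, not part of this commit) would drop the manual thread bookkeeping:

from concurrent.futures import ThreadPoolExecutor

def query_all(user_input):
    # Hypothetical alternative to the threading.Thread loop above;
    # reuses the query_* workers defined in app.py.
    results = {}
    workers = [query_llama, query_gemma, query_deepseek_1]
    with ThreadPoolExecutor(max_workers=len(workers)) as pool:
        futures = [pool.submit(fn, user_input, results) for fn in workers]
        for future in futures:
            future.result()  # surface any exception a worker did not catch
    return results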