Athspi committed on
Commit
0cc372f
·
verified ·
1 Parent(s): a63c2da

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +65 -69
app.py CHANGED
@@ -1,68 +1,77 @@
1
  import os
2
- import time
3
- import json
4
  import gradio as gr
5
  from openai import OpenAI
6
  from dotenv import load_dotenv
7
 
8
# Load API keys from .env file (python-dotenv populates os.environ at startup)
load_dotenv()
OPENROUTER_API_KEY = os.getenv("OPENROUTER_API_KEY")  # OpenRouter API Key; None if unset — failure surfaces only at request time

# Initialize OpenRouter Client
# Single shared client: every model query below goes through this one
# OpenAI-compatible endpoint with the same key.
openrouter_client = OpenAI(
    base_url="https://openrouter.ai/api/v1",
    api_key=OPENROUTER_API_KEY
)
17
-
18
# Query OpenRouter (Gemma-2-9B)
def query_gemma_openrouter(user_input):
    """Return Gemma-2-9B's reply to user_input, or a readable error string."""
    chat_history = [{"role": "user", "content": user_input}]
    try:
        # Any failure (network, auth, malformed response) degrades to a string
        # rather than raising, so the caller never has to handle exceptions.
        response = openrouter_client.chat.completions.create(
            model="google/gemma-2-9b-it:free",
            messages=chat_history,
        )
        return response.choices[0].message.content
    except Exception as err:
        return f"Error querying Gemma-2-9B: {str(err)}"
28
 
29
# Query OpenRouter (Llama-3.2-3B)
def query_llama_openrouter(user_input):
    """Return Llama-3.2-3B's reply to user_input, or a readable error string."""
    chat_history = [{"role": "user", "content": user_input}]
    try:
        # Failures degrade to an error string instead of propagating.
        response = openrouter_client.chat.completions.create(
            model="meta-llama/llama-3.2-3b-instruct:free",
            messages=chat_history,
        )
        return response.choices[0].message.content
    except Exception as err:
        return f"Error querying Llama-3.2-3B: {str(err)}"
39
 
40
# Query OpenRouter (DeepSeek-R1)
def query_deepseek_openrouter(user_input):
    """Return DeepSeek-R1's reply to user_input, or a readable error string."""
    chat_history = [{"role": "user", "content": user_input}]
    try:
        # Failures degrade to an error string instead of propagating.
        response = openrouter_client.chat.completions.create(
            model="deepseek/deepseek-r1:free",
            messages=chat_history,
        )
        return response.choices[0].message.content
    except Exception as err:
        return f"Error querying DeepSeek-R1: {str(err)}"
50
 
51
- # Function to refine responses using DeepSeek-R1
52
  def refine_response(user_input):
53
  try:
54
- # Get responses from all three models
55
- gemma_response = query_gemma_openrouter(user_input)
56
- llama_response = query_llama_openrouter(user_input)
57
- deepseek_response = query_deepseek_openrouter(user_input)
58
-
59
- # If any response is missing, return the available ones
60
- responses = {
61
- "Gemma": gemma_response.strip(),
62
- "Llama": llama_response.strip(),
63
- "DeepSeek-R1": deepseek_response.strip()
64
- }
65
- valid_responses = {k: v for k, v in responses.items() if v}
 
 
 
 
 
 
 
66
 
67
  if len(valid_responses) < 2:
68
  return "\n\n".join(f"{k} Response: {v}" for k, v in valid_responses.items())
@@ -71,47 +80,34 @@ def refine_response(user_input):
71
  improvement_prompt = f"""
72
  Here are three AI-generated responses:
73
 
74
- Response 1 (Gemma): {gemma_response}
75
- Response 2 (Llama 3.2): {llama_response}
76
- Response 3 (DeepSeek-R1): {deepseek_response}
77
 
78
  Please combine the best elements of all three, improve clarity, and provide a final refined answer.
79
  """
80
 
81
- # Retry loop for DeepSeek-R1 refinement
82
- max_retries = 3
83
- for attempt in range(max_retries):
84
- try:
85
- messages = [{"role": "user", "content": improvement_prompt}]
86
- refined_completion = openrouter_client.chat.completions.create(
87
- model="deepseek/deepseek-r1:free",
88
- messages=messages
89
- )
90
-
91
- refined_content = refined_completion.choices[0].message.content
92
-
93
- if refined_content.strip():
94
- return refined_content
95
- else:
96
- print("Received empty response from DeepSeek-R1, retrying...")
97
- time.sleep(2)
98
-
99
- except Exception as e:
100
- print(f"Error on attempt {attempt + 1}: {str(e)}")
101
- time.sleep(2)
102
-
103
- return f"Refinement failed. Here’s the best available response:\n\n{max(valid_responses.values(), key=len)}"
104
 
105
  except Exception as e:
106
- return f"Error refining response: {str(e)}"
107
 
108
# Create Gradio interface
# Single-textbox UI: refine_response performs the multi-model fan-out and
# the final merge, and its string result is rendered as plain text.
iface = gr.Interface(
    fn=refine_response,
    inputs=gr.Textbox(lines=2, placeholder="Ask me anything..."),
    outputs="text",
    title="Multi-Model AI Enhancer",
    description="Get responses from Gemma, Llama-3.2, and DeepSeek-R1. Then receive an improved final answer."
)
116
 
117
  # Launch app
 
1
  import os
2
+ import threading
 
3
  import gradio as gr
4
  from openai import OpenAI
5
  from dotenv import load_dotenv
6
 
7
# Load API keys from .env file
load_dotenv()
# Four separate OpenRouter keys, one per request stream — presumably to
# spread free-tier rate limits across accounts; TODO confirm.
API_KEY_LLAMA = os.getenv("OPENROUTER_API_KEY1")  # Llama API Key
API_KEY_GEMMA = os.getenv("OPENROUTER_API_KEY2")  # Gemma API Key
API_KEY_DEEPSEEK1 = os.getenv("OPENROUTER_API_KEY3")  # DeepSeek First Query
API_KEY_DEEPSEEK2 = os.getenv("OPENROUTER_API_KEY4")  # DeepSeek Final Refinement

# Create OpenAI Clients for each model
# All four clients target the same OpenRouter endpoint; only the key differs.
llama_client = OpenAI(base_url="https://openrouter.ai/api/v1", api_key=API_KEY_LLAMA)
gemma_client = OpenAI(base_url="https://openrouter.ai/api/v1", api_key=API_KEY_GEMMA)
deepseek_client1 = OpenAI(base_url="https://openrouter.ai/api/v1", api_key=API_KEY_DEEPSEEK1)
deepseek_client2 = OpenAI(base_url="https://openrouter.ai/api/v1", api_key=API_KEY_DEEPSEEK2)
19
+
20
# Thread worker: query Llama-3.2-3B (API key 1) and deposit the outcome.
def query_llama(user_input, results):
    """Store Llama's reply — or an "Error: ..." string — under results["Llama"]."""
    conversation = [{"role": "user", "content": user_input}]
    try:
        reply = llama_client.chat.completions.create(
            model="meta-llama/llama-3.2-3b-instruct:free",
            messages=conversation,
        )
        results["Llama"] = reply.choices[0].message.content
    except Exception as err:
        # Record the failure in-band so the joining thread can inspect it.
        results["Llama"] = f"Error: {str(err)}"
30
 
31
# Thread worker: query Gemma-2-9B (API key 2) and deposit the outcome.
def query_gemma(user_input, results):
    """Store Gemma's reply — or an "Error: ..." string — under results["Gemma"]."""
    conversation = [{"role": "user", "content": user_input}]
    try:
        reply = gemma_client.chat.completions.create(
            model="google/gemma-2-9b-it:free",
            messages=conversation,
        )
        results["Gemma"] = reply.choices[0].message.content
    except Exception as err:
        # Record the failure in-band so the joining thread can inspect it.
        results["Gemma"] = f"Error: {str(err)}"
41
 
42
# Thread worker: query DeepSeek-R1 (API key 3) for an additional answer.
def query_deepseek_1(user_input, results):
    """Store DeepSeek's reply — or an "Error: ..." string — under results["DeepSeek1"]."""
    conversation = [{"role": "user", "content": user_input}]
    try:
        reply = deepseek_client1.chat.completions.create(
            model="deepseek/deepseek-r1:free",
            messages=conversation,
        )
        results["DeepSeek1"] = reply.choices[0].message.content
    except Exception as err:
        # Record the failure in-band so the joining thread can inspect it.
        results["DeepSeek1"] = f"Error: {str(err)}"
52
 
53
# Function to refine responses using DeepSeek-R1 (Final API)
def refine_response(user_input):
    """Query Llama, Gemma and DeepSeek-R1 concurrently, then merge their answers.

    Returns the DeepSeek-refined answer when at least two models succeed;
    otherwise falls back to the raw successful response(s), and never raises —
    every failure path yields a human-readable string for the Gradio UI.
    """
    try:
        results = {}

        # One thread per model. Each worker writes exactly one distinct key
        # into `results` (or an "Error: ..." string on failure), and the dict
        # is only read after every thread has been joined.
        threads = [
            threading.Thread(target=query_llama, args=(user_input, results)),
            threading.Thread(target=query_gemma, args=(user_input, results)),
            threading.Thread(target=query_deepseek_1, args=(user_input, results)),
        ]
        for thread in threads:
            thread.start()
        for thread in threads:
            thread.join()

        # Keep only non-empty successes. The workers prefix failures with
        # exactly "Error:", so match that prefix instead of scanning the whole
        # text — a legitimate answer may itself contain the word "Error".
        valid_responses = {
            k: v.strip()
            for k, v in results.items()
            if v and v.strip() and not v.startswith("Error:")
        }

        # No usable answer at all: say so explicitly instead of returning "".
        if not valid_responses:
            return "All model queries failed. Please try again later."

        # With a single usable answer there is nothing to merge — return it.
        if len(valid_responses) < 2:
            return "\n\n".join(f"{k} Response: {v}" for k, v in valid_responses.items())

        improvement_prompt = f"""
Here are three AI-generated responses:

Response 1 (Llama): {results.get("Llama", "N/A")}
Response 2 (Gemma): {results.get("Gemma", "N/A")}
Response 3 (DeepSeek1): {results.get("DeepSeek1", "N/A")}

Please combine the best elements of all three, improve clarity, and provide a final refined answer.
"""

        # Query DeepSeek-R1 for refinement using API key 4.
        try:
            refined_completion = deepseek_client2.chat.completions.create(
                model="deepseek/deepseek-r1:free",
                messages=[{"role": "user", "content": improvement_prompt}]
            )
            refined_content = refined_completion.choices[0].message.content
            if refined_content and refined_content.strip():
                return refined_content
            # Empty refinement: actually return the best (longest) raw answer
            # rather than a bare failure notice.
            return max(valid_responses.values(), key=len)
        except Exception as e:
            return f"Error refining response: {str(e)}"

    except Exception as e:
        # Last-resort guard so the UI always receives a string.
        return f"Unexpected error: {str(e)}"
103
 
104
# Create Gradio interface
# A single textbox feeds refine_response; its string result renders as text.
question_box = gr.Textbox(lines=2, placeholder="Ask me anything...")
iface = gr.Interface(
    fn=refine_response,
    inputs=question_box,
    outputs="text",
    title="Multi-Model AI Enhancer (4 API Keys)",
    description="Llama (API 1) + Gemma (API 2) + DeepSeek (API 3) Final Refinement with DeepSeek (API 4)"
)
112
 
113
  # Launch app