Update app.py

app.py CHANGED
@@ -12,11 +12,8 @@ load_dotenv()
 HF_API_KEY = os.getenv("HF_API_KEY")
 OPENROUTER_API_KEY = os.getenv("OPENROUTER_API_KEY")
 
-# Initialize Hugging Face
-hf_client = InferenceClient(
-    provider="hf-inference",
-    api_key=HF_API_KEY
-)
+# Initialize Hugging Face Clients
+hf_client = InferenceClient(provider="hf-inference", api_key=HF_API_KEY)
 
 # Initialize OpenRouter DeepSeek Client
 openrouter_client = OpenAI(
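The consolidation works because `InferenceClient` takes the model per request rather than at construction, so one client can serve every hosted model. A minimal sketch of the pattern (the model ID and prompt are illustrative; requires `python-dotenv` and a recent `huggingface_hub`):

```python
import os

from dotenv import load_dotenv
from huggingface_hub import InferenceClient

load_dotenv()

# One client for all hosted models; the model is selected per call.
client = InferenceClient(provider="hf-inference", api_key=os.getenv("HF_API_KEY"))

completion = client.chat.completions.create(
    model="google/gemma-2-27b-it",  # illustrative: any chat-capable model ID works
    messages=[{"role": "user", "content": "Say hello in one sentence."}],
    max_tokens=50,
)
print(completion.choices[0].message["content"])
```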
@@ -24,20 +21,20 @@ openrouter_client = OpenAI(
     api_key=OPENROUTER_API_KEY
 )
 
-# …
-def …
+# Query Hugging Face Models
+def query_huggingface_model(user_input, model_name):
     try:
         messages = [{"role": "user", "content": user_input}]
         completion = hf_client.chat.completions.create(
-            model=…
+            model=model_name,
             messages=messages,
             max_tokens=500
         )
         return completion.choices[0].message["content"]
     except Exception as e:
-        return f"Error querying …
+        return f"Error querying {model_name}: {str(e)}"
 
-# …
+# Query DeepSeek-R1 (OpenRouter)
 def query_deepseek(user_input):
     try:
         completion = openrouter_client.chat.completions.create(
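With the model parameterized, both Hugging Face models now share one helper. A hypothetical smoke test (the prompt is made up; it assumes the functions above are defined and both API keys are set):

```python
if __name__ == "__main__":
    prompt = "Explain retry loops in one paragraph."

    # Same helper, different hosted models.
    print(query_huggingface_model(prompt, "google/gemma-2-27b-it"))
    print(query_huggingface_model(prompt, "meta-llama/Llama-3.3-70B-Instruct"))

    # DeepSeek is reached through OpenRouter instead.
    print(query_deepseek(prompt))
```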
@@ -51,24 +48,31 @@ def refine_response(user_input):
 # Function to refine responses using DeepSeek
 def refine_response(user_input):
     try:
-        # Get responses from …
-        gemma_response = …
+        # Get responses from all three models
+        gemma_response = query_huggingface_model(user_input, "google/gemma-2-27b-it")
+        llama_response = query_huggingface_model(user_input, "meta-llama/Llama-3.3-70B-Instruct")
         deepseek_response = query_deepseek(user_input)
 
-        # If …
-        …
-        …
-        …
-        …
+        # If any response is missing, return the available ones
+        responses = {
+            "Gemma": gemma_response.strip(),
+            "Llama": llama_response.strip(),
+            "DeepSeek": deepseek_response.strip()
+        }
+        valid_responses = {k: v for k, v in responses.items() if v}
+
+        if len(valid_responses) < 2:
+            return "\n\n".join(f"{k} Response: {v}" for k, v in valid_responses.items())
 
         # Prepare refinement prompt
         improvement_prompt = f"""
-        Here are …
+        Here are three AI-generated responses:
 
         Response 1 (Gemma): {gemma_response}
-        Response 2 (…
+        Response 2 (Llama 3.3): {llama_response}
+        Response 3 (DeepSeek): {deepseek_response}
 
-        Please combine the best elements of …
+        Please combine the best elements of all three, improve clarity, and provide a final refined answer.
         """
 
         # Retry loop for OpenRouter API
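The new guard is the behavioral core of this hunk: empty answers are dropped, and refinement only runs when at least two usable answers remain. A standalone trace with dummy strings and no API calls (`pick_refinable` is a hypothetical name mirroring the logic above):

```python
def pick_refinable(gemma, llama, deepseek):
    """Mirror of the guard in refine_response: keep non-empty answers only."""
    responses = {
        "Gemma": gemma.strip(),
        "Llama": llama.strip(),
        "DeepSeek": deepseek.strip(),
    }
    valid = {k: v for k, v in responses.items() if v}
    if len(valid) < 2:
        # Not enough material to combine; return what we have as plain text.
        return "\n\n".join(f"{k} Response: {v}" for k, v in valid.items())
    return valid  # enough answers to build the refinement prompt


print(pick_refinable("A solid answer.", "", "Another take."))  # dict with two entries
print(pick_refinable("Only one answer.", "   ", ""))           # falls back to plain text
```

Note that error strings returned by the query helpers are non-empty, so they pass this filter; only blank responses are screened out.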
@@ -85,31 +89,24 @@ def refine_response(user_input):
                     "model": "deepseek/deepseek-r1:free",
                     "messages": [{"role": "user", "content": improvement_prompt}]
                 },
-                timeout=30
+                timeout=30
             )
 
-            # Debugging: Print response
             print(f"Attempt {attempt + 1}: OpenRouter Response:", response.text)
-
-            # Check if the response is valid JSON
             response_json = response.json()
-
-            # Extract the refined response
             refined_content = response_json["choices"][0]["message"]["content"]
 
-            # If DeepSeek gives an empty response, retry
             if refined_content.strip():
                 return refined_content
             else:
                 print("Received empty response from DeepSeek, retrying...")
-                time.sleep(2)
+                time.sleep(2)
 
         except Exception as e:
             print(f"Error on attempt {attempt + 1}: {str(e)}")
-            time.sleep(2)
+            time.sleep(2)
 
-
-        return f"Refinement failed. Here’s the best available response:\n\n{max(gemma_response, deepseek_response, key=len)}"
+        return f"Refinement failed. Here’s the best available response:\n\n{max(valid_responses.values(), key=len)}"
 
     except Exception as e:
         return f"Error refining response: {str(e)}"
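The retry loop is a standard bounded-attempts pattern: try, pause after a failure or an empty result, and fall back once attempts run out. A generic sketch of the same shape (the function name, attempt count, and pause are illustrative):

```python
import time

def with_retries(call, attempts=3, pause=2.0):
    """Run `call` up to `attempts` times, pausing after failures or empty results."""
    for attempt in range(attempts):
        try:
            result = call()
            if result and result.strip():
                return result
            print(f"Attempt {attempt + 1}: empty result, retrying...")
        except Exception as e:
            print(f"Attempt {attempt + 1} failed: {e}")
        time.sleep(pause)
    return None  # the caller supplies a fallback, as refine_response does


# Usage sketch: with_retries(lambda: query_deepseek("hello"))
```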
@@ -119,8 +116,8 @@ iface = gr.Interface(
     fn=refine_response,
     inputs=gr.Textbox(lines=2, placeholder="Ask me anything..."),
     outputs="text",
-    title="AI …
-    description="Get responses from …
+    title="Multi-Model AI Enhancer",
+    description="Get responses from Gemma, Llama 3.3, and DeepSeek. Then receive an improved final answer."
 )
 
 # Launch app
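The hunk ends at the `# Launch app` comment, so the launch call itself is not shown in this commit. On Spaces it is conventionally just the following (a sketch assuming the `iface` defined above, not confirmed by the diff):

```python
if __name__ == "__main__":
    iface.launch()
```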