Update app.py
app.py CHANGED
@@ -1,4 +1,6 @@
 import os
+import json
+import httpx
 import gradio as gr
 from huggingface_hub import InferenceClient
 from openai import OpenAI
@@ -21,46 +23,76 @@ openrouter_client = OpenAI(
     api_key=OPENROUTER_API_KEY
 )

-# Function to query Gemma
+# Function to query Gemma-2-27B (Hugging Face)
 def query_gemma(user_input):
-
-
-
-
-
-
-
+    try:
+        messages = [{"role": "user", "content": user_input}]
+        completion = hf_client.chat.completions.create(
+            model="google/gemma-2-27b-it",
+            messages=messages,
+            max_tokens=500
+        )
+        return completion.choices[0].message["content"]
+    except Exception as e:
+        return f"Error querying Gemma: {str(e)}"

-# Function to query DeepSeek
+# Function to query DeepSeek-R1 (OpenRouter)
 def query_deepseek(user_input):
-
-
-
-
-
+    try:
+        completion = openrouter_client.chat.completions.create(
+            model="deepseek/deepseek-r1:free",
+            messages=[{"role": "user", "content": user_input}]
+        )
+        return completion.choices[0].message.content
+    except Exception as e:
+        return f"Error querying DeepSeek: {str(e)}"

-# Function to refine
+# Function to refine responses using DeepSeek
 def refine_response(user_input):
-
-
-
+    try:
+        # Get responses from both models
+        gemma_response = query_gemma(user_input)
+        deepseek_response = query_deepseek(user_input)

-
-
-
+        # If either response failed, return the available one
+        if "Error" in gemma_response:
+            return f"Only DeepSeek Response:\n{deepseek_response}"
+        if "Error" in deepseek_response:
+            return f"Only Gemma Response:\n{gemma_response}"

-
-
+        # Prepare refinement prompt
+        improvement_prompt = f"""
+        Here are two AI-generated responses:

-
-
-
-
-
-
-
-
-
+        Response 1 (Gemma): {gemma_response}
+        Response 2 (DeepSeek): {deepseek_response}
+
+        Please combine the best elements of both, improve clarity, and provide a final refined answer.
+        """
+
+        # Send request to OpenRouter
+        response = httpx.post(
+            "https://openrouter.ai/api/v1/chat/completions",
+            headers={
+                "Authorization": f"Bearer {OPENROUTER_API_KEY}",
+                "Content-Type": "application/json"
+            },
+            json={
+                "model": "deepseek/deepseek-r1:free",
+                "messages": [{"role": "user", "content": improvement_prompt}]
+            }
+        )
+
+        # Print raw response for debugging
+        print("OpenRouter Response:", response.text)
+
+        # Check if response is valid JSON
+        response_json = response.json()
+
+        return response_json["choices"][0]["message"]["content"]
+
+    except Exception as e:
+        return f"Error refining response: {str(e)}"

 # Create Gradio interface
 iface = gr.Interface(
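
Note: the hunk above uses hf_client and openrouter_client, whose construction sits outside the changed lines and is not shown in this diff. A minimal sketch of how they are presumably initialized, based only on the visible context (openrouter_client = OpenAI(...), api_key=OPENROUTER_API_KEY) and the imports; the environment-variable names are assumptions, not taken from the commit:

# Sketch only: client setup assumed from the hunk context; env var names are assumptions.
import os
from huggingface_hub import InferenceClient
from openai import OpenAI

HF_TOKEN = os.getenv("HF_TOKEN")                      # assumed variable name
OPENROUTER_API_KEY = os.getenv("OPENROUTER_API_KEY")  # assumed variable name

hf_client = InferenceClient(token=HF_TOKEN)           # serves google/gemma-2-27b-it
openrouter_client = OpenAI(
    base_url="https://openrouter.ai/api/v1",          # OpenRouter's OpenAI-compatible endpoint
    api_key=OPENROUTER_API_KEY
)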
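
As a design note, refine_response builds the refinement request by hand with httpx even though an OpenAI-compatible client for OpenRouter is already configured and used in query_deepseek. The same call could go through that client, which handles the URL, headers, and JSON decoding. A sketch under that assumption (refine_via_client is a hypothetical helper, not part of the commit):

# Hypothetical alternative to the httpx.post(...) block in refine_response:
# reuse openrouter_client instead of hand-building headers and parsing JSON.
def refine_via_client(improvement_prompt):
    completion = openrouter_client.chat.completions.create(
        model="deepseek/deepseek-r1:free",
        messages=[{"role": "user", "content": improvement_prompt}]
    )
    return completion.choices[0].message.content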
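
The diff cuts off at iface = gr.Interface(, so the interface arguments and launch call are not visible. One plausible wiring, assuming refine_response is the entry point and simple text components; every argument value below is an assumption:

# Sketch only: the actual fn/inputs/outputs/title are not shown in the diff.
iface = gr.Interface(
    fn=refine_response,                          # assumed entry point
    inputs=gr.Textbox(label="Your question"),    # assumed input component
    outputs=gr.Textbox(label="Refined answer"),  # assumed output component
    title="Gemma + DeepSeek"                     # assumed title
)

if __name__ == "__main__":
    iface.launch()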