# Athspi-promax / app.py
import os

import gradio as gr
import httpx
from dotenv import load_dotenv
from huggingface_hub import InferenceClient
from openai import OpenAI

# Load API keys from .env file
load_dotenv()
HF_API_KEY = os.getenv("HF_API_KEY")
OPENROUTER_API_KEY = os.getenv("OPENROUTER_API_KEY")
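# A minimal .env for local runs might look like the following (placeholder
# values, not real keys); on Hugging Face Spaces these would typically be set
# as repository secrets instead:
#
#   HF_API_KEY=hf_xxxxxxxxxxxxxxxxxxxx
#   OPENROUTER_API_KEY=sk-or-xxxxxxxxxxxxxxxx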

# Initialize the Hugging Face client (used for Gemma)
hf_client = InferenceClient(
    provider="hf-inference",
    api_key=HF_API_KEY,
)

# Initialize the OpenRouter client (used for DeepSeek)
openrouter_client = OpenAI(
    base_url="https://openrouter.ai/api/v1",
    api_key=OPENROUTER_API_KEY,
)
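# Note: OpenRouter exposes an OpenAI-compatible API, which is why the stock
# OpenAI client works here with only base_url and api_key swapped out.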

# Query Gemma-2-27B through the Hugging Face Inference API
def query_gemma(user_input):
    try:
        messages = [{"role": "user", "content": user_input}]
        completion = hf_client.chat.completions.create(
            model="google/gemma-2-27b-it",
            messages=messages,
            max_tokens=500,
        )
        return completion.choices[0].message.content
    except Exception as e:
        return f"Error querying Gemma: {str(e)}"

# Query DeepSeek-R1 through OpenRouter
def query_deepseek(user_input):
    try:
        completion = openrouter_client.chat.completions.create(
            model="deepseek/deepseek-r1:free",
            messages=[{"role": "user", "content": user_input}],
        )
        return completion.choices[0].message.content
    except Exception as e:
        return f"Error querying DeepSeek: {str(e)}"

# Refine the two model responses into a single improved answer using DeepSeek
def refine_response(user_input):
    try:
        # Get responses from both models
        gemma_response = query_gemma(user_input)
        deepseek_response = query_deepseek(user_input)
        # If either query failed, fall back to the other model's answer
        if gemma_response.startswith("Error querying"):
            return f"Only DeepSeek Response:\n{deepseek_response}"
        if deepseek_response.startswith("Error querying"):
            return f"Only Gemma Response:\n{gemma_response}"
        # Prepare the refinement prompt
        improvement_prompt = f"""
Here are two AI-generated responses:
Response 1 (Gemma): {gemma_response}
Response 2 (DeepSeek): {deepseek_response}
Please combine the best elements of both, improve clarity, and provide a final refined answer.
"""
        # Call OpenRouter directly over HTTP so the raw body can be logged below
        response = httpx.post(
            "https://openrouter.ai/api/v1/chat/completions",
            headers={
                "Authorization": f"Bearer {OPENROUTER_API_KEY}",
                "Content-Type": "application/json",
            },
            json={
                "model": "deepseek/deepseek-r1:free",
                "messages": [{"role": "user", "content": improvement_prompt}],
            },
            timeout=60.0,  # httpx defaults to 5 s, too short for LLM completions
        )
        # Print raw response for debugging
        print("OpenRouter Response:", response.text)
        # Fail loudly on HTTP errors, then parse the JSON body
        response.raise_for_status()
        response_json = response.json()
        return response_json["choices"][0]["message"]["content"]
    except Exception as e:
        return f"Error refining response: {str(e)}"

# Create the Gradio interface
iface = gr.Interface(
    fn=refine_response,
    inputs=gr.Textbox(lines=2, placeholder="Ask me anything..."),
    outputs="text",
    title="AI Response Enhancer",
    description="Get responses from both Gemma and DeepSeek, then receive an improved final answer.",
)

# Launch the app
iface.launch()