# Hugging Face Spaces status banner captured by the scraper (not Python code):
# Spaces: Sleeping
import os
import logging
import httpx
from dotenv import load_dotenv
import gradio as gr
# Configure logging | |
logging.basicConfig(level=logging.INFO) | |
logger = logging.getLogger(__name__) | |
# Load environment variables | |
load_dotenv() | |
logger.info("Environment variables loaded from .env file") | |
logger.info(f"OPENAI_API_KEY present: {'OPENAI_API_KEY' in os.environ}") | |
logger.info(f"ANTHROPIC_API_KEY present: {'ANTHROPIC_API_KEY' in os.environ}") | |
logger.info(f"GEMINI_API_KEY present: {'GEMINI_API_KEY' in os.environ}") | |
async def ask_openai(query: str) -> str:
    """Send *query* to the OpenAI chat-completions API and return the reply text.

    Failures (missing key, HTTP error, network error) are returned as
    "Error: ..." strings instead of raising, so the Gradio UI can display
    them directly in the output textbox.
    """
    openai_api_key = os.getenv("OPENAI_API_KEY")
    if not openai_api_key:
        logger.error("OpenAI API key not provided")
        return "Error: OpenAI API key not provided."
    headers = {
        "Authorization": f"Bearer {openai_api_key}",
        "Content-Type": "application/json",
    }
    payload = {
        "model": "gpt-3.5-turbo",
        "messages": [{"role": "user", "content": query}],
    }
    try:
        # httpx's default timeout is only 5 s, which chat completions routinely
        # exceed; give the model a full minute before giving up.
        async with httpx.AsyncClient(timeout=60.0) as client:
            response = await client.post(
                "https://api.openai.com/v1/chat/completions",
                headers=headers,
                json=payload,
            )
            response.raise_for_status()
            # First choice's message content is the assistant reply.
            return response.json()['choices'][0]['message']['content']
    except httpx.HTTPStatusError as e:
        logger.error(f"OpenAI HTTP Status Error: {e.response.status_code}, {e.response.text}")
        return f"Error: OpenAI HTTP Status Error: {e.response.status_code}, {e.response.text}"
    except Exception as e:
        logger.error(f"OpenAI Error: {str(e)}")
        return f"Error: OpenAI Error: {str(e)}"
async def ask_anthropic(query: str) -> str:
    """Send *query* to the Anthropic Messages API and return the reply text.

    Failures (missing key, HTTP error, network error) are returned as
    "Error: ..." strings instead of raising, so the Gradio UI can display
    them directly in the output textbox.
    """
    anthropic_api_key = os.getenv("ANTHROPIC_API_KEY")
    if not anthropic_api_key:
        logger.error("Anthropic API key not provided")
        return "Error: Anthropic API key not provided."
    headers = {
        "x-api-key": anthropic_api_key,
        "anthropic-version": "2023-06-01",
        "Content-Type": "application/json",
    }
    payload = {
        "model": "claude-3-5-sonnet-20241022",
        "max_tokens": 1024,  # Messages API requires an explicit cap
        "messages": [{"role": "user", "content": query}],
    }
    try:
        # httpx's default timeout is only 5 s, which model responses routinely
        # exceed; give the model a full minute before giving up.
        async with httpx.AsyncClient(timeout=60.0) as client:
            logger.info(f"Sending Anthropic request: {payload}")
            response = await client.post(
                "https://api.anthropic.com/v1/messages",
                headers=headers,
                json=payload,
            )
            response.raise_for_status()
            # NOTE(review): logs the full response body at INFO — consider
            # demoting to DEBUG if responses may contain sensitive text.
            logger.info(f"Anthropic response: {response.json()}")
            # First content block carries the assistant's text reply.
            return response.json()['content'][0]['text']
    except httpx.HTTPStatusError as e:
        logger.error(f"Anthropic HTTP Status Error: {e.response.status_code}, {e.response.text}")
        return f"Error: Anthropic HTTP Status Error: {e.response.status_code}, {e.response.text}"
    except Exception as e:
        logger.error(f"Anthropic Error: {str(e)}")
        return f"Error: Anthropic Error: {str(e)}"
async def ask_gemini(query: str) -> str:
    """Send *query* to the Google Gemini generateContent API and return the reply text.

    Failures (missing key, HTTP error, network error) are returned as
    "Error: ..." strings instead of raising, so the Gradio UI can display
    them directly in the output textbox.
    """
    gemini_api_key = os.getenv("GEMINI_API_KEY")
    if not gemini_api_key:
        logger.error("Gemini API key not provided")
        return "Error: Gemini API key not provided."
    headers = {
        "Content-Type": "application/json",
        # Send the key as a header rather than a "?key=..." query parameter:
        # httpx logs full request URLs at INFO level (enabled in this file),
        # so a key in the URL would leak the secret into the logs.
        "x-goog-api-key": gemini_api_key,
    }
    payload = {
        "contents": [{"parts": [{"text": query}]}]
    }
    try:
        # httpx's default timeout is only 5 s, which model responses routinely
        # exceed; give the model a full minute before giving up.
        async with httpx.AsyncClient(timeout=60.0) as client:
            response = await client.post(
                "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:generateContent",
                headers=headers,
                json=payload,
            )
            response.raise_for_status()
            # First candidate's first content part is the generated text.
            return response.json()['candidates'][0]['content']['parts'][0]['text']
    except httpx.HTTPStatusError as e:
        logger.error(f"Gemini HTTP Status Error: {e.response.status_code}, {e.response.text}")
        return f"Error: Gemini HTTP Status Error: {e.response.status_code}, {e.response.text}"
    except Exception as e:
        logger.error(f"Gemini Error: {str(e)}")
        return f"Error: Gemini Error: {str(e)}"
async def query_model(query: str, provider: str):
    """Route *query* to the backend selected by *provider* (case-insensitive).

    Unknown providers produce an "Error: ..." string rather than raising.
    """
    normalized = provider.lower()
    if normalized == "openai":
        return await ask_openai(query)
    if normalized == "anthropic":
        return await ask_anthropic(query)
    if normalized == "gemini":
        return await ask_gemini(query)
    return f"Error: Unknown provider: {normalized}"
# Gradio front-end: provider dropdown + query textbox wired to query_model.
# `demo` is the conventional top-level name Hugging Face Spaces expects.
with gr.Blocks() as demo:
    gr.Markdown("# Multi-Model Selector")
    gr.Markdown("Select a provider and enter a query to get a response from the chosen AI model.")
    provider_choice = gr.Dropdown(
        choices=["OpenAI", "Anthropic", "Gemini"],
        label="Select Provider",
    )
    question_box = gr.Textbox(
        label="Enter your query",
        placeholder="e.g., What is the capital of the United States?",
    )
    ask_button = gr.Button("Submit")
    answer_box = gr.Textbox(label="Response", interactive=False)
    # Gradio accepts async callables directly as event handlers.
    ask_button.click(
        fn=query_model,
        inputs=[question_box, provider_choice],
        outputs=answer_box,
    )

# Start the web server.
demo.launch()