Ganesh Chintalapati committed
Commit 76b6f27 · Parent(s): bc2875c

Convert to Gradio
Files changed:
- Procfile.txt +1 -0
- app.py +44 -44
- requirements.txt +4 -0
Procfile.txt ADDED
@@ -0,0 +1 @@
+web: uvicorn app:app --host 0.0.0.0 --port $PORT
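Note that this Procfile entry still starts app:app with uvicorn, while the app.py change below replaces the FastAPI app with a Gradio Blocks app. As a sketch only (not part of this commit), Gradio's mount_gradio_app could keep a "uvicorn app:app" entry working by serving the Blocks UI from a FastAPI shell:

# Sketch, not part of this commit: mount a Gradio Blocks UI on a FastAPI
# app so that "uvicorn app:app" remains a valid entry point.
from fastapi import FastAPI
import gradio as gr

api = FastAPI()

with gr.Blocks() as demo:
    gr.Markdown("# Multi-Model Selector")

# mount_gradio_app returns the FastAPI app with the UI served at `path`.
app = gr.mount_gradio_app(api, demo, path="/")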
app.py CHANGED
@@ -1,55 +1,25 @@
 import os
 import logging
-from fastapi import FastAPI, Request
-from pydantic import BaseModel
-from fastapi.responses import JSONResponse
 import httpx
 from dotenv import load_dotenv
+import gradio as gr
 
 # Configure logging
 logging.basicConfig(level=logging.INFO)
 logger = logging.getLogger(__name__)
 
-# Load environment variables
+# Load environment variables
 load_dotenv()
 logger.info("Environment variables loaded from .env file")
 logger.info(f"OPENAI_API_KEY present: {'OPENAI_API_KEY' in os.environ}")
 logger.info(f"ANTHROPIC_API_KEY present: {'ANTHROPIC_API_KEY' in os.environ}")
 logger.info(f"GEMINI_API_KEY present: {'GEMINI_API_KEY' in os.environ}")
 
-app = FastAPI()
-
-class QueryRequest(BaseModel):
-    query: str
-    provider: str  # e.g., "openai", "anthropic", "gemini"
-
-@app.get("/")
-def read_root():
-    return {"message": "Multi-Model Selector is running"}
-
-@app.post("/ask")
-async def ask_question(request: QueryRequest):
-    query = request.query
-    provider = request.provider.lower()
-
-    try:
-        if provider == "openai":
-            return await ask_openai(query)
-        elif provider == "anthropic":
-            return await ask_anthropic(query)
-        elif provider == "gemini":
-            return await ask_gemini(query)
-        else:
-            return JSONResponse(content={"error": f"Unknown provider: {provider}"}, status_code=400)
-    except Exception as e:
-        logger.error(f"Error processing request: {str(e)}")
-        return JSONResponse(content={"error": str(e)}, status_code=500)
-
 async def ask_openai(query: str):
     openai_api_key = os.getenv("OPENAI_API_KEY")
     if not openai_api_key:
         logger.error("OpenAI API key not provided")
-        return
+        return "Error: OpenAI API key not provided."
 
     headers = {
         "Authorization": f"Bearer {openai_api_key}",
@@ -67,20 +37,20 @@ async def ask_openai(query: str):
 
         response.raise_for_status()
         answer = response.json()['choices'][0]['message']['content']
-        return
+        return answer
 
     except httpx.HTTPStatusError as e:
         logger.error(f"OpenAI HTTP Status Error: {e.response.status_code}, {e.response.text}")
-        return
+        return f"Error: OpenAI HTTP Status Error: {e.response.status_code}, {e.response.text}"
     except Exception as e:
         logger.error(f"OpenAI Error: {str(e)}")
-        return
+        return f"Error: OpenAI Error: {str(e)}"
 
 async def ask_anthropic(query: str):
     anthropic_api_key = os.getenv("ANTHROPIC_API_KEY")
     if not anthropic_api_key:
         logger.error("Anthropic API key not provided")
-        return
+        return "Error: Anthropic API key not provided."
 
     headers = {
         "x-api-key": anthropic_api_key,
@@ -102,20 +72,20 @@ async def ask_anthropic(query: str):
         response.raise_for_status()
         logger.info(f"Anthropic response: {response.json()}")
         answer = response.json()['content'][0]['text']
-        return
+        return answer
 
     except httpx.HTTPStatusError as e:
         logger.error(f"Anthropic HTTP Status Error: {e.response.status_code}, {e.response.text}")
-        return
+        return f"Error: Anthropic HTTP Status Error: {e.response.status_code}, {e.response.text}"
     except Exception as e:
         logger.error(f"Anthropic Error: {str(e)}")
-        return
+        return f"Error: Anthropic Error: {str(e)}"
 
 async def ask_gemini(query: str):
     gemini_api_key = os.getenv("GEMINI_API_KEY")
     if not gemini_api_key:
         logger.error("Gemini API key not provided")
-        return
+        return "Error: Gemini API key not provided."
 
     headers = {
         "Content-Type": "application/json"
@@ -135,11 +105,41 @@ async def ask_gemini(query: str):
 
         response.raise_for_status()
         answer = response.json()['candidates'][0]['content']['parts'][0]['text']
-        return
+        return answer
 
     except httpx.HTTPStatusError as e:
         logger.error(f"Gemini HTTP Status Error: {e.response.status_code}, {e.response.text}")
-        return
+        return f"Error: Gemini HTTP Status Error: {e.response.status_code}, {e.response.text}"
     except Exception as e:
         logger.error(f"Gemini Error: {str(e)}")
-        return
+        return f"Error: Gemini Error: {str(e)}"
+
+async def query_model(query: str, provider: str):
+    provider = provider.lower()
+    if provider == "openai":
+        return await ask_openai(query)
+    elif provider == "anthropic":
+        return await ask_anthropic(query)
+    elif provider == "gemini":
+        return await ask_gemini(query)
+    else:
+        return f"Error: Unknown provider: {provider}"
+
+# Gradio interface
+with gr.Blocks() as demo:
+    gr.Markdown("# Multi-Model Selector")
+    gr.Markdown("Select a provider and enter a query to get a response from the chosen AI model.")
+
+    provider = gr.Dropdown(choices=["OpenAI", "Anthropic", "Gemini"], label="Select Provider")
+    query = gr.Textbox(label="Enter your query", placeholder="e.g., What is the capital of the United States?")
+    submit_button = gr.Button("Submit")
+    output = gr.Textbox(label="Response", interactive=False)
+
+    submit_button.click(
+        fn=query_model,
+        inputs=[query, provider],
+        outputs=output
+    )
+
+# Launch the Gradio app
+demo.launch()
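Gradio accepts async functions as event handlers, which is what lets submit_button.click call the async query_model dispatcher directly. A minimal, self-contained sketch of the same pattern, with a hypothetical echo() handler standing in for the provider calls:

# Sketch of the async click-handler pattern used in the new app.py.
# echo() is hypothetical and stands in for the awaited provider requests.
import asyncio
import gradio as gr

async def echo(query: str, provider: str):
    await asyncio.sleep(0)  # placeholder for the awaited httpx call
    return f"[{provider}] {query}"

with gr.Blocks() as demo:
    query = gr.Textbox(label="Enter your query")
    provider = gr.Dropdown(choices=["OpenAI", "Anthropic", "Gemini"], label="Select Provider", value="OpenAI")
    output = gr.Textbox(label="Response", interactive=False)
    gr.Button("Submit").click(fn=echo, inputs=[query, provider], outputs=output)

if __name__ == "__main__":
    demo.launch()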
requirements.txt CHANGED
@@ -5,3 +5,7 @@ google-generativeai
 anthropic
 python-dotenv
 requests
+gradio
+httpx
+
+
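The added requirements are unpinned; a quick sketch to confirm the two new dependencies resolve in the Space's environment:

# Sketch: confirm the newly added packages import and report their versions.
import gradio
import httpx

print("gradio", gradio.__version__)
print("httpx", httpx.__version__)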