Ganesh Chintalapati committed on
Commit
56c26c5
·
1 Parent(s): c406c1a
Files changed (2) hide show
  1. app.py +39 -157
  2. requirements.txt +7 -6
app.py CHANGED
@@ -1,169 +1,51 @@
1
- import gradio as gr
2
- import openai
3
- import anthropic
4
- import google.generativeai as genai
5
- import os
6
- import asyncio
7
- import logging
8
- from dotenv import load_dotenv
9
 
10
- # Set up logging to diagnose initialization issues
11
- logging.basicConfig(level=logging.INFO)
12
- logger = logging.getLogger(__name__)
13
 
14
- # Load environment variables
15
- load_dotenv()
16
- try:
17
- openai.api_key = os.getenv("OPENAI_API_KEY")
18
- anthropic_client = anthropic.Anthropic(api_key=os.getenv("ANTHROPIC_API_KEY"))
19
- genai.configure(api_key=os.getenv("GEMINI_API_KEY"))
20
- logger.info("Environment variables loaded successfully")
21
- except Exception as e:
22
- logger.error(f"Error loading environment variables: {str(e)}")
23
- raise
24
-
25
- # Initialize conversation history
26
- def initialize_chat():
27
- return [{"role": "system", "content": "You are a helpful assistant."}]
28
-
29
- # Async functions for API calls
30
- async def get_openai_response(messages):
31
- try:
32
- response = await asyncio.get_event_loop().run_in_executor(
33
- None,
34
- lambda: openai.ChatCompletion.create(
35
- model="gpt-3.5-turbo",
36
- messages=messages,
37
- temperature=0.7,
38
- max_tokens=1000
39
- )
40
- )
41
- return "ChatGPT (OpenAI)", response.choices[0].message["content"]
42
- except Exception as e:
43
- logger.error(f"OpenAI error: {str(e)}")
44
- return "ChatGPT (OpenAI)", f"Error: {str(e)}"
45
 
46
- async def get_claude_response(messages):
47
- try:
48
- user_message = messages[-1]["content"]
49
- response = await asyncio.get_event_loop().run_in_executor(
50
- None,
51
- lambda: anthropic_client.messages.create(
52
- model="claude-3-5-sonnet-20241022",
53
- max_tokens=1000,
54
- temperature=0.7,
55
- messages=[{"role": "user", "content": user_message}]
56
- )
57
- )
58
- return "Claude (Anthropic)", response.content[0].text
59
- except Exception as e:
60
- logger.error(f"Claude error: {str(e)}")
61
- return "Claude (Anthropic)", f"Error: {str(e)}"
62
 
63
- async def get_gemini_response(messages):
64
- try:
65
- model = genai.GenerativeModel("gemini-1.5-pro")
66
- user_message = messages[-1]["content"]
67
- response = await asyncio.get_event_loop().run_in_executor(
68
- None,
69
- lambda: model.generate_content(
70
- user_message,
71
- generation_config={"max_output_tokens": 1000, "temperature": 0.7}
72
- )
73
- )
74
- return "Gemini (Google)", response.text
75
- except Exception as e:
76
- logger.error(f"Gemini error: {str(e)}")
77
- return "Gemini (Google)", f"Error: {str(e)}"
78
 
79
- # Main async function to query selected models
80
- async def query_selected_models(message, history, use_openai, use_claude, use_gemini):
81
- if not any([use_openai, use_claude, use_gemini]):
82
- return "Please select at least one model.", history
83
-
84
- # Initialize or retrieve conversation history
85
- if not history:
86
- messages = initialize_chat()
87
  else:
88
- messages = initialize_chat() + [
89
- {"role": "user" if i % 2 == 0 else "assistant", "content": msg[0] if i % 2 == 0 else msg[1]}
90
- for i, msg in enumerate(history)
91
- ]
92
-
93
- # Append new user message
94
- messages.append({"role": "user", "content": message})
95
-
96
- # Create tasks for selected models
97
- tasks = []
98
- if use_openai:
99
- tasks.append(get_openai_response(messages))
100
- if use_claude:
101
- tasks.append(get_claude_response(messages))
102
- if use_gemini:
103
- tasks.append(get_gemini_response(messages))
104
-
105
- # Run selected API calls concurrently
106
- try:
107
- responses = await asyncio.gather(*tasks, return_exceptions=True)
108
- except Exception as e:
109
- logger.error(f"Error in asyncio.gather: {str(e)}")
110
- return f"Error querying models: {str(e)}", history
111
 
112
- # Format responses
113
- response_text = ""
114
- for model_name, response in responses:
115
- response_text += f"**{model_name}**:\n{response}\n\n"
116
 
117
- # Update history
118
- history.append((message, response_text.strip()))
119
- logger.info("Query processed successfully")
 
120
 
121
- return "", history
 
 
 
122
 
123
- # Gradio interface
124
  try:
125
- with gr.Blocks(theme=gr.themes.Soft(), title="Multi-Model AI Selector") as demo:
126
- gr.Markdown(
127
- """
128
- # Multi-Model AI Chat Interface
129
- Select one or more models to query and enter your question below. Responses will appear in the chat window.
130
- """
131
- )
132
-
133
- # Model selection checkboxes
134
- with gr.Row():
135
- use_openai = gr.Checkbox(label="ChatGPT (OpenAI)", value=True)
136
- use_claude = gr.Checkbox(label="Claude (Anthropic)", value=True)
137
- use_gemini = gr.Checkbox(label="Gemini (Google)", value=True)
138
-
139
- # Chat interface
140
- chatbot = gr.Chatbot(label="Conversation", height=400)
141
- msg = gr.Textbox(placeholder="Type your query...", label="Your Query")
142
- with gr.Row():
143
- submit = gr.Button("Submit Query")
144
- clear = gr.Button("Clear Chat")
145
-
146
- # Bind query function to submit button and textbox (Enter key)
147
- submit.click(
148
- fn=query_selected_models,
149
- inputs=[msg, chatbot, use_openai, use_claude, use_gemini],
150
- outputs=[msg, chatbot]
151
- )
152
- msg.submit(
153
- fn=query_selected_models,
154
- inputs=[msg, chatbot, use_openai, use_claude, use_gemini],
155
- outputs=[msg, chatbot]
156
- )
157
-
158
- # Clear chat history
159
- clear.click(
160
- fn=lambda: (None, []),
161
- inputs=None,
162
- outputs=[msg, chatbot]
163
- )
164
- logger.info("Gradio interface initialized successfully")
165
  except Exception as e:
166
- logger.error(f"Error initializing Gradio interface: {str(e)}")
167
- raise
168
-
169
- # Do not include demo.launch() for Hugging Face Spaces
 
1
import os

import httpx  # async HTTP client used for the OpenAI API request
from fastapi import FastAPI, Request
from fastapi.responses import JSONResponse
from pydantic import BaseModel

# NOTE(review): "Request" is imported but not referenced below — confirm
# it is needed before removing.

app = FastAPI()
 
 
class QueryRequest(BaseModel):
    """Request body for the /ask endpoint."""

    # Free-form question to forward to the selected provider.
    query: str
    # Provider selector, e.g. "openai", "anthropic", "gemini"
    # (matched case-insensitively by the endpoint).
    provider: str
 
7
@app.get("/")
def read_root():
    """Health-check endpoint confirming the service is up."""
    return {"message": "Multi-Model Selector is running"}
8
 
9
@app.post("/ask")
async def ask_question(request: QueryRequest):
    """Route a query to the requested model provider.

    Returns the provider's answer on success, a JSON error with status 400
    for an unknown provider, or status 500 on unexpected failure.
    Anthropic and Gemini are declared but not implemented yet.
    """
    query = request.query
    provider = request.provider.lower()  # accept any casing from the client

    try:
        if provider == "openai":
            return await ask_openai(query)
        elif provider == "anthropic":
            # Placeholder — only OpenAI is wired up so far.
            return {"error": "Anthropic support not implemented yet"}
        elif provider == "gemini":
            return {"error": "Gemini support not implemented yet"}
        else:
            return JSONResponse(content={"error": f"Unknown provider: {provider}"}, status_code=400)
    except Exception as e:
        return JSONResponse(content={"error": str(e)}, status_code=500)
22
async def ask_openai(query: str):
    """Send *query* to the OpenAI chat-completions API and return the answer.

    Reads the API key from the OPENAI_API_KEY environment variable.
    Returns {"response": answer} on success, or {"error": ...} when the key
    is missing or the request fails.
    """
    openai_api_key = os.getenv("OPENAI_API_KEY")

    if not openai_api_key:
        return {"error": "API key not provided."}

    headers = {
        "Authorization": f"Bearer {openai_api_key}",
        "Content-Type": "application/json"
    }

    payload = {
        "model": "gpt-3.5-turbo",
        "messages": [{"role": "user", "content": query}]
    }

    try:
        async with httpx.AsyncClient() as client:
            response = await client.post("https://api.openai.com/v1/chat/completions", headers=headers, json=payload)

            # Raise on non-2xx so the HTTPStatusError branch below can
            # report the status code and body.
            response.raise_for_status()

            # Extract the assistant's reply from the first choice.
            answer = response.json()['choices'][0]['message']['content']
            return {"response": answer}

    except httpx.HTTPStatusError as e:
        return {"error": f"HTTP Status Error: {e.response.status_code}, {e.response.text}"}
    except Exception as e:
        return {"error": f"An error occurred: {str(e)}"}
 
 
 
requirements.txt CHANGED
@@ -1,6 +1,7 @@
1
- gradio==4.42.0
2
- openai==1.51.0
3
- anthropic==0.37.1
4
- google-generativeai==0.8.3
5
- python-dotenv==1.0.1
6
- httpx==0.27.2
 
 
1
+ fastapi
2
+ uvicorn
3
+ openai
4
+ google-generativeai
5
+ anthropic
6
+ python-dotenv
7
+ requests
8
+ httpx