Spaces:
Sleeping
Sleeping
Ganesh Chintalapati
committed on
Commit
·
7a83934
1
Parent(s):
89ac1a2
OpenAI and Gemini work
Browse files
app.py
CHANGED
@@ -24,7 +24,7 @@ async def ask_openai(query: str, history: List[Dict[str, str]]) -> AsyncGenerato
|
|
24 |
yield "Error: OpenAI API key not provided."
|
25 |
return
|
26 |
|
27 |
-
# Build message history
|
28 |
messages = []
|
29 |
for msg in history:
|
30 |
messages.append({"role": "user", "content": msg["user"]})
|
@@ -38,7 +38,7 @@ async def ask_openai(query: str, history: List[Dict[str, str]]) -> AsyncGenerato
|
|
38 |
}
|
39 |
|
40 |
payload = {
|
41 |
-
"model": "gpt-
|
42 |
"messages": messages,
|
43 |
"stream": True
|
44 |
}
|
@@ -47,11 +47,13 @@ async def ask_openai(query: str, history: List[Dict[str, str]]) -> AsyncGenerato
|
|
47 |
async with httpx.AsyncClient() as client:
|
48 |
async with client.stream("POST", "https://api.openai.com/v1/chat/completions", headers=headers, json=payload) as response:
|
49 |
response.raise_for_status()
|
|
|
50 |
async for chunk in response.aiter_text():
|
51 |
if chunk:
|
52 |
-
|
53 |
-
|
54 |
-
|
|
|
55 |
if line.startswith("data: "):
|
56 |
data = line[6:] # Remove "data: " prefix
|
57 |
if data == "[DONE]":
|
@@ -59,7 +61,7 @@ async def ask_openai(query: str, history: List[Dict[str, str]]) -> AsyncGenerato
|
|
59 |
if not data.strip():
|
60 |
continue
|
61 |
try:
|
62 |
-
json_data = json.loads(data)
|
63 |
if "choices" in json_data and json_data["choices"]:
|
64 |
delta = json_data["choices"][0].get("delta", {})
|
65 |
if "content" in delta and delta["content"] is not None:
|
@@ -72,7 +74,6 @@ async def ask_openai(query: str, history: List[Dict[str, str]]) -> AsyncGenerato
|
|
72 |
yield f"Error in stream: {str(e)}"
|
73 |
|
74 |
except httpx.HTTPStatusError as e:
|
75 |
-
# Read the response body for streaming responses
|
76 |
response_text = await e.response.aread()
|
77 |
logger.error(f"OpenAI HTTP Status Error: {e.response.status_code}, {response_text}")
|
78 |
yield f"Error: OpenAI HTTP Status Error: {e.response.status_code}, {response_text.decode('utf-8')}"
|
@@ -86,7 +87,7 @@ async def ask_anthropic(query: str, history: List[Dict[str, str]]) -> str:
|
|
86 |
logger.error("Anthropic API key not provided")
|
87 |
return "Error: Anthropic API key not provided."
|
88 |
|
89 |
-
# Build message history
|
90 |
messages = []
|
91 |
for msg in history:
|
92 |
messages.append({"role": "user", "content": msg["user"]})
|
@@ -114,7 +115,6 @@ async def ask_anthropic(query: str, history: List[Dict[str, str]]) -> str:
|
|
114 |
response.raise_for_status()
|
115 |
logger.info(f"Anthropic response: {response.json()}")
|
116 |
return response.json()['content'][0]['text']
|
117 |
-
|
118 |
except httpx.HTTPStatusError as e:
|
119 |
logger.error(f"Anthropic HTTP Status Error: {e.response.status_code}, {e.response.text}")
|
120 |
return f"Error: Anthropic HTTP Status Error: {e.response.status_code}, {e.response.text}"
|
@@ -128,7 +128,7 @@ async def ask_gemini(query: str, history: List[Dict[str, str]]) -> str:
|
|
128 |
logger.error("Gemini API key not provided")
|
129 |
return "Error: Gemini API key not provided."
|
130 |
|
131 |
-
#
|
132 |
history_text = ""
|
133 |
for msg in history:
|
134 |
history_text += f"User: {msg['user']}\nAssistant: {msg['bot']}\n" if msg["bot"] else f"User: {msg['user']}\n"
|
@@ -152,7 +152,6 @@ async def ask_gemini(query: str, history: List[Dict[str, str]]) -> str:
|
|
152 |
|
153 |
response.raise_for_status()
|
154 |
return response.json()['candidates'][0]['content']['parts'][0]['text']
|
155 |
-
|
156 |
except httpx.HTTPStatusError as e:
|
157 |
logger.error(f"Gemini HTTP Status Error: {e.response.status_code}, {e.response.text}")
|
158 |
return f"Error: Gemini HTTP Status Error: {e.response.status_code}, {e.response.text}"
|
@@ -160,58 +159,46 @@ async def ask_gemini(query: str, history: List[Dict[str, str]]) -> str:
|
|
160 |
logger.error(f"Gemini Error: {str(e)}")
|
161 |
return f"Error: Gemini Error: {str(e)}"
|
162 |
|
163 |
-
async def query_model(query: str,
|
164 |
-
|
165 |
-
|
166 |
-
|
167 |
-
|
168 |
-
|
|
|
169 |
async for chunk in ask_openai(query, history):
|
170 |
-
|
171 |
-
|
172 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
173 |
response = await ask_anthropic(query, history)
|
174 |
-
|
175 |
-
|
|
|
|
|
|
|
176 |
response = await ask_gemini(query, history)
|
177 |
-
|
178 |
-
|
179 |
-
|
180 |
-
|
181 |
-
|
182 |
-
|
183 |
-
|
184 |
-
updated_history = history + [{"user": query, "bot": response}]
|
185 |
logger.info(f"Updated history: {updated_history}")
|
186 |
-
yield response, updated_history # Final yield with updated history
|
187 |
|
188 |
-
|
189 |
-
if not query.strip():
|
190 |
-
yield "", [{"role": "assistant", "content": "Please enter a query."}], history
|
191 |
-
return
|
192 |
-
|
193 |
-
response = ""
|
194 |
-
chatbot_messages = []
|
195 |
-
for msg in history:
|
196 |
-
chatbot_messages.append({"role": "user", "content": msg["user"]})
|
197 |
-
if msg["bot"]:
|
198 |
-
chatbot_messages.append({"role": "assistant", "content": msg["bot"]})
|
199 |
-
async for response_chunk, updated_history in query_model(query, provider, history):
|
200 |
-
response += response_chunk
|
201 |
-
# Update chatbot messages for streaming
|
202 |
-
chatbot_messages = []
|
203 |
-
for msg in updated_history:
|
204 |
-
chatbot_messages.append({"role": "user", "content": msg["user"]})
|
205 |
-
if msg["bot"]:
|
206 |
-
chatbot_messages.append({"role": "assistant", "content": msg["bot"]})
|
207 |
-
if response and provider == "openai":
|
208 |
-
# For streaming, show partial response
|
209 |
-
if chatbot_messages and chatbot_messages[-1]["role"] == "user":
|
210 |
-
chatbot_messages.append({"role": "assistant", "content": response})
|
211 |
-
else:
|
212 |
-
chatbot_messages[-1] = {"role": "assistant", "content": response}
|
213 |
-
yield "", chatbot_messages, updated_history # Yield to chatbot, not query
|
214 |
-
# Final yield with complete response
|
215 |
chatbot_messages = []
|
216 |
for msg in updated_history:
|
217 |
chatbot_messages.append({"role": "user", "content": msg["user"]})
|
@@ -219,6 +206,22 @@ async def submit_query(query: str, provider: str, history: List[Dict[str, str]])
|
|
219 |
chatbot_messages.append({"role": "assistant", "content": msg["bot"]})
|
220 |
yield "", chatbot_messages, updated_history
|
221 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
222 |
# Gradio interface
|
223 |
def clear_history():
|
224 |
return [], []
|
@@ -226,9 +229,9 @@ def clear_history():
|
|
226 |
# Define Gradio interface
|
227 |
with gr.Blocks(theme=gr.themes.Soft()) as demo:
|
228 |
gr.Markdown("# Multi-Model Chat")
|
229 |
-
gr.Markdown("Chat with OpenAI, Anthropic, or Gemini. Select
|
230 |
|
231 |
-
|
232 |
history_state = gr.State(value=[])
|
233 |
chatbot = gr.Chatbot(label="Conversation", type="messages")
|
234 |
query = gr.Textbox(label="Enter your query", placeholder="e.g., What is the capital of the United States?")
|
@@ -237,7 +240,7 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
|
|
237 |
|
238 |
submit_button.click(
|
239 |
fn=submit_query,
|
240 |
-
inputs=[query,
|
241 |
outputs=[query, chatbot, history_state]
|
242 |
)
|
243 |
clear_button.click(
|
|
|
24 |
yield "Error: OpenAI API key not provided."
|
25 |
return
|
26 |
|
27 |
+
# Build message history with user and assistant roles
|
28 |
messages = []
|
29 |
for msg in history:
|
30 |
messages.append({"role": "user", "content": msg["user"]})
|
|
|
38 |
}
|
39 |
|
40 |
payload = {
|
41 |
+
"model": "gpt-3.5-turbo",
|
42 |
"messages": messages,
|
43 |
"stream": True
|
44 |
}
|
|
|
47 |
async with httpx.AsyncClient() as client:
|
48 |
async with client.stream("POST", "https://api.openai.com/v1/chat/completions", headers=headers, json=payload) as response:
|
49 |
response.raise_for_status()
|
50 |
+
buffer = ""
|
51 |
async for chunk in response.aiter_text():
|
52 |
if chunk:
|
53 |
+
buffer += chunk
|
54 |
+
# Process complete JSON lines
|
55 |
+
while "\n" in buffer:
|
56 |
+
line, buffer = buffer.split("\n", 1)
|
57 |
if line.startswith("data: "):
|
58 |
data = line[6:] # Remove "data: " prefix
|
59 |
if data == "[DONE]":
|
|
|
61 |
if not data.strip():
|
62 |
continue
|
63 |
try:
|
64 |
+
json_data = json.loads(data)
|
65 |
if "choices" in json_data and json_data["choices"]:
|
66 |
delta = json_data["choices"][0].get("delta", {})
|
67 |
if "content" in delta and delta["content"] is not None:
|
|
|
74 |
yield f"Error in stream: {str(e)}"
|
75 |
|
76 |
except httpx.HTTPStatusError as e:
|
|
|
77 |
response_text = await e.response.aread()
|
78 |
logger.error(f"OpenAI HTTP Status Error: {e.response.status_code}, {response_text}")
|
79 |
yield f"Error: OpenAI HTTP Status Error: {e.response.status_code}, {response_text.decode('utf-8')}"
|
|
|
87 |
logger.error("Anthropic API key not provided")
|
88 |
return "Error: Anthropic API key not provided."
|
89 |
|
90 |
+
# Build message history with user and assistant roles
|
91 |
messages = []
|
92 |
for msg in history:
|
93 |
messages.append({"role": "user", "content": msg["user"]})
|
|
|
115 |
response.raise_for_status()
|
116 |
logger.info(f"Anthropic response: {response.json()}")
|
117 |
return response.json()['content'][0]['text']
|
|
|
118 |
except httpx.HTTPStatusError as e:
|
119 |
logger.error(f"Anthropic HTTP Status Error: {e.response.status_code}, {e.response.text}")
|
120 |
return f"Error: Anthropic HTTP Status Error: {e.response.status_code}, {e.response.text}"
|
|
|
128 |
logger.error("Gemini API key not provided")
|
129 |
return "Error: Gemini API key not provided."
|
130 |
|
131 |
+
# Concatenate history as text for Gemini
|
132 |
history_text = ""
|
133 |
for msg in history:
|
134 |
history_text += f"User: {msg['user']}\nAssistant: {msg['bot']}\n" if msg["bot"] else f"User: {msg['user']}\n"
|
|
|
152 |
|
153 |
response.raise_for_status()
|
154 |
return response.json()['candidates'][0]['content']['parts'][0]['text']
|
|
|
155 |
except httpx.HTTPStatusError as e:
|
156 |
logger.error(f"Gemini HTTP Status Error: {e.response.status_code}, {e.response.text}")
|
157 |
return f"Error: Gemini HTTP Status Error: {e.response.status_code}, {e.response.text}"
|
|
|
159 |
logger.error(f"Gemini Error: {str(e)}")
|
160 |
return f"Error: Gemini Error: {str(e)}"
|
161 |
|
162 |
+
async def query_model(query: str, providers: List[str], history: List[Dict[str, str]]) -> AsyncGenerator[Tuple[str, List[Dict[str, str]]], None]:
|
163 |
+
logger.info(f"Processing query with providers: {providers}")
|
164 |
+
responses = [] # To collect responses from each provider
|
165 |
+
streaming_response = ""
|
166 |
+
|
167 |
+
# Handle OpenAI (streaming)
|
168 |
+
if "OpenAI" in providers:
|
169 |
async for chunk in ask_openai(query, history):
|
170 |
+
streaming_response += chunk
|
171 |
+
# Yield streaming updates for OpenAI
|
172 |
+
chatbot_messages = []
|
173 |
+
for msg in history:
|
174 |
+
chatbot_messages.append({"role": "user", "content": msg["user"]})
|
175 |
+
if msg["bot"]:
|
176 |
+
chatbot_messages.append({"role": "assistant", "content": msg["bot"]})
|
177 |
+
chatbot_messages.append({"role": "user", "content": query})
|
178 |
+
chatbot_messages.append({"role": "assistant", "content": streaming_response})
|
179 |
+
yield "", chatbot_messages, history # Yield partial updates
|
180 |
+
if streaming_response.strip():
|
181 |
+
responses.append(f"[OpenAI]: {streaming_response}")
|
182 |
+
|
183 |
+
# Handle Anthropic (non-streaming)
|
184 |
+
if "Anthropic" in providers:
|
185 |
response = await ask_anthropic(query, history)
|
186 |
+
if response.strip():
|
187 |
+
responses.append(f"[Anthropic]: {response}")
|
188 |
+
|
189 |
+
# Handle Gemini (non-streaming)
|
190 |
+
if "Gemini" in providers:
|
191 |
response = await ask_gemini(query, history)
|
192 |
+
if response.strip():
|
193 |
+
responses.append(f"[Gemini]: {response}")
|
194 |
+
|
195 |
+
# Combine responses
|
196 |
+
combined_response = "\n\n".join(responses) if responses else "No valid responses received."
|
197 |
+
# Update history with the combined response
|
198 |
+
updated_history = history + [{"user": query, "bot": combined_response}]
|
|
|
199 |
logger.info(f"Updated history: {updated_history}")
|
|
|
200 |
|
201 |
+
# Yield final response
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
202 |
chatbot_messages = []
|
203 |
for msg in updated_history:
|
204 |
chatbot_messages.append({"role": "user", "content": msg["user"]})
|
|
|
206 |
chatbot_messages.append({"role": "assistant", "content": msg["bot"]})
|
207 |
yield "", chatbot_messages, updated_history
|
208 |
|
209 |
+
async def submit_query(query: str, providers: List[str], history: List[Dict[str, str]]) -> AsyncGenerator[Tuple[str, List[Dict[str, str]], List[Dict[str, str]]], None]:
|
210 |
+
if not query.strip():
|
211 |
+
chatbot_messages = [{"role": "assistant", "content": "Please enter a query."}]
|
212 |
+
yield "", chatbot_messages, history
|
213 |
+
return
|
214 |
+
|
215 |
+
if not providers:
|
216 |
+
chatbot_messages = [{"role": "assistant", "content": "Please select at least one provider."}]
|
217 |
+
yield "", chatbot_messages, history
|
218 |
+
return
|
219 |
+
|
220 |
+
async for response_chunk, chatbot_messages, updated_history in query_model(query, providers, history):
|
221 |
+
yield "", chatbot_messages, updated_history # Keep query textbox unchanged during streaming
|
222 |
+
# Final yield to clear the query textbox
|
223 |
+
yield "", chatbot_messages, updated_history
|
224 |
+
|
225 |
# Gradio interface
|
226 |
def clear_history():
|
227 |
return [], []
|
|
|
229 |
# Define Gradio interface
|
230 |
with gr.Blocks(theme=gr.themes.Soft()) as demo:
|
231 |
gr.Markdown("# Multi-Model Chat")
|
232 |
+
gr.Markdown("Chat with OpenAI, Anthropic, or Gemini. Select providers and start typing!")
|
233 |
|
234 |
+
providers = gr.CheckboxGroup(choices=["OpenAI", "Anthropic", "Gemini"], label="Select Providers", value=["OpenAI"])
|
235 |
history_state = gr.State(value=[])
|
236 |
chatbot = gr.Chatbot(label="Conversation", type="messages")
|
237 |
query = gr.Textbox(label="Enter your query", placeholder="e.g., What is the capital of the United States?")
|
|
|
240 |
|
241 |
submit_button.click(
|
242 |
fn=submit_query,
|
243 |
+
inputs=[query, providers, history_state],
|
244 |
outputs=[query, chatbot, history_state]
|
245 |
)
|
246 |
clear_button.click(
|