Spaces: Sleeping

Ganesh Chintalapati committed · 27bfa71 · Parent(s): 27f3c72

All working modular
api.py
ADDED
@@ -0,0 +1,229 @@
import os
import httpx
import json
import traceback
from typing import AsyncGenerator, List, Dict
from config import logger

async def ask_openai(query: str, history: List[Dict[str, str]]) -> AsyncGenerator[str, None]:
    openai_api_key = os.getenv("OPENAI_API_KEY")
    if not openai_api_key:
        logger.error("OpenAI API key not provided")
        yield "Error: OpenAI API key not provided."
        return

    # Build message history with user and assistant roles
    messages = []
    for msg in history:
        messages.append({"role": "user", "content": msg["user"]})
        if msg["bot"]:
            messages.append({"role": "assistant", "content": msg["bot"]})
    messages.append({"role": "user", "content": query})

    headers = {
        "Authorization": f"Bearer {openai_api_key}",
        "Content-Type": "application/json"
    }

    payload = {
        "model": "gpt-3.5-turbo",
        "messages": messages,
        "stream": True
    }

    try:
        async with httpx.AsyncClient() as client:
            async with client.stream("POST", "https://api.openai.com/v1/chat/completions", headers=headers, json=payload) as response:
                response.raise_for_status()
                buffer = ""
                async for chunk in response.aiter_text():
                    if chunk:
                        buffer += chunk
                        while "\n" in buffer:
                            line, buffer = buffer.split("\n", 1)
                            if line.startswith("data: "):
                                data = line[6:]
                                if data == "[DONE]":
                                    break
                                if not data.strip():
                                    continue
                                try:
                                    json_data = json.loads(data)
                                    if "choices" in json_data and json_data["choices"]:
                                        delta = json_data["choices"][0].get("delta", {})
                                        if "content" in delta and delta["content"] is not None:
                                            logger.info(f"OpenAI yielding chunk: {delta['content']}")
                                            yield delta["content"]
                                except json.JSONDecodeError as e:
                                    logger.error(f"Error parsing OpenAI stream chunk: {str(e)} - Data: {data}")
                                    yield f"Error parsing stream: {str(e)}"
                                except Exception as e:
                                    logger.error(f"Unexpected error in OpenAI stream: {str(e)} - Data: {data}")
                                    yield f"Error in stream: {str(e)}"

    except httpx.HTTPStatusError as e:
        response_text = await e.response.aread()
        logger.error(f"OpenAI HTTP Status Error: {e.response.status_code}, {response_text}")
        yield f"Error: OpenAI HTTP Status Error: {e.response.status_code}, {response_text.decode('utf-8')}"
    except Exception as e:
        logger.error(f"OpenAI Error: {str(e)}")
        yield f"Error: OpenAI Error: {str(e)}"

async def ask_anthropic(query: str, history: List[Dict[str, str]]) -> AsyncGenerator[str, None]:
    anthropic_api_key = os.getenv("ANTHROPIC_API_KEY")
    if not anthropic_api_key:
        logger.error("Anthropic API key not provided")
        yield "Error: Anthropic API key not provided."
        return

    messages = []
    for msg in history:
        messages.append({"role": "user", "content": msg["user"]})
        if msg["bot"]:
            messages.append({"role": "assistant", "content": msg["bot"]})
    messages.append({"role": "user", "content": query})

    headers = {
        "x-api-key": anthropic_api_key,
        "anthropic-version": "2023-06-01",
        "Content-Type": "application/json"
    }

    payload = {
        "model": "claude-3-5-sonnet-20241022",
        "max_tokens": 1024,
        "messages": messages,
        "stream": True
    }

    try:
        async with httpx.AsyncClient(timeout=30.0) as client:
            logger.info(f"Sending Anthropic streaming request: {payload}")
            async with client.stream("POST", "https://api.anthropic.com/v1/messages", headers=headers, json=payload) as response:
                response.raise_for_status()
                buffer = ""
                async for chunk in response.aiter_text():
                    if chunk:
                        buffer += chunk
                        while "\n" in buffer:
                            line, buffer = buffer.split("\n", 1)
                            if line.startswith("data: "):
                                data = line[6:]
                                if data.strip() == "[DONE]":
                                    break
                                if not data.strip():
                                    continue
                                try:
                                    json_data = json.loads(data)
                                    if json_data.get("type") == "content_block_delta" and "delta" in json_data and "text" in json_data["delta"]:
                                        logger.info(f"Anthropic yielding chunk: {json_data['delta']['text']}")
                                        yield json_data["delta"]["text"]
                                    elif json_data.get("type") == "message_start" or json_data.get("type") == "message_delta":
                                        continue
                                except json.JSONDecodeError as e:
                                    logger.error(f"Error parsing Anthropic stream chunk: {str(e)} - Data: {data}")
                                    yield f"Error parsing stream: {str(e)}"
                                except Exception as e:
                                    logger.error(f"Unexpected error in Anthropic stream: {str(e)} - Data: {data}")
                                    yield f"Error in stream: {str(e)}"

    except httpx.HTTPStatusError as e:
        response_text = await e.response.aread()
        logger.error(f"Anthropic HTTP Status Error: {e.response.status_code}, {response_text.decode('utf-8')}")
        yield f"Error: Anthropic HTTP Status Error: {e.response.status_code}, {response_text.decode('utf-8')}"
    except Exception as e:
        logger.error(f"Anthropic Error: {str(e)}\nStack trace: {traceback.format_exc()}")
        yield f"Error: Anthropic Error: {str(e)}"

async def ask_gemini(query: str, history: List[Dict[str, str]]) -> AsyncGenerator[str, None]:
    gemini_api_key = os.getenv("GEMINI_API_KEY")
    if not gemini_api_key:
        logger.error("Gemini API key not provided")
        yield "Error: Gemini API key not provided."
        return

    history_text = ""
    for msg in history:
        history_text += f"User: {msg['user']}\nAssistant: {msg['bot']}\n" if msg['bot'] else f"User: {msg['user']}\n"
    full_query = history_text + f"User: {query}\n"

    headers = {
        "Content-Type": "application/json"
    }

    payload = {
        "contents": [{"parts": [{"text": full_query}]}]
    }

    try:
        async with httpx.AsyncClient(timeout=30.0) as client:
            logger.info(f"Sending Gemini streaming request: {payload}")
            async with client.stream(
                "POST",
                f"https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:streamGenerateContent?key={gemini_api_key}",
                headers=headers,
                json=payload
            ) as response:
                response.raise_for_status()
                buffer = ""
                async for chunk in response.aiter_text():
                    if chunk:
                        buffer += chunk
                        logger.info(f"Gemini stream chunk: {chunk}")
                        while buffer.strip():
                            try:
                                json_data = json.loads(buffer)
                                logger.info(f"Parsed Gemini JSON: {json_data}")
                                buffer = ""
                                objects = json_data if isinstance(json_data, list) else [json_data]
                                for obj in objects:
                                    if isinstance(obj, dict) and "candidates" in obj and obj["candidates"]:
                                        content = obj["candidates"][0].get("content", {})
                                        if "parts" in content and content["parts"]:
                                            text = content["parts"][0].get("text", "")
                                            if text:
                                                logger.info(f"Gemini yielding chunk: {text}")
                                                yield text
                                break
                            except json.JSONDecodeError:
                                brace_count = 0
                                split_index = -1
                                for i, char in enumerate(buffer):
                                    if char == '{':
                                        brace_count += 1
                                    elif char == '}':
                                        brace_count -= 1
                                        if brace_count == 0:
                                            split_index = i + 1
                                if split_index > 0:
                                    try:
                                        json_str = buffer[:split_index]
                                        json_data = json.loads(json_str)
                                        logger.info(f"Parsed Gemini JSON: {json_data}")
                                        buffer = buffer[split_index:].lstrip(',')
                                        objects = json_data if isinstance(json_data, list) else [json_data]
                                        for obj in objects:
                                            if isinstance(obj, dict) and "candidates" in obj and obj["candidates"]:
                                                content = obj["candidates"][0].get("content", {})
                                                if "parts" in content and content["parts"]:
                                                    text = content["parts"][0].get("text", "")
                                                    if text:
                                                        logger.info(f"Gemini yielding chunk: {text}")
                                                        yield text
                                        continue
                                    except json.JSONDecodeError:
                                        pass
                                break
                            except Exception as e:
                                logger.error(f"Unexpected error in Gemini stream: {str(e)} - Buffer: {buffer}")
                                yield f"Error in stream: {str(e)}"
                                buffer = ""
                                break

    except httpx.HTTPStatusError as e:
        response_text = await e.response.aread()
        logger.error(f"Gemini HTTP Status Error: {e.response.status_code}, {response_text.decode('utf-8')}")
        yield f"Error: Gemini HTTP Status Error: {e.response.status_code}, {response_text.decode('utf-8')}"
    except Exception as e:
        logger.error(f"Gemini Error: {str(e)}\nStack trace: {traceback.format_exc()}")
        yield f"Error: Gemini Error: {str(e)}"
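Each provider function above is an async generator that yields text chunks as they arrive, so a caller streams tokens with `async for`. A minimal consumption sketch (a hypothetical driver, not part of the commit; it assumes OPENAI_API_KEY is set and config.py is importable):

import asyncio
from api import ask_openai

async def main() -> None:
    # history uses this app's shape: one dict per turn with "user"/"bot" keys
    history = [{"user": "Hi", "bot": "Hello!"}]
    async for chunk in ask_openai("What is the capital of France?", history):
        print(chunk, end="", flush=True)  # render partial output as it streams

asyncio.run(main())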
app.py
CHANGED
@@ -1,364 +1,25 @@
-import os
-import logging
-import httpx
-import json
-from dotenv import load_dotenv
 import gradio as gr
+from core import submit_query, clear_history
-from typing import AsyncGenerator, List, Dict, Tuple
-import traceback
-
-# Configure logging
-logging.basicConfig(level=logging.INFO)
-logger = logging.getLogger(__name__)
-
-# Load environment variables
-load_dotenv()
-logger.info("Environment variables loaded from .env file")
-logger.info(f"OPENAI_API_KEY present: {'OPENAI_API_KEY' in os.environ}")
-logger.info(f"ANTHROPIC_API_KEY present: {'ANTHROPIC_API_KEY' in os.environ}")
-logger.info(f"GEMINI_API_KEY present: {'GEMINI_API_KEY' in os.environ}")
-
-async def ask_openai(query: str, history: List[Dict[str, str]]) -> AsyncGenerator[str, None]:
-    openai_api_key = os.getenv("OPENAI_API_KEY")
-    if not openai_api_key:
-        logger.error("OpenAI API key not provided")
-        yield "Error: OpenAI API key not provided."
-        return
-
-    # Build message history with user and assistant roles
-    messages = []
-    for msg in history:
-        messages.append({"role": "user", "content": msg["user"]})
-        if msg["bot"]:
-            messages.append({"role": "assistant", "content": msg["bot"]})
-    messages.append({"role": "user", "content": query})
-
-    headers = {
-        "Authorization": f"Bearer {openai_api_key}",
-        "Content-Type": "application/json"
-    }
-
-    payload = {
-        "model": "gpt-3.5-turbo",
-        "messages": messages,
-        "stream": True
-    }
-
-    try:
-        async with httpx.AsyncClient() as client:
-            async with client.stream("POST", "https://api.openai.com/v1/chat/completions", headers=headers, json=payload) as response:
-                response.raise_for_status()
-                buffer = ""
-                async for chunk in response.aiter_text():
-                    if chunk:
-                        buffer += chunk
-                        # Process complete JSON lines
-                        while "\n" in buffer:
-                            line, buffer = buffer.split("\n", 1)
-                            if line.startswith("data: "):
-                                data = line[6:]  # Remove "data: " prefix
-                                if data == "[DONE]":
-                                    break
-                                if not data.strip():
-                                    continue
-                                try:
-                                    json_data = json.loads(data)
-                                    if "choices" in json_data and json_data["choices"]:
-                                        delta = json_data["choices"][0].get("delta", {})
-                                        if "content" in delta and delta["content"] is not None:
-                                            yield delta["content"]
-                                except json.JSONDecodeError as e:
-                                    logger.error(f"Error parsing OpenAI stream chunk: {str(e)} - Data: {data}")
-                                    yield f"Error parsing stream: {str(e)}"
-                                except Exception as e:
-                                    logger.error(f"Unexpected error in OpenAI stream: {str(e)} - Data: {data}")
-                                    yield f"Error in stream: {str(e)}"
-
-    except httpx.HTTPStatusError as e:
-        response_text = await e.response.aread()
-        logger.error(f"OpenAI HTTP Status Error: {e.response.status_code}, {response_text}")
-        yield f"Error: OpenAI HTTP Status Error: {e.response.status_code}, {response_text.decode('utf-8')}"
-    except Exception as e:
-        logger.error(f"OpenAI Error: {str(e)}")
-        yield f"Error: OpenAI Error: {str(e)}"
-
-async def ask_anthropic(query: str, history: List[Dict[str, str]]) -> AsyncGenerator[str, None]:
-    anthropic_api_key = os.getenv("ANTHROPIC_API_KEY")
-    if not anthropic_api_key:
-        logger.error("Anthropic API key not provided")
-        yield "Error: Anthropic API key not provided."
-        return
-
-    # Build message history with user and assistant roles
-    messages = []
-    for msg in history:
-        messages.append({"role": "user", "content": msg["user"]})
-        if msg["bot"]:
-            messages.append({"role": "assistant", "content": msg["bot"]})
-    messages.append({"role": "user", "content": query})
-
-    headers = {
-        "x-api-key": anthropic_api_key,
-        "anthropic-version": "2023-06-01",
-        "Content-Type": "application/json"
-    }
-
-    payload = {
-        "model": "claude-3-5-sonnet-20241022",
-        "max_tokens": 1024,
-        "messages": messages,
-        "stream": True
-    }
-
-    try:
-        async with httpx.AsyncClient(timeout=30.0) as client:
-            logger.info(f"Sending Anthropic streaming request: {payload}")
-            async with client.stream("POST", "https://api.anthropic.com/v1/messages", headers=headers, json=payload) as response:
-                response.raise_for_status()
-                buffer = ""
-                async for chunk in response.aiter_text():
-                    if chunk:
-                        buffer += chunk
-                        # Process complete JSON lines
-                        while "\n" in buffer:
-                            line, buffer = buffer.split("\n", 1)
-                            if line.startswith("data: "):
-                                data = line[6:]  # Remove "data: " prefix
-                                if data.strip() == "[DONE]":
-                                    break
-                                if not data.strip():
-                                    continue
-                                try:
-                                    json_data = json.loads(data)
-                                    if json_data.get("type") == "content_block_delta" and "delta" in json_data and "text" in json_data["delta"]:
-                                        yield json_data["delta"]["text"]
-                                    elif json_data.get("type") == "message_start" or json_data.get("type") == "message_delta":
-                                        continue  # Skip metadata events
-                                except json.JSONDecodeError as e:
-                                    logger.error(f"Error parsing Anthropic stream chunk: {str(e)} - Data: {data}")
-                                    yield f"Error parsing stream: {str(e)}"
-                                except Exception as e:
-                                    logger.error(f"Unexpected error in Anthropic stream: {str(e)} - Data: {data}")
-                                    yield f"Error in stream: {str(e)}"
-
-    except httpx.HTTPStatusError as e:
-        response_text = await e.response.aread()
-        logger.error(f"Anthropic HTTP Status Error: {e.response.status_code}, {response_text.decode('utf-8')}")
-        yield f"Error: Anthropic HTTP Status Error: {e.response.status_code}, {response_text.decode('utf-8')}"
-    except Exception as e:
-        logger.error(f"Anthropic Error: {str(e)}\nStack trace: {traceback.format_exc()}")
-        yield f"Error: Anthropic Error: {str(e)}"
-
-async def ask_gemini(query: str, history: List[Dict[str, str]]) -> AsyncGenerator[str, None]:
-    gemini_api_key = os.getenv("GEMINI_API_KEY")
-    if not gemini_api_key:
-        logger.error("Gemini API key not provided")
-        yield "Error: Gemini API key not provided."
-        return
-
-    # Concatenate history as text for Gemini
-    history_text = ""
-    for msg in history:
-        history_text += f"User: {msg['user']}\nAssistant: {msg['bot']}\n" if msg["bot"] else f"User: {msg['user']}\n"
-    full_query = history_text + f"User: {query}\n"
-
-    headers = {
-        "Content-Type": "application/json"
-    }
-
-    payload = {
-        "contents": [{"parts": [{"text": full_query}]}]
-    }
-
-    try:
-        async with httpx.AsyncClient(timeout=30.0) as client:
-            logger.info(f"Sending Gemini streaming request: {payload}")
-            async with client.stream(
-                "POST",
-                f"https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:streamGenerateContent?key={gemini_api_key}",
-                headers=headers,
-                json=payload
-            ) as response:
-                response.raise_for_status()
-                buffer = ""
-                async for chunk in response.aiter_text():
-                    if chunk:
-                        buffer += chunk
-                        logger.info(f"Gemini stream chunk: {chunk}")
-                        # Try to parse buffer as multiple JSON objects
-                        while buffer.strip():
-                            try:
-                                # Attempt to parse the buffer as JSON
-                                json_data = json.loads(buffer)
-                                logger.info(f"Parsed Gemini JSON: {json_data}")
-                                buffer = ""  # Reset buffer after successful parse
-                                # Handle both single object and list of objects
-                                objects = json_data if isinstance(json_data, list) else [json_data]
-                                for obj in objects:
-                                    if isinstance(obj, dict) and "candidates" in obj and obj["candidates"]:
-                                        content = obj["candidates"][0].get("content", {})
-                                        if "parts" in content and content["parts"]:
-                                            text = content["parts"][0].get("text", "")
-                                            if text:
-                                                yield text
-                                break  # Exit loop after successful parse
-                            except json.JSONDecodeError as e:
-                                # Check if buffer might contain a partial object followed by a comma
-                                comma_index = buffer.rfind(",")
-                                if comma_index != -1:
-                                    # Try parsing up to the last comma
-                                    try:
-                                        json_data = json.loads(buffer[:comma_index])
-                                        logger.info(f"Parsed Gemini JSON (before comma): {json_data}")
-                                        buffer = buffer[comma_index + 1:].strip()
-                                        objects = json_data if isinstance(json_data, list) else [json_data]
-                                        for obj in objects:
-                                            if isinstance(obj, dict) and "candidates" in obj and obj["candidates"]:
-                                                content = obj["candidates"][0].get("content", {})
-                                                if "parts" in content and content["parts"]:
-                                                    text = content["parts"][0].get("text", "")
-                                                    if text:
-                                                        yield text
-                                        continue  # Continue processing remaining buffer
-                                    except json.JSONDecodeError:
-                                        pass  # Continue accumulating buffer
-                                # If parsing fails, accumulate more data
-                                break
-                            except Exception as e:
-                                logger.error(f"Unexpected error in Gemini stream: {str(e)} - Data: {buffer}")
-                                yield f"Error in stream: {str(e)}"
-                                buffer = ""
-                                break
-
-    except httpx.HTTPStatusError as e:
-        response_text = await e.response.aread()
-        logger.error(f"Gemini HTTP Status Error: {e.response.status_code}, {response_text.decode('utf-8')}")
-        yield f"Error: Gemini HTTP Status Error: {e.response.status_code}, {response_text.decode('utf-8')}"
-    except Exception as e:
-        logger.error(f"Gemini Error: {str(e)}\nStack trace: {traceback.format_exc()}")
-        yield f"Error: Gemini Error: {str(e)}"
-
-async def query_model(query: str, providers: List[str], history: List[Dict[str, str]]) -> AsyncGenerator[Tuple[str, List[Dict[str, str]], List[Dict[str, str]], List[Dict[str, str]]], None]:
-    logger.info(f"Processing query with providers: {providers}")
-    openai_response = ""
-    anthropic_response = ""
-    gemini_response = ""
-
-    # Initialize chatbot messages for each provider
-    openai_messages = []
-    anthropic_messages = []
-    gemini_messages = []
-
-    # Populate history for each chatbot
-    for msg in history:
-        openai_messages.append({"role": "user", "content": msg["user"]})
-        anthropic_messages.append({"role": "user", "content": msg["user"]})
-        gemini_messages.append({"role": "user", "content": msg["user"]})
-        if msg["bot"]:
-            # Parse the combined response to extract provider-specific responses
-            lines = msg["bot"].split("\n\n")
-            for line in lines:
-                if line.startswith("[OpenAI]:"):
-                    openai_messages.append({"role": "assistant", "content": line[len("[OpenAI]:"):].strip()})
-                elif line.startswith("[Anthropic]:"):
-                    anthropic_messages.append({"role": "assistant", "content": line[len("[Anthropic]:"):].strip()})
-                elif line.startswith("[Gemini]:"):
-                    gemini_messages.append({"role": "assistant", "content": line[len("[Gemini]:"):].strip()})
-
-    # Append the current query to all chatbots
-    if "OpenAI" in providers:
-        openai_messages.append({"role": "user", "content": query})
-        openai_messages.append({"role": "assistant", "content": ""})
-    if "Anthropic" in providers:
-        anthropic_messages.append({"role": "user", "content": query})
-        anthropic_messages.append({"role": "assistant", "content": ""})
-    if "Gemini" in providers:
-        gemini_messages.append({"role": "user", "content": query})
-        gemini_messages.append({"role": "assistant", "content": ""})
-
-    # Handle OpenAI (streaming)
-    if "OpenAI" in providers:
-        async for chunk in ask_openai(query, history):
-            openai_response += chunk
-            # Update OpenAI chatbot with streaming response
-            openai_messages[-1] = {"role": "assistant", "content": openai_response}
-            yield "", openai_messages, anthropic_messages, gemini_messages
-
-    # Handle Anthropic (streaming)
-    if "Anthropic" in providers:
-        async for chunk in ask_anthropic(query, history):
-            anthropic_response += chunk
-            # Update Anthropic chatbot with streaming response
-            anthropic_messages[-1] = {"role": "assistant", "content": anthropic_response}
-            yield "", openai_messages, anthropic_messages, gemini_messages
-
-    # Handle Gemini (streaming)
-    if "Gemini" in providers:
-        async for chunk in ask_gemini(query, history):
-            gemini_response += chunk
-            # Update Gemini chatbot with streaming response
-            gemini_messages[-1] = {"role": "assistant", "content": gemini_response}
-            yield "", openai_messages, anthropic_messages, gemini_messages
-
-    # Combine responses for history
-    responses = []
-    if openai_response.strip() and not openai_response.startswith("Error:"):
-        responses.append(f"[OpenAI]: {openai_response}")
-    if anthropic_response.strip() and not anthropic_response.startswith("Error:"):
-        responses.append(f"[Anthropic]: {anthropic_response}")
-    if gemini_response.strip() and not gemini_response.startswith("Error:"):
-        responses.append(f"[Gemini]: {gemini_response}")
-
-    combined_response = "\n\n".join(responses) if responses else "No valid responses received."
-    updated_history = history + [{"user": query, "bot": combined_response}]
-    logger.info(f"Updated history: {updated_history}")
-
-    # Yield final response
-    yield "", openai_messages, anthropic_messages, gemini_messages
-
-async def submit_query(query: str, providers: List[str]) -> AsyncGenerator[Tuple[str, List[Dict[str, str]], List[Dict[str, str]], List[Dict[str, str]]], None]:
-    if not query.strip():
-        openai_messages = [{"role": "assistant", "content": "Please enter a query."}]
-        anthropic_messages = [{"role": "assistant", "content": "Please enter a query."}]
-        gemini_messages = [{"role": "assistant", "content": "Please enter a query."}]
-        yield "", openai_messages, anthropic_messages, gemini_messages
-        return
-
-    if not providers:
-        openai_messages = [{"role": "assistant", "content": "Please select at least one provider."}]
-        anthropic_messages = [{"role": "assistant", "content": "Please select at least one provider."}]
-        gemini_messages = [{"role": "assistant", "content": "Please select at least one provider."}]
-        yield "", openai_messages, anthropic_messages, gemini_messages
-        return
-
-    # Initialize history
-    history = []
-
-    async for response_chunk, openai_messages, anthropic_messages, gemini_messages in query_model(query, providers, history):
-        yield "", openai_messages, anthropic_messages, gemini_messages
-    # Final yield to clear the query textbox
-    yield "", openai_messages, anthropic_messages, gemini_messages
-
-# Gradio interface
-def clear_history():
-    return [], [], []

 # Define Gradio interface
-with gr.Blocks(theme=gr.themes.Soft()) as demo:
+with gr.Blocks(theme=gr.themes.Soft(), css=".full-height { height: 100%; display: flex; align-items: stretch; min-height: 40px; } .full-height button { height: 100%; padding: 8px 16px; } .providers-row { height: 100%; display: flex; align-items: stretch; min-height: 40px; } .providers-row .checkbox-group { height: 100%; display: flex; flex-direction: row; align-items: center; gap: 10px; }") as demo:
     gr.Markdown("# Multi-Model Chat")
     gr.Markdown("Chat with OpenAI, Anthropic, or Gemini. Select providers and compare responses side by side!")

-
-
+    with gr.Row(elem_classes="providers-row"):
+        providers = gr.CheckboxGroup(choices=["OpenAI", "Anthropic", "Gemini"], label="Select Providers", value=["OpenAI"], elem_classes="checkbox-group")
+
+    with gr.Row(elem_classes="full-height"):
+        query = gr.Textbox(label="Enter your query", placeholder="e.g., What is the capital of the United States?", scale=4)
+        submit_button = gr.Button("Submit", scale=1)
+
+    with gr.Row():
+        clear_button = gr.Button("Clear History")

     with gr.Row():
         openai_chatbot = gr.Chatbot(label="OpenAI", type="messages", scale=1)
         anthropic_chatbot = gr.Chatbot(label="Anthropic", type="messages", scale=1)
         gemini_chatbot = gr.Chatbot(label="Gemini", type="messages", scale=1)
-
-    with gr.Row():
-        submit_button = gr.Button("Submit")
-        clear_button = gr.Button("Clear History")

     submit_button.click(
         fn=submit_query,
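The captured hunk ends mid-call, so the commit's actual click() arguments are not shown. Based on the component names and the submit_query/clear_history signatures (four streamed outputs, three cleared chatbots), a plausible wiring sketch — hypothetical, not the commit's code:

# Hypothetical completion of the truncated click() wiring, inferred from the
# component names above; the real arguments are cut off in this diff.
submit_button.click(
    fn=submit_query,
    inputs=[query, providers],
    outputs=[query, openai_chatbot, anthropic_chatbot, gemini_chatbot],
)
clear_button.click(
    fn=clear_history,
    inputs=[],
    outputs=[openai_chatbot, anthropic_chatbot, gemini_chatbot],
)

demo.launch()

Gradio runs an async-generator fn as a streaming event handler, pushing each yielded tuple to the listed output components in order.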
config.py
ADDED
@@ -0,0 +1,14 @@
import os
import logging
from dotenv import load_dotenv

# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Load environment variables
load_dotenv()
logger.info("Environment variables loaded from .env file")
logger.info(f"OPENAI_API_KEY present: {'OPENAI_API_KEY' in os.environ}")
logger.info(f"ANTHROPIC_API_KEY present: {'ANTHROPIC_API_KEY' in os.environ}")
logger.info(f"GEMINI_API_KEY present: {'GEMINI_API_KEY' in os.environ}")
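config.py only logs whether the keys are present; load_dotenv() reads them from a .env file in the working directory (or from the real environment). A placeholder sketch of that file — the values are illustrative, and real keys should never be committed:

OPENAI_API_KEY=sk-...
ANTHROPIC_API_KEY=sk-ant-...
GEMINI_API_KEY=...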
core.py
ADDED
@@ -0,0 +1,147 @@
import asyncio
from typing import AsyncGenerator, List, Dict, Tuple
from config import logger
from api import ask_openai, ask_anthropic, ask_gemini

async def query_model(query: str, providers: List[str], history: List[Dict[str, str]]) -> AsyncGenerator[Tuple[str, List[Dict[str, str]], List[Dict[str, str]], List[Dict[str, str]]], None]:
    logger.info(f"Processing query with providers: {providers}")
    openai_response = ""
    anthropic_response = ""
    gemini_response = ""

    openai_messages = []
    anthropic_messages = []
    gemini_messages = []

    # Rebuild per-provider chat history from the combined "[Provider]: ..." transcript
    for msg in history:
        openai_messages.append({"role": "user", "content": msg["user"]})
        anthropic_messages.append({"role": "user", "content": msg["user"]})
        gemini_messages.append({"role": "user", "content": msg["user"]})
        if msg["bot"]:
            lines = msg["bot"].split("\n\n")
            for line in lines:
                if line.startswith("[OpenAI]:"):
                    openai_messages.append({"role": "assistant", "content": line[len("[OpenAI]:"):].strip()})
                elif line.startswith("[Anthropic]:"):
                    anthropic_messages.append({"role": "assistant", "content": line[len("[Anthropic]:"):].strip()})
                elif line.startswith("[Gemini]:"):
                    gemini_messages.append({"role": "assistant", "content": line[len("[Gemini]:"):].strip()})

    # Append the current query plus an empty assistant slot for each selected provider
    if "OpenAI" in providers:
        openai_messages.append({"role": "user", "content": query})
        openai_messages.append({"role": "assistant", "content": ""})
    if "Anthropic" in providers:
        anthropic_messages.append({"role": "user", "content": query})
        anthropic_messages.append({"role": "assistant", "content": ""})
    if "Gemini" in providers:
        gemini_messages.append({"role": "user", "content": query})
        gemini_messages.append({"role": "assistant", "content": ""})

    tasks = []
    if "OpenAI" in providers:
        tasks.append(("OpenAI", ask_openai(query, history), openai_response, openai_messages))
    if "Anthropic" in providers:
        tasks.append(("Anthropic", ask_anthropic(query, history), anthropic_response, anthropic_messages))
    if "Gemini" in providers:
        tasks.append(("Gemini", ask_gemini(query, history), gemini_response, gemini_messages))

    async def collect_chunks(provider: str, generator: AsyncGenerator, response: str, messages: List[Dict[str, str]]) -> AsyncGenerator[Tuple[str, str, List[Dict[str, str]]], None]:
        # Accumulate the provider's stream, keeping its last chat message current
        async for chunk in generator:
            response += chunk
            messages[-1] = {"role": "assistant", "content": response}
            yield provider, response, messages

    generator_states = [(provider, collect_chunks(provider, gen, resp, msgs), None) for provider, gen, resp, msgs in tasks]
    active_generators = generator_states[:]

    # Fan-in loop: keep one pending __anext__() task per generator and yield a UI
    # update whenever any provider produces a chunk (FIRST_COMPLETED wakeups).
    while active_generators:
        tasks_to_wait = []
        new_generator_states = []

        for provider, gen, active_task in active_generators:
            if active_task is None or active_task.done():
                try:
                    task = asyncio.create_task(gen.__anext__())
                    new_generator_states.append((provider, gen, task))
                    tasks_to_wait.append(task)
                    logger.debug(f"Created task for {provider}")
                except StopAsyncIteration:
                    logger.info(f"Generator for {provider} completed")
                    continue
            else:
                new_generator_states.append((provider, gen, active_task))
                tasks_to_wait.append(active_task)

        if not tasks_to_wait:
            break

        done, _ = await asyncio.wait(tasks_to_wait, return_when=asyncio.FIRST_COMPLETED)

        # Iterate over a snapshot so the list can be safely mutated below
        for provider, gen, task in list(new_generator_states):
            if task in done:
                try:
                    provider, response, messages = task.result()
                    if provider == "OpenAI":
                        openai_response = response
                        openai_messages = messages
                    elif provider == "Anthropic":
                        anthropic_response = response
                        anthropic_messages = messages
                    elif provider == "Gemini":
                        gemini_response = response
                        gemini_messages = messages
                    logger.info(f"Yielding update for {provider}: {response[:50]}...")
                    yield "", openai_messages, anthropic_messages, gemini_messages
                    # Mark this generator as ready for a fresh __anext__() task
                    new_generator_states[new_generator_states.index((provider, gen, task))] = (provider, gen, None)
                except StopAsyncIteration:
                    logger.info(f"Generator for {provider} completed")
                    new_generator_states.remove((provider, gen, task))
            else:
                if (provider, gen, task) not in new_generator_states:
                    new_generator_states.append((provider, gen, task))

        active_generators = new_generator_states

    # Combine the per-provider responses into the shared history entry
    responses = []
    if openai_response.strip() and not openai_response.startswith("Error:"):
        responses.append(f"[OpenAI]: {openai_response}")
    if anthropic_response.strip() and not anthropic_response.startswith("Error:"):
        responses.append(f"[Anthropic]: {anthropic_response}")
    if gemini_response.strip() and not gemini_response.startswith("Error:"):
        responses.append(f"[Gemini]: {gemini_response}")

    combined_response = "\n\n".join(responses) if responses else "No valid responses received."
    updated_history = history + [{"user": query, "bot": combined_response}]
    logger.info(f"Updated history: {updated_history}")

    yield "", openai_messages, anthropic_messages, gemini_messages

async def submit_query(query: str, providers: List[str]) -> AsyncGenerator[Tuple[str, List[Dict[str, str]], List[Dict[str, str]], List[Dict[str, str]]], None]:
    if not query.strip():
        openai_messages = [{"role": "assistant", "content": "Please enter a query."}]
        anthropic_messages = [{"role": "assistant", "content": "Please enter a query."}]
        gemini_messages = [{"role": "assistant", "content": "Please enter a query."}]
        logger.info("Yielding empty query response")
        yield "", openai_messages, anthropic_messages, gemini_messages
        return

    if not providers:
        openai_messages = [{"role": "assistant", "content": "Please select at least one provider."}]
        anthropic_messages = [{"role": "assistant", "content": "Please select at least one provider."}]
        gemini_messages = [{"role": "assistant", "content": "Please select at least one provider."}]
        logger.info("Yielding no providers response")
        yield "", openai_messages, anthropic_messages, gemini_messages
        return

    history = []

    async for response_chunk, openai_messages, anthropic_messages, gemini_messages in query_model(query, providers, history):
        logger.info(f"Submitting update to UI: OpenAI: {openai_messages[-1]['content'][:50] if openai_messages else ''}, "
                    f"Anthropic: {anthropic_messages[-1]['content'][:50] if anthropic_messages else ''}, "
                    f"Gemini: {gemini_messages[-1]['content'][:50] if gemini_messages else ''}")
        yield "", openai_messages, anthropic_messages, gemini_messages
    logger.info("Final UI update")
    yield "", openai_messages, anthropic_messages, gemini_messages

def clear_history():
    return [], [], []
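query_model merges the three provider streams by polling one __anext__() task per generator and waking on asyncio.wait(..., return_when=FIRST_COMPLETED). The same fan-in can be written more compactly with pump tasks and a shared queue; the following is an editor's sketch of that alternative (not the commit's code), assuming only the async-generator interface above:

import asyncio
from typing import AsyncGenerator, Tuple

async def merge_streams(**streams: AsyncGenerator[str, None]) -> AsyncGenerator[Tuple[str, str], None]:
    # Yield (provider, chunk) pairs as soon as any stream produces one.
    queue: asyncio.Queue = asyncio.Queue()
    done = object()  # sentinel marking one stream as finished

    async def pump(name: str, gen: AsyncGenerator[str, None]) -> None:
        async for chunk in gen:
            await queue.put((name, chunk))
        await queue.put((name, done))

    pumps = [asyncio.create_task(pump(name, gen)) for name, gen in streams.items()]
    remaining = len(pumps)
    try:
        while remaining:
            name, item = await queue.get()
            if item is done:
                remaining -= 1
            else:
                yield name, item
    finally:
        for t in pumps:
            t.cancel()  # don't leak pump tasks if the consumer stops early

Usage would mirror the current call sites, e.g. `async for provider, chunk in merge_streams(OpenAI=ask_openai(q, h), Gemini=ask_gemini(q, h)): ...`, with the caller appending each chunk to that provider's running response.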