Ganesh Chintalapati committed on
Commit
c28a361
·
1 Parent(s): 76b6f27

Add ChatGPT-like UI with history and streaming for OpenAI

Browse files
Files changed (3) hide show
  1. README.markdown +14 -0
  2. app.py +73 -34
  3. requirements.txt +3 -11
README.markdown ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Multi-Model Selector
2
+
3
+ A Gradio-based chat application that allows querying multiple AI models (OpenAI, Anthropic, Gemini) with a ChatGPT-like interface, including history and streaming responses (for OpenAI).
4
+
5
+ ## Usage
6
+ - Open the Space URL in your browser.
7
+ - Select a provider (OpenAI, Anthropic, Gemini) from the dropdown.
8
+ - Type a query and press Enter to chat.
9
+ - Conversation history is preserved, and OpenAI responses stream in real-time.
10
+
11
+ ## Setup
12
+ - Set `OPENAI_API_KEY`, `ANTHROPIC_API_KEY`, and `GEMINI_API_KEY` as secrets in Hugging Face Space settings.
13
+ - Runs on Python 3.8+ with dependencies listed in `requirements.txt`.
14
+ - Framework: Gradio
app.py CHANGED
@@ -3,6 +3,7 @@ import logging
3
  import httpx
4
  from dotenv import load_dotenv
5
  import gradio as gr
 
6
 
7
  # Configure logging
8
  logging.basicConfig(level=logging.INFO)
@@ -15,11 +16,20 @@ logger.info(f"OPENAI_API_KEY present: {'OPENAI_API_KEY' in os.environ}")
15
  logger.info(f"ANTHROPIC_API_KEY present: {'ANTHROPIC_API_KEY' in os.environ}")
16
  logger.info(f"GEMINI_API_KEY present: {'GEMINI_API_KEY' in os.environ}")
17
 
18
- async def ask_openai(query: str):
19
  openai_api_key = os.getenv("OPENAI_API_KEY")
20
  if not openai_api_key:
21
  logger.error("OpenAI API key not provided")
22
- return "Error: OpenAI API key not provided."
 
 
 
 
 
 
 
 
 
23
 
24
  headers = {
25
  "Authorization": f"Bearer {openai_api_key}",
@@ -28,30 +38,54 @@ async def ask_openai(query: str):
28
 
29
  payload = {
30
  "model": "gpt-3.5-turbo",
31
- "messages": [{"role": "user", "content": query}]
 
32
  }
33
 
34
  try:
35
  async with httpx.AsyncClient() as client:
36
- response = await client.post("https://api.openai.com/v1/chat/completions", headers=headers, json=payload)
37
-
38
- response.raise_for_status()
39
- answer = response.json()['choices'][0]['message']['content']
40
- return answer
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
41
 
42
  except httpx.HTTPStatusError as e:
43
  logger.error(f"OpenAI HTTP Status Error: {e.response.status_code}, {e.response.text}")
44
- return f"Error: OpenAI HTTP Status Error: {e.response.status_code}, {e.response.text}"
45
  except Exception as e:
46
  logger.error(f"OpenAI Error: {str(e)}")
47
- return f"Error: OpenAI Error: {str(e)}"
48
 
49
- async def ask_anthropic(query: str):
50
  anthropic_api_key = os.getenv("ANTHROPIC_API_KEY")
51
  if not anthropic_api_key:
52
  logger.error("Anthropic API key not provided")
53
  return "Error: Anthropic API key not provided."
54
 
 
 
 
 
 
 
 
 
55
  headers = {
56
  "x-api-key": anthropic_api_key,
57
  "anthropic-version": "2023-06-01",
@@ -61,7 +95,7 @@ async def ask_anthropic(query: str):
61
  payload = {
62
  "model": "claude-3-5-sonnet-20241022",
63
  "max_tokens": 1024,
64
- "messages": [{"role": "user", "content": query}]
65
  }
66
 
67
  try:
@@ -71,8 +105,7 @@ async def ask_anthropic(query: str):
71
 
72
  response.raise_for_status()
73
  logger.info(f"Anthropic response: {response.json()}")
74
- answer = response.json()['content'][0]['text']
75
- return answer
76
 
77
  except httpx.HTTPStatusError as e:
78
  logger.error(f"Anthropic HTTP Status Error: {e.response.status_code}, {e.response.text}")
@@ -81,18 +114,24 @@ async def ask_anthropic(query: str):
81
  logger.error(f"Anthropic Error: {str(e)}")
82
  return f"Error: Anthropic Error: {str(e)}"
83
 
84
- async def ask_gemini(query: str):
85
  gemini_api_key = os.getenv("GEMINI_API_KEY")
86
  if not gemini_api_key:
87
  logger.error("Gemini API key not provided")
88
  return "Error: Gemini API key not provided."
89
 
 
 
 
 
 
 
90
  headers = {
91
  "Content-Type": "application/json"
92
  }
93
 
94
  payload = {
95
- "contents": [{"parts": [{"text": query}]}]
96
  }
97
 
98
  try:
@@ -104,8 +143,7 @@ async def ask_gemini(query: str):
104
  )
105
 
106
  response.raise_for_status()
107
- answer = response.json()['candidates'][0]['content']['parts'][0]['text']
108
- return answer
109
 
110
  except httpx.HTTPStatusError as e:
111
  logger.error(f"Gemini HTTP Status Error: {e.response.status_code}, {e.response.text}")
@@ -114,31 +152,32 @@ async def ask_gemini(query: str):
114
  logger.error(f"Gemini Error: {str(e)}")
115
  return f"Error: Gemini Error: {str(e)}"
116
 
117
- async def query_model(query: str, provider: str):
118
  provider = provider.lower()
119
  if provider == "openai":
120
- return await ask_openai(query)
 
121
  elif provider == "anthropic":
122
- return await ask_anthropic(query)
123
  elif provider == "gemini":
124
- return await ask_gemini(query)
125
  else:
126
- return f"Error: Unknown provider: {provider}"
127
 
128
  # Gradio interface
129
- with gr.Blocks() as demo:
130
- gr.Markdown("# Multi-Model Selector")
131
- gr.Markdown("Select a provider and enter a query to get a response from the chosen AI model.")
132
-
133
- provider = gr.Dropdown(choices=["OpenAI", "Anthropic", "Gemini"], label="Select Provider")
134
- query = gr.Textbox(label="Enter your query", placeholder="e.g., What is the capital of the United States?")
135
- submit_button = gr.Button("Submit")
136
- output = gr.Textbox(label="Response", interactive=False)
137
 
138
- submit_button.click(
 
139
  fn=query_model,
140
- inputs=[query, provider],
141
- outputs=output
 
 
 
 
142
  )
143
 
144
  # Launch the Gradio app
 
3
  import httpx
4
  from dotenv import load_dotenv
5
  import gradio as gr
6
+ from typing import AsyncGenerator, List, Dict
7
 
8
  # Configure logging
9
  logging.basicConfig(level=logging.INFO)
 
16
  logger.info(f"ANTHROPIC_API_KEY present: {'ANTHROPIC_API_KEY' in os.environ}")
17
  logger.info(f"GEMINI_API_KEY present: {'GEMINI_API_KEY' in os.environ}")
18
 
19
+ async def ask_openai(query: str, history: List[Dict[str, str]]) -> AsyncGenerator[str, None]:
20
  openai_api_key = os.getenv("OPENAI_API_KEY")
21
  if not openai_api_key:
22
  logger.error("OpenAI API key not provided")
23
+ yield "Error: OpenAI API key not provided."
24
+ return
25
+
26
+ # Build message history
27
+ messages = []
28
+ for msg in history:
29
+ messages.append({"role": "user", "content": msg["user"]})
30
+ if msg["bot"]:
31
+ messages.append({"role": "assistant", "content": msg["bot"]})
32
+ messages.append({"role": "user", "content": query})
33
 
34
  headers = {
35
  "Authorization": f"Bearer {openai_api_key}",
 
38
 
39
  payload = {
40
  "model": "gpt-3.5-turbo",
41
+ "messages": messages,
42
+ "stream": True
43
  }
44
 
45
  try:
46
  async with httpx.AsyncClient() as client:
47
+ async with client.stream("POST", "https://api.openai.com/v1/chat/completions", headers=headers, json=payload) as response:
48
+ response.raise_for_status()
49
+ async for chunk in response.aiter_text():
50
+ if chunk:
51
+ # Parse the streaming chunk (JSON lines)
52
+ lines = chunk.splitlines()
53
+ for line in lines:
54
+ if line.startswith("data: "):
55
+ data = line[6:] # Remove "data: " prefix
56
+ if data == "[DONE]":
57
+ break
58
+ try:
59
+ json_data = eval(data)  # FIXME: eval() on untrusted stream data is unsafe and is NOT JSON parsing — use json.loads(data)
60
+ if "choices" in json_data and json_data["choices"]:
61
+ delta = json_data["choices"][0].get("delta", {})
62
+ if "content" in delta:
63
+ yield delta["content"]
64
+ except Exception as e:
65
+ logger.error(f"Error parsing OpenAI stream chunk: {str(e)}")
66
+ yield f"Error parsing stream: {str(e)}"
67
 
68
  except httpx.HTTPStatusError as e:
69
  logger.error(f"OpenAI HTTP Status Error: {e.response.status_code}, {e.response.text}")
70
+ yield f"Error: OpenAI HTTP Status Error: {e.response.status_code}, {e.response.text}"
71
  except Exception as e:
72
  logger.error(f"OpenAI Error: {str(e)}")
73
+ yield f"Error: OpenAI Error: {str(e)}"
74
 
75
+ async def ask_anthropic(query: str, history: List[Dict[str, str]]) -> str:
76
  anthropic_api_key = os.getenv("ANTHROPIC_API_KEY")
77
  if not anthropic_api_key:
78
  logger.error("Anthropic API key not provided")
79
  return "Error: Anthropic API key not provided."
80
 
81
+ # Build message history
82
+ messages = []
83
+ for msg in history:
84
+ messages.append({"role": "user", "content": msg["user"]})
85
+ if msg["bot"]:
86
+ messages.append({"role": "assistant", "content": msg["bot"]})
87
+ messages.append({"role": "user", "content": query})
88
+
89
  headers = {
90
  "x-api-key": anthropic_api_key,
91
  "anthropic-version": "2023-06-01",
 
95
  payload = {
96
  "model": "claude-3-5-sonnet-20241022",
97
  "max_tokens": 1024,
98
+ "messages": messages
99
  }
100
 
101
  try:
 
105
 
106
  response.raise_for_status()
107
  logger.info(f"Anthropic response: {response.json()}")
108
+ return response.json()['content'][0]['text']
 
109
 
110
  except httpx.HTTPStatusError as e:
111
  logger.error(f"Anthropic HTTP Status Error: {e.response.status_code}, {e.response.text}")
 
114
  logger.error(f"Anthropic Error: {str(e)}")
115
  return f"Error: Anthropic Error: {str(e)}"
116
 
117
+ async def ask_gemini(query: str, history: List[Dict[str, str]]) -> str:
118
  gemini_api_key = os.getenv("GEMINI_API_KEY")
119
  if not gemini_api_key:
120
  logger.error("Gemini API key not provided")
121
  return "Error: Gemini API key not provided."
122
 
123
+ # Gemini doesn't natively support chat history in the same way, so we concatenate history as text
124
+ history_text = ""
125
+ for msg in history:
126
+ history_text += f"User: {msg['user']}\nAssistant: {msg['bot']}\n" if msg["bot"] else f"User: {msg['user']}\n"
127
+ full_query = history_text + f"User: {query}\n"
128
+
129
  headers = {
130
  "Content-Type": "application/json"
131
  }
132
 
133
  payload = {
134
+ "contents": [{"parts": [{"text": full_query}]}]
135
  }
136
 
137
  try:
 
143
  )
144
 
145
  response.raise_for_status()
146
+ return response.json()['candidates'][0]['content']['parts'][0]['text']
 
147
 
148
  except httpx.HTTPStatusError as e:
149
  logger.error(f"Gemini HTTP Status Error: {e.response.status_code}, {e.response.text}")
 
152
  logger.error(f"Gemini Error: {str(e)}")
153
  return f"Error: Gemini Error: {str(e)}"
154
 
155
+ async def query_model(query: str, provider: str, history: List[Dict[str, str]]):
156
  provider = provider.lower()
157
  if provider == "openai":
158
+ async for chunk in ask_openai(query, history):
159
+ yield chunk
160
  elif provider == "anthropic":
161
+ yield await ask_anthropic(query, history)
162
  elif provider == "gemini":
163
+ yield await ask_gemini(query, history)
164
  else:
165
+ yield f"Error: Unknown provider: {provider}"
166
 
167
  # Gradio interface
168
+ with gr.Blocks(theme=gr.themes.Soft()) as demo:
169
+ gr.Markdown("# Multi-Model Chat")
170
+ gr.Markdown("Chat with OpenAI, Anthropic, or Gemini. Select a provider and start typing!")
 
 
 
 
 
171
 
172
+ provider = gr.Dropdown(choices=["OpenAI", "Anthropic", "Gemini"], label="Select Provider", value="OpenAI")
173
+ chatbot = gr.ChatInterface(
174
  fn=query_model,
175
+ additional_inputs=[provider],
176
+ retry_btn=None,
177
+ undo_btn=None,
178
+ clear_btn="Clear",
179
+ title="",
180
+ description=""
181
  )
182
 
183
  # Launch the Gradio app
requirements.txt CHANGED
@@ -1,11 +1,3 @@
1
- fastapi
2
- uvicorn
3
- openai
4
- google-generativeai
5
- anthropic
6
- python-dotenv
7
- requests
8
- gradio
9
- httpx
10
-
11
-
 
1
+ gradio==4.44.0
2
+ httpx==0.27.2
3
+ python-dotenv==1.0.1