Ganesh Chintalapati committed on
Commit
c406c1a
·
1 Parent(s): f9b91f5
Files changed (2) hide show
  1. app.py +68 -46
  2. requirements.txt +6 -5
app.py CHANGED
@@ -4,13 +4,23 @@ import anthropic
4
  import google.generativeai as genai
5
  import os
6
  import asyncio
 
7
  from dotenv import load_dotenv
8
 
 
 
 
 
9
  # Load environment variables
10
  load_dotenv()
11
- openai.api_key = os.getenv("OPENAI_API_KEY")
12
- anthropic_client = anthropic.Anthropic(api_key=os.getenv("ANTHROPIC_API_KEY"))
13
- genai.configure(api_key=os.getenv("GEMINI_API_KEY"))
 
 
 
 
 
14
 
15
  # Initialize conversation history
16
  def initialize_chat():
@@ -30,6 +40,7 @@ async def get_openai_response(messages):
30
  )
31
  return "ChatGPT (OpenAI)", response.choices[0].message["content"]
32
  except Exception as e:
 
33
  return "ChatGPT (OpenAI)", f"Error: {str(e)}"
34
 
35
  async def get_claude_response(messages):
@@ -46,6 +57,7 @@ async def get_claude_response(messages):
46
  )
47
  return "Claude (Anthropic)", response.content[0].text
48
  except Exception as e:
 
49
  return "Claude (Anthropic)", f"Error: {str(e)}"
50
 
51
  async def get_gemini_response(messages):
@@ -61,6 +73,7 @@ async def get_gemini_response(messages):
61
  )
62
  return "Gemini (Google)", response.text
63
  except Exception as e:
 
64
  return "Gemini (Google)", f"Error: {str(e)}"
65
 
66
  # Main async function to query selected models
@@ -90,7 +103,11 @@ async def query_selected_models(message, history, use_openai, use_claude, use_ge
90
  tasks.append(get_gemini_response(messages))
91
 
92
  # Run selected API calls concurrently
93
- responses = await asyncio.gather(*tasks, return_exceptions=True)
 
 
 
 
94
 
95
  # Format responses
96
  response_text = ""
@@ -99,49 +116,54 @@ async def query_selected_models(message, history, use_openai, use_claude, use_ge
99
 
100
  # Update history
101
  history.append((message, response_text.strip()))
 
102
 
103
  return "", history
104
 
105
  # Gradio interface
106
- with gr.Blocks(theme=gr.themes.Soft(), title="Multi-Model AI Selector") as demo:
107
- gr.Markdown(
108
- """
109
- # Multi-Model AI Chat Interface
110
- Select one or more models to query and enter your question below. Responses will appear in the chat window.
111
- """
112
- )
113
-
114
- # Model selection checkboxes
115
- with gr.Row():
116
- use_openai = gr.Checkbox(label="ChatGPT (OpenAI)", value=True)
117
- use_claude = gr.Checkbox(label="Claude (Anthropic)", value=True)
118
- use_gemini = gr.Checkbox(label="Gemini (Google)", value=True)
119
-
120
- # Chat interface
121
- chatbot = gr.Chatbot(label="Conversation", height=400)
122
- msg = gr.Textbox(placeholder="Type your query...", label="Your Query")
123
- with gr.Row():
124
- submit = gr.Button("Submit Query")
125
- clear = gr.Button("Clear Chat")
126
-
127
- # Bind query function to submit button and textbox (Enter key)
128
- submit.click(
129
- fn=query_selected_models,
130
- inputs=[msg, chatbot, use_openai, use_claude, use_gemini],
131
- outputs=[msg, chatbot]
132
- )
133
- msg.submit(
134
- fn=query_selected_models,
135
- inputs=[msg, chatbot, use_openai, use_claude, use_gemini],
136
- outputs=[msg, chatbot]
137
- )
138
-
139
- # Clear chat history
140
- clear.click(
141
- fn=lambda: (None, []),
142
- inputs=None,
143
- outputs=[msg, chatbot]
144
- )
145
-
146
- # Launch the app (commented out for Hugging Face deployment)
147
- # demo.launch()
 
 
 
 
 
4
  import google.generativeai as genai
5
  import os
6
  import asyncio
7
+ import logging
8
  from dotenv import load_dotenv
9
 
10
+ # Set up logging to diagnose initialization issues
11
+ logging.basicConfig(level=logging.INFO)
12
+ logger = logging.getLogger(__name__)
13
+
14
  # Load environment variables
15
  load_dotenv()
16
+ try:
17
+ openai.api_key = os.getenv("OPENAI_API_KEY")
18
+ anthropic_client = anthropic.Anthropic(api_key=os.getenv("ANTHROPIC_API_KEY"))
19
+ genai.configure(api_key=os.getenv("GEMINI_API_KEY"))
20
+ logger.info("Environment variables loaded successfully")
21
+ except Exception as e:
22
+ logger.error(f"Error loading environment variables: {str(e)}")
23
+ raise
24
 
25
  # Initialize conversation history
26
  def initialize_chat():
 
40
  )
41
  return "ChatGPT (OpenAI)", response.choices[0].message["content"]
42
  except Exception as e:
43
+ logger.error(f"OpenAI error: {str(e)}")
44
  return "ChatGPT (OpenAI)", f"Error: {str(e)}"
45
 
46
  async def get_claude_response(messages):
 
57
  )
58
  return "Claude (Anthropic)", response.content[0].text
59
  except Exception as e:
60
+ logger.error(f"Claude error: {str(e)}")
61
  return "Claude (Anthropic)", f"Error: {str(e)}"
62
 
63
  async def get_gemini_response(messages):
 
73
  )
74
  return "Gemini (Google)", response.text
75
  except Exception as e:
76
+ logger.error(f"Gemini error: {str(e)}")
77
  return "Gemini (Google)", f"Error: {str(e)}"
78
 
79
  # Main async function to query selected models
 
103
  tasks.append(get_gemini_response(messages))
104
 
105
  # Run selected API calls concurrently
106
+ try:
107
+ responses = await asyncio.gather(*tasks, return_exceptions=True)
108
+ except Exception as e:
109
+ logger.error(f"Error in asyncio.gather: {str(e)}")
110
+ return f"Error querying models: {str(e)}", history
111
 
112
  # Format responses
113
  response_text = ""
 
116
 
117
  # Update history
118
  history.append((message, response_text.strip()))
119
+ logger.info("Query processed successfully")
120
 
121
  return "", history
122
 
123
  # Gradio interface
124
+ try:
125
+ with gr.Blocks(theme=gr.themes.Soft(), title="Multi-Model AI Selector") as demo:
126
+ gr.Markdown(
127
+ """
128
+ # Multi-Model AI Chat Interface
129
+ Select one or more models to query and enter your question below. Responses will appear in the chat window.
130
+ """
131
+ )
132
+
133
+ # Model selection checkboxes
134
+ with gr.Row():
135
+ use_openai = gr.Checkbox(label="ChatGPT (OpenAI)", value=True)
136
+ use_claude = gr.Checkbox(label="Claude (Anthropic)", value=True)
137
+ use_gemini = gr.Checkbox(label="Gemini (Google)", value=True)
138
+
139
+ # Chat interface
140
+ chatbot = gr.Chatbot(label="Conversation", height=400)
141
+ msg = gr.Textbox(placeholder="Type your query...", label="Your Query")
142
+ with gr.Row():
143
+ submit = gr.Button("Submit Query")
144
+ clear = gr.Button("Clear Chat")
145
+
146
+ # Bind query function to submit button and textbox (Enter key)
147
+ submit.click(
148
+ fn=query_selected_models,
149
+ inputs=[msg, chatbot, use_openai, use_claude, use_gemini],
150
+ outputs=[msg, chatbot]
151
+ )
152
+ msg.submit(
153
+ fn=query_selected_models,
154
+ inputs=[msg, chatbot, use_openai, use_claude, use_gemini],
155
+ outputs=[msg, chatbot]
156
+ )
157
+
158
+ # Clear chat history
159
+ clear.click(
160
+ fn=lambda: (None, []),
161
+ inputs=None,
162
+ outputs=[msg, chatbot]
163
+ )
164
+ logger.info("Gradio interface initialized successfully")
165
+ except Exception as e:
166
+ logger.error(f"Error initializing Gradio interface: {str(e)}")
167
+ raise
168
+
169
+ # Do not include demo.launch() for Hugging Face Spaces
requirements.txt CHANGED
@@ -1,5 +1,6 @@
1
- gradio
2
- openai
3
- anthropic
4
- google-generativeai
5
- python-dotenv
 
 
1
+ gradio==4.42.0
2
+ openai==1.51.0
3
+ anthropic==0.37.1
4
+ google-generativeai==0.8.3
5
+ python-dotenv==1.0.1
6
+ httpx==0.27.2