elismasilva commited on
Commit
e66ed0d
Β·
1 Parent(s): df27c8c

support for roundtable component

Browse files
Files changed (4) hide show
  1. .gitignore +4 -1
  2. README.md +0 -1
  3. app.py +307 -187
  4. requirements.txt +2 -1
.gitignore CHANGED
@@ -8,4 +8,7 @@ venv/
8
  *.log
9
  .DS_Store
10
  .gradio
11
- Makefile
 
 
 
 
8
  *.log
9
  .DS_Store
10
  .gradio
11
+ .env
12
+ Makefile
13
+ app1.py
14
+ app2.py
README.md CHANGED
@@ -16,4 +16,3 @@ tags:
16
  short_description: A multi-agent chat application and Gradio MCP Server
17
  ---
18
 
19
- An example chatbot using [Gradio](https://gradio.app), [`huggingface_hub`](https://huggingface.co/docs/huggingface_hub/v0.22.2/en/index), and the [Hugging Face Inference API](https://huggingface.co/docs/api-inference/index).
 
16
  short_description: A multi-agent chat application and Gradio MCP Server
17
  ---
18
 
 
app.py CHANGED
@@ -1,15 +1,20 @@
 
1
  import hashlib
2
  import re
3
- from threading import Lock
4
  from typing import Dict, List, Optional, Tuple, Union
5
  import gradio as gr
 
 
6
  from gemini_model_api import call_gemini_api
7
 
 
8
  MODERATION_PROMPT = """
9
  You are a content safety AI. Your only job is to analyze the user's message and determine if it violates content policies.
10
  Check for hate speech, harassment, bullying, self-harm encouragement, and explicit content.
11
  Your output MUST be a single word: either `[OK]` or `[VIOLATION]`.
12
  """
 
13
  TRIAGE_PROMPT = """
14
  You are a fast, logical decision-making AI. Your only job is to analyze a conversation history and decide if the AI participant named 'Gemini' should speak.
15
  CRITERIA FOR RESPONDING (You should respond if ANY of these are true):
@@ -23,6 +28,7 @@ CRITERIA FOR IGNORING:
23
  - A question is clearly directed from one specific user to another.
24
  Your output MUST be a single word: either `[RESPOND]` or `[IGNORE]`.
25
  """
 
26
  SYSTEM_PROMPT_ACTOR = """
27
  You are a helpful and friendly AI assistant named Gemini, participating in a group chat. You will act as a human-like participant.
28
  **CONTEXTUAL AWARENESS (This is how you understand the conversation):**
@@ -34,6 +40,7 @@ You are a helpful and friendly AI assistant named Gemini, participating in a gro
34
  3. **No Meta-Commentary:** Do not make comments about your own thought process.
35
  4. **Language:** Respond in the same language as the conversation.
36
  """
 
37
  SUMMARY_PROMPT = """
38
  You are a factual reporting tool. Your only task is to read the following chat history and summarize **who said what**.
39
  ABSOLUTE RULES:
@@ -46,6 +53,7 @@ Example output format:
46
  - **Eliseu** understood the advice and said he would try it.
47
  Now, generate a factual summary for the following conversation:
48
  """
 
49
  OPINION_PROMPT = """
50
  You are a social and emotional intelligence analyst. Your only task is to read the following chat history and provide your opinion on the **dynamics and mood** of the conversation.
51
  ABSOLUTE RULES:
@@ -69,16 +77,19 @@ chat_histories = {
69
  }
70
  active_users = {channel: set() for channel in AVAILABLE_CHANNELS_LIST}
71
  USER_COLORS = [
72
- "#FF6347",
73
- "#4682B4",
74
- "#32CD32",
75
- "#FFD700",
76
- "#6A5ACD",
77
- "#FF69B4",
78
- "chocolate",
79
- "indigo",
80
  ]
81
 
 
 
 
 
 
 
 
 
 
 
82
 
83
  def get_user_color(username: str) -> str:
84
  base_username = re.sub(r"_\d+$", "", username)
@@ -88,14 +99,12 @@ def get_user_color(username: str) -> str:
88
  color_index = hash_int % len(USER_COLORS)
89
  return USER_COLORS[color_index]
90
 
91
-
92
  def clean_html_for_llm(text: str) -> str:
93
  clean_text = re.sub("<[^<]+?>", "", text)
94
  clean_text = re.sub(r"^\s*\*\*[a-zA-Z0-9_]+:\*\*\s*", "", clean_text)
95
  clean_text = clean_text.replace("**", "")
96
  return clean_text.strip()
97
 
98
-
99
  def consolidate_history_for_gemini(history: List[Dict]) -> List[Dict]:
100
  if not history:
101
  return []
@@ -131,7 +140,6 @@ def consolidate_history_for_gemini(history: List[Dict]) -> List[Dict]:
131
  block.pop("username", None)
132
  return consolidated
133
 
134
-
135
  def moderate_with_llm(message_text: str) -> Optional[str]:
136
  moderation_payload = [
137
  {"role": "system", "content": MODERATION_PROMPT},
@@ -142,16 +150,18 @@ def moderate_with_llm(message_text: str) -> Optional[str]:
142
  return "Message blocked by content safety policy."
143
  return None
144
 
145
-
146
- def login_user(channel: str, username: str) -> Tuple[str, str, List[Dict]]:
147
- """Handles login logic. Returns final username, channel, and the unformatted history."""
148
-
149
  if not username:
150
  username = "User"
151
  final_channel = channel if channel else "general"
152
- with history_lock:
 
153
  if final_channel not in active_users:
154
  active_users[final_channel] = set()
 
 
 
155
  users_in_channel = active_users.get(final_channel)
156
  final_username = username
157
  i = 2
@@ -159,187 +169,274 @@ def login_user(channel: str, username: str) -> Tuple[str, str, List[Dict]]:
159
  final_username = f"{username}_{i}"
160
  i += 1
161
  users_in_channel.add(final_username)
162
- join_message = {
163
- "role": "system_join_leave",
164
- "content": f"<em>{final_username} has joined the chat.</em>",
165
- }
166
- chat_histories.setdefault(final_channel, []).append(join_message)
 
 
 
 
 
 
 
 
 
 
 
 
167
  updated_history = chat_histories.get(final_channel)
168
- return final_username, final_channel, updated_history
169
-
 
170
 
171
- def exit_chat(channel: str, username: str) -> bool:
172
- """Handles logout logic. Returns True on completion."""
173
  with history_lock:
174
  if channel in active_users and username in active_users[channel]:
175
  active_users[channel].remove(username)
176
- exit_message = {
177
- "role": "system_join_leave",
178
- "content": f"<em>{username} has left the chat.</em>",
179
- }
 
 
 
 
 
 
 
 
 
 
 
180
  if channel in chat_histories:
181
  chat_histories[channel].append(exit_message)
182
- return True
183
-
184
-
185
- def send_message(channel: str, username: str, message: str) -> List[Dict]:
186
- """Handles new messages. Returns the full, unformatted history."""
187
-
188
- if not message or not username:
189
- with history_lock:
190
- return chat_histories.get(channel, [])
191
- moderation_result = moderate_with_llm(message)
192
- if moderation_result:
 
 
 
 
 
193
  with history_lock:
194
- chat_histories[channel].append({"role": "system_error", "content": moderation_result})
195
- return chat_histories.get(channel, [])
 
 
 
 
196
  with history_lock:
197
- chat_histories[channel].append({"role": "user", "username": username, "content": message})
198
- history_for_llm = list(chat_histories[channel])
199
- history_for_triage = [
200
- {"role": "system", "content": TRIAGE_PROMPT}
201
- ] + consolidate_history_for_gemini(history_for_llm)
 
 
 
 
 
 
202
  decision = call_gemini_api(history_for_triage, stream=False, temperature=0.0)
203
- if decision and "[RESPOND]" in decision:
204
- history_for_actor = [
205
- {"role": "system", "content": SYSTEM_PROMPT_ACTOR}
206
- ] + consolidate_history_for_gemini(history_for_llm)
207
  bot_response_text = call_gemini_api(history_for_actor, stream=False, temperature=0.7)
208
- if (
209
- bot_response_text
210
- and "Error:" not in bot_response_text
211
- and "[BLOCKED" not in bot_response_text
212
- ):
213
- cleaned_response = re.sub(r"^\s*gemini:\s*", "", bot_response_text, flags=re.IGNORECASE)
214
- with history_lock:
215
- chat_histories[channel].append(
216
- {"role": "assistant", "username": "Gemini", "content": cleaned_response}
217
- )
 
 
 
 
 
 
 
 
 
 
 
 
218
  with history_lock:
219
- return chat_histories.get(channel, [])
 
 
 
 
 
220
 
 
 
221
 
222
- def get_summary_or_opinion(channel: str, prompt_template: str) -> List[Dict]:
223
- """Handles summary and opnion chat tool. Returns the full, unformatted history."""
224
  with history_lock:
225
  history_copy = chat_histories.get(channel, []).copy()
226
- history_for_llm = [
227
- {"role": "system", "content": prompt_template}
228
- ] + consolidate_history_for_gemini(history_copy)
229
  response_text = call_gemini_api(history_for_llm, stream=False)
 
230
  is_summary = "summary" in prompt_template.lower()
231
  role = "system_summary" if is_summary else "system_opinion"
232
- content = (
233
- response_text
234
- if response_text and "Error:" not in response_text
235
- else "Could not generate the response."
236
- )
237
  with history_lock:
238
- chat_histories[channel].append({"role": role, "content": content})
239
- return chat_histories.get(channel, [])
 
240
 
241
-
242
- def format_history_for_display(history: List[Dict]) -> List[Dict]:
243
- """Applies HTML formatting to a clean history list for display."""
244
  formatted_history = []
245
- for msg in history:
246
- new_msg = msg.copy()
247
  role, content, username = (
248
  new_msg.get("role"),
249
  new_msg.get("content", ""),
250
  new_msg.get("username"),
251
  )
 
 
 
 
252
  if role == "user" and username:
253
  color = get_user_color(username)
254
- new_msg["content"] = (
255
- f"<span style='color:{color}; font-weight: bold;'>{username}:</span> {content}"
256
- )
257
  elif role == "assistant" and username:
258
- new_msg["content"] = f"**{username}:** {content}"
259
  elif role == "system_join_leave":
260
- new_msg["content"] = f"<div style='text-align: center; color: grey;'>{content}</div>"
261
- new_msg["role"] = "user"
262
  elif role == "system_error":
263
- new_msg["content"] = f"<span style='color:red;'>**System:** {content}</span>"
264
- new_msg["role"] = "user"
265
  elif role == "system_summary" or role == "system_opinion":
266
  is_summary = role == "system_summary"
267
  title = "Conversation Summary" if is_summary else "Gemini's Opinion"
268
- color = "#6c757d" if is_summary else "#007bff"
269
  response_content = content.replace("**", "")
270
  if is_summary:
271
  formatted_list = re.sub(r"-\s*", "<br>- ", response_content).strip()
272
  if formatted_list.startswith("<br>- "):
273
  formatted_list = formatted_list[5:]
274
  response_content = "- " + formatted_list
275
- new_msg["content"] = (
276
- f"<div style='background-color:#f8f9fa;...'><b>{title}:</b><br>{response_content}</div>"
 
277
  )
278
- new_msg["role"] = "user"
279
- formatted_history.append(new_msg)
280
- return formatted_history
281
 
 
 
 
 
282
 
283
- def get_and_format_history(
284
- channel: str, current_ui_history: List[Dict]
285
- ) -> Union[List[Dict], gr.skip]:
286
- """UI helper: Intelligently gets and formats history."""
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
287
  with history_lock:
288
  backend_history = chat_histories.get(channel, [])
289
- if len(backend_history) == len(current_ui_history):
290
- return gr.skip()
291
- else:
292
- return format_history_for_display(backend_history)
293
-
 
 
 
 
 
294
 
295
- def update_ui_after_login(
296
- final_username: str, final_channel: str, unformatted_history: List[Dict]
297
- ) -> Tuple:
298
  """UI-only function to switch views and update components after login."""
299
  return (
300
  gr.update(visible=False),
301
  gr.update(visible=True),
302
  final_username,
303
  final_channel,
304
- format_history_for_display(unformatted_history),
 
305
  )
306
 
307
-
308
- def update_ui_after_logout() -> Tuple:
309
  """UI-only function to switch views after logout."""
310
- return gr.update(visible=True), gr.update(visible=False)
311
-
312
-
313
- def get_summary(channel):
314
- return get_summary_or_opinion(channel, SUMMARY_PROMPT)
315
-
316
-
317
- def get_opinion(channel):
318
- return get_summary_or_opinion(channel, OPINION_PROMPT)
319
-
320
-
321
- def clear_textbox():
322
- return ""
323
 
 
 
 
 
 
 
324
 
325
  with gr.Blocks(theme=gr.themes.Ocean(), title="Multi-Agent Chat") as demo:
326
 
 
 
 
 
 
 
327
  with gr.Column(visible=True) as login_view:
328
- gr.Markdown("# πŸš€ Welcome to Multi-Agent Chat")
329
  username_input_login = gr.Textbox(label="Your Name", placeholder="e.g., Lucy")
330
- channel_choice_dropdown = gr.Dropdown(
331
- choices=AVAILABLE_CHANNELS_LIST, label="Choose a Channel", value="general"
332
- )
333
  login_button = gr.Button("Enter Chat", variant="primary")
334
 
335
  with gr.Column(visible=False) as chat_view:
336
- gr.Markdown("# πŸš€ Welcome to Multi-Agent Chat")
337
- gr.Markdown("""### πŸ’¬ Interacting with the Gemini Agent
338
- The AI agent, Gemini, is always listening to the conversation but is designed to be reserved. To get its attention, you need to address it directly.
339
- - **To ask a question or get a response:** Simply mention **"Gemini"** in your message. The agent is smart enough to understand context and even some typos!
340
- > **Example:** "That's a great point, Lucy. What do you think, **Gemini**?" πŸ€”
341
- - **For general chat:** Just talk normally with other users. Gemini will remain silent unless it feels its participation is highly valuable.
342
- """)
 
 
 
343
  with gr.Row():
344
  with gr.Column(scale=1):
345
  gr.Markdown("## βš™οΈ Session Data")
@@ -349,77 +446,100 @@ with gr.Blocks(theme=gr.themes.Ocean(), title="Multi-Agent Chat") as demo:
349
  summary_button = gr.Button("πŸ“„ Generate Chat Summary")
350
  opinion_button = gr.Button("πŸ€” Ask for LLM's Opinion")
351
  exit_button = gr.Button("πŸšͺ Exit Chat")
 
352
  with gr.Column(scale=3):
353
- chatbot = gr.Chatbot(
 
 
 
 
 
 
 
354
  label="Conversation",
355
  height=600,
356
- type="messages",
 
357
  group_consecutive_messages=False,
 
358
  )
 
359
  with gr.Row():
360
- msg_input = gr.Textbox(
361
- show_label=False, placeholder="Type your message...", scale=5
362
- )
363
  send_button = gr.Button("Send", variant="primary", scale=1)
364
 
365
  chat_timer = gr.Timer(5)
366
- chat_timer.tick(fn=get_and_format_history, inputs=[channel_display, chatbot], outputs=chatbot)
367
- unformatted_history_state = gr.State()
368
- dumb_state = gr.State(value=None)
 
 
 
369
 
370
- login_event = login_button.click(
 
371
  fn=login_user,
372
  inputs=[channel_choice_dropdown, username_input_login],
373
- outputs=[
374
- username_display,
375
- channel_display,
376
- unformatted_history_state,
377
- ],
378
- api_name="login_user",
379
- )
380
- login_event.then(
381
  fn=update_ui_after_login,
382
- inputs=[username_display, channel_display, unformatted_history_state],
383
- outputs=[login_view, chat_view, username_display, channel_display, chatbot],
384
- )
385
-
386
- exit_event = exit_button.click(
387
  fn=exit_chat,
388
  inputs=[channel_display, username_display],
389
- outputs=dumb_state,
390
- api_name="exit_chat",
391
- )
392
- exit_event.then(fn=update_ui_after_logout, inputs=None, outputs=[login_view, chat_view])
393
-
394
- summary_event = summary_button.click(
395
- fn=get_summary, inputs=[channel_display], outputs=dumb_state, api_name="get_summary"
396
- ).then(fn=format_history_for_display, inputs=chatbot, outputs=chatbot)
397
-
398
- opinion_event = opinion_button.click(
399
- fn=get_opinion, inputs=[channel_display], outputs=dumb_state, api_name="get_opinion"
400
- ).then(fn=format_history_for_display, inputs=chatbot, outputs=chatbot)
401
-
402
- send_event = (
403
- send_button.click(
404
- fn=send_message,
405
- inputs=[channel_display, username_display, msg_input],
406
- outputs=dumb_state,
407
- api_name="send_message",
408
- )
409
- .then(fn=format_history_for_display, inputs=chatbot, outputs=chatbot)
410
- .then(fn=clear_textbox, inputs=None, outputs=msg_input)
411
  )
412
 
413
- submit_event = (
414
- msg_input.submit(
415
- fn=send_message,
416
- inputs=[channel_display, username_display, msg_input],
417
- outputs=dumb_state,
418
- api_name="send_message",
419
- )
420
- .then(fn=format_history_for_display, inputs=chatbot, outputs=chatbot)
421
- .then(fn=clear_textbox, inputs=None, outputs=msg_input)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
422
  )
423
 
424
  if __name__ == "__main__":
425
- demo.launch(mcp_server=True)
 
1
+ import time
2
  import hashlib
3
  import re
4
+ from threading import Lock, Thread
5
  from typing import Dict, List, Optional, Tuple, Union
6
  import gradio as gr
7
+ from gradio_consilium_roundtable import consilium_roundtable
8
+ import json
9
  from gemini_model_api import call_gemini_api
10
 
11
+
12
  MODERATION_PROMPT = """
13
  You are a content safety AI. Your only job is to analyze the user's message and determine if it violates content policies.
14
  Check for hate speech, harassment, bullying, self-harm encouragement, and explicit content.
15
  Your output MUST be a single word: either `[OK]` or `[VIOLATION]`.
16
  """
17
+
18
  TRIAGE_PROMPT = """
19
  You are a fast, logical decision-making AI. Your only job is to analyze a conversation history and decide if the AI participant named 'Gemini' should speak.
20
  CRITERIA FOR RESPONDING (You should respond if ANY of these are true):
 
28
  - A question is clearly directed from one specific user to another.
29
  Your output MUST be a single word: either `[RESPOND]` or `[IGNORE]`.
30
  """
31
+
32
  SYSTEM_PROMPT_ACTOR = """
33
  You are a helpful and friendly AI assistant named Gemini, participating in a group chat. You will act as a human-like participant.
34
  **CONTEXTUAL AWARENESS (This is how you understand the conversation):**
 
40
  3. **No Meta-Commentary:** Do not make comments about your own thought process.
41
  4. **Language:** Respond in the same language as the conversation.
42
  """
43
+
44
  SUMMARY_PROMPT = """
45
  You are a factual reporting tool. Your only task is to read the following chat history and summarize **who said what**.
46
  ABSOLUTE RULES:
 
53
  - **Eliseu** understood the advice and said he would try it.
54
  Now, generate a factual summary for the following conversation:
55
  """
56
+
57
  OPINION_PROMPT = """
58
  You are a social and emotional intelligence analyst. Your only task is to read the following chat history and provide your opinion on the **dynamics and mood** of the conversation.
59
  ABSOLUTE RULES:
 
77
  }
78
  active_users = {channel: set() for channel in AVAILABLE_CHANNELS_LIST}
79
  USER_COLORS = [
80
+ "#FF6347", "#4682B4", "#32CD32", "#FFD700", "#6A5ACD", "#FF69B4", "chocolate", "indigo",
 
 
 
 
 
 
 
81
  ]
82
 
83
+ # Roundtable state management
84
+ roundtable_states = {
85
+ channel: {
86
+ "participants": ["Gemini"],
87
+ "messages": [],
88
+ "currentSpeaker": None,
89
+ "showBubbles": [],
90
+ "avatarImages": {}
91
+ } for channel in AVAILABLE_CHANNELS_LIST
92
+ }
93
 
94
  def get_user_color(username: str) -> str:
95
  base_username = re.sub(r"_\d+$", "", username)
 
99
  color_index = hash_int % len(USER_COLORS)
100
  return USER_COLORS[color_index]
101
 
 
102
  def clean_html_for_llm(text: str) -> str:
103
  clean_text = re.sub("<[^<]+?>", "", text)
104
  clean_text = re.sub(r"^\s*\*\*[a-zA-Z0-9_]+:\*\*\s*", "", clean_text)
105
  clean_text = clean_text.replace("**", "")
106
  return clean_text.strip()
107
 
 
108
  def consolidate_history_for_gemini(history: List[Dict]) -> List[Dict]:
109
  if not history:
110
  return []
 
140
  block.pop("username", None)
141
  return consolidated
142
 
 
143
  def moderate_with_llm(message_text: str) -> Optional[str]:
144
  moderation_payload = [
145
  {"role": "system", "content": MODERATION_PROMPT},
 
150
  return "Message blocked by content safety policy."
151
  return None
152
 
153
+ def login_user(channel: str, username: str) -> Tuple[str, str, List[Dict], str]:
154
+ """Handles login logic. Returns final username, channel, unformatted history, and roundtable state."""
 
 
155
  if not username:
156
  username = "User"
157
  final_channel = channel if channel else "general"
158
+
159
+ with history_lock:
160
  if final_channel not in active_users:
161
  active_users[final_channel] = set()
162
+ chat_histories[final_channel] = [{"role": "assistant", "content": f"Welcome to the #{final_channel} channel!"}]
163
+ roundtable_states[final_channel] = {"participants": ["Gemini"], "messages": [], "currentSpeaker": None, "thinking": [], "showBubbles": [], "avatarImages": {}}
164
+
165
  users_in_channel = active_users.get(final_channel)
166
  final_username = username
167
  i = 2
 
169
  final_username = f"{username}_{i}"
170
  i += 1
171
  users_in_channel.add(final_username)
172
+
173
+
174
+ state = roundtable_states[final_channel]
175
+
176
+ if "Gemini" not in state["participants"]:
177
+ state["participants"].insert(0, "Gemini")
178
+
179
+ if final_username not in state["participants"]:
180
+ state["participants"].append(final_username)
181
+
182
+ join_message_content = f"<em>{final_username} has joined the chat.</em>"
183
+ join_message = {"role": "system_join_leave", "content": join_message_content}
184
+ chat_histories[final_channel].append(join_message)
185
+
186
+ join_roundtable_msg = {"speaker": "System", "text": f"{final_username} has joined the chat."}
187
+ state["messages"].append(join_roundtable_msg)
188
+
189
  updated_history = chat_histories.get(final_channel)
190
+ roundtable_json = json.dumps(state)
191
+
192
+ return final_username, final_channel, updated_history, roundtable_json
193
 
194
+ def exit_chat(channel: str, username: str) -> Tuple[bool, str]:
195
+ """Handles logout logic. Returns completion status and updated roundtable state."""
196
  with history_lock:
197
  if channel in active_users and username in active_users[channel]:
198
  active_users[channel].remove(username)
199
+
200
+ if channel in roundtable_states:
201
+ state = roundtable_states[channel]
202
+
203
+ if username in state.get("participants", []):
204
+ state["participants"].remove(username)
205
+
206
+ thinking_list = state.get("thinking", [])
207
+ if username in thinking_list:
208
+ thinking_list.remove(username)
209
+
210
+ if state.get("currentSpeaker") == username:
211
+ state["currentSpeaker"] = None
212
+
213
+ exit_message = {"role": "system_join_leave", "content": f"<em>{username} has left the chat.</em>"}
214
  if channel in chat_histories:
215
  chat_histories[channel].append(exit_message)
216
+
217
+ if channel in roundtable_states:
218
+ exit_roundtable_msg = {"speaker": "System", "text": f"{username} has left the chat."}
219
+ roundtable_states[channel]["messages"].append(exit_roundtable_msg)
220
+ roundtable_json = json.dumps(roundtable_states[channel])
221
+ else:
222
+ roundtable_json = "{}"
223
+
224
+ return True, roundtable_json
225
+
226
+ def send_message(channel: str, username: str, message: str):
227
+ """
228
+ Processes the user message and, if necessary, the Gemini response synchronously.
229
+ Returns the final, complete state to the UI in a single update.
230
+ """
231
+ if not message or not username:
232
  with history_lock:
233
+ current_history = chat_histories.get(channel, [])
234
+ roundtable_json = json.dumps(roundtable_states.get(channel, {}))
235
+ chatbot_formatted = format_history_for_chatbot_display(current_history)
236
+ return current_history, roundtable_json, chatbot_formatted, roundtable_json, ""
237
+
238
+ user_msg = {"role": "user", "username": username, "content": message}
239
  with history_lock:
240
+ chat_histories[channel].append(user_msg)
241
+ state = roundtable_states[channel]
242
+ state["messages"].append({"speaker": username, "text": clean_html_for_llm(message)})
243
+ if username not in state["showBubbles"]:
244
+ state["showBubbles"].append(username)
245
+ if len(state["showBubbles"]) > 4:
246
+ state["showBubbles"] = state["showBubbles"][-4:]
247
+ state["currentSpeaker"] = username
248
+
249
+ history_for_llm = list(chat_histories[channel])
250
+ history_for_triage = [{"role": "system", "content": TRIAGE_PROMPT}] + consolidate_history_for_gemini(history_for_llm)
251
  decision = call_gemini_api(history_for_triage, stream=False, temperature=0.0)
252
+ should_gemini_respond = decision and "[RESPOND]" in decision
253
+
254
+ if should_gemini_respond:
255
+ history_for_actor = [{"role": "system", "content": SYSTEM_PROMPT_ACTOR}] + consolidate_history_for_gemini(history_for_llm)
256
  bot_response_text = call_gemini_api(history_for_actor, stream=False, temperature=0.7)
257
+
258
+ with history_lock:
259
+ state = roundtable_states[channel]
260
+ if bot_response_text and "Error:" not in bot_response_text and "[BLOCKED" not in bot_response_text:
261
+ cleaned_response = re.sub(r"^\s*gemini:\s*", "", bot_response_text, flags=re.IGNORECASE)
262
+ gemini_msg = {"role": "assistant", "username": "Gemini", "content": cleaned_response}
263
+
264
+ chat_histories[channel].append(gemini_msg)
265
+ state["messages"].append({"speaker": "Gemini", "text": clean_html_for_llm(cleaned_response)})
266
+
267
+ if "Gemini" not in state["showBubbles"]:
268
+ state["showBubbles"].append("Gemini")
269
+ if len(state["showBubbles"]) > 4:
270
+ state["showBubbles"] = state["showBubbles"][-4:]
271
+ state["currentSpeaker"] = "Gemini"
272
+ else:
273
+ state["currentSpeaker"] = None
274
+ else:
275
+ with history_lock:
276
+ state = roundtable_states[channel]
277
+ state["currentSpeaker"] = None
278
+
279
  with history_lock:
280
+ final_history = chat_histories.get(channel, [])
281
+ final_roundtable_json = json.dumps(roundtable_states[channel])
282
+
283
+ final_chatbot_formatted = format_history_for_chatbot_display(final_history)
284
+
285
+ return final_history, final_roundtable_json, final_chatbot_formatted, final_roundtable_json, ""
286
 
287
+ def get_summary_or_opinion(channel: str, prompt_template: str) -> Tuple[List[Dict], str]:
288
+ """Handles summary/opinion. Returns full unformatted history and updated roundtable state."""
289
 
 
 
290
  with history_lock:
291
  history_copy = chat_histories.get(channel, []).copy()
292
+
293
+ history_for_llm = [{"role": "system", "content": prompt_template}] + consolidate_history_for_gemini(history_copy)
 
294
  response_text = call_gemini_api(history_for_llm, stream=False)
295
+
296
  is_summary = "summary" in prompt_template.lower()
297
  role = "system_summary" if is_summary else "system_opinion"
298
+ content = response_text if response_text and "Error:" not in response_text else "Could not generate the response."
299
+
300
+ system_msg = {"role": role, "content": content}
 
 
301
  with history_lock:
302
+ chat_histories[channel].append(system_msg)
303
+ roundtable_json = json.dumps(roundtable_states[channel])
304
+ return chat_histories.get(channel, []), roundtable_json
305
 
306
+ def format_history_for_chatbot_display(history: List[Dict]) -> List[Dict]:
307
+ """Applies HTML formatting for gr.Chatbot display using the 'messages' format."""
 
308
  formatted_history = []
309
+ for msg in history:
310
+ new_msg = msg.copy()
311
  role, content, username = (
312
  new_msg.get("role"),
313
  new_msg.get("content", ""),
314
  new_msg.get("username"),
315
  )
316
+
317
+ display_role = "assistant" if role == "assistant" else "user"
318
+ display_content = ""
319
+
320
  if role == "user" and username:
321
  color = get_user_color(username)
322
+ display_content = f"<span style='color:{color}; font-weight: bold;'>{username}:</span> {content}"
 
 
323
  elif role == "assistant" and username:
324
+ display_content = f"**{username}:** {content}"
325
  elif role == "system_join_leave":
326
+ display_content = f"<div style='text-align: center; color: grey;'>{content}</div>"
 
327
  elif role == "system_error":
328
+ display_content = f"<span style='color:red;'>**System:** {content}</span>"
 
329
  elif role == "system_summary" or role == "system_opinion":
330
  is_summary = role == "system_summary"
331
  title = "Conversation Summary" if is_summary else "Gemini's Opinion"
 
332
  response_content = content.replace("**", "")
333
  if is_summary:
334
  formatted_list = re.sub(r"-\s*", "<br>- ", response_content).strip()
335
  if formatted_list.startswith("<br>- "):
336
  formatted_list = formatted_list[5:]
337
  response_content = "- " + formatted_list
338
+ display_content = (
339
+ f"<div style='background-color:#f8f9fa; border-left: 5px solid #ccc; padding: 10px; margin: 10px 0; border-radius: 5px;'>"
340
+ f"<b>{title}:</b><br>{response_content}</div>"
341
  )
342
+ else:
343
+ display_content = content
 
344
 
345
+ if display_content:
346
+ formatted_history.append({"role": display_role, "content": display_content})
347
+
348
+ return formatted_history
349
 
350
+ def get_summary(channel: str) -> Tuple[List[Dict], str]:
351
+ """
352
+ Returns the conversation summary data.
353
+ """
354
+ unformatted_history, roundtable_json = get_summary_or_opinion(channel, SUMMARY_PROMPT)
355
+ api_data = {"history": unformatted_history, "roundtable": roundtable_json}
356
+ return unformatted_history, roundtable_json, api_data
357
+
358
+ def get_opinion(channel: str) -> Tuple[List[Dict], str]:
359
+ """
360
+ Returns the opinion data generated by LLM.
361
+ """
362
+ unformatted_history, roundtable_json = get_summary_or_opinion(channel, OPINION_PROMPT)
363
+ api_data = {"history": unformatted_history, "roundtable": roundtable_json}
364
+ return unformatted_history, roundtable_json, api_data
365
+
366
+ def format_all_views_from_state(unformatted_history, roundtable_json):
367
+ """
368
+ UI-only function to formats histories for visual components.
369
+ """
370
+ chatbot_formatted = format_history_for_chatbot_display(unformatted_history)
371
+ return chatbot_formatted, roundtable_json
372
+
373
+ def get_live_updates(channel: str):
374
+ """
375
+ Fetches and formats the latest data from the backend for both views.
376
+ """
377
+ if not channel:
378
+ return gr.skip(), gr.skip()
379
+
380
  with history_lock:
381
  backend_history = chat_histories.get(channel, [])
382
+ roundtable_data = roundtable_states.get(channel, {})
383
+ roundtable_data_with_timestamp = {
384
+ **roundtable_data,
385
+ "update_timestamp": str(time.time())
386
+ }
387
+ roundtable_json = json.dumps(roundtable_data_with_timestamp)
388
+
389
+ chatbot_formatted = format_history_for_chatbot_display(backend_history)
390
+
391
+ return chatbot_formatted, roundtable_json
392
 
393
+ def update_ui_after_login(final_username: str, final_channel: str, unformatted_history: List[Dict], roundtable_json: str):
 
 
394
  """UI-only function to switch views and update components after login."""
395
  return (
396
  gr.update(visible=False),
397
  gr.update(visible=True),
398
  final_username,
399
  final_channel,
400
+ format_history_for_chatbot_display(unformatted_history),
401
+ roundtable_json,
402
  )
403
 
404
+ def update_ui_after_logout():
 
405
  """UI-only function to switch views after logout."""
406
+ return gr.update(visible=True), gr.update(visible=False), [], "{}"
 
 
 
 
 
 
 
 
 
 
 
 
407
 
408
+ def toggle_chat_view(view_choice: str, unformatted_history: List[Dict], roundtable_json: str):
409
+ """Hides/shows the correct chat component and populates it with data."""
410
+ if view_choice == "Roundtable":
411
+ return gr.update(visible=True, value=roundtable_json), gr.update(visible=False)
412
+ else:
413
+ return gr.update(visible=False), gr.update(visible=True, value=format_history_for_chatbot_display(unformatted_history))
414
 
415
# NOTE(review): the lines below are a diff-viewer rendering of app.py's Gradio UI
# layer; the interleaved bare numbers are the viewer's line counters, not code.
 with gr.Blocks(theme=gr.themes.Ocean(), title="Multi-Agent Chat") as demo:
416
 
417
+ # --- State Management ---
418
# Raw (unformatted) per-session history; formatted on demand for the chatbot view.
+ unformatted_history_state = gr.State([])
419
# Roundtable component state, kept as a JSON string ("{}" when empty).
+ roundtable_state_json = gr.State("{}")
420
# Sink for exit_chat's first output, which the UI does not otherwise consume.
+ dumb_state = gr.State(None)
421
# Hidden JSON component used only to expose tool results over the MCP/API surface.
+ mcp_api_return = gr.JSON(visible=False)
422
+
423
 with gr.Column(visible=True) as login_view:
424
+ gr.Markdown("# πŸš€ Welcome to Multi-Agent Chat")
425
 username_input_login = gr.Textbox(label="Your Name", placeholder="e.g., Lucy")
426
+ channel_choice_dropdown = gr.Dropdown(choices=AVAILABLE_CHANNELS_LIST, label="Choose a Channel", value="general")


427
 login_button = gr.Button("Enter Chat", variant="primary")
428

429
 with gr.Column(visible=False) as chat_view:
430
+ gr.Markdown("# πŸš€ Multi-Agent Chat")
431
+ gr.Markdown("""
432
+ ### πŸ’¬ Interacting with the Gemini Agent
433
+ Our AI agent, Gemini, plays two key roles in the chat: a **helpful participant** and a **silent moderator**.
434
+ - **To ask a question or get a response:** Simply mention **"Gemini"** in your message. The agent is smart enough to understand context and even some typos!
435
+ > **Example:** "That's a great point, Alice. What do you think, **Gemini**?" πŸ€”
436
+ - **As a Moderator πŸ›‘οΈ:** Gemini is always monitoring the conversation in the background to ensure a safe and respectful environment. It will automatically detect and block messages containing hate speech, harassment, or other policy violations.
437
+ - **For general chat:** Just talk normally with other users. Gemini will remain silent unless its participation is directly requested or highly valuable.
438
+ """)
439
+
440
 with gr.Row():
441
 with gr.Column(scale=1):
442
 gr.Markdown("## βš™οΈ Session Data")
# NOTE(review): original lines 443-445 are omitted from this diff hunk; the
# `username_display` and `channel_display` components referenced in the event
# wiring below are presumably defined there -- confirm in the full file.

446
 summary_button = gr.Button("πŸ“„ Generate Chat Summary")
447
 opinion_button = gr.Button("πŸ€” Ask for LLM's Opinion")
448
 exit_button = gr.Button("πŸšͺ Exit Chat")
449
+
450
 with gr.Column(scale=3):
451
# Radio toggles between the two mutually-exclusive chat renderings below.
+ view_switch = gr.Radio(["Chatbot", "Roundtable"], label="Chat View", value="Chatbot")
452
+
453
+ roundtable_display = consilium_roundtable(
454
+ label="🎭 Live Discussion Roundtable",
455
+ visible=True,
456
+ )
457
+
458
+ chatbot_display = gr.Chatbot(
459
 label="Conversation",
460
 height=600,
461
# Hidden by default; toggle_chat_view shows it when "Chatbot" is selected.
+ visible=False,
462
# NOTE(review): `bubble_full_width` is deprecated in recent Gradio releases --
# confirm it is still accepted by the version this Space pins.
+ bubble_full_width=False,
463
 group_consecutive_messages=False,
464
+ type="messages"
465
 )
466
+
467
 with gr.Row():
468
+ msg_input = gr.Textbox(show_label=False, placeholder="Type your message...", scale=5)


469
 send_button = gr.Button("Send", variant="primary", scale=1)
470

471
# Poll for new channel messages every 5 seconds and refresh both chat views.
 chat_timer = gr.Timer(5)
472
+ chat_timer.tick(
473
+ fn=get_live_updates,
474
+ inputs=[channel_display],
475
+ outputs=[chatbot_display, roundtable_display],
476
+ show_progress=False
477
+ )
478

479
# Shared kwargs dict reused for both the post-login and view-switch events.
# NOTE(review): "toogle" is a typo for "toggle"; renaming requires touching all
# use sites (here, the login chain below, and view_switch.change).
+ event_toogle_chat_view = {"fn":toggle_chat_view,"inputs":[view_switch, unformatted_history_state, roundtable_state_json],"outputs":[roundtable_display, chatbot_display]}
480
# Login chain: login_user -> update_ui_after_login -> toggle_chat_view.
+ login_button.click(
481
 fn=login_user,
482
 inputs=[channel_choice_dropdown, username_input_login],
483
+ outputs=[username_display, channel_display, unformatted_history_state, roundtable_state_json],
484
+ ).then(




485
 fn=update_ui_after_login,
486
+ inputs=[username_display, channel_display, unformatted_history_state, roundtable_state_json],
487
+ outputs=[login_view, chat_view, username_display, channel_display, chatbot_display, roundtable_display],
488
+ ).then(**event_toogle_chat_view)
489
+
490
# Logout chain: exit_chat -> update_ui_after_logout (first output discarded
# into dumb_state).
+ exit_button.click(
491
 fn=exit_chat,
492
 inputs=[channel_display, username_display],
493
+ outputs=[dumb_state, roundtable_state_json],
494
+ ).then(
495
+ fn=update_ui_after_logout,
496
+ inputs=None,
497
+ outputs=[login_view, chat_view, unformatted_history_state, roundtable_state_json]















498
 )
499

500
+ view_switch.change(**event_toogle_chat_view)
501
+
502
# Sending a message via button or Enter runs the same handler; the distinct
# api_name values expose both paths on the API/MCP surface.
+ send_button.click(
503
+ fn=send_message,
504
+ inputs=[channel_display, username_display, msg_input],
505
+ outputs=[unformatted_history_state, roundtable_state_json, chatbot_display, roundtable_display, msg_input],
506
+ api_name="send_message"
507
+ )
508
+ msg_input.submit(
509
+ fn=send_message,
510
+ inputs=[channel_display, username_display, msg_input],
511
+ outputs=[unformatted_history_state, roundtable_state_json, chatbot_display, roundtable_display, msg_input],
512
+ api_name="send_message_submit"
513
+ )
514
+
515
+ def run_tool_and_update(tool_function, channel):
516
+ history, roundtable_json = tool_function(channel)
517
+ chatbot_formatted = format_history_for_chatbot_display(history)
518
+ return history, roundtable_json, chatbot_formatted, roundtable_json
519
+
520
# Summary tool: get_summary updates the states and the hidden MCP JSON output,
# then format_all_views_from_state re-renders both chat components from state.
+ summary_button.click(
521
+ fn=get_summary,
522
+ inputs=[channel_display],
523
+ outputs=[unformatted_history_state, roundtable_state_json, mcp_api_return],
524
+ show_progress=False,
525
+ api_name="get_summary"
526
+ ).then(
527
+ fn=format_all_views_from_state,
528
+ inputs=[unformatted_history_state, roundtable_state_json],
529
+ outputs=[chatbot_display, roundtable_display]
530
+ )
531
+
532
# Opinion tool: identical chain shape to the summary wiring above, with
# get_opinion as the handler and its own api_name.
+ opinion_button.click(
533
+ fn=get_opinion,
534
+ inputs=[channel_display],
535
+ outputs=[unformatted_history_state, roundtable_state_json, mcp_api_return],
536
+ show_progress=False,
537
+ api_name="get_opinion"
538
+ ).then(
539
+ fn=format_all_views_from_state,
540
+ inputs=[unformatted_history_state, roundtable_state_json],
541
+ outputs=[chatbot_display, roundtable_display]
542
 )
543

544
 if __name__ == "__main__":
545
# mcp_server=True additionally serves the api_name'd events as MCP tools.
+ demo.launch(mcp_server=True)
requirements.txt CHANGED
@@ -1 +1,2 @@
1
- huggingface_hub==0.25.2
 
 
1
+ huggingface_hub==0.25.2
2
+ gradio_consilium_roundtable