# elismasilva's picture
# adjust Readme.md
# 6ac9b20
import time
import hashlib
import re
from threading import Lock, Thread
from typing import Dict, List, Optional, Tuple, Union
import gradio as gr
from gradio_consilium_roundtable import consilium_roundtable
import json
from gemini_model_api import call_gemini_api
# ---------------------------------------------------------------------------
# System prompts. Each constant is used as the "system" message of a dedicated
# Gemini call (see moderate_with_llm, send_message, get_summary_or_opinion).
# ---------------------------------------------------------------------------

# Safety classifier; moderate_with_llm checks for the substring "[VIOLATION]"
# in the model output.
MODERATION_PROMPT = """
You are a content safety AI. Your only job is to analyze the user's message and determine if it violates content policies.
Check for hate speech, harassment, bullying, self-harm encouragement, and explicit content.
Your output MUST be a single word: either `[OK]` or `[VIOLATION]`.
"""
# Cheap "should Gemini speak?" gate; send_message checks for "[RESPOND]"
# in the model output before running the more expensive actor prompt.
TRIAGE_PROMPT = """
You are a fast, logical decision-making AI. Your only job is to analyze a conversation history and decide if the AI participant named 'Gemini' should speak.
CRITERIA FOR RESPONDING (You should respond if ANY of these are true):
- **Direct Mention:** Gemini is addressed directly by name, even with typos (e.g., "Gemini", "Gmni").
- **Implicit Reference:** Gemini is clearly referred to implicitly as part of a group (e.g., "what about you guys?").
- **Question to Group:** A user asks a direct question to the group that is not directed at a specific person.
- **Reply to Your Question:** A user's message is a direct and logical answer to a question YOU (Gemini) asked in the previous turn or if it was a response to a topic you suggested and you understood it was directed at you.
- **Request for Help:** A user expresses a clear need for help or information.
CRITERIA FOR IGNORING:
- The conversation is a simple social exchange between other users.
- A question is clearly directed from one specific user to another.
Your output MUST be a single word: either `[RESPOND]` or `[IGNORE]`.
"""
# Persona prompt used when Gemini actually replies in the chat; send_message
# additionally strips any "Gemini:" prefix the model emits despite rule 2.
SYSTEM_PROMPT_ACTOR = """
You are a helpful and friendly AI assistant named Gemini, participating in a group chat. You will act as a human-like participant.
**CONTEXTUAL AWARENESS (This is how you understand the conversation):**
- When you see the name "Gemini" in the text, it is referring to **YOU**.
- Your task is to formulate a response based on the last few messages, where you were mentioned or if it was a response to a topic you suggested and you understood it was directed at you.
**RESPONSE RULES (This is how you MUST formulate your answer):**
1. **Grounding:** You are a language model. You do not have a physical body, personal experiences, or feelings. **Do not invent stories about yourself** (like falling down stairs or having stomach aches). If asked about a personal experience, politely clarify that as an AI, you don't have them, but you can help with information.
2. **No Prefix:** **ABSOLUTELY DO NOT** start your response with your name (e.g., "Gemini:"). This is a strict rule.
3. **No Meta-Commentary:** Do not make comments about your own thought process.
4. **Language:** Respond in the same language as the conversation.
"""
# "Generate Chat Summary" tool prompt. NOTE: get_summary_or_opinion detects
# which tool was requested by looking for the word "summary" in the prompt
# text itself, so this template must keep containing that word.
SUMMARY_PROMPT = """
You are a factual reporting tool. Your only task is to read the following chat history and summarize **who said what**.
ABSOLUTE RULES:
1. Your response **MUST** be in the primary language used in the conversation.
2. **DO NOT** provide any opinion, analysis, or interpretation.
3. Your output **MUST** be a list of key points, attributing each point to the user who made it.
Example output format:
- **Alice** asked for a way to cook eggs without the oil splashing.
- **Gemini** explained that this happens due to water in the pan and suggested drying it first.
- **Eliseu** understood the advice and said he would try it.
Now, generate a factual summary for the following conversation:
"""
# "Ask for LLM's Opinion" tool prompt (must NOT contain the word "summary",
# see the note above).
OPINION_PROMPT = """
You are a social and emotional intelligence analyst. Your only task is to read the following chat history and provide your opinion on the **dynamics and mood** of the conversation.
ABSOLUTE RULES:
1. Your response **MUST** be in the primary language used in the conversation.
2. **DO NOT** summarize who said what. Focus only on the underlying feeling and interaction style.
3. **DO NOT** be academic or technical. Speak like an insightful person.
4. Your output **MUST** be a short, reflective paragraph.
Focus on answering questions like:
- What was the overall tone? (e.g., helpful, tense, humorous)
- How were the participants interacting? (e.g., collaboratively, arguing, supporting each other)
- What is your general emotional takeaway from the exchange?
Now, provide your opinion on the following conversation:
"""
# --- State and Helper functions ---
# Guards every read/write of the mutable per-channel state dicts below.
history_lock = Lock()
# Channels available at startup; login_user can create more at runtime.
AVAILABLE_CHANNELS_LIST = ["general", "dev", "agents", "mcp"]
# Per-channel chat log in gr.Chatbot "messages" style, seeded with a welcome
# message from the assistant.
chat_histories = {
    channel: [{"role": "assistant", "content": f"Welcome to the #{channel} channel!"}]
    for channel in AVAILABLE_CHANNELS_LIST
}
# Per-channel set of currently logged-in (deduplicated) usernames.
active_users = {channel: set() for channel in AVAILABLE_CHANNELS_LIST}
# Palette used by get_user_color to give each user a stable display color.
USER_COLORS = [
    "#FF6347", "#4682B4", "#32CD32", "#FFD700", "#6A5ACD", "#FF69B4", "chocolate", "indigo",
]
# Roundtable state management.
# Fix: include "thinking": [] so the initial state has the same shape as the
# state login_user builds for channels created at runtime; exit_chat,
# send_message and get_summary_or_opinion all probe this key (via .get / "in").
roundtable_states = {
    channel: {
        "participants": ["Gemini"],
        "messages": [],
        "currentSpeaker": None,
        "thinking": [],
        "showBubbles": [],
        "avatarImages": {}
    } for channel in AVAILABLE_CHANNELS_LIST
}
def get_user_color(username: str) -> str:
    """Deterministically map a username to a color from USER_COLORS.

    A trailing numeric suffix such as "_2" (added by login_user when a name
    is taken) is stripped first, so "Alice" and "Alice_2" share a color.
    """
    canonical = re.sub(r"_\d+$", "", username)
    digest = hashlib.sha256(canonical.encode()).hexdigest()
    return USER_COLORS[int(digest, 16) % len(USER_COLORS)]
def clean_html_for_llm(text: str) -> str:
    """Normalize display text before sending it to the LLM.

    Removes HTML tags, a leading bold "**name:**" speaker prefix, and any
    remaining "**" bold markers, then trims surrounding whitespace.
    """
    without_tags = re.sub("<[^<]+?>", "", text)
    without_prefix = re.sub(r"^\s*\*\*[a-zA-Z0-9_]+:\*\*\s*", "", without_tags)
    return without_prefix.replace("**", "").strip()
def consolidate_history_for_gemini(history: List[Dict]) -> List[Dict]:
    """Prepare a raw channel history for the Gemini API.

    Keeps only "user"/"assistant" entries (mapping "assistant" to "model"),
    prefixes each message with its username, strips HTML/markdown via
    clean_html_for_llm, and merges consecutive user messages from the same
    username into a single block separated by newlines.
    """
    if not history:
        return []
    prepared = []
    for entry in history:
        entry_role = entry.get("role")
        if entry_role not in ("user", "assistant"):
            continue
        text = entry.get("content", "")
        if entry.get("username"):
            text = f"{entry.get('username', '')}: {text}"
        prepared.append(
            {
                "role": "model" if entry_role == "assistant" else "user",
                "username": entry.get("username"),
                "content": clean_html_for_llm(text),
            }
        )
    if not prepared:
        return []
    merged = [prepared[0]]
    for entry in prepared[1:]:
        last = merged[-1]
        continues_same_user = (
            entry["role"] == "user"
            and last["role"] == "user"
            and entry.get("username") == last.get("username")
        )
        if continues_same_user:
            last["content"] += "\n" + entry["content"]
        else:
            merged.append(entry)
    for block in merged:
        # The username was only needed for merging; the API payload drops it.
        block.pop("username", None)
    return merged
def moderate_with_llm(message_text: str) -> Optional[str]:
    """Run a message through the LLM content-safety gate.

    Returns a user-facing block reason when the classifier flags a
    violation, otherwise None (message allowed).
    """
    payload = [
        {"role": "system", "content": MODERATION_PROMPT},
        {"role": "user", "content": message_text},
    ]
    # temperature=0.0 keeps the single-word classification deterministic.
    verdict = call_gemini_api(payload, stream=False, temperature=0.0)
    if verdict and "[VIOLATION]" in verdict:
        return "Message blocked by content safety policy."
    return None
def login_user(channel: str, username: str) -> Tuple[str, str, List[Dict], str]:
    """Handles login logic. Returns final username, channel, unformatted history, and roundtable state.

    Falls back to "User"/"general" for empty inputs, creates all per-channel
    state on first use, deduplicates the username with a numeric suffix, and
    records a join notice in both the chat history and the roundtable.
    """
    username = username or "User"
    final_channel = channel or "general"
    with history_lock:
        if final_channel not in active_users:
            # First visit to this channel: initialize every state structure.
            active_users[final_channel] = set()
            chat_histories[final_channel] = [
                {"role": "assistant", "content": f"Welcome to the #{final_channel} channel!"}
            ]
            roundtable_states[final_channel] = {
                "participants": ["Gemini"],
                "messages": [],
                "currentSpeaker": None,
                "thinking": [],
                "showBubbles": [],
                "avatarImages": {},
            }
        taken_names = active_users.get(final_channel)
        final_username = username
        suffix = 2
        while final_username in taken_names:
            final_username = f"{username}_{suffix}"
            suffix += 1
        taken_names.add(final_username)
        state = roundtable_states[final_channel]
        # Gemini is always the first participant; the user is appended once.
        if "Gemini" not in state["participants"]:
            state["participants"].insert(0, "Gemini")
        if final_username not in state["participants"]:
            state["participants"].append(final_username)
        chat_histories[final_channel].append(
            {"role": "system_join_leave", "content": f"<em>{final_username} has joined the chat.</em>"}
        )
        state["messages"].append(
            {"speaker": "System", "text": f"{final_username} has joined the chat."}
        )
        updated_history = chat_histories.get(final_channel)
        roundtable_json = json.dumps(state)
    return final_username, final_channel, updated_history, roundtable_json
def exit_chat(channel: str, username: str) -> Tuple[bool, str]:
    """Handles logout logic. Returns completion status and updated roundtable state.

    Removes the user from the channel's active set and roundtable state,
    records a leave notice, and returns (True, roundtable JSON) — the JSON
    is "{}" when the channel has no roundtable state.
    """
    with history_lock:
        channel_users = active_users.get(channel)
        if channel_users is not None and username in channel_users:
            channel_users.remove(username)
        state = roundtable_states.get(channel)
        if state is not None:
            if username in state.get("participants", []):
                state["participants"].remove(username)
            thinking = state.get("thinking", [])
            if username in thinking:
                thinking.remove(username)
            # Don't leave a departed user highlighted as the speaker.
            if state.get("currentSpeaker") == username:
                state["currentSpeaker"] = None
        if channel in chat_histories:
            chat_histories[channel].append(
                {"role": "system_join_leave", "content": f"<em>{username} has left the chat.</em>"}
            )
        if state is not None:
            state["messages"].append({"speaker": "System", "text": f"{username} has left the chat."})
            roundtable_json = json.dumps(state)
        else:
            roundtable_json = "{}"
    return True, roundtable_json
def send_message(channel: str, username: str, message: str) -> Tuple[List[Dict], str, List[Dict], str, str]:
    """
    Processes the user message and, if necessary, the Gemini response synchronously.
    Returns the final, complete state to the UI in a single update.

    Return order: (raw history, roundtable JSON, formatted chatbot history,
    roundtable JSON again, "" to clear the message input box).

    Locking pattern: history_lock is non-reentrant, so every blocking LLM
    call happens OUTSIDE a `with history_lock` block; keep that ordering.
    """
    # Empty message or anonymous sender: echo the current state unchanged.
    if not message or not username:
        with history_lock:
            current_history = chat_histories.get(channel, [])
            roundtable_json = json.dumps(roundtable_states.get(channel, {}))
            chatbot_formatted = format_history_for_chatbot_display(current_history)
        return current_history, roundtable_json, chatbot_formatted, roundtable_json, ""
    # Safety gate first (blocking LLM call, done before touching state).
    moderation_result = moderate_with_llm(message)
    if moderation_result:
        with history_lock:
            system_msg = {"role": "system_error", "content": moderation_result}
            chat_histories[channel].append(system_msg)
            final_history = chat_histories.get(channel, [])
            final_roundtable_json = json.dumps(roundtable_states.get(channel, {}))
            final_chatbot_formatted = format_history_for_chatbot_display(final_history)
        return final_history, final_roundtable_json, final_chatbot_formatted, final_roundtable_json, ""
    user_msg = {"role": "user", "username": username, "content": message}
    with history_lock:
        # Record the message in both the chat log and the roundtable view.
        chat_histories[channel].append(user_msg)
        state = roundtable_states[channel]
        state["messages"].append({"speaker": username, "text": clean_html_for_llm(message)})
        if username not in state["showBubbles"]:
            state["showBubbles"].append(username)
        # Show at most the 4 most recent speakers' bubbles.
        if len(state["showBubbles"]) > 4:
            state["showBubbles"] = state["showBubbles"][-4:]
        state["currentSpeaker"] = username
        # Snapshot under the lock so the LLM calls below run lock-free.
        history_for_llm = list(chat_histories[channel])
    # Triage pass: cheap deterministic call deciding whether Gemini replies.
    history_for_triage = [{"role": "system", "content": TRIAGE_PROMPT}] + consolidate_history_for_gemini(history_for_llm)
    decision = call_gemini_api(history_for_triage, stream=False, temperature=0.0)
    should_gemini_respond = decision and "[RESPOND]" in decision
    if should_gemini_respond:
        # Actor pass: generate the actual reply with some creativity.
        history_for_actor = [{"role": "system", "content": SYSTEM_PROMPT_ACTOR}] + consolidate_history_for_gemini(history_for_llm)
        bot_response_text = call_gemini_api(history_for_actor, stream=False, temperature=0.7)
        with history_lock:
            state = roundtable_states[channel]
            if bot_response_text and "Error:" not in bot_response_text and "[BLOCKED" not in bot_response_text:
                # Strip any "Gemini:" prefix the model emits despite the prompt rules.
                cleaned_response = re.sub(r"^\s*gemini:\s*", "", bot_response_text, flags=re.IGNORECASE)
                gemini_msg = {"role": "assistant", "username": "Gemini", "content": cleaned_response}
                chat_histories[channel].append(gemini_msg)
                state["messages"].append({"speaker": "Gemini", "text": clean_html_for_llm(cleaned_response)})
                if "Gemini" not in state["showBubbles"]:
                    state["showBubbles"].append("Gemini")
                if len(state["showBubbles"]) > 4:
                    state["showBubbles"] = state["showBubbles"][-4:]
                state["currentSpeaker"] = None
                if "thinking" in state and "Gemini" in state["thinking"]:
                    state["thinking"].remove("Gemini")
            else:
                # Error / blocked response: just clear the speaker highlight.
                state["currentSpeaker"] = None
    else:
        with history_lock:
            state = roundtable_states[channel]
            state["currentSpeaker"] = None
    with history_lock:
        final_history = chat_histories.get(channel, [])
        final_roundtable_json = json.dumps(roundtable_states[channel])
        final_chatbot_formatted = format_history_for_chatbot_display(final_history)
    return final_history, final_roundtable_json, final_chatbot_formatted, final_roundtable_json, ""
def get_summary_or_opinion(channel: str, prompt_template: str) -> Tuple[List[Dict], str]:
    """Handles summary/opinion and updates BOTH chat histories.

    Runs the LLM with `prompt_template` over a snapshot of the channel
    history and appends the result as a "system_summary" / "system_opinion"
    message to the chat history and as a Gemini bubble in the roundtable.

    Returns (raw channel history, roundtable state JSON).

    history_lock is non-reentrant: the snapshot and the state update are two
    separate `with` blocks so the blocking LLM call runs outside the lock.
    """
    with history_lock:
        history_copy = chat_histories.get(channel, []).copy()
    history_for_llm = [{"role": "system", "content": prompt_template}] + consolidate_history_for_gemini(history_copy)
    response_text = call_gemini_api(history_for_llm, stream=False)
    # Which tool was requested is inferred from the prompt text itself
    # (SUMMARY_PROMPT contains "summary"; OPINION_PROMPT must not).
    is_summary = "summary" in prompt_template.lower()
    role = "system_summary" if is_summary else "system_opinion"
    content = response_text if response_text and "Error:" not in response_text else "Could not generate the response."
    system_msg = {"role": role, "content": content}
    with history_lock:
        chat_histories[channel].append(system_msg)
        state = roundtable_states[channel]
        title = "Conversation Summary" if is_summary else "Gemini's Opinion"
        roundtable_text = f"**{title}**:\n\n{clean_html_for_llm(content)}"
        roundtable_msg = {"speaker": "Gemini", "text": roundtable_text}
        state["messages"].append(roundtable_msg)
        if "Gemini" not in state["showBubbles"]:
            state["showBubbles"].append("Gemini")
        # Show at most the 4 most recent speakers' bubbles.
        if len(state["showBubbles"]) > 4:
            state["showBubbles"] = state["showBubbles"][-4:]
        state["currentSpeaker"] = None
        if "thinking" in state and "Gemini" in state["thinking"]:
            state["thinking"].remove("Gemini")
        roundtable_json = json.dumps(state)
    return chat_histories.get(channel, []), roundtable_json
def format_history_for_chatbot_display(history: List[Dict]) -> List[Dict]:
    """Applies HTML formatting for gr.Chatbot display using the 'messages' format.

    Maps each raw history entry to a {"role", "content"} dict:
    - "user" + username  -> colored bold "name:" prefix (color from get_user_color)
    - "assistant" + username -> bold markdown name prefix
    - "system_join_leave" -> centered grey notice, shown on the user side
    - "system_error" -> red system message, shown on the assistant side
    - "system_summary"/"system_opinion" -> boxed card with a title
    Entries that render to empty content are dropped.
    """
    formatted_history = []
    for msg in history:
        new_msg = msg.copy()
        # Fix: default the role to "" so a malformed entry without a "role"
        # key renders as a plain user message instead of raising
        # AttributeError on role.startswith below.
        role = new_msg.get("role") or ""
        content = new_msg.get("content", "")
        username = new_msg.get("username")
        # Assistant and system_* messages render on the assistant side,
        # except join/leave notices, which render centered on the user side.
        if (role == "assistant" or role.startswith("system_")) and role != "system_join_leave":
            display_role = "assistant"
        else:
            display_role = "user"
        display_content = ""
        if role == "user" and username:
            color = get_user_color(username)
            display_content = f"<span style='color:{color}; font-weight: bold;'>{username}:</span> {content}"
        elif role == "assistant" and username:
            display_content = f"**{username}:** {content}"
        elif role == "system_join_leave":
            display_content = f"<div style='text-align: center; color: grey;'>{content}</div>"
        elif role == "system_error":
            display_content = f"<span style='color:red;'>**System:** {content}</span>"
        elif role in ("system_summary", "system_opinion"):
            is_summary = role == "system_summary"
            title = "Conversation Summary" if is_summary else "Gemini's Opinion"
            response_content = content.replace("**", "")
            if is_summary:
                # Turn "- " bullets into <br>-separated lines; any doubled
                # space this produces collapses when rendered as HTML.
                formatted_list = re.sub(r"-\s*", "<br>- ", response_content).strip()
                if formatted_list.startswith("<br>- "):
                    formatted_list = formatted_list[5:]
                response_content = "- " + formatted_list
            display_content = (
                f"<div style='background-color:#f8f9fa; border-left: 5px solid #ccc; padding: 10px; margin: 10px 0; border-radius: 5px;'>"
                f"<b>{title}:</b><br>{response_content}</div>"
            )
        else:
            display_content = content
        if display_content:
            formatted_history.append({"role": display_role, "content": display_content})
    return formatted_history
def get_summary(channel: str) -> Tuple[List[Dict], str, Dict]:
    """
    Returns the conversation summary data.

    Return order: (raw history, roundtable JSON, API payload combining both).
    The third element is shown via the hidden mcp_api_return JSON component.
    """
    unformatted_history, roundtable_json = get_summary_or_opinion(channel, SUMMARY_PROMPT)
    api_data = {"history": unformatted_history, "roundtable": roundtable_json}
    return unformatted_history, roundtable_json, api_data
def get_opinion(channel: str) -> Tuple[List[Dict], str, Dict]:
    """
    Returns the opinion data generated by LLM.

    Return order: (raw history, roundtable JSON, API payload combining both).
    The third element is shown via the hidden mcp_api_return JSON component.
    """
    unformatted_history, roundtable_json = get_summary_or_opinion(channel, OPINION_PROMPT)
    api_data = {"history": unformatted_history, "roundtable": roundtable_json}
    return unformatted_history, roundtable_json, api_data
def format_all_views_from_state(unformatted_history, roundtable_json):
    """UI-only helper: format the raw history for gr.Chatbot display and
    pass the roundtable JSON through unchanged."""
    return format_history_for_chatbot_display(unformatted_history), roundtable_json
def get_live_updates(channel: str):
    """
    Fetches and formats the latest data from the backend for both views.

    A fresh timestamp is injected into the roundtable payload so its JSON
    differs on every tick. With no channel selected, both outputs are
    skipped (gr.skip()).
    """
    if not channel:
        return gr.skip(), gr.skip()
    with history_lock:
        raw_history = chat_histories.get(channel, [])
        stamped_state = dict(roundtable_states.get(channel, {}))
        stamped_state["update_timestamp"] = str(time.time())
        roundtable_json = json.dumps(stamped_state)
        chatbot_formatted = format_history_for_chatbot_display(raw_history)
    return chatbot_formatted, roundtable_json
def update_ui_after_login(final_username: str, final_channel: str, unformatted_history: List[Dict], roundtable_json: str):
    """UI-only function to switch views and update components after login."""
    hide_login = gr.update(visible=False)
    show_chat = gr.update(visible=True)
    chatbot_formatted = format_history_for_chatbot_display(unformatted_history)
    return (
        hide_login,
        show_chat,
        final_username,
        final_channel,
        chatbot_formatted,
        roundtable_json,
    )
def update_ui_after_logout():
    """UI-only function to switch views after logout (also clears the
    history state and the roundtable JSON state)."""
    show_login = gr.update(visible=True)
    hide_chat = gr.update(visible=False)
    return show_login, hide_chat, [], "{}"
def toggle_chat_view(view_choice: str, unformatted_history: List[Dict], roundtable_json: str):
    """Hides/shows the correct chat component and populates it with data."""
    wants_roundtable = view_choice == "Roundtable"
    if wants_roundtable:
        return (
            gr.update(visible=True, value=roundtable_json),
            gr.update(visible=False),
        )
    return (
        gr.update(visible=False),
        gr.update(visible=True, value=format_history_for_chatbot_display(unformatted_history)),
    )
# ---------------------------------------------------------------------------
# Gradio UI definition and event wiring. Also exposes the handlers with
# api_name=... as MCP tools (demo.launch(mcp_server=True) below).
# ---------------------------------------------------------------------------
with gr.Blocks(theme=gr.themes.Ocean(), title="Multi-Agent Chat") as demo:
    # --- State Management ---
    # Raw (unformatted) history and roundtable JSON per browser session.
    unformatted_history_state = gr.State([])
    roundtable_state_json = gr.State("{}")
    # Sink for exit_chat's boolean completion flag (value is unused).
    dumb_state = gr.State(None)
    # Hidden JSON output so the summary/opinion MCP endpoints return data.
    mcp_api_return = gr.JSON(visible=False)
    with gr.Column(visible=True) as login_view:
        gr.Markdown("# 🚀 Welcome to Multi-Agent Chat")
        username_input_login = gr.Textbox(label="Your Name", placeholder="e.g., Lucy")
        channel_choice_dropdown = gr.Dropdown(choices=AVAILABLE_CHANNELS_LIST, label="Choose a Channel", value="general")
        login_button = gr.Button("Enter Chat", variant="primary")
    with gr.Column(visible=False) as chat_view:
        gr.Markdown("# 🚀 Multi-Agent Chat")
        gr.Markdown("""
    ### 💬 Interacting with the Gemini Agent
    Our AI agent, Gemini, plays two key roles in the chat: a **helpful participant** and a **silent moderator**.
    - **To ask a question or get a response:** Simply mention **"Gemini"** in your message. The agent is smart enough to understand context and even some typos!
    > **Example:** "That's a great point, Alice. What do you think, **Gemini**?" 🤔
    - **As a Moderator 🛡️:** Gemini is always monitoring the conversation in the background to ensure a safe and respectful environment. It will automatically detect and block messages containing hate speech, harassment, or other policy violations.
    - **For general chat:** Just talk normally with other users. Gemini will remain silent unless its participation is directly requested or highly valuable.
    """)
        with gr.Row():
            with gr.Column(scale=1):
                gr.Markdown("## ⚙️ Session Data")
                username_display = gr.Textbox(label="Logged in as", interactive=False)
                channel_display = gr.Textbox(label="Current Channel", interactive=False)
                gr.Markdown("## 🤖 MCP Tools")
                summary_button = gr.Button("📄 Generate Chat Summary")
                opinion_button = gr.Button("🤔 Ask for LLM's Opinion")
                exit_button = gr.Button("🚪 Exit Chat")
            with gr.Column(scale=3):
                # Two alternative renderings of the same conversation; the
                # radio below toggles which one is visible.
                view_switch = gr.Radio(["Roundtable", "Chat"], label="Chat View", value="Roundtable")
                roundtable_display = consilium_roundtable(
                    label="🎭 Live Discussion Roundtable",
                    visible=True,
                )
                chatbot_display = gr.Chatbot(
                    label="Conversation",
                    height=600,
                    visible=False,
                    bubble_full_width=False,
                    group_consecutive_messages=False,
                    type="messages"
                )
                with gr.Row():
                    msg_input = gr.Textbox(show_label=False, placeholder="Type your message...", scale=5)
                    send_button = gr.Button("Send", variant="primary", scale=1)
    # Poll the shared backend state every 5s so all clients stay in sync.
    chat_timer = gr.Timer(5)
    chat_timer.tick(
        fn=get_live_updates,
        inputs=[channel_display],
        outputs=[chatbot_display, roundtable_display],
        show_progress=False
    )
    # Shared kwargs for the view toggle, reused after login and on switch.
    # NOTE(review): "toogle" is a typo for "toggle" in this local name.
    event_toogle_chat_view = {"fn":toggle_chat_view,"inputs":[view_switch, unformatted_history_state, roundtable_state_json],"outputs":[roundtable_display, chatbot_display]}
    login_button.click(
        fn=login_user,
        inputs=[channel_choice_dropdown, username_input_login],
        outputs=[username_display, channel_display, unformatted_history_state, roundtable_state_json],
    ).then(
        fn=update_ui_after_login,
        inputs=[username_display, channel_display, unformatted_history_state, roundtable_state_json],
        outputs=[login_view, chat_view, username_display, channel_display, chatbot_display, roundtable_display],
    ).then(**event_toogle_chat_view)
    exit_button.click(
        fn=exit_chat,
        inputs=[channel_display, username_display],
        outputs=[dumb_state, roundtable_state_json],
    ).then(
        fn=update_ui_after_logout,
        inputs=None,
        outputs=[login_view, chat_view, unformatted_history_state, roundtable_state_json]
    )
    view_switch.change(**event_toogle_chat_view)
    # Sending via button or Enter key runs the same handler; each gets its
    # own api_name so both are exposed as distinct endpoints.
    send_button.click(
        fn=send_message,
        inputs=[channel_display, username_display, msg_input],
        outputs=[unformatted_history_state, roundtable_state_json, chatbot_display, roundtable_display, msg_input],
        show_progress=False,
        api_name="send_message"
    )
    msg_input.submit(
        fn=send_message,
        inputs=[channel_display, username_display, msg_input],
        outputs=[unformatted_history_state, roundtable_state_json, chatbot_display, roundtable_display, msg_input],
        show_progress=False,
        api_name="send_message_submit"
    )
    # NOTE(review): run_tool_and_update is defined but never wired to any
    # event below (the buttons call get_summary/get_opinion directly) —
    # looks dead; confirm before removing.
    def run_tool_and_update(tool_function, channel):
        history, roundtable_json = tool_function(channel)
        chatbot_formatted = format_history_for_chatbot_display(history)
        return history, roundtable_json, chatbot_formatted, roundtable_json
    summary_button.click(
        fn=get_summary,
        inputs=[channel_display],
        outputs=[unformatted_history_state, roundtable_state_json, mcp_api_return],
        show_progress=False,
        api_name="get_summary"
    ).then(
        fn=format_all_views_from_state,
        inputs=[unformatted_history_state, roundtable_state_json],
        show_progress=False,
        outputs=[chatbot_display, roundtable_display]
    )
    opinion_button.click(
        fn=get_opinion,
        inputs=[channel_display],
        outputs=[unformatted_history_state, roundtable_state_json, mcp_api_return],
        show_progress=False,
        api_name="get_opinion"
    ).then(
        fn=format_all_views_from_state,
        inputs=[unformatted_history_state, roundtable_state_json],
        show_progress=False,
        outputs=[chatbot_display, roundtable_display]
    )
# mcp_server=True additionally exposes the api_name'd handlers as MCP tools.
if __name__ == "__main__":
    demo.launch(mcp_server=True)