elismasilva committed
Commit ad9da03 · 0 Parent(s)

initial commit

Files changed (7):
  1. .gitattributes +35 -0
  2. .gitignore +11 -0
  3. Makefile +3 -0
  4. README.md +19 -0
  5. app.py +425 -0
  6. gemini_model_api.py +159 -0
  7. requirements.txt +3 -0
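
The substance of the commit is in the two Python files: app.py wires up the Gradio UI, the moderation/triage/actor prompt chain, and the MCP tool endpoints, while gemini_model_api.py is a small wrapper around the Gemini generateContent REST API.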
.gitattributes ADDED
@@ -0,0 +1,35 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,11 @@
+ __pycache__/
+ *.py[cod]
+ /.vs
+ .vscode/
+ .idea/
+ venv/
+ .venv/
+ *.log
+ .DS_Store
+ .gradio
+ Makeile
Makefile ADDED
@@ -0,0 +1,3 @@
+ format:
+ 	isort --profile black -l 100 ./
+ 	black -l 100 ./
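
For reference, running `make format` applies isort (with the black profile) and black across the repository at a 100-column line length.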
README.md ADDED
@@ -0,0 +1,19 @@
+ ---
+ title: Multi Agent Chat
+ emoji: 💬
+ colorFrom: yellow
+ colorTo: purple
+ sdk: gradio
+ sdk_version: 5.33.0
+ app_file: app.py
+ pinned: true
+ license: apache-2.0
+ tags:
+ - Agents-MCP-Hackathon
+ - mcp-server-track
+ - agent-demo-track
+
+ short_description: A multi-agent chat application and Gradio MCP Server
+ ---
+
+ A multi-agent group chat built with [Gradio](https://gradio.app) and powered by the Google Gemini API. The app also runs as a Gradio MCP server, exposing its login, messaging, summary, and opinion functions as tools.
app.py ADDED
@@ -0,0 +1,425 @@
+ import hashlib
+ import re
+ from threading import Lock
+ from typing import Dict, List, Optional, Tuple, Union
+ import gradio as gr
+ from gemini_model_api import call_gemini_api
+
+ MODERATION_PROMPT = """
+ You are a content safety AI. Your only job is to analyze the user's message and determine if it violates content policies.
+ Check for hate speech, harassment, bullying, self-harm encouragement, and explicit content.
+ Your output MUST be a single word: either `[OK]` or `[VIOLATION]`.
+ """
+ TRIAGE_PROMPT = """
+ You are a fast, logical decision-making AI. Your only job is to analyze a conversation history and decide if the AI participant named 'Gemini' should speak.
+ CRITERIA FOR RESPONDING (You should respond if ANY of these are true):
+ - **Direct Mention:** Gemini is addressed directly by name, even with typos (e.g., "Gemini", "Gmni").
+ - **Implicit Reference:** Gemini is clearly referred to implicitly as part of a group (e.g., "what about you guys?").
+ - **Question to Group:** A user asks a direct question to the group that is not directed at a specific person.
+ - **Reply to Your Question:** A user's message is a direct and logical answer to a question YOU (Gemini) asked in the previous turn.
+ - **Request for Help:** A user expresses a clear need for help or information.
+ CRITERIA FOR IGNORING:
+ - The conversation is a simple social exchange between other users.
+ - A question is clearly directed from one specific user to another.
+ Your output MUST be a single word: either `[RESPOND]` or `[IGNORE]`.
+ """
+ SYSTEM_PROMPT_ACTOR = """
+ You are a helpful and friendly AI assistant named Gemini, participating in a group chat. You will act as a human-like participant.
+ **CONTEXTUAL AWARENESS (This is how you understand the conversation):**
+ - When you see the name "Gemini" in the text, it is referring to **YOU**.
+ - Your task is to formulate a response based on the last few messages, where you were mentioned.
+ **RESPONSE RULES (This is how you MUST formulate your answer):**
+ 1. **Grounding:** You are a language model. You do not have a physical body, personal experiences, or feelings. **Do not invent stories about yourself** (like falling down stairs or having stomach aches). If asked about a personal experience, politely clarify that as an AI, you don't have them, but you can help with information.
+ 2. **No Prefix:** **ABSOLUTELY DO NOT** start your response with your name (e.g., "Gemini:"). This is a strict rule.
+ 3. **No Meta-Commentary:** Do not make comments about your own thought process.
+ 4. **Language:** Respond in the same language as the conversation.
+ """
+ SUMMARY_PROMPT = """
+ You are a factual reporting tool. Your only task is to read the following chat history and summarize **who said what**.
+ ABSOLUTE RULES:
+ 1. Your response **MUST** be in the primary language used in the conversation.
+ 2. **DO NOT** provide any opinion, analysis, or interpretation.
+ 3. Your output **MUST** be a list of key points, attributing each point to the user who made it.
+ Example output format:
+ - **Alice** asked for a way to cook eggs without the oil splashing.
+ - **Gemini** explained that this happens due to water in the pan and suggested drying it first.
+ - **Eliseu** understood the advice and said he would try it.
+ Now, generate a factual summary for the following conversation:
+ """
+ OPINION_PROMPT = """
+ You are a social and emotional intelligence analyst. Your only task is to read the following chat history and provide your opinion on the **dynamics and mood** of the conversation.
+ ABSOLUTE RULES:
+ 1. Your response **MUST** be in the primary language used in the conversation.
+ 2. **DO NOT** summarize who said what. Focus only on the underlying feeling and interaction style.
+ 3. **DO NOT** be academic or technical. Speak like an insightful person.
+ 4. Your output **MUST** be a short, reflective paragraph.
+ Focus on answering questions like:
+ - What was the overall tone? (e.g., helpful, tense, humorous)
+ - How were the participants interacting? (e.g., collaboratively, arguing, supporting each other)
+ - What is your general emotional takeaway from the exchange?
+ Now, provide your opinion on the following conversation:
+ """
+
+ # --- State and helper functions ---
+ history_lock = Lock()
+ AVAILABLE_CHANNELS_LIST = ["general", "dev", "agents", "mcp"]
+ chat_histories = {
+     channel: [{"role": "assistant", "content": f"Welcome to the #{channel} channel!"}]
+     for channel in AVAILABLE_CHANNELS_LIST
+ }
+ active_users = {channel: set() for channel in AVAILABLE_CHANNELS_LIST}
+ USER_COLORS = [
+     "#FF6347",
+     "#4682B4",
+     "#32CD32",
+     "#FFD700",
+     "#6A5ACD",
+     "#FF69B4",
+     "chocolate",
+     "indigo",
+ ]
+
+
+ def get_user_color(username: str) -> str:
+     base_username = re.sub(r"_\d+$", "", username)
+     hash_object = hashlib.sha256(base_username.encode())
+     hash_digest = hash_object.hexdigest()
+     hash_int = int(hash_digest, 16)
+     color_index = hash_int % len(USER_COLORS)
+     return USER_COLORS[color_index]
+
+
+ def clean_html_for_llm(text: str) -> str:
+     clean_text = re.sub("<[^<]+?>", "", text)
+     clean_text = re.sub(r"^\s*\*\*[a-zA-Z0-9_]+:\*\*\s*", "", clean_text)
+     clean_text = clean_text.replace("**", "")
+     return clean_text.strip()
+
+
+ def consolidate_history_for_gemini(history: List[Dict]) -> List[Dict]:
+     if not history:
+         return []
+     prepared_history = []
+     for msg in history:
+         if msg.get("role") not in ["user", "assistant"]:
+             continue
+         role = "model" if msg.get("role") == "assistant" else "user"
+         content = (
+             f"{msg.get('username', '')}: {msg.get('content', '')}"
+             if msg.get("username")
+             else msg.get("content", "")
+         )
+         prepared_history.append(
+             {"role": role, "username": msg.get("username"), "content": clean_html_for_llm(content)}
+         )
+     if not prepared_history:
+         return []
+     consolidated = []
+     current_block = prepared_history[0]
+     for msg in prepared_history[1:]:
+         if (
+             msg["role"] == "user"
+             and current_block["role"] == "user"
+             and msg.get("username") == current_block.get("username")
+         ):
+             current_block["content"] += "\n" + msg["content"]
+         else:
+             consolidated.append(current_block)
+             current_block = msg
+     consolidated.append(current_block)
+     for block in consolidated:
+         block.pop("username", None)
+     return consolidated
+
+
+ def moderate_with_llm(message_text: str) -> Optional[str]:
+     moderation_payload = [
+         {"role": "system", "content": MODERATION_PROMPT},
+         {"role": "user", "content": message_text},
+     ]
+     decision = call_gemini_api(moderation_payload, stream=False, temperature=0.0)
+     if decision and "[VIOLATION]" in decision:
+         return "Message blocked by content safety policy."
+     return None
+
+
+ def login_user(channel: str, username: str) -> Tuple[str, str, List[Dict]]:
+     """Handles login logic. Returns the final username, channel, and the unformatted history."""
+
+     if not username:
+         username = "User"
+     final_channel = channel if channel else "general"
+     with history_lock:
+         if final_channel not in active_users:
+             active_users[final_channel] = set()
+         users_in_channel = active_users.get(final_channel)
+         final_username = username
+         i = 2
+         while final_username in users_in_channel:
+             final_username = f"{username}_{i}"
+             i += 1
+         users_in_channel.add(final_username)
+         join_message = {
+             "role": "system_join_leave",
+             "content": f"<em>{final_username} has joined the chat.</em>",
+         }
+         chat_histories.setdefault(final_channel, []).append(join_message)
+         updated_history = chat_histories.get(final_channel)
+     return final_username, final_channel, updated_history
+
+
+ def exit_chat(channel: str, username: str) -> bool:
+     """Handles logout logic. Returns True on completion."""
+     with history_lock:
+         if channel in active_users and username in active_users[channel]:
+             active_users[channel].remove(username)
+             exit_message = {
+                 "role": "system_join_leave",
+                 "content": f"<em>{username} has left the chat.</em>",
+             }
+             if channel in chat_histories:
+                 chat_histories[channel].append(exit_message)
+     return True
+
+
+ def send_message(channel: str, username: str, message: str) -> List[Dict]:
+     """Handles new messages. Returns the full, unformatted history."""
+
+     if not message or not username:
+         with history_lock:
+             return chat_histories.get(channel, [])
+     moderation_result = moderate_with_llm(message)
+     if moderation_result:
+         with history_lock:
+             chat_histories[channel].append({"role": "system_error", "content": moderation_result})
+             return chat_histories.get(channel, [])
+     with history_lock:
+         chat_histories[channel].append({"role": "user", "username": username, "content": message})
+         history_for_llm = list(chat_histories[channel])
+     history_for_triage = [
+         {"role": "system", "content": TRIAGE_PROMPT}
+     ] + consolidate_history_for_gemini(history_for_llm)
+     decision = call_gemini_api(history_for_triage, stream=False, temperature=0.0)
+     if decision and "[RESPOND]" in decision:
+         history_for_actor = [
+             {"role": "system", "content": SYSTEM_PROMPT_ACTOR}
+         ] + consolidate_history_for_gemini(history_for_llm)
+         bot_response_text = call_gemini_api(history_for_actor, stream=False, temperature=0.7)
+         if (
+             bot_response_text
+             and "Error:" not in bot_response_text
+             and "[BLOCKED" not in bot_response_text
+         ):
+             cleaned_response = re.sub(r"^\s*gemini:\s*", "", bot_response_text, flags=re.IGNORECASE)
+             with history_lock:
+                 chat_histories[channel].append(
+                     {"role": "assistant", "username": "Gemini", "content": cleaned_response}
+                 )
+     with history_lock:
+         return chat_histories.get(channel, [])
+
+
+ def get_summary_or_opinion(channel: str, prompt_template: str) -> List[Dict]:
+     """Handles the summary and opinion chat tools. Returns the full, unformatted history."""
+     with history_lock:
+         history_copy = chat_histories.get(channel, []).copy()
+     history_for_llm = [
+         {"role": "system", "content": prompt_template}
+     ] + consolidate_history_for_gemini(history_copy)
+     response_text = call_gemini_api(history_for_llm, stream=False)
+     is_summary = "summary" in prompt_template.lower()
+     role = "system_summary" if is_summary else "system_opinion"
+     content = (
+         response_text
+         if response_text and "Error:" not in response_text
+         else "Could not generate the response."
+     )
+     with history_lock:
+         chat_histories[channel].append({"role": role, "content": content})
+         return chat_histories.get(channel, [])
+
+
+ def format_history_for_display(history: List[Dict]) -> List[Dict]:
+     """Applies HTML formatting to a clean history list for display."""
+     formatted_history = []
+     for msg in history:
+         new_msg = msg.copy()
+         role, content, username = (
+             new_msg.get("role"),
+             new_msg.get("content", ""),
+             new_msg.get("username"),
+         )
+         if role == "user" and username:
+             color = get_user_color(username)
+             new_msg["content"] = (
+                 f"<span style='color:{color}; font-weight: bold;'>{username}:</span> {content}"
+             )
+         elif role == "assistant" and username:
+             new_msg["content"] = f"**{username}:** {content}"
+         elif role == "system_join_leave":
+             new_msg["content"] = f"<div style='text-align: center; color: grey;'>{content}</div>"
+             new_msg["role"] = "user"
+         elif role == "system_error":
+             new_msg["content"] = f"<span style='color:red;'>**System:** {content}</span>"
+             new_msg["role"] = "user"
+         elif role == "system_summary" or role == "system_opinion":
+             is_summary = role == "system_summary"
+             title = "Conversation Summary" if is_summary else "Gemini's Opinion"
+             color = "#6c757d" if is_summary else "#007bff"
+             response_content = content.replace("**", "")
+             if is_summary:
+                 formatted_list = re.sub(r"-\s*", "<br>- ", response_content).strip()
+                 if formatted_list.startswith("<br>- "):
+                     formatted_list = formatted_list[len("<br>- ") :]
+                 response_content = "- " + formatted_list
+             new_msg["content"] = (
+                 f"<div style='background-color:#f8f9fa;...'><b>{title}:</b><br>{response_content}</div>"
+             )
+             new_msg["role"] = "user"
+         formatted_history.append(new_msg)
+     return formatted_history
+
+
+ def get_and_format_history(
+     channel: str, current_ui_history: List[Dict]
+ ) -> Union[List[Dict], gr.skip]:
+     """UI helper: re-renders the chat only when the backend history has new messages."""
+     with history_lock:
+         backend_history = chat_histories.get(channel, [])
+         if len(backend_history) == len(current_ui_history):
+             return gr.skip()
+         else:
+             return format_history_for_display(backend_history)
+
+
+ def update_ui_after_login(
+     final_username: str, final_channel: str, unformatted_history: List[Dict]
+ ) -> Tuple:
+     """UI-only function to switch views and update components after login."""
+     return (
+         gr.update(visible=False),
+         gr.update(visible=True),
+         final_username,
+         final_channel,
+         format_history_for_display(unformatted_history),
+     )
+
+
+ def update_ui_after_logout() -> Tuple:
+     """UI-only function to switch views after logout."""
+     return gr.update(visible=True), gr.update(visible=False)
+
+
+ def get_summary(channel):
+     return get_summary_or_opinion(channel, SUMMARY_PROMPT)
+
+
+ def get_opinion(channel):
+     return get_summary_or_opinion(channel, OPINION_PROMPT)
+
+
+ def clear_textbox():
+     return ""
+
+
+ with gr.Blocks(theme=gr.themes.Ocean(), title="Multi-Agent Chat") as demo:
+
+     with gr.Column(visible=True) as login_view:
+         gr.Markdown("# 🚀 Welcome to Multi-Agent Chat")
+         username_input_login = gr.Textbox(label="Your Name", placeholder="e.g., Lucy")
+         channel_choice_dropdown = gr.Dropdown(
+             choices=AVAILABLE_CHANNELS_LIST, label="Choose a Channel", value="general"
+         )
+         login_button = gr.Button("Enter Chat", variant="primary")
+
+     with gr.Column(visible=False) as chat_view:
+         gr.Markdown("# 🚀 Welcome to Multi-Agent Chat")
+         gr.Markdown("""### 💬 Interacting with the Gemini Agent
+ The AI agent, Gemini, is always listening to the conversation but is designed to be reserved. To get its attention, you need to address it directly.
+ - **To ask a question or get a response:** Simply mention **"Gemini"** in your message. The agent is smart enough to understand context and even some typos!
+ > **Example:** "That's a great point, Lucy. What do you think, **Gemini**?" 🤔
+ - **For general chat:** Just talk normally with other users. Gemini will remain silent unless it feels its participation is highly valuable.
+ """)
+         with gr.Row():
+             with gr.Column(scale=1):
+                 gr.Markdown("## ⚙️ Session Data")
+                 username_display = gr.Textbox(label="Logged in as", interactive=False)
+                 channel_display = gr.Textbox(label="Current Channel", interactive=False)
+                 gr.Markdown("## 🤖 MCP Tools")
+                 summary_button = gr.Button("📄 Generate Chat Summary")
+                 opinion_button = gr.Button("🤔 Ask for LLM's Opinion")
+                 exit_button = gr.Button("🚪 Exit Chat")
+             with gr.Column(scale=3):
+                 chatbot = gr.Chatbot(
+                     label="Conversation",
+                     height=600,
+                     type="messages",
+                     group_consecutive_messages=False,
+                 )
+                 with gr.Row():
+                     msg_input = gr.Textbox(
+                         show_label=False, placeholder="Type your message...", scale=5
+                     )
+                     send_button = gr.Button("Send", variant="primary", scale=1)
+
+     chat_timer = gr.Timer(5)
+     chat_timer.tick(fn=get_and_format_history, inputs=[channel_display, chatbot], outputs=chatbot)
+     unformatted_history_state = gr.State()
+     dumb_state = gr.State(value=None)
+
+     login_event = login_button.click(
+         fn=login_user,
+         inputs=[channel_choice_dropdown, username_input_login],
+         outputs=[
+             username_display,
+             channel_display,
+             unformatted_history_state,
+         ],
+         api_name="login_user",
+     )
+     login_event.then(
+         fn=update_ui_after_login,
+         inputs=[username_display, channel_display, unformatted_history_state],
+         outputs=[login_view, chat_view, username_display, channel_display, chatbot],
+     )
+
+     exit_event = exit_button.click(
+         fn=exit_chat,
+         inputs=[channel_display, username_display],
+         outputs=dumb_state,
+         api_name="exit_chat",
+     )
+     exit_event.then(fn=update_ui_after_logout, inputs=None, outputs=[login_view, chat_view])
+
+     summary_event = summary_button.click(
+         fn=get_summary, inputs=[channel_display], outputs=dumb_state, api_name="get_summary"
+     ).then(fn=format_history_for_display, inputs=dumb_state, outputs=chatbot)
+
+     opinion_event = opinion_button.click(
+         fn=get_opinion, inputs=[channel_display], outputs=dumb_state, api_name="get_opinion"
+     ).then(fn=format_history_for_display, inputs=dumb_state, outputs=chatbot)
+
+     send_event = (
+         send_button.click(
+             fn=send_message,
+             inputs=[channel_display, username_display, msg_input],
+             outputs=dumb_state,
+             api_name="send_message",
+         )
+         .then(fn=format_history_for_display, inputs=dumb_state, outputs=chatbot)
+         .then(fn=clear_textbox, inputs=None, outputs=msg_input)
+     )
+
+     submit_event = (
+         msg_input.submit(
+             fn=send_message,
+             inputs=[channel_display, username_display, msg_input],
+             outputs=dumb_state,
+             api_name="send_message",
+         )
+         .then(fn=format_history_for_display, inputs=dumb_state, outputs=chatbot)
+         .then(fn=clear_textbox, inputs=None, outputs=msg_input)
+     )
+
+ if __name__ == "__main__":
+     demo.launch(mcp_server=True)
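
Since `demo.launch(mcp_server=True)` exposes every event registered with an `api_name` above (`login_user`, `exit_chat`, `send_message`, `get_summary`, `get_opinion`) as a callable endpoint, the app can also be driven programmatically. A minimal sketch using `gradio_client`; the URL and the sample messages are assumptions, not part of this commit:

```python
# Hypothetical client session against the endpoints registered in app.py.
from gradio_client import Client

client = Client("http://127.0.0.1:7860/")  # assumed local URL

# login_user returns (final_username, final_channel, unformatted_history)
username, channel, _history = client.predict("general", "Lucy", api_name="/login_user")

# send_message runs moderation and triage, and may append a Gemini reply
client.predict(channel, username, "Hi Gemini, can you introduce yourself?", api_name="/send_message")

# get_summary / get_opinion append an LLM-generated recap or mood analysis to the channel
client.predict(channel, api_name="/get_summary")

client.predict(channel, username, api_name="/exit_chat")
```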
gemini_model_api.py ADDED
@@ -0,0 +1,159 @@
+ import logging
+ import os
+ from typing import Dict, Iterator, List, Optional, Union
+
+ import requests
+ from dotenv import load_dotenv
+
+ # Load environment variables from .env file
+ load_dotenv()
+
+ # Setup logging
+ logging.basicConfig(
+     level=logging.INFO, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s"
+ )
+ logger = logging.getLogger(__name__)
+
+ # Gemini API Configuration
+ API_KEY_ENV_VAR = "GOOGLE_API_KEY"
+ BASE_URL = "https://generativelanguage.googleapis.com/v1beta/models/"
+ DEFAULT_MODEL_ID = "gemini-2.0-flash"
+
+
+ def _get_api_key() -> Optional[str]:
+     """
+     Retrieves the Google API key from environment variables.
+
+     Returns:
+         Optional[str]: The API key if found, otherwise None.
+     """
+     api_key = os.getenv(API_KEY_ENV_VAR)
+     if not api_key:
+         logger.error(f"API key not found. Set the variable '{API_KEY_ENV_VAR}'.")
+     return api_key
+
+
+ def _format_payload_for_gemini(
+     messages: List[Dict], temperature: float, max_tokens: int
+ ) -> Optional[Dict]:
+     """
+     Formats the message history and configuration into a valid payload for the Gemini REST API.
+
+     This function performs two critical tasks:
+     1. Separates the 'system' instruction from the main conversation history.
+     2. Consolidates consecutive 'user' messages into a single block to comply with
+        the Gemini API's requirement of alternating 'user' and 'model' roles.
+
+     Args:
+         messages (List[Dict]): A list of message dictionaries, potentially including a 'system' role.
+         temperature (float): The generation temperature.
+         max_tokens (int): The maximum number of tokens to generate.
+
+     Returns:
+         Optional[Dict]: A fully formed payload dictionary ready for the API, or None if the
+             conversation history is empty.
+     """
+     system_instruction = None
+     conversation_history = []
+
+     for msg in messages:
+         if msg.get("role") == "system":
+             system_instruction = {"parts": [{"text": msg.get("content", "")}]}
+         else:
+             conversation_history.append(msg)
+
+     if not conversation_history:
+         return None
+
+     consolidated_contents = []
+     current_block = None
+     for msg in conversation_history:
+         role = "model" if msg.get("role") == "assistant" else "user"
+         content = msg.get("content", "")
+
+         if current_block and current_block["role"] == "user" and role == "user":
+             current_block["parts"][0]["text"] += "\n" + content
+         else:
+             if current_block:
+                 consolidated_contents.append(current_block)
+             current_block = {"role": role, "parts": [{"text": content}]}
+
+     if current_block:
+         consolidated_contents.append(current_block)
+
+     payload = {
+         "contents": consolidated_contents,
+         "safetySettings": [
+             {"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT", "threshold": "BLOCK_NONE"},
+             {"category": "HARM_CATEGORY_HATE_SPEECH", "threshold": "BLOCK_NONE"},
+             {"category": "HARM_CATEGORY_HARASSMENT", "threshold": "BLOCK_NONE"},
+             {"category": "HARM_CATEGORY_DANGEROUS_CONTENT", "threshold": "BLOCK_NONE"},
+         ],
+         "generationConfig": {"temperature": temperature, "maxOutputTokens": max_tokens},
+     }
+     if system_instruction:
+         payload["system_instruction"] = system_instruction
+
+     return payload
+
+
+ def call_gemini_api(
+     messages: List[Dict], stream: bool = False, temperature: float = 0.7, max_tokens: int = 2048
+ ) -> Union[Iterator[str], str]:
+     """
+     Calls the Google Gemini REST API with the provided messages and parameters.
+
+     This is the main public function of the module. It handles API key retrieval,
+     payload formatting, making the HTTP request, and processing the response.
+
+     Args:
+         messages (List[Dict]): The list of messages forming the conversation context.
+         stream (bool): If True, streams the response. (Currently not implemented.)
+         temperature (float): The generation temperature (creativity).
+         max_tokens (int): The maximum number of tokens for the response.
+
+     Returns:
+         Union[Iterator[str], str]: An iterator of response chunks if streaming, or a single
+             response string otherwise. Returns an error string on failure.
+     """
+     api_key = _get_api_key()
+     if not api_key:
+         error_msg = "Error: Google API key not configured."
+         return iter([error_msg]) if stream else error_msg
+
+     payload = _format_payload_for_gemini(messages, temperature, max_tokens)
+     if not payload or not payload.get("contents"):
+         error_msg = "Error: Conversation is empty or malformed after processing."
+         return iter([error_msg]) if stream else error_msg
+
+     stream_param = "streamGenerateContent" if stream else "generateContent"
+     request_url = f"{BASE_URL}{DEFAULT_MODEL_ID}:{stream_param}?key={api_key}"
+     headers = {"Content-Type": "application/json"}
+
+     try:
+         response = requests.post(
+             request_url, headers=headers, json=payload, stream=stream, timeout=180
+         )
+         response.raise_for_status()
+
+         if stream:
+             # TODO: Implement robust stream processing logic here.
+             return iter(["Error: streaming is not implemented yet."])
+         else:
+             data = response.json()
+             # Safely access nested keys to prevent KeyErrors
+             if data.get("candidates") and data["candidates"][0].get("content", {}).get("parts"):
+                 return data["candidates"][0]["content"]["parts"][0]["text"]
+             else:
+                 logger.warning(
+                     f"Gemini's response does not contain 'candidates'. Full response: {data}"
+                 )
+                 return "[BLOCKED OR EMPTY RESPONSE]"
+
+     except requests.exceptions.HTTPError as e:
+         err_msg = f"API HTTP Error ({e.response.status_code}): {e.response.text[:500]}"
+         logger.error(err_msg, exc_info=False)
+         return f"Error: {err_msg}"
+     except Exception as e:
+         logger.exception("Unexpected error while calling Gemini API:")
+         return f"Error: {e}"
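
For reference, `call_gemini_api` is self-contained and can be exercised outside the Gradio app. A minimal sketch, assuming `GOOGLE_API_KEY` is set in the environment or in a local `.env` file (the prompt text is illustrative):

```python
# Standalone call into the module above.
from gemini_model_api import call_gemini_api

history = [
    {"role": "system", "content": "You are a concise assistant."},
    {"role": "user", "content": "Alice: In one sentence, what is a group-chat agent?"},
]

reply = call_gemini_api(history, stream=False, temperature=0.2, max_tokens=256)
print(reply)  # plain text on success; "Error: ..." or "[BLOCKED OR EMPTY RESPONSE]" otherwise
```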
requirements.txt ADDED
@@ -0,0 +1,3 @@
+ huggingface_hub==0.25.2
+ python-dotenv
+ requests