Update app.py
app.py CHANGED
@@ -371,7 +371,6 @@ class VisualConsensusEngine:
         mistral_key = session_keys.get("mistral") or MISTRAL_API_KEY
         sambanova_key = session_keys.get("sambanova") or SAMBANOVA_API_KEY
 
-        # Research Agent stays visible but is no longer an active participant
         self.models = {
             'mistral': {
                 'name': 'Mistral Large',
@@ -472,27 +471,6 @@ class VisualConsensusEngine:
         """Update the visual roundtable state for this session"""
         if self.update_callback:
             self.update_callback(state_update)
-
-    def log_research_activity(self, speaker: str, function: str, query: str, result: str, log_function=None):
-        """Log research activity to the discussion log"""
-        if log_function:
-            # Log the research request
-            log_function('research_request',
-                         speaker="Research Agent",
-                         content=f"Research requested by {speaker}: {function.replace('_', ' ').title()} - '{query}'",
-                         function=function,
-                         query=query,
-                         requesting_expert=speaker)
-
-            # Log the research result (truncated for readability)
-            result_preview = result[:300] + "..." if len(result) > 300 else result
-            log_function('research_result',
-                         speaker="Research Agent",
-                         content=f"Research completed: {function.replace('_', ' ').title()}\n\n{result_preview}",
-                         function=function,
-                         query=query,
-                         full_result=result,
-                         requesting_expert=speaker)
 
     def handle_function_calls(self, completion, original_prompt: str, calling_model: str) -> str:
         """UNIFIED function call handler with enhanced research capabilities"""
@@ -581,11 +559,6 @@ class VisualConsensusEngine:
                 'timestamp': datetime.now().strftime('%H:%M:%S'),
                 **kwargs
             })
-
-            # Get query parameter for logging
-            query_param = arguments.get("query") or arguments.get("topic") or arguments.get("technology") or arguments.get("company")
-            if query_param and result:
-                self.log_research_activity(calling_model_name, function_name, query_param, result, session_log_function)
 
             # Add function result to conversation
             messages.append({
@@ -925,36 +898,44 @@ class VisualConsensusEngine:
         return time_estimates.get(function_name, "1-3 minutes")
 
     def show_research_error(self, function: str, query: str, error: str, requesting_model_name: str = None):
-        """Show research error"""
-        session = get_or_create_session_state(self.session_id)
-        current_state = session["roundtable_state"]
-        all_messages = list(current_state.get("messages", []))
-        existing_bubbles = list(current_state.get("showBubbles", []))
-
-
-        # Ensure both Research Agent AND the calling model stay visible
-        if "Research Agent" not in existing_bubbles:
-            existing_bubbles.append("Research Agent")
-        # Keep the current speaker (the one who requested research) visible
-        current_speaker = current_state.get("currentSpeaker")
-        if current_speaker and current_speaker not in existing_bubbles and current_speaker != "Research Agent":
-            existing_bubbles.append(current_speaker)
-
-        message = {
-            "speaker": "Research Agent",
-            "text": f"❌ **Research Error**: {function.replace('_', ' ').title()}\n🔍 Query: \"{query}\"\n⚠️ Error: {error}\n🔄 Continuing with available data",
-            "type": "research_error"
-        }
-        all_messages.append(message)
-
-        self.update_visual_state({
-            "participants": current_state.get("participants", []),
-            "messages": all_messages,
-            "currentSpeaker": "Research Agent",
-            "thinking": [],
-            "showBubbles": existing_bubbles
-        })
-        time.sleep(1)
+        """Show research error from the specific agent and dismiss it"""
+        function_to_agent = {
+            "search_web": "web_search",
+            "search_wikipedia": "wikipedia",
+            "search_academic": "arxiv",
+            "search_technology_trends": "github",
+            "search_financial_data": "sec_edgar"
+        }
+
+        if function in function_to_agent:
+            agent_name = function_to_agent[function]
+            research_agent = self.research_agents[agent_name]
+            agent_display_name = research_agent.manifest.identification.conversationalName
+
+            session = get_or_create_session_state(self.session_id)
+            current_state = session["roundtable_state"]
+            all_messages = list(current_state.get("messages", []))
+
+            # Show error message from the specific agent
+            error_message = {
+                "speaker": agent_display_name,
+                "text": f"❌ **Research Error**\n🔬 {function.replace('_', ' ').title()}\n🔍 Query: \"{query}\"\n⚠️ Error: {error}\n🔄 Research failed, returning to discussion",
+                "type": "research_error"
+            }
+            all_messages.append(error_message)
+
+            existing_bubbles = list(current_state.get("showBubbles", []))
+            self.update_visual_state({
+                "participants": current_state.get("participants", []),
+                "messages": all_messages,
+                "currentSpeaker": requesting_model_name,
+                "thinking": [],
+                "showBubbles": existing_bubbles
+            })
+            time.sleep(1)
+
+            # Dismiss the research agent since research failed
+            self.dismiss_research_agent(agent_name, "current_conversation")
 
     def update_research_progress(self, progress_text: str, function_name: str = None):
         """Update research progress from the specific active research agent"""
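Note: below is a minimal sketch of the lookup chain this hunk introduces, mapping a failed research function to the agent whose name appears in the error bubble. The dataclasses are hypothetical stand-ins for the repo's actual OFP manifest objects; only the attribute path (`manifest.identification.conversationalName`) mirrors the diff.

```python
# Hypothetical stand-ins for the research agent registry used above.
from dataclasses import dataclass

@dataclass
class Identification:
    conversationalName: str

@dataclass
class Manifest:
    identification: Identification

@dataclass
class ResearchAgent:
    manifest: Manifest

# Keys match the function_to_agent values introduced in the diff.
research_agents = {
    "web_search": ResearchAgent(Manifest(Identification("Web Search Research Agent"))),
    "arxiv": ResearchAgent(Manifest(Identification("arXiv Research Agent"))),
}

function_to_agent = {"search_web": "web_search", "search_academic": "arxiv"}

def display_name_for(function: str):
    """Resolve the bubble speaker name for a failed research function."""
    agent_key = function_to_agent.get(function)
    if agent_key is None:
        return None  # unknown research function: nothing to display or dismiss
    return research_agents[agent_key].manifest.identification.conversationalName

print(display_name_for("search_academic"))  # -> arXiv Research Agent
```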
@@ -1264,7 +1245,7 @@ class VisualConsensusEngine:
         # Show latest position from each expert
         latest_positions = {}
         for msg in all_messages:
-            if msg["speaker"] != current_model_name and msg["speaker"] != "Research Agent":
+            if msg["speaker"] != current_model_name and not msg["speaker"].endswith("Research Agent"):
                 latest_positions[msg["speaker"]] = {
                     'text': msg['text'][:150] + "..." if len(msg['text']) > 150 else msg['text'],
                     'confidence': msg.get('confidence', 5)
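The same suffix test replaces several exact-match checks in the hunks below (the `responded_speakers` updates and the final-position tally). A minimal sketch of why `endswith` is needed once each source speaks under its own name; the sample speaker names are illustrative, not repo constants:

```python
# Illustrative message list in the shape used by the roundtable state.
all_messages = [
    {"speaker": "Mistral Large", "text": "Position A"},
    {"speaker": "Web Search Research Agent", "text": "Search result"},
    {"speaker": "Wikipedia Research Agent", "text": "Article summary"},
    {"speaker": "DeepSeek-R1", "text": "Position B"},
]

# Old check: speaker != "Research Agent" only excluded the single shared agent.
# New check: the suffix excludes every per-source agent at once.
experts = [m["speaker"] for m in all_messages
           if not m["speaker"].endswith("Research Agent")]
print(experts)  # -> ['Mistral Large', 'DeepSeek-R1']
```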
@@ -1307,15 +1288,13 @@ class VisualConsensusEngine:
                          log_function=None):
         """Run expert consensus with protocol-appropriate intensity and Research Agent integration"""
 
-        # Get only active models (Research Agent is visual-only now)
         available_models = [model for model, info in self.models.items() if info['available']]
         if not available_models:
             return "❌ No AI models available"
 
         model_roles = self.assign_roles(available_models, role_assignment)
 
-
-        visual_participant_names = [self.models[model]['name'] for model in available_models] + ["Research Agent"]
+        visual_participant_names = [self.models[model]['name'] for model in available_models]
 
         # Get protocol-appropriate style
         protocol_style = self.protocol_styles.get(decision_protocol, self.protocol_styles['consensus'])
@@ -1329,7 +1308,6 @@ class VisualConsensusEngine:
         log_event('phase', content=f"🎯 Starting Expert Analysis: {question}")
         log_event('phase', content=f"📊 Configuration: {len(available_models)} experts, {decision_protocol} protocol, {role_assignment} roles, {topology} topology")
 
-        # Initialize visual state with Research Agent visible
         self.update_visual_state({
             "participants": visual_participant_names,
             "messages": [],
@@ -1341,7 +1319,6 @@ class VisualConsensusEngine:
 
         all_messages = []
 
-        # Phase 1: Initial expert analysis (Research Agent activates only through function calls)
         log_event('phase', content="📋 Phase 1: Expert Initial Analysis")
 
         for model in available_models:
@@ -1451,7 +1428,7 @@ Provide your expert analysis:"""
             all_messages.append(message)
 
             # Update with new message
-            responded_speakers = list(set(msg["speaker"] for msg in all_messages if msg.get("speaker") and msg["speaker"] != "Research Agent"))
+            responded_speakers = list(set(msg["speaker"] for msg in all_messages if msg.get("speaker") and not msg["speaker"].endswith("Research Agent")))
 
             self.update_visual_state({
                 "participants": visual_participant_names,
@@ -1571,7 +1548,7 @@ Your expert response:"""
             all_messages.append(message)
 
             # Update visual state
-            responded_speakers = list(set(msg["speaker"] for msg in all_messages if msg.get("speaker") and msg["speaker"] != "Research Agent"))
+            responded_speakers = list(set(msg["speaker"] for msg in all_messages if msg.get("speaker") and not msg["speaker"].endswith("Research Agent")))
 
             self.update_visual_state({
                 "participants": visual_participant_names,
@@ -1621,9 +1598,13 @@ Your expert response:"""
         final_positions = {}
         confidence_scores = []
 
+        # Get list of all research agent names
+        research_agent_names = [agent.manifest.identification.conversationalName for agent in self.research_agents.values()]
+
         for msg in all_messages:
             speaker = msg["speaker"]
-            if speaker not in [moderator_title, 'Consilium', 'Research Agent']:
+            if (speaker not in [moderator_title, 'Consilium'] and
+                speaker not in research_agent_names):
                 if speaker not in final_positions:
                     final_positions[speaker] = []
                 final_positions[speaker].append(msg)
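A compact sketch of the new final-position filter above, which derives the exclusion list from agent manifests instead of hard-coding one name. The moderator title and agent names here are placeholders; in the diff they come from `moderator_title` and `self.research_agents`:

```python
# Placeholder values standing in for manifest-derived names.
moderator_title = "Lead Analyst"
research_agent_names = ["Web Search Research Agent", "Wikipedia Research Agent"]

all_messages = [
    {"speaker": "Mistral Large", "text": "Final position", "confidence": 8},
    {"speaker": "Lead Analyst", "text": "Synthesis"},
    {"speaker": "Wikipedia Research Agent", "text": "Source lookup"},
]

# Only expert speakers accumulate final positions; the moderator, the
# platform voice, and every named research agent are excluded.
final_positions = {}
for msg in all_messages:
    speaker = msg["speaker"]
    if (speaker not in [moderator_title, "Consilium"] and
            speaker not in research_agent_names):
        final_positions.setdefault(speaker, []).append(msg)

print(list(final_positions))  # -> ['Mistral Large']
```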
@@ -1731,7 +1712,7 @@ Provide your synthesis:"""
                   content=consensus_result,
                   confidence=avg_confidence)
 
-        responded_speakers = list(set(msg["speaker"] for msg in all_messages if msg.get("speaker") and msg["speaker"] != "Research Agent"))
+        responded_speakers = list(set(msg["speaker"] for msg in all_messages if msg.get("speaker") and not msg["speaker"].endswith("Research Agent")))
 
         self.update_visual_state({
             "participants": visual_participant_names,
@@ -1885,18 +1866,14 @@ def check_model_status_session(session_id_state: str = None, request: gr.Request
         'Mistral Large': mistral_key,
         'DeepSeek-R1': sambanova_key,
         'Meta-Llama-3.3-70B-Instruct': sambanova_key,
-        'QwQ-32B': sambanova_key,
-        'Research Agent': True
+        'QwQ-32B': sambanova_key
     }
 
     for model_name, available in models.items():
-        if model_name == 'Research Agent':
-            status = "✅ Available (Built-in)"
+        if available:
+            status = f"✅ Available (Key: {available[:3]}...)"
         else:
-            if available:
-                status = f"✅ Available (Key: {available[:3]}...)"
-            else:
-                status = "❌ Not configured"
+            status = "❌ Not configured"
         status_info += f"**{model_name}:** {status}\n\n"
 
     return status_info
@@ -1907,15 +1884,11 @@ with gr.Blocks(title="🎭 Consilium: Multi-AI Expert Consensus Platform - OFP (
     # 🎭 Consilium: Multi-AI Expert Consensus Platform - OFP (Open Floor Protocol) Version
     **Watch expert AI models collaborate with live research to solve your most complex decisions**
 
-    🏆 **Gradio Agents and MCP Hackathon 2025** submission with [custom Gradio component](https://huggingface.co/spaces/Agents-MCP-Hackathon/gradio_consilium_roundtable). 🎥 **Demo Videos:** [UI Demo](https://youtu.be/ciYLqI-Nawc) | [MCP Demo](https://youtu.be/r92vFUXNg74)
-
-
     ### 🚀 Features:
 
     * Visual roundtable of the AI models, including speech bubbles to see the discussion in real time.
-    * MCP mode enabled to also use it directly in, for example, Claude Desktop (without the visual table).
     * Includes Mistral (**mistral-large-latest**) via their API and the Models **DeepSeek-R1**, **Meta-Llama-3.3-70B-Instruct** and **QwQ-32B** via the SambaNova API.
-    * Research Agent with 5 sources for live research.
+    * Research Agents with 5 sources (**Web Search**, **Wikipedia**, **arXiv**, **GitHub**, **SEC EDGAR**) for comprehensive live research.
     * Assign different roles to the models, the protocol they should follow, and decide the communication strategy.
     * Pick one model as the lead analyst (had the best results when picking Mistral).
     * Configure the amount of discussion rounds.
@@ -2126,19 +2099,6 @@ with gr.Blocks(title="🎭 Consilium: Multi-AI Expert Consensus Platform - OFP (
     ```bash
     python app.py
     ```
-
-    ### 🔌 MCP Integration
-    Add to your Claude Desktop config:
-    ```json
-    {
-      "mcpServers": {
-        "consilium": {
-          "command": "npx",
-          "args": ["mcp-remote", "http://localhost:7860/gradio_api/mcp/sse"]
-        }
-      }
-    }
-    ```
     """)
 
 with gr.Tab("📚 Documentation"):
@@ -2224,5 +2184,5 @@ if __name__ == "__main__":
         server_port=7860,
         share=False,
         debug=False,
-        mcp_server=True
+        mcp_server=False
     )