# app.py (on Hugging Face Spaces)
import gradio as gr
import httpx
import json

# Replace with your Modal API endpoint URL
MODAL_API_ENDPOINT = "https://blastingneurons--collective-hive-backend-orchestrate-hive-api.modal.run"
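
# The backend streams newline-delimited JSON (NDJSON). Each line is one event;
# these shapes are what the parsing loop in call_modal_backend() expects:
#   {"event": "status_update", "data": "<status text>"}
#   {"event": "chat_update", "data": {"agent": "<name>", "text": "<message>"}}
#   {"event": "final_solution", "solution": "...", "confidence": "...", "minority_opinions": "..."}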

# Helper function to format chat history for Gradio's 'messages' type.
# Gradio's 'messages' Chatbot only accepts the roles "user" and "assistant",
# so the agent name is folded into the message content instead of the role.
def format_chat_history_for_gradio(log_entries: list[dict]) -> list[dict]:
    formatted_messages = []
    for entry in log_entries:
        agent = entry.get("agent", "System")
        content = entry.get("text", "")
        formatted_messages.append({"role": "assistant", "content": f"**{agent}:** {content}"})
    return formatted_messages
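
# Example: format_chat_history_for_gradio([{"agent": "Analyst", "text": "On it."}])
# -> [{"role": "assistant", "content": "**Analyst:** On it."}]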

async def call_modal_backend(problem_input: str, complexity: int):
    full_chat_history = []
    current_status = "Connecting to Hive..."
    current_solution = ""
    current_confidence = ""
    current_minority_opinions = ""

    # Reset all outputs before the stream starts.
    yield (
        current_status,
        format_chat_history_for_gradio([]),
        current_solution,
        current_confidence,
        current_minority_opinions
    )
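
    # Because this function is an async generator, Gradio re-renders the bound
    # output components on every yield, giving a live view of the stream.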
    try:
        async with httpx.AsyncClient(timeout=600.0) as client:
            async with client.stream("POST", MODAL_API_ENDPOINT, json={"problem": problem_input, "complexity": complexity}) as response:
                response.raise_for_status()

                # Chunks may split a JSON line in two, so buffer bytes and only
                # parse once a full newline-terminated line is available.
                buffer = ""
                async for chunk in response.aiter_bytes():
                    buffer += chunk.decode('utf-8')
                    while "\n" in buffer:
                        line, buffer = buffer.split("\n", 1)
                        if not line.strip():
                            continue
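
                        # 'line' now holds one complete NDJSON event; parse and dispatch it.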
                        try:
                            data = json.loads(line)
                            event_type = data.get("event")

                            if event_type == "status_update":
                                current_status = data["data"]
                            elif event_type == "chat_update":
                                full_chat_history.append(data["data"])
                                current_status = "In Progress..."
                            elif event_type == "final_solution":
                                current_status = "Solution Complete!"
                                current_solution = data["solution"]
                                current_confidence = data["confidence"]
                                current_minority_opinions = data["minority_opinions"]
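                                # Emit the completed state once, then stop consuming the stream.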
                                yield (
                                    current_status,
                                    format_chat_history_for_gradio(full_chat_history + [{"agent": "System", "text": "Final solution synthesized."}]),
                                    current_solution,
                                    current_confidence,
                                    current_minority_opinions
                                )
                                return

                            yield (
                                current_status,
                                format_chat_history_for_gradio(full_chat_history),
                                current_solution,
                                current_confidence,
                                current_minority_opinions
                            )
                        except json.JSONDecodeError as e:
                            print(f"JSON Decode Error: {e} in line: {line}")
                            current_status = f"Error decoding: {e}"
                            yield (current_status, format_chat_history_for_gradio(full_chat_history), current_solution, current_confidence, current_minority_opinions)
                            # Keep parsing subsequent lines rather than aborting the stream.
                        except Exception as e:
                            print(f"Error processing event: {e}, Line: {line}")
                            current_status = f"An internal error occurred: {e}"
                            yield (current_status, format_chat_history_for_gradio(full_chat_history), current_solution, current_confidence, current_minority_opinions)
                            return  # Exit on critical error
    except httpx.HTTPStatusError as e:
        current_status = f"HTTP Error from Modal backend: {e.response.status_code}"
        print(current_status)
    except httpx.RequestError as e:
        current_status = f"Request Error: Could not connect to Modal backend: {e}"
        print(current_status)
    except Exception as e:
        current_status = f"An unexpected error occurred during API call: {e}"
        print(current_status)

    # Final yield so the UI reflects whatever state (or error) the stream ended in.
    yield (current_status, format_chat_history_for_gradio(full_chat_history), current_solution, current_confidence, current_minority_opinions)

with gr.Blocks() as demo:
    gr.Markdown("# Collective Intelligence Hive")
    gr.Markdown("Enter a problem and watch a hive of AI agents collaborate to solve it! Powered by Modal and Nebius.")

    with gr.Row():
        problem_input = gr.Textbox(label="Problem to Solve", lines=3, placeholder="e.g., 'Develop a marketing strategy for a new eco-friendly smart home device targeting millennials.'", scale=3)
        complexity_slider = gr.Slider(minimum=1, maximum=5, value=3, step=1, label="Problem Complexity", scale=1)
    initiate_btn = gr.Button("Initiate Hive", variant="primary")

    status_output = gr.Textbox(label="Hive Status", interactive=False)

    with gr.Row():
        with gr.Column(scale=2):
            chat_display = gr.Chatbot(
                label="Agent Discussion Log",
                height=500,
                type='messages',
                autoscroll=True
            )
        with gr.Column(scale=1):
            solution_output = gr.Textbox(label="Synthesized Solution", lines=10, interactive=False)
            confidence_output = gr.Textbox(label="Solution Confidence", interactive=False)
            minority_output = gr.Textbox(label="Minority Opinions", lines=3, interactive=False)
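
    # The 5-tuple yielded by call_modal_backend maps positionally onto this
    # outputs list: (status, chat history, solution, confidence, minority opinions).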
    initiate_btn.click(
        call_modal_backend,
        inputs=[problem_input, complexity_slider],
        outputs=[
            status_output,
            chat_display,
            solution_output,
            confidence_output,
            minority_output
        ],
        queue=True
    )

demo.launch()