# (removed non-code page-scrape artifact: "Spaces: / Sleeping / Sleeping")
# app.py
#
# Multi-agent AI chatroom: four persona agents answer the same prompt,
# served through a Gradio UI mounted on a FastAPI application.
from fastapi import FastAPI
import gradio as gr

from multi_inference import multi_query as deepseek_query
from agents.philosopher import PhilosopherAgent
from agents.historian import HistorianAgent
from agents.hacker import HackerAgent
from agents.comedian import ComedianAgent

# One long-lived agent per persona, shared across all requests.
philosopher = PhilosopherAgent()
historian = HistorianAgent()
hacker = HackerAgent()
comedian = ComedianAgent()

# FastAPI application; the Gradio UI is mounted onto it further down.
app = FastAPI()
# Chat logic
def chat(prompt):
    """Fan a single user prompt out to every persona agent.

    Parameters
    ----------
    prompt : str
        The user's question, as entered in the Gradio textbox.

    Returns
    -------
    dict
        Mapping of display label -> that agent's response, as produced by
        each agent's ``run(prompt, deepseek_query)``.

    NOTE(review): the original labels contained mojibake (e.g. "π» Hacker",
    emoji corrupted by a bad encoding round-trip). They are restored to
    plausible emoji here — confirm against the intended UI text.
    """
    labelled_agents = {
        "🧠 Philosopher": philosopher,
        "👨‍🏫 Historian": historian,
        "💻 Hacker": hacker,
        "😂 Comedian": comedian,
    }
    # Each agent is queried with the same prompt and shared query backend.
    return {
        label: agent.run(prompt, deepseek_query)
        for label, agent in labelled_agents.items()
    }
# Gradio UI: one textbox in, JSON dict of per-agent answers out.
# NOTE(review): the title's emoji was mojibake ("π€") in the original;
# restored to 🤖 — confirm against the intended text.
gradio_app = gr.Interface(
    fn=chat,
    inputs=gr.Textbox(label="Ask a Question"),
    outputs=gr.JSON(label="Responses from Agents"),
    title="Multi-Agent AI Chatroom 🤖",
)

# Mount the Gradio UI at the application root.
app = gr.mount_gradio_app(app, gradio_app, path="/")
# Optional FastAPI status endpoint.
# NOTE(review): no route decorator is present, so FastAPI never registers
# this function as an endpoint — it was presumably meant to carry something
# like @app.get("/status") (the root path "/" is already taken by the
# Gradio mount above). Left unregistered to preserve current behavior;
# confirm intent before wiring it up.
def read_status():
    """Return a minimal health-check payload."""
    return {"status": "running"}