|
|
|
|
|
from fastapi import FastAPI |
|
import gradio as gr |
|
from multi_inference import multi_query as deepseek_query |
|
|
|
from agents.philosopher import PhilosopherAgent |
|
from agents.historian import HistorianAgent |
|
from agents.hacker import HackerAgent |
|
from agents.comedian import ComedianAgent |
|
|
|
# ASGI application; the Gradio UI is mounted onto it further down the file.
app = FastAPI()


# One long-lived instance of each persona agent, created at import time and
# shared across all requests (agents are assumed stateless per call —
# TODO confirm in agents/*).
philosopher = PhilosopherAgent()

historian = HistorianAgent()

hacker = HackerAgent()

comedian = ComedianAgent()
|
|
|
|
|
def chat(prompt):
    """Fan one user prompt out to every persona agent.

    Each agent's ``run`` is invoked with the prompt and the shared
    ``deepseek_query`` inference callable.

    Returns:
        dict: display label -> that agent's response.
    """
    # NOTE(review): the labels below look like mojibake'd emoji — preserved
    # byte-for-byte here; confirm the file's encoding upstream.
    panel = [
        ("π§ββοΈ Philosopher", philosopher),
        ("π¨βπ« Historian", historian),
        ("π» Hacker", hacker),
        ("π Comedian", comedian),
    ]
    return {label: agent.run(prompt, deepseek_query) for label, agent in panel}
|
|
|
|
|
# Gradio UI definition: a single free-text question box wired to ``chat``;
# the per-agent responses are rendered as raw JSON.
# NOTE(review): the title string looks like a mojibake'd emoji — confirm
# the file's encoding upstream.
interface = gr.Interface(

    fn=chat,

    inputs=gr.Textbox(label="Ask a Question"),

    outputs=gr.JSON(label="Responses from Agents"),

    title="Multi-Agent AI Chatroom π€",

)
|
|
|
@app.get("/")
def read_root():
    """Landing endpoint for the API root path."""
    welcome = {"message": "Welcome to the Multi-Agent AI Chatroom!"}
    return welcome
|
|
|
# Serve the Gradio UI at /chat on the same ASGI app as the FastAPI routes.
app = gr.mount_gradio_app(app, interface, path="/chat")
|
|