# app.py
from fastapi import FastAPI
import gradio as gr
from multi_inference import multi_query as deepseek_query
from agents.philosopher import PhilosopherAgent
from agents.historian import HistorianAgent
from agents.hacker import HackerAgent
from agents.comedian import ComedianAgent
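
# NOTE: `multi_inference` and the `agents` package are local modules in this
# Space; `multi_query` is assumed to send a prompt to the DeepSeek backend and
# return the model's text completion.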

# Initialize agents
philosopher = PhilosopherAgent()
historian = HistorianAgent()
hacker = HackerAgent()
comedian = ComedianAgent()

# Initialize FastAPI
app = FastAPI()
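
# Each agent's run(prompt, query_fn) is assumed to take the user prompt plus a
# callable LLM backend and return a string reply (an assumption; the agent
# classes themselves are not shown in this file).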

# Chat logic: fan the prompt out to every agent and collect their replies
def chat(prompt):
    responses = {}
    responses["🧙‍♂️ Philosopher"] = philosopher.run(prompt, deepseek_query)
    responses["👨‍🏫 Historian"] = historian.run(prompt, deepseek_query)
    responses["💻 Hacker"] = hacker.run(prompt, deepseek_query)
    responses["😂 Comedian"] = comedian.run(prompt, deepseek_query)
    return responses
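
# The four backend calls above run sequentially; since they are independent,
# a thread pool or asyncio.gather could cut latency (left out for clarity).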

# Gradio Interface
gradio_app = gr.Interface(
    fn=chat,
    inputs=gr.Textbox(label="Ask a Question"),
    outputs=gr.JSON(label="Responses from Agents"),
    title="Multi-Agent AI Chatroom 🤖",
)

# FastAPI routes must be registered before Gradio is mounted at "/",
# otherwise the root-level mount shadows them.
@app.get("/status")
def read_status():
    return {"status": "running"}

# Mount Gradio to FastAPI at the root path
app = gr.mount_gradio_app(app, gradio_app, path="/")