import gradio as gr
import asyncio
import os
from minion import config
from minion.main import LocalPythonEnv
from minion.main.rpyc_python_env import RpycPythonEnv
from minion.main.brain import Brain
from minion.providers import create_llm_provider
# Build the brain once at module load (avoid rebuilding it on every request).
def build_brain():
    """Construct a Brain backed by the configured LLM and a local Python env.

    Returns:
        Brain: a ready-to-use brain instance.

    Raises:
        ValueError: if the model name is not present in ``config.models``.
    """
    model = "gpt-4.1"
    llm_config = config.models.get(model)
    if llm_config is None:
        # Fail fast with a clear message instead of passing None into
        # create_llm_provider, which would fail with a confusing error later.
        raise ValueError(f"Model {model!r} not found in config.models")
    llm = create_llm_provider(llm_config)
    # RpycPythonEnv(port=3007) is an alternative remote execution environment.
    python_env = LocalPythonEnv(verbose=False)
    return Brain(
        python_env=python_env,
        llm=llm,
    )
brain = build_brain()
async def minion_respond_async(query):
    """Run one brain step for *query* and return the observation text."""
    observation, _score, *_rest = await brain.step(query=query, route="python", check=False)
    return observation
def minion_respond(query):
    """Synchronous Gradio entry point: drive the async brain call to completion."""
    answer = asyncio.run(minion_respond_async(query))
    return answer
# Minimal text-in / text-out UI; Gradio wires minion_respond to the submit action.
demo = gr.Interface(
    minion_respond,
    "text",
    "text",
    title="Minion Brain Chat",
    description="用 Minion1 Brain 作为后端的智能问答",
)
if __name__ == "__main__":
    # mcp_server=True presumably also exposes the app as an MCP server
    # (recent Gradio feature) — TODO confirm the required Gradio version.
    demo.launch(mcp_server=True)