Update app configurations and dependencies
Files changed:
- .gitignore +5 -1
- README.md +3 -2
- app.py +14 -1
- app1.py +33 -15
- requirements.txt +2 -1
.gitignore CHANGED

@@ -148,4 +148,8 @@ Desktop.ini
 # Gradio specific
 gradio_cached_examples/
 *.db
-flagged/
+flagged/
+
+logs/
+
+
README.md CHANGED

@@ -4,12 +4,13 @@ emoji: 💬
 colorFrom: yellow
 colorTo: purple
 sdk: gradio
-sdk_version: 5.0
-app_file:
+sdk_version: 5.32.0
+app_file: app.py
 pinned: false
 license: mit
 short_description: minion running in space
 tags:
+- mcp-server-track
 - agent-demo-track
 ---
 
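Note that the pinned `sdk_version: 5.32.0` matches the `gradio[mcp]==5.32.0` pin in requirements.txt, and `app_file: app.py` tells the Space which script to run.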
app.py CHANGED

@@ -15,6 +15,19 @@ def respond(
     temperature,
     top_p,
 ):
+    """Generate a response from the language model based on user input and chat history.
+
+    Args:
+        message (str): The latest user message to respond to.
+        history (list[tuple[str, str]]): List of (user, assistant) message pairs representing the conversation history.
+        system_message (str): Instruction or context for the assistant's behavior.
+        max_tokens (int): Maximum number of tokens to generate in the response.
+        temperature (float): Sampling temperature for response randomness.
+        top_p (float): Nucleus sampling probability threshold.
+
+    Yields:
+        str: The progressively generated response from the assistant.
+    """
     messages = [{"role": "system", "content": system_message}]
 
     for val in history:
@@ -61,4 +74,4 @@ demo = gr.ChatInterface(
 
 
 if __name__ == "__main__":
-    demo.launch()
+    demo.launch(mcp_server=True)
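Launching with `mcp_server=True` serves the `respond` function as an MCP tool in addition to the regular ChatInterface UI; Gradio documents the built-in MCP server's SSE endpoint at `/gradio_api/mcp/sse`. A minimal client sketch, assuming the Space is running locally on port 7860 and the official `mcp` Python SDK is installed:

```python
# Minimal sketch of an MCP client listing the tools this Space exposes.
# Assumes the app is running locally on port 7860; the SSE path below is
# the one Gradio documents for its built-in MCP server.
import asyncio

from mcp import ClientSession
from mcp.client.sse import sse_client


async def main():
    url = "http://127.0.0.1:7860/gradio_api/mcp/sse"
    async with sse_client(url) as (read, write):
        async with ClientSession(read, write) as session:
            await session.initialize()
            tools = await session.list_tools()
            print([tool.name for tool in tools.tools])


asyncio.run(main())
```

The same URL can be dropped into any MCP-capable client configuration.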
app1.py CHANGED

@@ -1,25 +1,43 @@
 import gradio as gr
+import asyncio
+import os
 
+from minion import config
+from minion.main import LocalPythonEnv
+from minion.main.rpyc_python_env import RpycPythonEnv
+from minion.main.brain import Brain
+from minion.providers import create_llm_provider
 
-def letter_counter(word, letter):
-    """Count the occurrences of a specific letter in a word.
+# Initialize the brain once so it is not rebuilt on every request
+def build_brain():
+    model = "gpt-4.1"
+    llm_config = config.models.get(model)
+    llm = create_llm_provider(llm_config)
+    #python_env = RpycPythonEnv(port=3007)
+    python_env = LocalPythonEnv(verbose=False)
+    brain = Brain(
+        python_env=python_env,
+        llm=llm,
+    )
+    return brain
 
-    Args:
-        word: The word or phrase to analyze
-        letter: The letter to count occurrences of
+brain = build_brain()
 
-    Returns:
-        The number of occurrences of the letter.
-    """
-    return word.lower().count(letter.lower())
+async def minion_respond_async(query):
+    obs, score, *_ = await brain.step(query=query, route="python", check=False)
+    return obs
 
+def minion_respond(query):
+    # Synchronous Gradio entry point; dispatches the async call
+    return asyncio.run(minion_respond_async(query))
 
 demo = gr.Interface(
-    fn=
-    inputs=
-    outputs="
-    title="
-    description="
+    fn=minion_respond,
+    inputs="text",
+    outputs="text",
+    title="Minion Brain Chat",
+    description="Intelligent Q&A backed by Minion1 Brain"
 )
 
-demo.launch()
+if __name__ == "__main__":
+    demo.launch(mcp_server=True)
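One caveat in app1.py: `asyncio.run` creates and tears down a fresh event loop on every request. Gradio also accepts coroutine functions as handlers directly, so the synchronous wrapper could be dropped; a hedged sketch of that variant:

```python
# Gradio awaits async handlers itself, so the asyncio.run wrapper is optional.
demo = gr.Interface(
    fn=minion_respond_async,  # coroutine function, awaited by Gradio
    inputs="text",
    outputs="text",
    title="Minion Brain Chat",
    description="Intelligent Q&A backed by Minion1 Brain",
)
```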
requirements.txt CHANGED

@@ -1,2 +1,3 @@
 gradio[mcp]==5.32.0
-huggingface_hub>=0.28.1
+huggingface_hub>=0.28.1
+minionx>=0.1.1