Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -2,28 +2,37 @@ import os, json
|
|
2 |
import gradio as gr
|
3 |
from huggingface_hub import InferenceClient
|
4 |
|
5 |
-
# Load
|
6 |
with open("system_prompts.json", encoding="utf-8") as f:
|
7 |
SYSTEM_PROMPTS = json.load(f)
|
8 |
|
9 |
client = InferenceClient(model=os.getenv("HF_MODEL"), token=os.getenv("HF_TOKEN"))
|
10 |
|
11 |
def run_model(prompt: str, text: str) -> str:
|
|
|
12 |
resp = client.chat.completions.create(
|
13 |
-
messages=[{"role":"system","content":prompt},{"role":"user","content":text}],
|
14 |
-
max_tokens=512,
|
|
|
15 |
)
|
16 |
return resp.choices[0].message.content
|
17 |
|
18 |
-
|
19 |
-
with demo:
|
20 |
gr.Markdown("# 🌱 Ecolinguistic MCP Server")
|
21 |
for name, prompt in SYSTEM_PROMPTS.items():
|
22 |
-
|
23 |
-
|
24 |
-
|
25 |
-
|
26 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
27 |
|
28 |
-
|
29 |
-
demo.launch(mcp_server=True, share=True)
|
|
|
2 |
import gradio as gr
|
3 |
from huggingface_hub import InferenceClient
|
4 |
|
5 |
+
# Map of tool name -> system prompt, loaded once at startup.
with open("system_prompts.json", encoding="utf-8") as fp:
    SYSTEM_PROMPTS = json.loads(fp.read())

# One shared inference client for every tool call.
# Model id and API token are supplied via env vars (Space secrets).
client = InferenceClient(model=os.getenv("HF_MODEL"), token=os.getenv("HF_TOKEN"))
10 |
|
11 |
def run_model(prompt: str, text: str) -> str:
    """Send *text* to the chat model under the given system *prompt*.

    Returns the assistant's reply as plain text.
    """
    conversation = [
        {"role": "system", "content": prompt},
        {"role": "user", "content": text},
    ]
    completion = client.chat.completions.create(
        messages=conversation,
        max_tokens=512,
        temperature=0.3,
    )
    return completion.choices[0].message.content
|
19 |
|
20 |
+
# Build the Gradio app: one MCP tool per configured system prompt.

def _make_tool_fn(system_prompt: str, fn_name: str, description: str):
    """Create a tool function permanently bound to *system_prompt*.

    A factory is used (instead of a bare closure over the loop variable) so
    each registered tool keeps its own prompt rather than all sharing the
    last one iterated (the classic late-binding-closure pitfall).
    """
    def tool_fn(text: str) -> str:
        return run_model(system_prompt, text)
    # Stamp a meaningful identity on the generated function so
    # introspection/tool listings show the endpoint name, not "tool_fn".
    tool_fn.__name__ = fn_name
    tool_fn.__doc__ = description
    return tool_fn

with gr.Blocks() as demo:
    gr.Markdown("# 🌱 Ecolinguistic MCP Server")
    for name, prompt in SYSTEM_PROMPTS.items():
        # Derive a stable snake_case endpoint name from the prompt's title.
        fn_name = name.lower().replace(" ", "_")
        description = f"Tool: {name}"

        # Register each prompt as an MCP tool endpoint via gr.api.
        gr.api(
            fn=_make_tool_fn(prompt, fn_name, description),
            api_name=fn_name,
            api_description=description,
        )

demo.launch(mcp_server=True, share=True)
|
|