Update app.py
app.py CHANGED
@@ -1,57 +1,43 @@
 import os
 import json
-import requests
 import gradio as gr
+from huggingface_hub import InferenceClient

 # Load tool prompts
 with open("system_prompts.json", "r", encoding="utf-8") as f:
     SYSTEM_PROMPTS = json.load(f)
 TOOLS = list(SYSTEM_PROMPTS.keys())

-# HF Inference API endpoint and token
-HF_API_URL = os.environ["HF_API_URL"]
-HF_TOKEN = os.environ["HF_TOKEN"]
+# Set these environment variables in your HF Space or local env
+HF_TOKEN = os.environ["HF_TOKEN"]
+HF_MODEL = os.environ.get("HF_MODEL")

-def run_mistral(prompt: str, text: str) -> str:
+# Initialize HF InferenceClient
+client = InferenceClient(model=HF_MODEL, token=HF_TOKEN)
+
+def run_model(prompt: str, text: str) -> str:
     """
-    Sends 'prompt' as system message and 'text' as user message.
-    For reference, HF Inference endpoints accept JSON payloads like this :contentReference[oaicite:1]{index=1}.
+    Uses InferenceClient.chat.completions to call the model.
     """
-    headers = {
-        "Authorization": f"Bearer {HF_TOKEN}",
-        "Content-Type": "application/json"
-    }
-    payload = {
-        "inputs": [
+    resp = client.chat.completions.create(
+        messages=[
             {"role": "system", "content": prompt},
             {"role": "user", "content": text}
         ],
-        "parameters": {
-            "max_new_tokens": 512,
-            "temperature": 0.3
-        }
-    }
-    resp = requests.post(HF_API_URL, headers=headers, json=payload, timeout=60)
-    resp.raise_for_status()
-    out = resp.json()
-    # HF returns a list for chat models
-    if isinstance(out, list) and "generated_text" in out[0]:
-        return out[0]["generated_text"]
-    # else a dict
-    return out.get("generated_text", json.dumps(out))
+        max_tokens=512,
+        temperature=0.3
+    )
+    return resp.choices[0].message.content

 def ecoling_tool(tool: str, text: str) -> str:
-    return run_mistral(SYSTEM_PROMPTS[tool], text)
+    return run_model(SYSTEM_PROMPTS[tool], text)

 demo = gr.Interface(
     fn=ecoling_tool,
     inputs=[gr.Dropdown(TOOLS, label="Tool"), gr.Textbox(lines=8, label="Input Text")],
     outputs=[gr.Textbox(label="Response")],
     title="🌱 Ecolinguistic MCP Server",
-    description="
+    description="UI + MCP server"
 )

 if __name__ == "__main__":
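The substance of the commit is the swap from hand-rolled `requests` POSTs against the Inference API to `huggingface_hub.InferenceClient`, whose OpenAI-style chat interface returns parsed message objects; that is why the old list-vs-dict `generated_text` handling disappears. A minimal sketch of exercising the new call path outside Gradio, assuming `HF_TOKEN` is set; the model name below is a stand-in for illustration, not the Space's actual `HF_MODEL`:

import os
from huggingface_hub import InferenceClient

# Stand-in model for illustration; the Space reads HF_MODEL from its environment.
model = os.environ.get("HF_MODEL", "mistralai/Mistral-7B-Instruct-v0.3")
client = InferenceClient(model=model, token=os.environ["HF_TOKEN"])

# Same call shape as the updated run_model()
resp = client.chat.completions.create(
    messages=[
        {"role": "system", "content": "You are an ecolinguistics assistant."},
        {"role": "user", "content": "Analyse the framing of 'natural resources'."},
    ],
    max_tokens=128,
    temperature=0.3,
)
print(resp.choices[0].message.content)

`resp.choices[0].message.content` mirrors the OpenAI response shape, so no status-code or JSON-shape checks are needed on the happy path; errors surface as exceptions from the client instead of via `raise_for_status()`.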
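The hunk ends inside the `if __name__ == "__main__":` guard, so the launch call itself is not shown. For a Space that bills itself as "UI + MCP server", a typical Gradio launch would look like the line below; this is an assumption about the elided code, not the commit's actual line, and `mcp_server=True` requires Gradio installed with the MCP extra (`pip install "gradio[mcp]"`):

if __name__ == "__main__":
    # Assumed launch (not shown in the diff): serve the web UI and expose
    # the wrapped function as an MCP tool.
    demo.launch(mcp_server=True)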