Create app.py
app.py ADDED

import gradio as gr
import requests
import json
import os

# Load system prompts
with open("system_prompts.json", "r", encoding="utf-8") as f:
    SYSTEM_PROMPTS = json.load(f)

TOOLS = list(SYSTEM_PROMPTS.keys())

# Hugging Face Endpoint details (Set these as secrets/environment variables on HF Spaces)
HF_API_URL = os.environ.get("HF_API_URL", "https://api-inference.huggingface.co/models/mistralai/Mistral-7B-Instruct-v0.2")
HF_API_TOKEN = os.environ.get("HF_API_TOKEN", "hf_xxx")  # Replace with your token or set as secret

def query_hf_mistral(system_prompt, user_input):
    headers = {
        "Authorization": f"Bearer {HF_API_TOKEN}",
        "Content-Type": "application/json"
    }
    # The serverless Inference API's text-generation task expects "inputs" to be a
    # single string, not a list of chat messages. Mistral-7B-Instruct-v0.2 has no
    # separate system role, so the system prompt is folded into the [INST] block.
    prompt = f"<s>[INST] {system_prompt}\n\n{user_input} [/INST]"
    payload = {
        "inputs": prompt,
        "parameters": {
            "max_new_tokens": 1024,
            "temperature": 0.3,  # ignored while do_sample is False (greedy decoding)
            "do_sample": False,
            "return_full_text": False  # return only the completion, not the prompt
        }
    }
    response = requests.post(HF_API_URL, headers=headers, json=payload, timeout=90)
    if response.status_code == 200:
        result = response.json()
        # Output shape can differ by model; the usual text-generation response
        # is a list like [{"generated_text": "..."}]
        if isinstance(result, list) and len(result) > 0 and "generated_text" in result[0]:
            return result[0]["generated_text"]
        # Some endpoints return a bare {"generated_text": "..."}
        if "generated_text" in result:
            return result["generated_text"]
        # Legacy conversational endpoints return the turns under "conversation"
        if "conversation" in result and "generated_responses" in result["conversation"]:
            return result["conversation"]["generated_responses"][-1]
        # Fallback to string conversion
        return str(result)
    return f"Error: {response.status_code}\n{response.text}"

def ecoling_tool(tool, user_input):
    system_prompt = SYSTEM_PROMPTS[tool]
    return query_hf_mistral(system_prompt, user_input)

with gr.Blocks(title="Ecolinguistic MCP Server") as demo:
    gr.Markdown(
        "# 🌱 Ecolinguistic MCP Server\nSelect a tool and enter your text below. Powered by Hugging Face Mistral endpoint."
    )

    with gr.Row():
        tool = gr.Dropdown(label="Choose Ecolinguistic Tool", choices=TOOLS, value=TOOLS[0])
        user_input = gr.Textbox(lines=7, label="Input Text", placeholder="Paste your text here...")

    output = gr.Textbox(label="AI Response", lines=12)

    btn = gr.Button("Analyse")

    btn.click(fn=ecoling_tool, inputs=[tool, user_input], outputs=output)

# Launch only when executed directly; this also lets the module be imported
# (e.g. for testing) without starting the server.
if __name__ == "__main__":
    demo.launch()
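
For reference, app.py assumes a system_prompts.json next to it: the top-level keys become the tool names shown in the dropdown, and the values are the system prompts sent to the model. A minimal sketch that writes such a file (the tool names and prompt wording below are illustrative, not part of the commit):

import json

# Hypothetical example of the system_prompts.json that app.py loads:
# keys -> dropdown tool names, values -> system prompts for the model.
example_prompts = {
    "Ecological Discourse Analysis": (
        "You are an ecolinguistics assistant. Analyse the text for the "
        "framings, metaphors and evaluations it uses to talk about the "
        "natural world."
    ),
    "Erasure Detection": (
        "Identify where non-human life and ecosystems are backgrounded "
        "or erased in the text, and point out concrete examples."
    ),
}

with open("system_prompts.json", "w", encoding="utf-8") as f:
    json.dump(example_prompts, f, ensure_ascii=False, indent=2)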
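
Because demo.launch() now sits behind a __main__ guard, query_hf_mistral can be smoke-tested without starting the UI. A rough check, assuming HF_API_TOKEN is set in the environment and system_prompts.json exists (the prompt text here is arbitrary):

# Importing app builds the Blocks UI but does not launch it.
from app import query_hf_mistral

reply = query_hf_mistral(
    "You are a concise ecolinguistics assistant.",
    "In one sentence, what does ecolinguistics study?",
)
print(reply)

Note that the Space also needs a requirements.txt listing at least gradio and requests to satisfy the imports above.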