neovalle committed on
Commit
c0722e5
·
verified ·
1 Parent(s): 5a57f28

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +28 -29
app.py CHANGED
@@ -1,44 +1,43 @@
1
- import os
2
- import json
3
  import gradio as gr
4
  from huggingface_hub import InferenceClient
5
 
6
- # Load tool prompts
7
- with open("system_prompts.json", "r", encoding="utf-8") as f:
8
  SYSTEM_PROMPTS = json.load(f)
9
- TOOLS = list(SYSTEM_PROMPTS.keys())
10
-
11
- # Set these environment variables in your HF Space or local env
12
- HF_TOKEN = os.environ["HF_TOKEN"]
13
- HF_MODEL = os.environ.get("HF_MODEL")
14
 
15
  # Initialize HF InferenceClient
16
- client = InferenceClient(model=HF_MODEL, token=HF_TOKEN)
17
 
 
18
  def run_model(prompt: str, text: str) -> str:
19
- """
20
- Uses InferenceClient.chat.completions to call mnodel.
21
- """
22
  resp = client.chat.completions.create(
23
- messages=[
24
- {"role": "system", "content": prompt},
25
- {"role": "user", "content": text}
26
- ],
27
- max_tokens=512,
28
- temperature=0.3
29
  )
30
  return resp.choices[0].message.content
31
 
32
- def ecoling_tool(tool: str, text: str) -> str:
33
- return run_model(SYSTEM_PROMPTS[tool], text)
 
 
 
 
 
 
34
 
35
- demo = gr.Interface(
36
- fn=ecoling_tool,
37
- inputs=[gr.Dropdown(TOOLS, label="Tool"), gr.Textbox(lines=8, label="Input Text")],
38
- outputs=[gr.Textbox(label="Response")],
39
- title="🌱 Ecolinguistic MCP Server",
40
- description="UI + MCP server"
41
- )
 
 
 
 
42
 
 
43
  if __name__ == "__main__":
44
- demo.launch(mcp_server=True,share=True)
 
1
+ import os, json
 
2
  import gradio as gr
3
  from huggingface_hub import InferenceClient
4
 
5
# Tool-name -> system-prompt mapping, loaded once at import time.
with open("system_prompts.json", "r", encoding="utf-8") as prompts_file:
    SYSTEM_PROMPTS = json.load(prompts_file)
 
 
 
 
 
8
 
9
# Initialize HF InferenceClient
# NOTE(review): os.getenv returns None when HF_MODEL/HF_TOKEN are unset, so a
# missing variable is only surfaced on the first API call; the previous
# revision used os.environ["HF_TOKEN"] to fail fast — confirm which is intended.
client = InferenceClient(model=os.getenv("HF_MODEL"), token=os.getenv("HF_TOKEN"))
11
 
12
# Common runner
def run_model(prompt: str, text: str) -> str:
    """Send *text* to the chat model with *prompt* as the system message.

    Uses the module-level ``client`` (huggingface_hub InferenceClient) and
    returns the assistant's reply content as a string.
    """
    chat_messages = [
        {"role": "system", "content": prompt},
        {"role": "user", "content": text},
    ]
    completion = client.chat.completions.create(
        messages=chat_messages,
        max_tokens=512,
        temperature=0.3,
    )
    return completion.choices[0].message.content
19
 
20
# Dynamically create tool functions
def make_tool_fn(prompt):
    """Return a single-argument tool function with *prompt* bound to it.

    Binding happens per call, so each tool keeps its own prompt (avoids the
    late-binding-closure pitfall when building many tools in a loop).
    """
    def tool_fn(text):
        return run_model(prompt, text)
    return tool_fn
23
+
24
# Map each prompt's display name to a bound tool callable, normalizing the
# key to identifier style ("Frame Analysis" -> "frame_analysis").
tool_fns = {
    name.lower().replace(" ", "_"): make_tool_fn(prompt)
    for name, prompt in SYSTEM_PROMPTS.items()
}
28
 
29
# Build Gradio interface: one tab per tool, each with its own input/output
# textboxes and a Run button wired to that tool's callable.
demo = gr.Blocks()
with demo:
    gr.Markdown("# 🌱 Ecolinguistic MCP Server")
    tabs = gr.Tabs()
    for badge, fn in tool_fns.items():
        # NOTE(review): the same gr.Tabs() context manager is re-entered on
        # every iteration instead of wrapping the whole loop once — verify
        # the tabs render as one tab strip as intended.
        with tabs:
            # Turn the key back into a human-readable label ("frame_analysis"
            # -> "Frame Analysis").
            with gr.Tab(badge.replace("_", " ").title()):
                inp = gr.Textbox(lines=5, label="Text")
                out = gr.Textbox(label="Response")
                gr.Button("Run").click(fn, inp, out)
40
 
41
# Expose as MCP server
if __name__ == "__main__":
    # mcp_server=True presumably serves the app's functions over the Model
    # Context Protocol; share=True creates a public Gradio share link.
    demo.launch(mcp_server=True, share=True)