neovalle committed on
Commit
3ffa4a4
·
verified ·
1 Parent(s): 4dcc3bb

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +44 -20
app.py CHANGED
@@ -1,13 +1,11 @@
1
- import os
2
- import json
3
  import gradio as gr
4
  from huggingface_hub import InferenceClient
5
 
6
- # Load prompts
7
  with open("system_prompts.json", encoding="utf-8") as f:
8
  SYSTEM_PROMPTS = json.load(f)
9
 
10
- # Initialize InferenceClient
11
  client = InferenceClient(model=os.getenv("HF_MODEL"), token=os.getenv("HF_TOKEN"))
12
 
13
  def run_model(prompt: str, text: str) -> str:
@@ -18,22 +16,48 @@ def run_model(prompt: str, text: str) -> str:
18
  )
19
  return resp.choices[0].message.content
20
 
21
- # Build a separate Gradio Interface per tool
22
- tools = []
23
- for name, prompt in SYSTEM_PROMPTS.items():
24
- tool_name = name.lower().replace(" ", "_")
25
- def make_fn(p):
26
- def fn(text: str) -> str:
27
- """Tool for: """ + name
28
- return run_model(p, text)
29
- return fn
30
- fn = make_fn(prompt)
31
- iface = gr.Interface(fn=fn, inputs=gr.Textbox(lines=5), outputs=gr.Textbox(),
32
- title=name, description=f"{name} tool")
33
- tools.append(iface)
34
-
35
- # Launch all tools under one app
36
- demo = gr.TabbedInterface(interface_list=tools, tab_names=list(SYSTEM_PROMPTS.keys()))
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
37
 
38
  if __name__ == "__main__":
39
  demo.launch(mcp_server=True, share=True)
 
1
# Standard library
import os
import json

# Third-party
import gradio as gr
from huggingface_hub import InferenceClient

# Load your tool prompts: maps tool display name -> system prompt text.
# NOTE(review): raises FileNotFoundError / json.JSONDecodeError at import time
# if the file is missing or malformed — acceptable for a Space entry point.
with open("system_prompts.json", encoding="utf-8") as f:
    SYSTEM_PROMPTS = json.load(f)

# Shared inference client; model id and auth token come from the environment
# (HF_MODEL / HF_TOKEN), so no credentials are hard-coded here.
client = InferenceClient(model=os.getenv("HF_MODEL"), token=os.getenv("HF_TOKEN"))
10
 
11
  def run_model(prompt: str, text: str) -> str:
 
16
  )
17
  return resp.choices[0].message.content
18
 
19
+ # --- Define one named function per tool prompt ---
20
def ecolinguistic_analysis(text: str) -> str:
    """Ecolinguistic Analysis"""
    # Delegate to the shared runner, binding this tool's system prompt inline.
    return run_model(SYSTEM_PROMPTS["Ecolinguistic Analysis"], text)
24
+
25
def narrative_detection(text: str) -> str:
    """Narrative Detection"""
    # Delegate to the shared runner, binding this tool's system prompt inline.
    return run_model(SYSTEM_PROMPTS["Narrative Detection"], text)
29
+
30
def critical_discourse_analysis(text: str) -> str:
    """Critical Discourse Analysis"""
    # Delegate to the shared runner, binding this tool's system prompt inline.
    return run_model(SYSTEM_PROMPTS["Critical Discourse Analysis"], text)
34
+
35
def sfl_analysis(text: str) -> str:
    """SFL Analysis"""
    # Delegate to the shared runner, binding this tool's system prompt inline.
    return run_model(SYSTEM_PROMPTS["SFL Analysis"], text)
39
+
40
def ecosophy_scoring(text: str) -> str:
    """Ecosophy Scoring"""
    # Delegate to the shared runner, binding this tool's system prompt inline.
    return run_model(SYSTEM_PROMPTS["Ecosophy Scoring"], text)
44
+
45
# --- Build the Gradio interface with separate interfaces per tool ---
# Each Interface wraps one tool function; the docstring doubles as the
# on-screen description (and as the MCP tool description).
iface1 = gr.Interface(fn=ecolinguistic_analysis, inputs="text", outputs="text",
                      title="Ecolinguistic Analysis", description=ecolinguistic_analysis.__doc__)
iface2 = gr.Interface(fn=narrative_detection, inputs="text", outputs="text",
                      title="Narrative Detection", description=narrative_detection.__doc__)
iface3 = gr.Interface(fn=critical_discourse_analysis, inputs="text", outputs="text",
                      title="Critical Discourse Analysis", description=critical_discourse_analysis.__doc__)
iface4 = gr.Interface(fn=sfl_analysis, inputs="text", outputs="text",
                      title="SFL Analysis", description=sfl_analysis.__doc__)
iface5 = gr.Interface(fn=ecosophy_scoring, inputs="text", outputs="text",
                      title="Ecosophy Scoring", description=ecosophy_scoring.__doc__)

demo = gr.TabbedInterface(
    interface_list=[iface1, iface2, iface3, iface4, iface5],
    # Name the tabs explicitly in the same order as interface_list.
    # Deriving them from list(SYSTEM_PROMPTS.keys()) silently mislabels the
    # tabs whenever the JSON's key order differs from the order above.
    tab_names=[
        "Ecolinguistic Analysis",
        "Narrative Detection",
        "Critical Discourse Analysis",
        "SFL Analysis",
        "Ecosophy Scoring",
    ],
)
61
 
if __name__ == "__main__":
    # Serve the app as an MCP server and open a public share link.
    demo.launch(mcp_server=True, share=True)