Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -1,13 +1,11 @@
|
|
1 |
-
import os
|
2 |
-
import json
|
3 |
import gradio as gr
|
4 |
from huggingface_hub import InferenceClient
|
5 |
|
6 |
-
# Load prompts
|
7 |
with open("system_prompts.json", encoding="utf-8") as f:
|
8 |
SYSTEM_PROMPTS = json.load(f)
|
9 |
|
10 |
-
# Initialize InferenceClient
|
11 |
client = InferenceClient(model=os.getenv("HF_MODEL"), token=os.getenv("HF_TOKEN"))
|
12 |
|
13 |
def run_model(prompt: str, text: str) -> str:
|
@@ -18,22 +16,48 @@ def run_model(prompt: str, text: str) -> str:
|
|
18 |
)
|
19 |
return resp.choices[0].message.content
|
20 |
|
21 |
-
#
|
22 |
-
|
23 |
-
|
24 |
-
|
25 |
-
|
26 |
-
|
27 |
-
|
28 |
-
|
29 |
-
|
30 |
-
|
31 |
-
|
32 |
-
|
33 |
-
|
34 |
-
|
35 |
-
|
36 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
37 |
|
38 |
if __name__ == "__main__":
|
39 |
demo.launch(mcp_server=True, share=True)
|
|
|
# One import per line, grouped stdlib / third-party (PEP 8).
import json
import os

import gradio as gr
from huggingface_hub import InferenceClient

# Load your tool prompts: a JSON object mapping tool name -> system prompt text.
# Each analysis function below indexes SYSTEM_PROMPTS by its tool's name.
with open("system_prompts.json", encoding="utf-8") as f:
    SYSTEM_PROMPTS = json.load(f)

# Model id and API token come from the environment (Space secrets).
# NOTE(review): if HF_MODEL/HF_TOKEN are unset, os.getenv returns None and the
# client falls back to its defaults -- confirm that is intended.
client = InferenceClient(model=os.getenv("HF_MODEL"), token=os.getenv("HF_TOKEN"))
10 |
|
11 |
def run_model(prompt: str, text: str) -> str:
|
|
|
16 |
)
|
17 |
return resp.choices[0].message.content
|
18 |
|
19 |
+
# --- Define one named function per tool prompt ---
|
def ecolinguistic_analysis(text: str) -> str:
    """Ecolinguistic Analysis"""
    # Docstring doubles as the tool description (read via __doc__ at runtime).
    # Look up this tool's system prompt and delegate to the shared runner.
    return run_model(SYSTEM_PROMPTS["Ecolinguistic Analysis"], text)
24 |
+
|
def narrative_detection(text: str) -> str:
    """Narrative Detection"""
    # Docstring doubles as the tool description (read via __doc__ at runtime).
    # Look up this tool's system prompt and delegate to the shared runner.
    return run_model(SYSTEM_PROMPTS["Narrative Detection"], text)
29 |
+
|
def critical_discourse_analysis(text: str) -> str:
    """Critical Discourse Analysis"""
    # Docstring doubles as the tool description (read via __doc__ at runtime).
    # Look up this tool's system prompt and delegate to the shared runner.
    return run_model(SYSTEM_PROMPTS["Critical Discourse Analysis"], text)
34 |
+
|
def sfl_analysis(text: str) -> str:
    """SFL Analysis"""
    # Docstring doubles as the tool description (read via __doc__ at runtime).
    # Look up this tool's system prompt and delegate to the shared runner.
    return run_model(SYSTEM_PROMPTS["SFL Analysis"], text)
39 |
+
|
def ecosophy_scoring(text: str) -> str:
    """Ecosophy Scoring"""
    # Docstring doubles as the tool description (read via __doc__ at runtime).
    # Look up this tool's system prompt and delegate to the shared runner.
    return run_model(SYSTEM_PROMPTS["Ecosophy Scoring"], text)
44 |
+
|
# --- Build the Gradio interface with separate interfaces per tool ---
# Each tool's title is identical to its docstring, so both the title and the
# description are taken from __doc__; the unpacking keeps the iface1..iface5
# names used elsewhere in the module.
_tool_fns = (
    ecolinguistic_analysis,
    narrative_detection,
    critical_discourse_analysis,
    sfl_analysis,
    ecosophy_scoring,
)
iface1, iface2, iface3, iface4, iface5 = [
    gr.Interface(fn=_fn, inputs="text", outputs="text",
                 title=_fn.__doc__, description=_fn.__doc__)
    for _fn in _tool_fns
]
56 |
+
|
demo = gr.TabbedInterface(
    interface_list=[iface1, iface2, iface3, iface4, iface5],
    # Derive each tab label from its interface's own function docstring so the
    # labels are guaranteed to match the tab contents. The previous
    # tab_names=list(SYSTEM_PROMPTS.keys()) trusted the JSON file's key order
    # to match this hard-coded interface order, which could silently mislabel
    # the tabs if the file was reordered.
    tab_names=[
        ecolinguistic_analysis.__doc__,
        narrative_detection.__doc__,
        critical_discourse_analysis.__doc__,
        sfl_analysis.__doc__,
        ecosophy_scoring.__doc__,
    ],
)
61 |
|
if __name__ == "__main__":
    # mcp_server=True exposes each wrapped function as an MCP tool;
    # share=True requests a public share link. NOTE(review): share links are
    # ignored when running on HF Spaces -- confirm it's wanted for local runs.
    demo.launch(mcp_server=True, share=True)