File size: 2,487 Bytes
3ffa4a4
512a568
d56584f
e40a764
b0f328f
c0722e5
e40a764
 
c0722e5
d56584f
 
 
4dcc3bb
52e9b0b
 
d56584f
 
512a568
b0f328f
3ffa4a4
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
b0f328f
3ffa4a4
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4dcc3bb
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
import os, json
import gradio as gr
from huggingface_hub import InferenceClient

# Load the per-tool system prompts from disk.
# assumes system_prompts.json maps tool title -> system prompt string
# (the keys are looked up verbatim by the tool functions below).
with open("system_prompts.json", encoding="utf-8") as f:
    SYSTEM_PROMPTS = json.load(f)

# Hugging Face Inference API client; model id and API token are read from the
# environment. NOTE(review): os.getenv returns None when unset — confirm the
# deployment always provides HF_MODEL and HF_TOKEN.
client = InferenceClient(model=os.getenv("HF_MODEL"), token=os.getenv("HF_TOKEN"))

def run_model(prompt: str, text: str) -> str:
    """Run one chat completion: *prompt* as the system message, *text* as the user message.

    Returns the assistant's reply text from the first (only) choice.
    """
    messages = [
        {"role": "system", "content": prompt},
        {"role": "user", "content": text},
    ]
    completion = client.chat.completions.create(
        messages=messages,
        max_tokens=512,
        temperature=0.3,
    )
    return completion.choices[0].message.content

# Define one named function per tool prompt
def ecolinguistic_analysis(text: str) -> str:
    """Ecolinguistic Analysis"""
    # Look up this tool's system prompt by its exact title key.
    return run_model(SYSTEM_PROMPTS["Ecolinguistic Analysis"], text)

def narrative_detection(text: str) -> str:
    """Narrative Detection"""
    # Look up this tool's system prompt by its exact title key.
    return run_model(SYSTEM_PROMPTS["Narrative Detection"], text)

def critical_discourse_analysis(text: str) -> str:
    """Critical Discourse Analysis"""
    # Look up this tool's system prompt by its exact title key.
    return run_model(SYSTEM_PROMPTS["Critical Discourse Analysis"], text)

def sfl_analysis(text: str) -> str:
    """SFL Analysis"""
    # Look up this tool's system prompt by its exact title key.
    return run_model(SYSTEM_PROMPTS["SFL Analysis"], text)

def ecosophy_scoring(text: str) -> str:
    """Ecosophy Scoring"""
    # Look up this tool's system prompt by its exact title key.
    return run_model(SYSTEM_PROMPTS["Ecosophy Scoring"], text)

# Build one Gradio sub-interface per analysis tool. Each function's docstring
# doubles as its human-readable description.
iface1 = gr.Interface(fn=ecolinguistic_analysis, inputs="text", outputs="text",
                      title="Ecolinguistic Analysis", description=ecolinguistic_analysis.__doc__)
iface2 = gr.Interface(fn=narrative_detection, inputs="text", outputs="text",
                      title="Narrative Detection", description=narrative_detection.__doc__)
iface3 = gr.Interface(fn=critical_discourse_analysis, inputs="text", outputs="text",
                      title="Critical Discourse Analysis", description=critical_discourse_analysis.__doc__)
iface4 = gr.Interface(fn=sfl_analysis, inputs="text", outputs="text",
                      title="SFL Analysis", description=sfl_analysis.__doc__)
iface5 = gr.Interface(fn=ecosophy_scoring, inputs="text", outputs="text",
                      title="Ecosophy Scoring", description=ecosophy_scoring.__doc__)

# Fix: tab_names was derived from SYSTEM_PROMPTS.keys(), whose order (and
# length) depends on the JSON file and is not guaranteed to match the manual
# interface order above — mismatched order mislabels tabs, and an extra/missing
# key breaks the interface/tab pairing. Name the tabs explicitly, aligned
# one-to-one with interface_list.
demo = gr.TabbedInterface(
    interface_list=[iface1, iface2, iface3, iface4, iface5],
    tab_names=[
        "Ecolinguistic Analysis",
        "Narrative Detection",
        "Critical Discourse Analysis",
        "SFL Analysis",
        "Ecosophy Scoring",
    ],
)

# Script entry point: serve the tabbed UI and expose the tools as an MCP server.
# NOTE(review): share=True opens a public Gradio tunnel — confirm that exposing
# this app beyond the local host is intended.
if __name__ == "__main__":
    demo.launch(mcp_server=True, share=True)