import os
import json

import gradio as gr
from huggingface_hub import InferenceClient

# Load tool prompts
with open("system_prompts.json", encoding="utf-8") as f:
    SYSTEM_PROMPTS = json.load(f)

client = InferenceClient(model=os.getenv("HF_MODEL"), token=os.getenv("HF_TOKEN"))


def run_model(prompt: str, text: str) -> str:
    resp = client.chat.completions.create(
        messages=[
            {"role": "system", "content": prompt},
            {"role": "user", "content": text},
        ],
        max_tokens=512,
        temperature=0.3,
    )
    return resp.choices[0].message.content


# Define one named function per tool prompt
def ecolinguistic_analysis(text: str) -> str:
    """Ecolinguistic Analysis"""
    prompt = SYSTEM_PROMPTS["Ecolinguistic Analysis"]
    return run_model(prompt, text)


def narrative_detection(text: str) -> str:
    """Narrative Detection"""
    prompt = SYSTEM_PROMPTS["Narrative Detection"]
    return run_model(prompt, text)


def critical_discourse_analysis(text: str) -> str:
    """Critical Discourse Analysis"""
    prompt = SYSTEM_PROMPTS["Critical Discourse Analysis"]
    return run_model(prompt, text)


def sfl_analysis(text: str) -> str:
    """SFL Analysis"""
    prompt = SYSTEM_PROMPTS["SFL Analysis"]
    return run_model(prompt, text)


def ecosophy_scoring(text: str) -> str:
    """Ecosophy Scoring"""
    prompt = SYSTEM_PROMPTS["Ecosophy Scoring"]
    return run_model(prompt, text)


# Build the Gradio interface
iface1 = gr.Interface(fn=ecolinguistic_analysis, inputs="text", outputs="text",
                      title="Ecolinguistic Analysis", description=ecolinguistic_analysis.__doc__)
iface2 = gr.Interface(fn=narrative_detection, inputs="text", outputs="text",
                      title="Narrative Detection", description=narrative_detection.__doc__)
iface3 = gr.Interface(fn=critical_discourse_analysis, inputs="text", outputs="text",
                      title="Critical Discourse Analysis", description=critical_discourse_analysis.__doc__)
iface4 = gr.Interface(fn=sfl_analysis, inputs="text", outputs="text",
                      title="SFL Analysis", description=sfl_analysis.__doc__)
iface5 = gr.Interface(fn=ecosophy_scoring, inputs="text", outputs="text",
                      title="Ecosophy Scoring", description=ecosophy_scoring.__doc__)

demo = gr.TabbedInterface(
    interface_list=[iface1, iface2, iface3, iface4, iface5],
    tab_names=list(SYSTEM_PROMPTS.keys()),
)

if __name__ == "__main__":
    demo.launch(mcp_server=True, share=True)
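
# Note: system_prompts.json is assumed to map each tab name to its system
# prompt string; the keys must match the names used above. A minimal sketch
# of the expected shape (placeholder values, not the project's actual prompts):
#
# {
#   "Ecolinguistic Analysis": "...",
#   "Narrative Detection": "...",
#   "Critical Discourse Analysis": "...",
#   "SFL Analysis": "...",
#   "Ecosophy Scoring": "..."
# }
#
# HF_MODEL and HF_TOKEN must be set in the environment (for example as
# Hugging Face Space secrets) before the app is launched.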