import torch
import gradio as gr
from transformers import pipeline

# Load the moderation model
moderator = pipeline("text-classification", model="KoalaAI/Text-Moderation")

# Classify the input and build a color-coded HTML response
def moderate_text(input_text):
    result = moderator(input_text)
    label = result[0]['label']
    score = round(result[0]['score'] * 100, 2)

    # Set color and emoji based on the predicted label
    if label == "toxic":
        color = "#FF4C4C"  # Bright red
        emoji = "😡"
        message = "⚠️ Toxic content detected"
    elif label == "not-toxic":
        color = "#4CAF50"  # Green
        emoji = "😊"
        message = "✅ Content is safe"
    else:
        color = "#FFD700"  # Gold for uncertain labels
        emoji = "😐"
        message = "⚠️ Uncertain classification"

    # HTML-formatted response; the container div uses the chosen color as its background
    html_output = f"""
    <div style="background-color: {color}; color: white; padding: 12px; border-radius: 8px; font-size: 18px;">
        {emoji} {message}<br>
        Confidence Score: {score}%
    </div>
    """
    return html_output

# Gradio interface
demo = gr.Interface(
    fn=moderate_text,
    inputs="text",
    outputs=gr.HTML(),
    title="AISA - Text Moderation",
    description="Enter your message in **English or Tamil** to check if it's safe or toxic. 😊"
)

demo.launch()