# aisa/app.py
# Text moderation demo with English/Tamil messages and emoji feedback
import torch
import gradio as gr
from transformers import pipeline
# Load the model
moderator = pipeline("text-classification", model="KoalaAI/Text-Moderation")
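# The text-classification pipeline returns a list with the top prediction,
# e.g. [{'label': 'OK', 'score': 0.98}] (example values for illustration only).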
# Classify the input text and build a color-coded HTML result
def moderate_text(input_text):
    result = moderator(input_text)
    label = result[0]['label']
    score = round(result[0]['score'] * 100, 2)

    # Set color, emoji and message.
    # Note: KoalaAI/Text-Moderation predicts moderation category labels
    # ("OK" for safe text, otherwise a category such as "H" for hate or
    # "S" for sexual content), not literal "toxic"/"not-toxic" strings.
    if label != "OK":
        color = "#FF4C4C"  # Bright red
        emoji = "😑"
        message = f"⚠️ Toxic content detected ({label})"
    elif score >= 50:  # confident "OK" prediction (50% is an arbitrary cutoff)
        color = "#4CAF50"  # Green
        emoji = "😊"
        message = "✅ Content is safe"
    else:
        color = "#FFD700"  # Gold for unsure
        emoji = "😐"
        message = "⚠️ Uncertain classification"

    # HTML-formatted response
    html_output = f"""
    <div style='padding:1em;border-radius:10px;background-color:{color};color:white;font-weight:bold;font-size:16px'>
        {emoji} {message} <br>
        Confidence Score: {score}%
    </div>
    """
    return html_output
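# Quick local sanity check (hypothetical input, output truncated):
# print(moderate_text("Have a nice day!"))  # -> "<div ...>😊 ✅ Content is safe ..."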
# Gradio interface
demo = gr.Interface(
fn=moderate_text,
inputs="text",
outputs=gr.HTML(),
title="AISA - Text Moderation",
description="Enter your message in **English or Tamil** to check if it's safe or toxic. 😊"
)
demo.launch()
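# Note: demo.launch() serves the app locally and on Hugging Face Spaces alike;
# when running locally, launch(share=True) creates a temporary public link.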