# Emotion detection demo: a fine-tuned BERT classifier served through a Gradio UI.
# Requires: pip install torch transformers gradio

from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch
import gradio as gr

# Load the tokenizer and the fine-tuned emotion classifier from the Hugging Face Hub.
# The first call downloads and caches the model weights.
model_name = "nateraw/bert-base-uncased-emotion"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
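
# Optional (not in the original script): run on GPU when available. The tokenized
# inputs inside predict_emotion would then need .to(device) as well.
# device = "cuda" if torch.cuda.is_available() else "cpu"
# model.to(device)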

# Output classes, in the index order used by the model's classification head.
labels = ['sadness', 'joy', 'love', 'anger', 'fear', 'surprise']


def predict_emotion(text):
    # Tokenize the input and run a single forward pass with gradients disabled.
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True)
    with torch.no_grad():
        outputs = model(**inputs)
    # Turn the raw logits into probabilities and pick the highest-scoring class.
    probs = torch.nn.functional.softmax(outputs.logits, dim=1)
    pred_class = torch.argmax(probs, dim=1).item()
    emotion = labels[pred_class]
    return f"{emotion} ({probs[0][pred_class].item()*100:.2f}% confidence)"

# Wire the prediction function into a simple text-in/text-out Gradio interface.
interface = gr.Interface(
    fn=predict_emotion,
    inputs=gr.Textbox(lines=2, placeholder="Type something here..."),
    outputs="text",
    title="BERT-based Emotion Detection",
    description="A web app that uses a fine-tuned BERT model to detect emotions from text.",
)
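
# launch() serves the app locally (by default at http://127.0.0.1:7860);
# launch(share=True) additionally creates a temporary public URL.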
interface.launch()