import torch
import gradio as gr
from transformers import AutoModelForSequenceClassification, AutoTokenizer

# Load the fine-tuned DistilBERT sentiment model and its tokenizer from the Hugging Face Hub
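# NOTE (assumption): the checkpoint is expected either to contain merged LoRA weights or to be
# loadable through the transformers PEFT integration; if loading fails, installing peft
# (`pip install peft`) or merging the adapters into the base model first may be required.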
model_name = "shukdevdatta123/twitter-distilbert-base-uncased-sentiment-analysis-lora-text-classification"
model = AutoModelForSequenceClassification.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)

# Map the model's output class indices to human-readable sentiment labels
id2label = {
    0: "Negative",
    1: "Positive"
}

# Run inference on a GPU when one is available, otherwise fall back to the CPU
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

# Classify a piece of text and return the predicted label together with its softmax confidence
def predict_sentiment(text):
    # Tokenize the input and move the tensors to the same device as the model
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True).to(device)

    # Forward pass without gradient tracking (inference only)
    with torch.no_grad():
        logits = model(**inputs).logits

    # Pick the highest-scoring class and look up its label
    predicted_class = torch.argmax(logits, dim=1).item()
    label = id2label[predicted_class]

    # Convert the logits to probabilities to report a confidence score
    probs = torch.nn.functional.softmax(logits, dim=1)
    confidence = probs[0][predicted_class].item()

    return f"{label} (Confidence: {confidence:.2f})"
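
# Quick sanity check outside the web UI (illustrative input, not part of the original app):
#     print(predict_sentiment("I love this new phone!"))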

# Build a simple Gradio UI around the prediction function
interface = gr.Interface(
    fn=predict_sentiment,
    inputs=gr.Textbox(lines=2, placeholder="Enter a sentence to analyze sentiment..."),
    outputs="text",
    title="Twitter Sentiment Classifier",
    description="This app uses a fine-tuned DistilBERT model with LoRA adapters to predict whether a tweet or sentence is Positive or Negative."
)

# Launch the web app; pass share=True to launch() for a temporary public URL (e.g. on Colab)
interface.launch()
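
# A minimal alternative sketch using the transformers pipeline API (assumes the same checkpoint
# loads directly; labels may surface as LABEL_0/LABEL_1 if the config lacks an id2label mapping):
#     from transformers import pipeline
#     classifier = pipeline("text-classification", model=model_name,
#                           device=0 if torch.cuda.is_available() else -1)
#     print(classifier("What a great day!"))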