import gradio as gr
from datasets import load_dataset
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch
# Check if GPU is available
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Load the IMDb dataset
dataset = load_dataset('imdb', split='test[:1%]') # Load a small portion for testing
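# Note: IMDb's full test split has 25,000 reviews; the 1% slice above keeps
# startup fast while still providing real examples to sanity-check against.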
# Initialize the tokenizer and model
tokenizer = AutoTokenizer.from_pretrained('distilbert-base-uncased')
model = AutoModelForSequenceClassification.from_pretrained('distilbert-base-uncased', num_labels=2)
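# Note: this base checkpoint ships without a fine-tuned classifier, so
# transformers attaches a freshly initialized 2-label head (it logs a warning
# to that effect). Predictions are effectively random until fine-tuning.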
model.to(device)
model.eval()  # put the model in inference mode (disables dropout)
# Function to classify sentiment
def classify_text(text):
    # Tokenize the input and move tensors to the model's device
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True).to(device)
    # No gradients are needed for inference
    with torch.no_grad():
        outputs = model(**inputs)
    prediction = torch.argmax(outputs.logits, dim=-1).item()
    return "Positive" if prediction == 1 else "Negative"
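# Optional smoke test (a minimal sanity check, not required by the Gradio
# app): run the classifier over a few examples from the loaded split to
# confirm the tokenizer/model/device wiring works end to end. With the
# untrained head above, the printed predictions are arbitrary.
for sample in dataset.select(range(3)):
    print(f"true label={sample['label']}  predicted={classify_text(sample['text'])}")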
# Set up the Gradio interface
iface = gr.Interface(fn=classify_text, inputs="text", outputs="text")
iface.launch()