import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

# Load the fine-tuned customer-service classification model and its tokenizer
# from the Hugging Face Hub.
model_name = "ElizabethSrgh/customer-service-multitask"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
model.eval()  # inference only: disable dropout and other training-time behaviour

# Map each predicted class id to its combined topic/sentiment label.
label_map = {
    0: "Complaint - Negative",
    1: "Inquiry - Neutral",
    2: "Request - Positive",
}

def predict(text):
    """Classify a customer-service message and return its topic/sentiment label."""
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True)
    with torch.no_grad():
        outputs = model(**inputs)
    logits = outputs.logits
    predicted_class_id = torch.argmax(logits, dim=1).item()
    return label_map.get(predicted_class_id, "Unknown")
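
# Usage sketch (illustrative assumption, not part of the original script): predict()
# can be called directly to sanity-check the model outside the Gradio UI. The sample
# sentence below is invented.
#
#     predict("My order arrived damaged and nobody has answered my emails.")
#     # -> one of the labels in label_map, e.g. "Complaint - Negative"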

# Build a simple Gradio interface around the prediction function.
interface = gr.Interface(
    fn=predict,
    inputs=gr.Textbox(lines=4, label="Enter the conversation text"),
    outputs=gr.Textbox(label="Prediction Result"),
    title="Customer Service Classification",
    description="Enter text to predict its topic and sentiment.",
)

if __name__ == "__main__":
    interface.launch()