# babel_machine / interfaces / emotion9.py
# Author: kovacsvi (commit ad7818c, "sketchy variable")
import gradio as gr
import os
import torch
import numpy as np
from transformers import AutoModelForSequenceClassification
from transformers import AutoTokenizer
from huggingface_hub import HfApi
from label_dicts import EMOTION9_LABEL_NAMES, EMOTION9_V2_LABEL_NAMES
from .utils import is_disk_full, release_model
# Read-only HF access token; raises KeyError at import time if the
# "hf_read" secret is not configured for this Space.
HF_TOKEN = os.environ["hf_read"]
# Languages offered in the UI dropdown (each maps to a fine-tuned checkpoint).
languages = [
    "Czech", "English", "German", "Hungarian", "Polish", "Slovak"
]
# Display name -> internal domain key (only one domain is supported here).
domains = {
    "parliamentary speech": "parlspeech",
}
def build_huggingface_path(language: str):
    """Return the HF repo id of the emotions9 model for *language*.

    Languages with a retrained "-v2" checkpoint get that suffix appended.
    """
    lang = language.lower()
    repo_id = f"poltextlab/xlm-roberta-large-pooled-{lang}-emotions9"
    v2_languages = {"slovak", "polish", "czech", "hungarian"}
    return f"{repo_id}-v2" if lang in v2_languages else repo_id
def predict(text, model_id, tokenizer_id):
    """Classify *text* with the JIT-traced emotion model *model_id*.

    Parameters:
        text: input string to classify.
        model_id: HF repo id; used to locate the traced model on disk and
            to pick the matching label set.
        tokenizer_id: HF repo id of the tokenizer (loaded normally).

    Returns:
        (output_pred, output_info) where output_pred maps
        "[index] label" -> probability (descending) and output_info is an
        HTML snippet naming the model used.
    """
    device = torch.device("cpu")

    # Load the pre-traced TorchScript model cached on the persistent disk.
    jit_model_path = f"/data/jit_models/{model_id.replace('/', '_')}.pt"
    model = torch.jit.load(jit_model_path).to(device)
    model.eval()

    # Tokenizer is still a regular HF tokenizer (not traced).
    tokenizer = AutoTokenizer.from_pretrained(tokenizer_id)

    inputs = tokenizer(
        text,
        max_length=64,
        truncation=True,
        padding=True,
        return_tensors="pt"
    )
    inputs = {k: v.to(device) for k, v in inputs.items()}

    with torch.no_grad():
        # Traced model takes positional tensors and returns a dict-like
        # output containing "logits".
        output = model(inputs["input_ids"], inputs["attention_mask"])
        logits = output["logits"]
    # Removed leftover debug print(output) that spammed stdout per request.

    release_model(model, model_id)

    probs = torch.nn.functional.softmax(logits, dim=1).cpu().numpy().flatten()

    # "-v2" checkpoints use a different label ordering.
    if "v2" in model_id:
        label_names = EMOTION9_V2_LABEL_NAMES
    else:
        label_names = EMOTION9_LABEL_NAMES

    output_pred = {f"[{i}] {label_names[i]}": probs[i] for i in np.argsort(probs)[::-1]}
    output_info = f'<p style="text-align: center; display: block">Prediction was made using the <a href="https://huggingface.co/{model_id}">{model_id}</a> model.</p>'
    return output_pred, output_info
def predict_e6(text, language, domain):
    """Gradio entry point: resolve the model for *language*, then classify *text*.

    *domain* is accepted from the UI but does not influence model choice here.
    """
    tokenizer_id = "xlm-roberta-large"
    model_id = build_huggingface_path(language)
    # Best-effort cleanup of cached weights when the persistent disk is full.
    if is_disk_full():
        os.system('rm -rf /data/models*')
        os.system('rm -r ~/.cache/huggingface/hub')
    return predict(text, model_id, tokenizer_id)
# Gradio UI: free-text input plus language/domain dropdowns, feeding
# predict_e6; outputs a top-5 label widget and an HTML/Markdown info line.
demo = gr.Interface(
    title="Emotions (9) Babel Demo",
    fn=predict_e6,
    inputs=[gr.Textbox(lines=6, label="Input"),
            # Default language is "English" (index 1 of the languages list).
            gr.Dropdown(languages, label="Language", value=languages[1]),
            gr.Dropdown(domains.keys(), label="Domain", value=list(domains.keys())[0])],
    outputs=[gr.Label(num_top_classes=5, label="Output"), gr.Markdown()])