# babel_machine/interfaces/ontolisst.py
import os
import shutil

import gradio as gr
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

from label_dicts import ONTOLISST_LABEL_NAMES

from .utils import is_disk_full, release_model

HF_TOKEN = os.environ["hf_read"]  # HF read token from the environment (e.g. a Space secret)

languages = [
    "English",
]
# --- DEBUG ---

def convert_size(size):
    """Format a byte count as a human-readable string."""
    for unit in ["B", "KB", "MB", "GB", "TB", "PB"]:
        if size < 1024:
            return f"{size:.2f} {unit}"
        size /= 1024
    return f"{size:.2f} EB"  # fall-through so very large sizes still return a string


def get_disk_space(path="/"):
    """Return total/used/free space of the filesystem containing `path`."""
    total, used, free = shutil.disk_usage(path)
    return {
        "Total": convert_size(total),
        "Used": convert_size(used),
        "Free": convert_size(free),
    }
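# Example (hypothetical values):
#   get_disk_space("/data")
#   -> {"Total": "150.00 GB", "Used": "42.17 GB", "Free": "107.83 GB"}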
# ---
def build_huggingface_path(language: str):
    # ONTOLISST currently ships a single English model, so the language
    # argument is accepted for consistency with the other interfaces but
    # not used here.
    return "poltextlab/xlm-roberta-large_ontolisst_v1"
def predict(text, model_id, tokenizer_id):
    device = torch.device("cpu")

    # Load the JIT-traced model (see the hypothetical tracing sketch above).
    jit_model_path = f"/data/jit_models/{model_id.replace('/', '_')}.pt"
    model = torch.jit.load(jit_model_path).to(device)
    model.eval()

    # The tokenizer is still a regular Hugging Face tokenizer.
    tokenizer = AutoTokenizer.from_pretrained(tokenizer_id)

    # Tokenize the input: no padding, truncated to 256 tokens.
    inputs = tokenizer(
        text,
        max_length=256,
        truncation=True,
        padding="do_not_pad",
        return_tensors="pt",
    )
    inputs = {k: v.to(device) for k, v in inputs.items()}

    with torch.no_grad():
        output = model(inputs["input_ids"], inputs["attention_mask"])
        print(output)  # debug
        logits = output["logits"]

    release_model(model, model_id)

    probs = torch.nn.functional.softmax(logits, dim=1).cpu().numpy().flatten()
    predicted_class_id = probs.argmax()
    # Remap raw model classes: 4 -> 2, 5 -> 1; every other class collapses to 0.
    predicted_class_id = {4: 2, 5: 1}.get(predicted_class_id, 0)
    output_pred = ONTOLISST_LABEL_NAMES.get(predicted_class_id, predicted_class_id)
    output_info = f'<p style="text-align: center; display: block">Prediction was made using the <a href="https://huggingface.co/{model_id}">{model_id}</a> model.</p>'
    return output_pred, output_info
def predict_cap(text, language):
    model_id = build_huggingface_path(language)
    tokenizer_id = "xlm-roberta-large"

    # Crude disk housekeeping: if the persistent volume is full, drop cached
    # model weights so the next download can succeed.
    if is_disk_full():
        os.system("rm -rf /data/models*")
        os.system("rm -r ~/.cache/huggingface/hub")

    return predict(text, model_id, tokenizer_id)
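# Example (hypothetical input text):
#   predict_cap("The court ruled on the pension reform bill.", "English")
#   -> (predicted label, HTML provenance string)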
demo = gr.Interface(
    title="ONTOLISST Babel Demo",
    fn=predict_cap,
    inputs=[
        gr.Textbox(lines=6, label="Input"),
        gr.Dropdown(languages, label="Language", value=languages[0]),
    ],
    outputs=[gr.Label(num_top_classes=3, label="Output"), gr.Markdown()],
)
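# Local smoke test only; when deployed as a Space, `demo` is served by the
# app's entry point rather than launched here.
if __name__ == "__main__":
    demo.launch()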