import streamlit as st
# AutoModelWithHeads is provided by the adapter-transformers fork of Hugging Face
# transformers (pip install adapter-transformers), which is imported as "transformers"
from transformers import AutoModelWithHeads, AutoTokenizer
import torch

st.title("Adapter Transformers for Text Classification")

# Streamlit reruns this script on every interaction, so cache the model and tokenizer
# with st.cache_resource to avoid reloading them on each rerun.
@st.cache_resource
def load_model():
    model = AutoModelWithHeads.from_pretrained("bert-base-uncased")
    tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")

    # Register a fresh (randomly initialized) adapter and make it the active one
    adapter_name = "my_adapter"
    model.add_adapter(adapter_name)
    model.train_adapter(adapter_name)  # freezes the base weights; only relevant when fine-tuning
    model.set_active_adapters(adapter_name)

    # Attach a two-label classification head tied to the adapter name
    model.add_classification_head(adapter_name, num_labels=2)

    model.eval()  # inference only: disable dropout so predictions are deterministic
    return model, tokenizer


model, tokenizer = load_model()

input_text = st.text_input("Enter text for classification:", "Steve Jobs founded Apple")

if input_text:
    # Tokenize the input for BERT
    inputs = tokenizer(input_text, return_tensors="pt", truncation=True, padding=True, max_length=512)

    # Forward pass without gradient tracking
    with torch.no_grad():
        outputs = model(**inputs)
    logits = outputs.logits
    predicted_class = logits.argmax(dim=-1).item()

    # Map the predicted class index to a human-readable label
    if predicted_class == 0:
        st.write("Prediction: Negative")
    else:
        st.write("Prediction: Positive")
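
# Note: the adapter "my_adapter" and its classification head are freshly initialized above,
# so the Positive/Negative output is essentially arbitrary until the adapter is trained.
# A minimal sketch of loading a pre-trained sentiment adapter instead (inside load_model),
# assuming the adapter-transformers AdapterHub API; the adapter identifier and config name
# below are illustrative and should be checked against AdapterHub before use:
#
#     adapter_name = model.load_adapter("sentiment/sst-2@ukp", config="pfeiffer")
#     model.set_active_adapters(adapter_name)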