|
import streamlit as st |
|
import transformers |
|
from transformers import pipeline, TokenClassificationPipeline, BertForTokenClassification , AutoTokenizer |
|
|
|
|
|
# Biomedical NER demo: load a BioBERT tokenizer + a fine-tuned token-classification
# head, then run named-entity recognition on user-entered text and render the
# raw pipeline output as JSON.

tokenizer = AutoTokenizer.from_pretrained(
    "dmis-lab/biobert-large-cased-v1.1",
    truncation=True,
    padding=True,
    model_max_length=512,  # BERT's positional-embedding limit; avoids overflow on long inputs
)

model_checkpoint = BertForTokenClassification.from_pretrained("dexay/Ner2HgF")

token_classifier = pipeline(
    "token-classification",
    tokenizer=tokenizer,
    model=model_checkpoint,
)

# BUG FIX: the original used st.text(), which only *displays* static text and
# returns None — st.text_input() is the widget that collects user input.
x = st.text_input("enter text")

# BUG FIX: the original tested an undefined name `text` (NameError at runtime);
# the widget value is bound to `x`.
if x:
    out = token_classifier(x)
    st.json(out)
|
|
|
|