File size: 625 Bytes
83b4a34
1ce1c41
 
474a30c
c9917e4
83b4a34
1cf5bdb
474a30c
c9917e4
99b7e58
474a30c
 
 
 
99b7e58
 
 
 
83b4a34
 
99b7e58
83b4a34
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
import streamlit as st
import pickle
from tensorflow.keras.models import load_model
from transformers import AutoTokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences

# Load the trained toxicity classifier and its tokenizer once at startup.
model = load_model('tox_model.h5')
tokenizer = AutoTokenizer.from_pretrained('model')

text = st.text_area('Enter some text')

if text:
    # Tokenize to a plain Python list of ids. The original passed
    # return_tensors='pt' (a PyTorch tensor) into the TF/Keras pipeline;
    # pad_sequences expects a list of sequences, so wrap the ids in a list.
    input_ids = tokenizer.encode(text)
    test = pad_sequences([input_ids],
                         maxlen=50,
                         truncating='post',
                         padding='post')

    # BUG FIX: the original did `out[6] = model.predict(test)` with `out`
    # undefined, raising NameError on every prediction. Bind the result
    # directly and convert the ndarray to a JSON-serializable list for
    # st.json (ndarrays are not JSON-serializable).
    out = model.predict(test)
    st.json({'prediction': out.tolist()})