dexay committed
Commit 1531882 · 1 Parent(s): 6db0306

Update app.py

Files changed (1):
  1. app.py (+3 -3)
app.py CHANGED
@@ -15,17 +15,17 @@ st.write("This tool lets you extract relation triples concerning interactions be
 st.write("It is the result of an end of studies project within ESI school and dedicated to biomedical researchers looking to extract precise information about the subject without digging into long publications.")
 
 
-@st.cache(allow_output_mutation = True)
+@st.cache(hash_funcs={tokenizers.Tokenizer: lambda _: None, tokenizers.AddedToken: lambda _: None})
 def load_tokenizer():
     return AutoTokenizer.from_pretrained("dmis-lab/biobert-large-cased-v1.1", truncation = True, padding=True, model_max_length=512,)
 
 tokenizer = load_tokenizer()
 
-@st.cache(allow_output_mutation = True)
+@st.cache(hash_funcs={tokenizers.Tokenizer: lambda _: None, tokenizers.AddedToken: lambda _: None})
 def load_modelNER(tokenizer):
     model_checkpoint = BertForTokenClassification.from_pretrained("dexay/Ner2HgF", )
     return pipeline("token-classification", tokenizer = tokenizer,model=model_checkpoint, )
-@st.cache(allow_output_mutation = True)
+@st.cache(hash_funcs={tokenizers.Tokenizer: lambda _: None, tokenizers.AddedToken: lambda _: None})
 def load_modelRE(tokenizer):
     model_re = AutoModelForSequenceClassification.from_pretrained("dexay/reDs3others", )
     return pipeline("text-classification", tokenizer = tokenizer,model=model_re, )